Index: src/x64/macro-assembler-x64.cc |
diff --git a/src/x64/macro-assembler-x64.cc b/src/x64/macro-assembler-x64.cc |
index 837afe81bbd1eb2f1bd7ded219babe7aac76260b..96f7f37fcf5630e7917e530347b71b51b2d7f01d 100644 |
--- a/src/x64/macro-assembler-x64.cc |
+++ b/src/x64/macro-assembler-x64.cc |
@@ -1245,12 +1245,10 @@ void MacroAssembler::SmiAdd(Register dst, |
// No overflow checking. Use only when it's known that |
// overflowing is impossible. |
ASSERT(!dst.is(src2)); |
- if (dst.is(src1)) { |
- addq(dst, src2); |
- } else { |
+ if (!dst.is(src1)) { |
movq(dst, src1); |
- addq(dst, src2); |
} |
+ addq(dst, src2); |
Assert(no_overflow, "Smi addition overflow"); |
} |
@@ -1259,12 +1257,10 @@ void MacroAssembler::SmiSub(Register dst, Register src1, Register src2) { |
// No overflow checking. Use only when it's known that |
// overflowing is impossible (e.g., subtracting two positive smis). |
ASSERT(!dst.is(src2)); |
- if (dst.is(src1)) { |
- subq(dst, src2); |
- } else { |
+ if (!dst.is(src1)) { |
movq(dst, src1); |
- subq(dst, src2); |
} |
+ subq(dst, src2); |
Assert(no_overflow, "Smi subtraction overflow"); |
} |
@@ -1274,12 +1270,10 @@ void MacroAssembler::SmiSub(Register dst, |
const Operand& src2) { |
// No overflow checking. Use only when it's known that |
// overflowing is impossible (e.g., subtracting two positive smis). |
- if (dst.is(src1)) { |
- subq(dst, src2); |
- } else { |
+ if (!dst.is(src1)) { |
movq(dst, src1); |
- subq(dst, src2); |
} |
+ subq(dst, src2); |
Assert(no_overflow, "Smi subtraction overflow"); |
} |
@@ -1466,6 +1460,12 @@ SmiIndex MacroAssembler::SmiToNegativeIndex(Register dst, |
} |
+void MacroAssembler::AddSmiField(Register dst, const Operand& src) { |
+ addl(dst, Operand(src, kSmiShift / kBitsPerByte)); |
[Review comment] William Hesse, 2011/03/24 12:02:21:
    Add: ASSERT(kSmiShift % kBitsPerByte == 0);
[Reply] Lasse Reichstein, 2011/03/24 12:14:08:
    Done.
 |
+} |
+ |
+ |
+ |
void MacroAssembler::Move(Register dst, Register src) { |
if (!dst.is(src)) { |
movq(dst, src); |
@@ -2701,6 +2701,69 @@ void MacroAssembler::AllocateAsciiConsString(Register result, |
} |
+// Copy memory, byte-by-byte, from source to destination. Not optimized for |
+// long or aligned copies. The contents of scratch and length are destroyed. |
+// Source and destination are incremented by length. |
+// A simpler loop is faster on small copies, but 30% slower on large ones. |
+// The cld() instruction must have been emitted, to set the direction flag(), |
+// before calling this function. |
+void MacroAssembler::CopyBytes(Register destination, |
+ Register source, |
+ Register length, |
+ int min_length, |
+ Register scratch) { |
+ ASSERT(min_length >= 0); |
+ if (FLAG_debug_code) { |
+ cmpl(length, Immediate(min_length)); |
+ Assert(greater_equal, "Invalid min_length"); |
+ } |
+ Label loop, done, short_string, short_loop; |
+ // Experimentation shows that the short string loop is faster if length < 10. |
[Review comment] William Hesse, 2011/03/24 12:02:21:
    Comment disagrees with chosen constant.
[Reply] Lasse Reichstein, 2011/03/24 12:14:08:
    Comment removed. We should do tests to see what the best threshold is.
 |
+ const int kLongStringLimit = 20; |
+ if (min_length <= kLongStringLimit) { |
+ cmpl(length, Immediate(kLongStringLimit)); |
+ j(less_equal, &short_string); |
+ } |
+ |
+ ASSERT(source.is(rsi)); |
+ ASSERT(destination.is(rdi)); |
+ ASSERT(length.is(rcx)); |
+ |
+ // Because source is 8-byte aligned in our uses of this function, |
+ // we keep source aligned for the rep movs operation by copying the odd bytes |
+ // at the end of the ranges. |
+ movq(scratch, length); |
+ shrl(length, Immediate(3)); |
+ repmovsq(); |
+ // Move remaining bytes of length. |
+ andl(scratch, Immediate(0x7)); |
+ movq(length, Operand(source, scratch, times_1, -8)); |
+ movq(Operand(destination, scratch, times_1, -8), length); |
+ addq(destination, scratch); |
+ |
+ if (min_length <= kLongStringLimit) { |
+ jmp(&done); |
+ |
+ bind(&short_string); |
+ if (min_length == 0) { |
+ testl(length, length); |
+ j(zero, &done); |
+ } |
+ lea(scratch, Operand(destination, length, times_1, 0)); |
+ |
+ bind(&short_loop); |
+ movb(length, Operand(source, 0)); |
+ movb(Operand(destination, 0), length); |
+ incq(source); |
+ incq(destination); |
+ cmpq(destination, scratch); |
+ j(not_equal, &short_loop); |
+ |
+ bind(&done); |
+ } |
+} |
+ |
+ |
void MacroAssembler::LoadContext(Register dst, int context_chain_length) { |
if (context_chain_length > 0) { |
// Move up the chain of contexts to the context containing the slot. |