Index: src/arm/macro-assembler-arm.cc
diff --git a/src/arm/macro-assembler-arm.cc b/src/arm/macro-assembler-arm.cc
index 7628f23a5be9f137b24bc184bc7c2ba664f8ef5c..e1ff88e93f1bb8ca64809357203f0dfa23c7d17e 100644
--- a/src/arm/macro-assembler-arm.cc
+++ b/src/arm/macro-assembler-arm.cc
@@ -3186,37 +3186,52 @@ void MacroAssembler::CopyFields(Register dst,
 }
 }
-
 void MacroAssembler::CopyBytes(Register src,
                                Register dst,
                                Register length,
                                Register scratch) {
-  Label align_loop, align_loop_1, word_loop, byte_loop, byte_loop_1, done;
+  Label word_loop, byte_loop, byte_loop_1, done;
+
Benedikt Meurer
2013/09/03 11:54:35
Please add ASSERTs that src, dst, length and scratch ...
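For reference, the requested guards could look roughly like this at the top of CopyBytes. This is only an illustrative sketch in the style of V8's existing debug checks (ASSERT and Register::is()), not code from the patch under review:

  // Sketch only: debug-time checks that the four registers are pairwise
  // distinct, since CopyBytes advances src/dst and clobbers length and
  // scratch independently.
  ASSERT(!src.is(dst) && !src.is(length) && !src.is(scratch));
  ASSERT(!dst.is(length) && !dst.is(scratch));
  ASSERT(!length.is(scratch));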
+ cmp(length, Operand(8));
+ b(lt, &byte_loop);
// Align src before copying in word size chunks.
- bind(&align_loop);
- cmp(length, Operand::Zero());
- b(eq, &done);
- bind(&align_loop_1);
tst(src, Operand(kPointerSize - 1));
Rodolph Perfetta
2013/09/03 14:47:07
This code could be improved further, see the tail e...
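The rest of Rodolph's suggestion is cut off above. One way the alignment prologue could avoid re-testing src after every byte is to predicate the leading byte copies on the low bits of src, using the conditional forms the ARM assembler already provides (this assumes kPointerSize == 4 and relies on the length >= 8 fast-path check above). A rough illustration of that idea, not the reviewer's actual proposal:

  // Sketch only: copy one byte if src is odd, then two more if src is
  // 2 mod 4, leaving src word aligned without looping back.
  tst(src, Operand(1));
  ldrb(scratch, MemOperand(src, 1, PostIndex), ne);
  strb(scratch, MemOperand(dst, 1, PostIndex), ne);
  sub(length, length, Operand(1), LeaveCC, ne);
  tst(src, Operand(2));
  ldrb(scratch, MemOperand(src, 1, PostIndex), ne);
  strb(scratch, MemOperand(dst, 1, PostIndex), ne);
  ldrb(scratch, MemOperand(src, 1, PostIndex), ne);
  strb(scratch, MemOperand(dst, 1, PostIndex), ne);
  sub(length, length, Operand(2), LeaveCC, ne);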
b(eq, &word_loop);
+
+ ldrb(scratch, MemOperand(src, 1, PostIndex));
+ sub(length, length, Operand(1));
+ tst(src, Operand(kPointerSize - 1));
+ strb(scratch, MemOperand(dst, 1, PostIndex));
+ b(eq, &word_loop);
+
ldrb(scratch, MemOperand(src, 1, PostIndex));
+ sub(length, length, Operand(1));
+ tst(src, Operand(kPointerSize - 1));
+ strb(scratch, MemOperand(dst, 1, PostIndex));
+ b(eq, &word_loop);
+
+ ldrb(scratch, MemOperand(src, 1, PostIndex));
+ sub(length, length, Operand(1));
strb(scratch, MemOperand(dst, 1, PostIndex));
- sub(length, length, Operand(1), SetCC);
- b(ne, &byte_loop_1);
// Copy bytes in word size chunks.
bind(&word_loop);
- if (emit_debug_code()) {
- tst(src, Operand(kPointerSize - 1));
- Assert(eq, kExpectingAlignmentForCopyBytes);
- }
- cmp(length, Operand(kPointerSize));
- b(lt, &byte_loop);
- ldr(scratch, MemOperand(src, kPointerSize, PostIndex));
+
if (CpuFeatures::IsSupported(UNALIGNED_ACCESSES)) {
+ ldr(scratch, MemOperand(src, kPointerSize, PostIndex));
+ sub(length, length, Operand(kPointerSize));
+ cmp(length, Operand(kPointerSize));
str(scratch, MemOperand(dst, kPointerSize, PostIndex));
} else {
+ if (emit_debug_code()) {
+ tst(src, Operand(kPointerSize - 1));
+ Assert(eq, kExpectingAlignmentForCopyBytes);
+ }
+ cmp(length, Operand(kPointerSize));
+ b(lt, &byte_loop);
+ ldr(scratch, MemOperand(src, kPointerSize, PostIndex));
+ sub(length, length, Operand(kPointerSize));
strb(scratch, MemOperand(dst, 1, PostIndex));
mov(scratch, Operand(scratch, LSR, 8));
strb(scratch, MemOperand(dst, 1, PostIndex));
@@ -3225,8 +3240,8 @@ void MacroAssembler::CopyBytes(Register src,
mov(scratch, Operand(scratch, LSR, 8));
strb(scratch, MemOperand(dst, 1, PostIndex));
}
- sub(length, length, Operand(kPointerSize));
- b(&word_loop);
+
+ b(ge, &word_loop);
// Copy the last bytes if any left.
bind(&byte_loop);
Rodolph Perfetta
2013/09/03 14:47:07
here too you could avoid the small loop.
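In the same spirit as the earlier remark, the trailing byte loop could be replaced by copies predicated on the low bits of length, since at most seven bytes remain on this path. A sketch of the idea only (not the reviewer's code), showing the 4-byte step; the 2-byte and 1-byte steps repeat the same pattern with Operand(2) and Operand(1):

  // Sketch only: copy four trailing bytes when bit 2 of length is set.
  tst(length, Operand(4));
  ldrb(scratch, MemOperand(src, 1, PostIndex), ne);
  strb(scratch, MemOperand(dst, 1, PostIndex), ne);
  ldrb(scratch, MemOperand(src, 1, PostIndex), ne);
  strb(scratch, MemOperand(dst, 1, PostIndex), ne);
  ldrb(scratch, MemOperand(src, 1, PostIndex), ne);
  strb(scratch, MemOperand(dst, 1, PostIndex), ne);
  ldrb(scratch, MemOperand(src, 1, PostIndex), ne);
  strb(scratch, MemOperand(dst, 1, PostIndex), ne);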
@@ -3234,13 +3249,12 @@ void MacroAssembler::CopyBytes(Register src,
b(eq, &done);
bind(&byte_loop_1);
ldrb(scratch, MemOperand(src, 1, PostIndex));
- strb(scratch, MemOperand(dst, 1, PostIndex));
sub(length, length, Operand(1), SetCC);
+ strb(scratch, MemOperand(dst, 1, PostIndex));
b(ne, &byte_loop_1);
bind(&done);
}
-
void MacroAssembler::InitializeFieldsWithFiller(Register start_offset,
                                                Register end_offset,
                                                Register filler) {