Chromium Code Reviews

Unified Diff: src/x64/macro-assembler-x64.cc

Issue 6729016: Implemented FastAsciiStringJoin in X64 full codegen. (Closed) Base URL: https://v8.googlecode.com/svn/branches/bleeding_edge
Patch Set: Created 9 years, 9 months ago
 // Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
 //
 //     * Redistributions of source code must retain the above copyright
 //       notice, this list of conditions and the following disclaimer.
 //     * Redistributions in binary form must reproduce the above
 //       copyright notice, this list of conditions and the following
 //       disclaimer in the documentation and/or other materials provided
(...skipping 1227 matching lines...)
 }
 }


 void MacroAssembler::SmiAdd(Register dst,
                             Register src1,
                             Register src2) {
   // No overflow checking. Use only when it's known that
   // overflowing is impossible.
   ASSERT(!dst.is(src2));
-  if (dst.is(src1)) {
-    addq(dst, src2);
-  } else {
+  if (!dst.is(src1)) {
     movq(dst, src1);
-    addq(dst, src2);
   }
+  addq(dst, src2);
   Assert(no_overflow, "Smi addition overflow");
 }


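The same restructuring appears in all three smi arithmetic helpers in this patch (SmiAdd above and the two SmiSub overloads below): instead of duplicating the arithmetic instruction in both arms of the branch, the new shape moves src1 into dst only when the registers differ, then emits the arithmetic once. A minimal, self-contained C++ sketch of that control flow, with hypothetical print-based stand-ins for the MacroAssembler emit calls:

#include <cstdio>

// Hypothetical stand-ins for the MacroAssembler interface; the real
// methods append machine code to a buffer rather than printing mnemonics.
enum Register { rax, rbx, rcx };
static const char* kRegNames[] = { "rax", "rbx", "rcx" };
void movq(Register dst, Register src) {
  std::printf("movq %s, %s\n", kRegNames[dst], kRegNames[src]);
}
void addq(Register dst, Register src) {
  std::printf("addq %s, %s\n", kRegNames[dst], kRegNames[src]);
}

// New shape of SmiAdd/SmiSub: move src1 into dst only when they differ,
// then emit a single arithmetic instruction.
void SmiAdd(Register dst, Register src1, Register src2) {
  if (dst != src1) {
    movq(dst, src1);  // bring src1 into dst first
  }
  addq(dst, src2);    // one shared addq, regardless of aliasing
}

int main() {
  SmiAdd(rax, rax, rbx);  // emits only: addq rax, rbx
  SmiAdd(rcx, rax, rbx);  // emits: movq rcx, rax; addq rcx, rbx
  return 0;
}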
 void MacroAssembler::SmiSub(Register dst, Register src1, Register src2) {
   // No overflow checking. Use only when it's known that
   // overflowing is impossible (e.g., subtracting two positive smis).
   ASSERT(!dst.is(src2));
-  if (dst.is(src1)) {
-    subq(dst, src2);
-  } else {
+  if (!dst.is(src1)) {
     movq(dst, src1);
-    subq(dst, src2);
   }
+  subq(dst, src2);
   Assert(no_overflow, "Smi subtraction overflow");
 }


 void MacroAssembler::SmiSub(Register dst,
                             Register src1,
                             const Operand& src2) {
   // No overflow checking. Use only when it's known that
   // overflowing is impossible (e.g., subtracting two positive smis).
-  if (dst.is(src1)) {
-    subq(dst, src2);
-  } else {
+  if (!dst.is(src1)) {
     movq(dst, src1);
-    subq(dst, src2);
   }
+  subq(dst, src2);
   Assert(no_overflow, "Smi subtraction overflow");
 }


 void MacroAssembler::SmiNot(Register dst, Register src) {
   ASSERT(!dst.is(kScratchRegister));
   ASSERT(!src.is(kScratchRegister));
   // Set tag and padding bits before negating, so that they are zero afterwards.
   movl(kScratchRegister, Immediate(~0));
   if (dst.is(src)) {
(...skipping 166 matching lines...)
   neg(dst);
   if (shift < kSmiShift) {
     sar(dst, Immediate(kSmiShift - shift));
   } else {
     shl(dst, Immediate(shift - kSmiShift));
   }
   return SmiIndex(dst, times_1);
 }


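The shift arithmetic above relies on the x64 smi encoding, in which the 32-bit payload lives in the upper half of the word (kSmiShift == 32): a single shift of the tagged value by kSmiShift - shift (right) or shift - kSmiShift (left) both untags it and applies the times_2^shift scale, and the preceding neg works on the tagged value the same way. A small sketch of that arithmetic, assuming that encoding:

#include <cassert>
#include <cstdint>

// Sketch of the SmiIndex shift math, assuming kSmiShift == 32.
// Right shift of a negative value is taken to be arithmetic, as x64
// compilers implement it.
int64_t ScaledIndexFromSmi(int64_t tagged_smi, int shift) {
  const int kSmiShift = 32;
  if (shift < kSmiShift) {
    // sar: one shift both untags and scales by 2^shift.
    return tagged_smi >> (kSmiShift - shift);
  }
  // shl: the scale exceeds the tag width, so shift further left.
  return tagged_smi << (shift - kSmiShift);
}

int main() {
  int64_t smi7 = int64_t{7} << 32;                // tagged smi 7
  assert(ScaledIndexFromSmi(smi7, 3) == 56);      // 7 * 8, a times_8 index
  assert(ScaledIndexFromSmi(-smi7, 3) == -56);    // negated smi gives -value * 8
  return 0;
}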
+void MacroAssembler::AddSmiField(Register dst, const Operand& src) {
+  addl(dst, Operand(src, kSmiShift / kBitsPerByte));

William Hesse 2011/03/24 12:02:21 ASSERT(kSmiShift % kBitsPerByte == 0);
Lasse Reichstein 2011/03/24 12:14:08 Done.

+}


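Why a 32-bit addl at byte offset kSmiShift / kBitsPerByte works: with kSmiShift == 32, the untagged value is exactly the upper 32 bits of the field, which on little-endian x64 sit 4 bytes into the word. A sketch of that layout argument, including the ASSERT requested in the review, under those assumptions:

#include <cassert>
#include <cstdint>
#include <cstring>

int main() {
  const int kSmiShift = 32;
  const int kBitsPerByte = 8;
  assert(kSmiShift % kBitsPerByte == 0);  // the ASSERT requested above

  int64_t tagged = int64_t{42} << kSmiShift;  // tagged smi 42
  // On little-endian x64, the untagged value starts at byte offset 4.
  int32_t value;
  std::memcpy(&value,
              reinterpret_cast<const char*>(&tagged) + kSmiShift / kBitsPerByte,
              sizeof(value));
  assert(value == 42);  // an addl of this half-word adds the smi's value
  return 0;
}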
 void MacroAssembler::Move(Register dst, Register src) {
   if (!dst.is(src)) {
     movq(dst, src);
   }
 }


 void MacroAssembler::Move(Register dst, Handle<Object> source) {
   ASSERT(!source->IsFailure());
   if (source->IsSmi()) {
(...skipping 1215 matching lines...)
                      scratch2,
                      gc_required,
                      TAG_OBJECT);

   // Set the map. The other fields are left uninitialized.
   LoadRoot(kScratchRegister, Heap::kConsAsciiStringMapRootIndex);
   movq(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
 }


+// Copy memory, byte-by-byte, from source to destination. Not optimized for
+// long or aligned copies. The contents of scratch and length are destroyed.
+// Source and destination are incremented by length.
+// A simpler loop is faster on small copies, but 30% slower on large ones.
+// The cld() instruction must have been emitted, to set the direction flag,
+// before calling this function.
+void MacroAssembler::CopyBytes(Register destination,
+                               Register source,
+                               Register length,
+                               int min_length,
+                               Register scratch) {
+  ASSERT(min_length >= 0);
+  if (FLAG_debug_code) {
+    cmpl(length, Immediate(min_length));
+    Assert(greater_equal, "Invalid min_length");
+  }
+  Label loop, done, short_string, short_loop;
+  // Experimentation shows that the short string loop is faster if length < 10.

William Hesse 2011/03/24 12:02:21 Comment disagrees with chosen constant.
Lasse Reichstein 2011/03/24 12:14:08 Comment removed. We should do tests to see what th…

+  const int kLongStringLimit = 20;
+  if (min_length <= kLongStringLimit) {
+    cmpl(length, Immediate(kLongStringLimit));
+    j(less_equal, &short_string);
+  }
+
+  ASSERT(source.is(rsi));
+  ASSERT(destination.is(rdi));
+  ASSERT(length.is(rcx));
+
+  // Because source is 8-byte aligned in our uses of this function,
+  // we keep source aligned for the rep movs operation by copying the odd bytes
+  // at the end of the ranges.
+  movq(scratch, length);
+  shrl(length, Immediate(3));
+  repmovsq();
+  // Move remaining bytes of length.
+  andl(scratch, Immediate(0x7));
+  movq(length, Operand(source, scratch, times_1, -8));
+  movq(Operand(destination, scratch, times_1, -8), length);
+  addq(destination, scratch);
+
+  if (min_length <= kLongStringLimit) {
+    jmp(&done);
+
+    bind(&short_string);
+    if (min_length == 0) {
+      testl(length, length);
+      j(zero, &done);
+    }
+    lea(scratch, Operand(destination, length, times_1, 0));
+
+    bind(&short_loop);
+    movb(length, Operand(source, 0));
+    movb(Operand(destination, 0), length);
+    incq(source);
+    incq(destination);
+    cmpq(destination, scratch);
+    j(not_equal, &short_loop);
+
+    bind(&done);
+  }
+}


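A C++ sketch of the copy strategy CopyBytes implements, under the same assumptions the assembly makes: the bulk is copied as length / 8 quadwords (the rep movsq part), and the remaining 0-7 bytes are handled by copying the 8 bytes that end exactly at offset length, overlapping bytes the bulk copy already wrote; short copies take a plain byte loop instead. The helper name is hypothetical, and unlike the real function it does not advance source/destination or clobber a scratch register:

#include <cassert>
#include <cstdint>
#include <cstring>

// Model of the CopyBytes strategy. Assumes, as the assembly does, that
// re-storing up to 7 already-copied bytes is harmless (the tail load/store
// ends exactly at offset `length`).
void CopyBytesSketch(uint8_t* destination, const uint8_t* source, size_t length) {
  const size_t kLongStringLimit = 20;
  if (length <= kLongStringLimit) {
    // short_loop: plain byte copy for small lengths.
    for (size_t i = 0; i < length; i++) destination[i] = source[i];
    return;
  }
  // Bulk: length / 8 quadwords, like rep movsq.
  std::memcpy(destination, source, (length >> 3) << 3);
  // Tail: the 8 bytes ending at `length`; the overlap with already-copied
  // bytes avoids a second remainder loop.
  std::memcpy(destination + length - 8, source + length - 8, 8);
}

int main() {
  uint8_t src[32], dst[32] = {0};
  for (int i = 0; i < 32; i++) src[i] = static_cast<uint8_t>(i);
  CopyBytesSketch(dst, src, 29);  // 3 quadwords + overlapping 5-byte tail
  assert(std::memcmp(dst, src, 29) == 0);
  return 0;
}

The overlapping tail store is the design choice worth noting: one unconditional 8-byte move replaces a data-dependent remainder loop, at the cost of requiring that writing those bytes twice is safe.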
 void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
   if (context_chain_length > 0) {
     // Move up the chain of contexts to the context containing the slot.
     movq(dst, Operand(rsi, Context::SlotOffset(Context::CLOSURE_INDEX)));
     // Load the function context (which is the incoming, outer context).
     movq(dst, FieldOperand(dst, JSFunction::kContextOffset));
     for (int i = 1; i < context_chain_length; i++) {
       movq(dst, Operand(dst, Context::SlotOffset(Context::CLOSURE_INDEX)));
       movq(dst, FieldOperand(dst, JSFunction::kContextOffset));
     }
(...skipping 143 matching lines...)
   CPU::FlushICache(address_, size_);

   // Check that the code was patched as expected.
   ASSERT(masm_.pc_ == address_ + size_);
   ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
 }

 } }  // namespace v8::internal

 #endif  // V8_TARGET_ARCH_X64