Chromium Code Reviews

Unified Diff: src/mips64/macro-assembler-mips64.cc

Issue 1334793004: MIPS64: Add big-endian support for mips64. (Closed) Base URL: https://chromium.googlesource.com/v8/v8.git@master
Patch Set: Created 5 years, 3 months ago
 // Copyright 2012 the V8 project authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.

 #include <limits.h>  // For LONG_MIN, LONG_MAX.

 #if V8_TARGET_ARCH_MIPS64

 #include "src/base/division-by-constant.h"
 #include "src/bootstrapper.h"

(...skipping 1193 matching lines...)
 }


 // Do 64-bit load from unaligned address. Note this only handles
 // the specific case of 32-bit aligned, but not 64-bit aligned.
 void MacroAssembler::Uld(Register rd, const MemOperand& rs, Register scratch) {
   // Assert fail if the offset from start of object IS actually aligned.
   // ONLY use with known misalignment, since there is performance cost.
   DCHECK((rs.offset() + kHeapObjectTag) & (kPointerSize - 1));
   // TODO(plind): endian dependency.
-  lwu(rd, rs);
-  lw(scratch, MemOperand(rs.rm(), rs.offset() + kPointerSize / 2));
-  dsll32(scratch, scratch, 0);
+  if (kArchEndian == kLittle) {
+    lwu(rd, rs);
+    lw(scratch, MemOperand(rs.rm(), rs.offset() + kPointerSize / 2));
+    dsll32(scratch, scratch, 0);
+  } else {
+    lw(rd, rs);
+    lwu(scratch, MemOperand(rs.rm(), rs.offset() + kPointerSize / 2));
+    dsll32(rd, rd, 0);
+  }
   Daddu(rd, rd, scratch);
 }

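For reference, a host-side model of the value this hunk assembles (a sketch, not V8 code; the helper name and parameters are made up). `first` stands for the 32-bit word loaded from rs, `second` for the word loaded from rs + 4; endianness decides which one becomes the high word.

#include <cstdint>

// Hypothetical model of Uld: two 32-bit loads fused into one 64-bit value.
uint64_t UldModel(uint32_t first, uint32_t second, bool little_endian) {
  if (little_endian) {
    // lwu rd, rs; lw scratch, rs+4; dsll32 scratch; Daddu rd, rd, scratch.
    return static_cast<uint64_t>(first) |
           (static_cast<uint64_t>(second) << 32);
  }
  // Big-endian: the word at the lower address is the high word, so
  // lw rd, rs; lwu scratch, rs+4; dsll32 rd; Daddu rd, rd, scratch.
  return (static_cast<uint64_t>(first) << 32) |
         static_cast<uint64_t>(second);
}

Since the low word is zero-extended (lwu) and the high word is shifted into bits 63..32 (dsll32), the final Daddu behaves exactly like a bitwise OR of the two halves.
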
 // Do 64-bit store to unaligned address. Note this only handles
 // the specific case of 32-bit aligned, but not 64-bit aligned.
 void MacroAssembler::Usd(Register rd, const MemOperand& rs, Register scratch) {
   // Assert fail if the offset from start of object IS actually aligned.
   // ONLY use with known misalignment, since there is performance cost.
   DCHECK((rs.offset() + kHeapObjectTag) & (kPointerSize - 1));
   // TODO(plind): endian dependency.
-  sw(rd, rs);
-  dsrl32(scratch, rd, 0);
-  sw(scratch, MemOperand(rs.rm(), rs.offset() + kPointerSize / 2));
+  if (kArchEndian == kLittle) {
+    sw(rd, rs);
+    dsrl32(scratch, rd, 0);
+    sw(scratch, MemOperand(rs.rm(), rs.offset() + kPointerSize / 2));
+  } else {
+    sw(rd, MemOperand(rs.rm(), rs.offset() + kPointerSize / 2));
+    dsrl32(scratch, rd, 0);
+    sw(scratch, rs);
+  }
 }

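And the mirror image for the store side, again as a hypothetical host-side sketch (not V8 code): dsrl32 extracts the high word, and the two sw instructions write the halves in an endian-dependent order. `at_rs` stands for the word stored at rs, `at_rs4` for the word stored at rs + 4.

#include <cstdint>

// Hypothetical model of Usd: split a 64-bit value into two 32-bit stores.
void UsdModel(uint64_t value, bool little_endian,
              uint32_t* at_rs, uint32_t* at_rs4) {
  uint32_t lo = static_cast<uint32_t>(value);        // sw of rd's low half
  uint32_t hi = static_cast<uint32_t>(value >> 32);  // dsrl32 scratch, rd, 0
  *at_rs = little_endian ? lo : hi;   // big-endian: high word at low address
  *at_rs4 = little_endian ? hi : lo;
}
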
 void MacroAssembler::li(Register dst, Handle<Object> value, LiFlags mode) {
   AllowDeferredHandleDereference smi_check;
   if (value->IsSmi()) {
     li(dst, Operand(value), mode);
   } else {
     DCHECK(value->IsHeapObject());
     if (isolate()->heap()->InNewSpace(*value)) {
(...skipping 2541 matching lines...)
     And(scratch, src, kPointerSize - 1);
     Assert(eq, kExpectingAlignmentForCopyBytes,
            scratch, Operand(zero_reg));
   }
   Branch(&byte_loop, lt, length, Operand(kPointerSize));
   ld(scratch, MemOperand(src));
   Daddu(src, src, kPointerSize);

   // TODO(kalmard) check if this can be optimized to use sw in most cases.
   // Can't use unaligned access - copy byte by byte.
-  sb(scratch, MemOperand(dst, 0));
-  dsrl(scratch, scratch, 8);
-  sb(scratch, MemOperand(dst, 1));
-  dsrl(scratch, scratch, 8);
-  sb(scratch, MemOperand(dst, 2));
-  dsrl(scratch, scratch, 8);
-  sb(scratch, MemOperand(dst, 3));
-  dsrl(scratch, scratch, 8);
-  sb(scratch, MemOperand(dst, 4));
-  dsrl(scratch, scratch, 8);
-  sb(scratch, MemOperand(dst, 5));
-  dsrl(scratch, scratch, 8);
-  sb(scratch, MemOperand(dst, 6));
-  dsrl(scratch, scratch, 8);
-  sb(scratch, MemOperand(dst, 7));
+  if (kArchEndian == kLittle) {
+    sb(scratch, MemOperand(dst, 0));
+    dsrl(scratch, scratch, 8);
+    sb(scratch, MemOperand(dst, 1));
+    dsrl(scratch, scratch, 8);
+    sb(scratch, MemOperand(dst, 2));
+    dsrl(scratch, scratch, 8);
+    sb(scratch, MemOperand(dst, 3));
+    dsrl(scratch, scratch, 8);
+    sb(scratch, MemOperand(dst, 4));
+    dsrl(scratch, scratch, 8);
+    sb(scratch, MemOperand(dst, 5));
+    dsrl(scratch, scratch, 8);
+    sb(scratch, MemOperand(dst, 6));
+    dsrl(scratch, scratch, 8);
+    sb(scratch, MemOperand(dst, 7));
+  } else {
+    sb(scratch, MemOperand(dst, 7));
+    dsrl(scratch, scratch, 8);
+    sb(scratch, MemOperand(dst, 6));
+    dsrl(scratch, scratch, 8);
+    sb(scratch, MemOperand(dst, 5));
+    dsrl(scratch, scratch, 8);
+    sb(scratch, MemOperand(dst, 4));
+    dsrl(scratch, scratch, 8);
+    sb(scratch, MemOperand(dst, 3));
+    dsrl(scratch, scratch, 8);
+    sb(scratch, MemOperand(dst, 2));
+    dsrl(scratch, scratch, 8);
+    sb(scratch, MemOperand(dst, 1));
+    dsrl(scratch, scratch, 8);
+    sb(scratch, MemOperand(dst, 0));
+  }
   Daddu(dst, dst, 8);

   Dsubu(length, length, Operand(kPointerSize));
   Branch(&word_loop);

   // Copy the last bytes if any left.
   bind(&byte_loop);
   Branch(&done, eq, length, Operand(zero_reg));
   bind(&byte_loop_1);
   lbu(scratch, MemOperand(src));
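The unrolled sb/dsrl sequence walks the 64-bit word from its least significant byte upward; only the destination offsets change with byte order. A compact host-side model (a sketch, not V8 code; the function name is made up):

#include <cstddef>
#include <cstdint>

// Hypothetical model of the byte-by-byte store above: each dsrl-by-8
// exposes the next byte in the low bits. On little-endian the lowest
// byte belongs at dst+0; on big-endian it belongs at dst+7, so the
// store offsets run in the opposite direction.
void CopyWordBytewise(uint8_t* dst, uint64_t scratch, bool little_endian) {
  for (size_t i = 0; i < 8; ++i) {
    size_t offset = little_endian ? i : 7 - i;
    dst[offset] = static_cast<uint8_t>(scratch & 0xFF);  // sb
    scratch >>= 8;                                       // dsrl scratch, 8
  }
}
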
(...skipping 177 matching lines...)

 void MacroAssembler::LoadWeakValue(Register value, Handle<WeakCell> cell,
                                    Label* miss) {
   GetWeakValue(value, cell);
   JumpIfSmi(value, miss);
 }

 void MacroAssembler::MovFromFloatResult(const DoubleRegister dst) {
   if (IsMipsSoftFloatABI) {
-    Move(dst, v0, v1);
+    if (kArchEndian == kLittle) {
+      Move(dst, v0, v1);
+    } else {
+      Move(dst, v1, v0);
+    }
   } else {
     Move(dst, f0);  // Reg f0 is o32 ABI FP return value.
   }
 }

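The soft-float hunks here and below all make the same change: a double travels in a GPR pair, and which register of the pair carries the high word follows the target's byte order (on big-endian, v0/a0 hold the high word). A minimal host-side sketch of that word split (illustration only, not V8 code):

#include <cstdint>
#include <cstdio>
#include <cstring>

// Hypothetical illustration: the two 32-bit words a soft-float register
// pair must carry for a double. The pair corresponds to (v0, v1) or
// (a0, a1) in the hunks above and below.
int main() {
  double d = 1.0;  // Bit pattern 0x3FF0000000000000.
  uint64_t bits;
  std::memcpy(&bits, &d, sizeof(bits));
  uint32_t lo = static_cast<uint32_t>(bits);        // 0x00000000
  uint32_t hi = static_cast<uint32_t>(bits >> 32);  // 0x3FF00000
  // Little-endian target: Move(dst, v0, v1) -> v0 = lo, v1 = hi.
  // Big-endian target:    Move(dst, v1, v0) -> v0 = hi, v1 = lo.
  std::printf("lo=0x%08X hi=0x%08X\n", lo, hi);
  return 0;
}
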
 void MacroAssembler::MovFromFloatParameter(const DoubleRegister dst) {
   if (IsMipsSoftFloatABI) {
-    Move(dst, a0, a1);
+    if (kArchEndian == kLittle) {
+      Move(dst, a0, a1);
+    } else {
+      Move(dst, a1, a0);
+    }
   } else {
-    Move(dst, f12);  // Reg f12 is o32 ABI FP first argument value.
+    Move(dst, f12);  // Reg f12 is n64 ABI FP first argument value.
   }
 }

 void MacroAssembler::MovToFloatParameter(DoubleRegister src) {
   if (!IsMipsSoftFloatABI) {
     Move(f12, src);
   } else {
-    Move(a0, a1, src);
+    if (kArchEndian == kLittle) {
+      Move(a0, a1, src);
+    } else {
+      Move(a1, a0, src);
+    }
   }
 }

 void MacroAssembler::MovToFloatResult(DoubleRegister src) {
   if (!IsMipsSoftFloatABI) {
     Move(f0, src);
   } else {
-    Move(v0, v1, src);
+    if (kArchEndian == kLittle) {
+      Move(v0, v1, src);
+    } else {
+      Move(v1, v0, src);
+    }
   }
 }

 void MacroAssembler::MovToFloatParameters(DoubleRegister src1,
                                           DoubleRegister src2) {
   if (!IsMipsSoftFloatABI) {
     const DoubleRegister fparg2 = (kMipsAbi == kN64) ? f13 : f14;
     if (src2.is(f12)) {
       DCHECK(!src1.is(fparg2));
       Move(fparg2, src2);
       Move(f12, src1);
     } else {
       Move(f12, src1);
       Move(fparg2, src2);
     }
   } else {
-    Move(a0, a1, src1);
-    Move(a2, a3, src2);
+    if (kArchEndian == kLittle) {
+      Move(a0, a1, src1);
+      Move(a2, a3, src2);
+    } else {
+      Move(a1, a0, src1);
+      Move(a3, a2, src2);
+    }
   }
 }

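One detail in the hard-float branch above: fparg2 is f13 under the n64 ABI (FP arguments occupy consecutive registers starting at f12) but f14 under o32 (doubles are passed in even-numbered registers). A minimal sketch of that selection, with hypothetical stand-ins for V8's constants:

// Hypothetical stand-ins for V8's kMipsAbi / kN64 constants; the real
// definitions live in the MIPS constants headers.
enum MipsAbi { kO32, kN64 };

// Register number of the second FP argument: f13 under n64 (consecutive
// FP argument registers f12, f13, ...), f14 under o32 (doubles use the
// even-numbered registers f12 and f14).
constexpr int SecondFpArgRegister(MipsAbi abi) {
  return abi == kN64 ? 13 : 14;
}

static_assert(SecondFpArgRegister(kN64) == 13, "n64 uses f13");
static_assert(SecondFpArgRegister(kO32) == 14, "o32 uses f14");
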
 // -----------------------------------------------------------------------------
 // JavaScript invokes.

 void MacroAssembler::InvokePrologue(const ParameterCount& expected,
                                     const ParameterCount& actual,
                                     Handle<Code> code_constant,
(...skipping 2144 matching lines...)
   if (mag.shift > 0) sra(result, result, mag.shift);
   srl(at, dividend, 31);
   Addu(result, result, Operand(at));
 }


 }  // namespace internal
 }  // namespace v8

 #endif  // V8_TARGET_ARCH_MIPS64
