| OLD | NEW |
| 1 // Copyright 2014 the V8 project authors. All rights reserved. | 1 // Copyright 2014 the V8 project authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include <assert.h> // For assert | 5 #include <assert.h> // For assert |
| 6 #include <limits.h> // For LONG_MIN, LONG_MAX. | 6 #include <limits.h> // For LONG_MIN, LONG_MAX. |
| 7 | 7 |
| 8 #if V8_TARGET_ARCH_S390 | 8 #if V8_TARGET_ARCH_S390 |
| 9 | 9 |
| 10 #include "src/base/bits.h" | 10 #include "src/base/bits.h" |
| (...skipping 2170 matching lines...) | |
| 2181 bind(&smi_value); | 2181 bind(&smi_value); |
| 2182 SmiToDouble(double_scratch, value_reg); | 2182 SmiToDouble(double_scratch, value_reg); |
| 2183 | 2183 |
| 2184 bind(&store); | 2184 bind(&store); |
| 2185 SmiToDoubleArrayOffset(scratch1, key_reg); | 2185 SmiToDoubleArrayOffset(scratch1, key_reg); |
| 2186 StoreDouble(double_scratch, | 2186 StoreDouble(double_scratch, |
| 2187 FieldMemOperand(elements_reg, scratch1, | 2187 FieldMemOperand(elements_reg, scratch1, |
| 2188 FixedDoubleArray::kHeaderSize - elements_offset)); | 2188 FixedDoubleArray::kHeaderSize - elements_offset)); |
| 2189 } | 2189 } |
| 2190 | 2190 |
| 2191 void MacroAssembler::AddAndCheckForOverflow(Register dst, Register left, | |
| 2192 Register right, | |
| 2193 Register overflow_dst, | |
| 2194 Register scratch) { | |
| 2195 DCHECK(!dst.is(overflow_dst)); | |
| 2196 DCHECK(!dst.is(scratch)); | |
| 2197 DCHECK(!overflow_dst.is(scratch)); | |
| 2198 DCHECK(!overflow_dst.is(left)); | |
| 2199 DCHECK(!overflow_dst.is(right)); | |
| 2200 | |
| 2201 // TODO(joransiu): Optimize paths for left == right. | |
| 2202 bool left_is_right = left.is(right); | |
| 2203 | |
| 2204 // C = A+B; C overflows if A/B have same sign and C has diff sign than A | |
| 2205 if (dst.is(left)) { | |
| 2206 LoadRR(scratch, left); // Preserve left. | |
| 2207 AddP(dst, left, right); // Left is overwritten. | |
| 2208 XorP(overflow_dst, scratch, dst); // Original left. | |
| 2209 if (!left_is_right) XorP(scratch, dst, right); | |
| 2210 } else if (dst.is(right)) { | |
| 2211 LoadRR(scratch, right); // Preserve right. | |
| 2212 AddP(dst, left, right); // Right is overwritten. | |
| 2213 XorP(overflow_dst, dst, left); | |
| 2214 if (!left_is_right) XorP(scratch, dst, scratch); | |
| 2215 } else { | |
| 2216 AddP(dst, left, right); | |
| 2217 XorP(overflow_dst, dst, left); | |
| 2218 if (!left_is_right) XorP(scratch, dst, right); | |
| 2219 } | |
| 2220 if (!left_is_right) AndP(overflow_dst, scratch, overflow_dst); | |
| 2221 LoadAndTestRR(overflow_dst, overflow_dst); | |
| 2222 } | |
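The deleted AddAndCheckForOverflow relies on the classic XOR test for signed addition overflow: the sum overflows exactly when both operands share a sign and the result's sign differs, which is what the (left ^ sum) & (right ^ sum) sequence above computes before LoadAndTestRR exposes the sign bit in the condition code. A minimal standalone sketch of the same check, in plain C++ rather than the MacroAssembler API (helper name is illustrative only):

```cpp
#include <cstdint>

// Returns true if a + b overflows the signed 32-bit range.
// Mirrors the XOR trick above: the sign bit of (a ^ sum) & (b ^ sum)
// is set exactly when a and b share a sign and sum's sign differs.
bool AddOverflows32(int32_t a, int32_t b) {
  int32_t sum = static_cast<int32_t>(static_cast<uint32_t>(a) +
                                     static_cast<uint32_t>(b));
  return ((a ^ sum) & (b ^ sum)) < 0;
}
```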
| 2223 | |
| 2224 void MacroAssembler::AddAndCheckForOverflow(Register dst, Register left, | |
| 2225 intptr_t right, | |
| 2226 Register overflow_dst, | |
| 2227 Register scratch) { | |
| 2228 DCHECK(!dst.is(overflow_dst)); | |
| 2229 DCHECK(!dst.is(scratch)); | |
| 2230 DCHECK(!overflow_dst.is(scratch)); | |
| 2231 DCHECK(!overflow_dst.is(left)); | |
| 2232 | |
| 2233 mov(r1, Operand(right)); | |
| 2234 AddAndCheckForOverflow(dst, left, r1, overflow_dst, scratch); | |
| 2235 } | |
| 2236 | |
| 2237 void MacroAssembler::SubAndCheckForOverflow(Register dst, Register left, | |
| 2238 Register right, | |
| 2239 Register overflow_dst, | |
| 2240 Register scratch) { | |
| 2241 DCHECK(!dst.is(overflow_dst)); | |
| 2242 DCHECK(!dst.is(scratch)); | |
| 2243 DCHECK(!overflow_dst.is(scratch)); | |
| 2244 DCHECK(!overflow_dst.is(left)); | |
| 2245 DCHECK(!overflow_dst.is(right)); | |
| 2246 | |
| 2247 // C = A-B; C overflows if A/B have diff signs and C has diff sign than A | |
| 2248 if (dst.is(left)) { | |
| 2249 LoadRR(scratch, left); // Preserve left. | |
| 2250 SubP(dst, left, right); // Left is overwritten. | |
| 2251 XorP(overflow_dst, dst, scratch); | |
| 2252 XorP(scratch, right); | |
| 2253 AndP(overflow_dst, scratch /*, SetRC*/); | |
| 2254 LoadAndTestRR(overflow_dst, overflow_dst); | |
| 2255 // Should be okay to remove rc | |
| 2256 } else if (dst.is(right)) { | |
| 2257 LoadRR(scratch, right); // Preserve right. | |
| 2258 SubP(dst, left, right); // Right is overwritten. | |
| 2259 XorP(overflow_dst, dst, left); | |
| 2260 XorP(scratch, left); | |
| 2261 AndP(overflow_dst, scratch /*, SetRC*/); | |
| 2262 LoadAndTestRR(overflow_dst, overflow_dst); | |
| 2263 // Should be okay to remove rc | |
| 2264 } else { | |
| 2265 SubP(dst, left, right); | |
| 2266 XorP(overflow_dst, dst, left); | |
| 2267 XorP(scratch, left, right); | |
| 2268 AndP(overflow_dst, scratch /*, SetRC*/); | |
| 2269 LoadAndTestRR(overflow_dst, overflow_dst); | |
| 2270 // Should be okay to remove rc | |
| 2271 } | |
| 2272 } | |
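For subtraction the rule flips: C = A - B overflows when A and B have different signs and C's sign differs from A's, i.e. when (A ^ B) & (A ^ C) has its sign bit set, which matches the XorP/AndP sequence in the deleted routine. A hedged standalone equivalent (illustrative helper, not V8 API):

```cpp
#include <cstdint>

// Returns true if a - b overflows the signed 32-bit range.
// Overflow iff a and b differ in sign and the result's sign differs
// from a's: sign bit of (a ^ b) & (a ^ diff).
bool SubOverflows32(int32_t a, int32_t b) {
  int32_t diff = static_cast<int32_t>(static_cast<uint32_t>(a) -
                                      static_cast<uint32_t>(b));
  return ((a ^ b) & (a ^ diff)) < 0;
}
```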
| 2273 | |
| 2274 void MacroAssembler::CompareMap(Register obj, Register scratch, Handle<Map> map, | 2191 void MacroAssembler::CompareMap(Register obj, Register scratch, Handle<Map> map, |
| 2275 Label* early_success) { | 2192 Label* early_success) { |
| 2276 LoadP(scratch, FieldMemOperand(obj, HeapObject::kMapOffset)); | 2193 LoadP(scratch, FieldMemOperand(obj, HeapObject::kMapOffset)); |
| 2277 CompareMap(obj, map, early_success); | 2194 CompareMap(obj, map, early_success); |
| 2278 } | 2195 } |
| 2279 | 2196 |
| 2280 void MacroAssembler::CompareMap(Register obj_map, Handle<Map> map, | 2197 void MacroAssembler::CompareMap(Register obj_map, Handle<Map> map, |
| 2281 Label* early_success) { | 2198 Label* early_success) { |
| 2282 mov(r0, Operand(map)); | 2199 mov(r0, Operand(map)); |
| 2283 CmpP(r0, FieldMemOperand(obj_map, HeapObject::kMapOffset)); | 2200 CmpP(r0, FieldMemOperand(obj_map, HeapObject::kMapOffset)); |
| (...skipping 1868 matching lines...) | |
| 4152 #if V8_TARGET_ARCH_S390X | 4069 #if V8_TARGET_ARCH_S390X |
| 4153 sgfr(dst, src); | 4070 sgfr(dst, src); |
| 4154 #else | 4071 #else |
| 4155 sr(dst, src); | 4072 sr(dst, src); |
| 4156 #endif | 4073 #endif |
| 4157 } | 4074 } |
| 4158 | 4075 |
| 4159 // Subtract 32-bit (Register = Register - Register) | 4076 // Subtract 32-bit (Register = Register - Register) |
| 4160 void MacroAssembler::Sub32(Register dst, Register src1, Register src2) { | 4077 void MacroAssembler::Sub32(Register dst, Register src1, Register src2) { |
| 4161 // Use non-clobbering version if possible | 4078 // Use non-clobbering version if possible |
| 4162 if (CpuFeatures::IsSupported(DISTINCT_OPS) && !dst.is(src1)) { | 4079 if (CpuFeatures::IsSupported(DISTINCT_OPS)) { |
| 4163 srk(dst, src1, src2); | 4080 srk(dst, src1, src2); |
| 4164 return; | 4081 return; |
| 4165 } | 4082 } |
| 4166 if (!dst.is(src1) && !dst.is(src2)) lr(dst, src1); | 4083 if (!dst.is(src1) && !dst.is(src2)) lr(dst, src1); |
| 4167 // In scenario where we have dst = src - dst, we need to swap and negate | 4084 // In scenario where we have dst = src - dst, we need to swap and negate |
| 4168 if (!dst.is(src1) && dst.is(src2)) { | 4085 if (!dst.is(src1) && dst.is(src2)) { |
| 4169 sr(dst, src1); // dst = (dst - src) | 4086 Label done; |
| 4170 lcr(dst, dst); // dst = -dst | 4087 lcr(dst, dst); // dst = -dst |
| 4088 b(overflow, &done); |
| 4089 ar(dst, src1); // dst = dst + src |
| 4090 bind(&done); |
| 4171 } else { | 4091 } else { |
| 4172 sr(dst, src2); | 4092 sr(dst, src2); |
| 4173 } | 4093 } |
| 4174 } | 4094 } |
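The reworked dst = src - dst path uses the identity src1 - dst == (-dst) + src1: negate in place, then add, with a branch that skips the add when the negation itself overflows (only possible for dst == INT_MIN), so the overflow condition code left behind reflects that case. A rough C++ illustration of the identity and its edge case (hypothetical helper, not the emitted sequence):

```cpp
#include <cstdint>
#include <limits>

// Computes src1 - dst by negate-then-add, mirroring the lcr/ar pair:
// INT32_MIN is the only input whose negation overflows.
int32_t SubViaNegateAdd(int32_t src1, int32_t dst) {
  if (dst == std::numeric_limits<int32_t>::min()) {
    // The emitted code branches past the add here, leaving the
    // overflow condition set by the negation.
    return dst;
  }
  return -dst + src1;  // same value as src1 - dst
}
```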
| 4175 | 4095 |
| 4176 // Subtract Pointer Sized (Register = Register - Register) | 4096 // Subtract Pointer Sized (Register = Register - Register) |
| 4177 void MacroAssembler::SubP(Register dst, Register src1, Register src2) { | 4097 void MacroAssembler::SubP(Register dst, Register src1, Register src2) { |
| 4178 // Use non-clobbering version if possible | 4098 // Use non-clobbering version if possible |
| 4179 if (CpuFeatures::IsSupported(DISTINCT_OPS) && !dst.is(src1)) { | 4099 if (CpuFeatures::IsSupported(DISTINCT_OPS)) { |
| 4180 SubP_RRR(dst, src1, src2); | 4100 SubP_RRR(dst, src1, src2); |
| 4181 return; | 4101 return; |
| 4182 } | 4102 } |
| 4183 if (!dst.is(src1) && !dst.is(src2)) LoadRR(dst, src1); | 4103 if (!dst.is(src1) && !dst.is(src2)) LoadRR(dst, src1); |
| 4184 // In scenario where we have dst = src - dst, we need to swap and negate | 4104 // In scenario where we have dst = src - dst, we need to swap and negate |
| 4185 if (!dst.is(src1) && dst.is(src2)) { | 4105 if (!dst.is(src1) && dst.is(src2)) { |
| 4186 SubP(dst, src1); // dst = (dst - src) | 4106 Label done; |
| 4187 LoadComplementRR(dst, dst); // dst = -dst | 4107 LoadComplementRR(dst, dst); // dst = -dst |
| 4108 b(overflow, &done); |
| 4109 AddP(dst, src1); // dst = dst + src |
| 4110 bind(&done); |
| 4188 } else { | 4111 } else { |
| 4189 SubP(dst, src2); | 4112 SubP(dst, src2); |
| 4190 } | 4113 } |
| 4191 } | 4114 } |
| 4192 | 4115 |
| 4193 // Subtract Pointer Size with src extension | 4116 // Subtract Pointer Size with src extension |
| 4194 // (Register dst(ptr) = Register dst (ptr) - Register src (32 | 32->64)) | 4117 // (Register dst(ptr) = Register dst (ptr) - Register src (32 | 32->64)) |
| 4195 // src is treated as a 32-bit signed integer, which is sign extended to | 4118 // src is treated as a 32-bit signed integer, which is sign extended to |
| 4196 // 64-bit if necessary. | 4119 // 64-bit if necessary. |
| 4197 void MacroAssembler::SubP_ExtendSrc(Register dst, Register src1, | 4120 void MacroAssembler::SubP_ExtendSrc(Register dst, Register src1, |
| 4198 Register src2) { | 4121 Register src2) { |
| 4199 #if V8_TARGET_ARCH_S390X | 4122 #if V8_TARGET_ARCH_S390X |
| 4200 if (!dst.is(src1) && !dst.is(src2)) LoadRR(dst, src1); | 4123 if (!dst.is(src1) && !dst.is(src2)) LoadRR(dst, src1); |
| 4201 | 4124 |
| 4202 // In scenario where we have dst = src - dst, we need to swap and negate | 4125 // In scenario where we have dst = src - dst, we need to swap and negate |
| 4203 if (!dst.is(src1) && dst.is(src2)) { | 4126 if (!dst.is(src1) && dst.is(src2)) { |
| 4204 lgfr(dst, dst); // Sign extend this operand first. | 4127 lgfr(dst, dst); // Sign extend this operand first. |
| 4205 SubP(dst, src1); // dst = (dst - src) | |
| 4206 LoadComplementRR(dst, dst); // dst = -dst | 4128 LoadComplementRR(dst, dst); // dst = -dst |
| 4129 AddP(dst, src1); // dst = -dst + src |
| 4207 } else { | 4130 } else { |
| 4208 sgfr(dst, src2); | 4131 sgfr(dst, src2); |
| 4209 } | 4132 } |
| 4210 #else | 4133 #else |
| 4211 SubP(dst, src1, src2); | 4134 SubP(dst, src1, src2); |
| 4212 #endif | 4135 #endif |
| 4213 } | 4136 } |
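SubP_ExtendSrc treats the second operand as a signed 32-bit value that is widened to 64 bits before subtracting; on the 64-bit build, sgfr performs the sign extension and the subtract in a single instruction. In plain C++ terms the operation is roughly (illustrative helper only):

```cpp
#include <cstdint>

// Pointer-sized subtract with the subtrahend taken as a signed 32-bit
// value and sign-extended to 64 bits, approximating sgfr's semantics.
int64_t SubPExtendSrc(int64_t src1, int64_t src2) {
  int64_t extended = static_cast<int64_t>(static_cast<int32_t>(src2));
  return src1 - extended;
}
```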
| 4214 | 4137 |
| 4215 // Subtract 32-bit (Register-Memory) | 4138 // Subtract 32-bit (Register-Memory) |
| 4216 void MacroAssembler::Sub32(Register dst, const MemOperand& opnd) { | 4139 void MacroAssembler::Sub32(Register dst, const MemOperand& opnd) { |
| (...skipping 958 matching lines...) | |
| 5175 | 5098 |
| 5176 // Load And Test Pointer Sized (Reg <- Mem) | 5099 // Load And Test Pointer Sized (Reg <- Mem) |
| 5177 void MacroAssembler::LoadAndTestP(Register dst, const MemOperand& mem) { | 5100 void MacroAssembler::LoadAndTestP(Register dst, const MemOperand& mem) { |
| 5178 #if V8_TARGET_ARCH_S390X | 5101 #if V8_TARGET_ARCH_S390X |
| 5179 ltg(dst, mem); | 5102 ltg(dst, mem); |
| 5180 #else | 5103 #else |
| 5181 lt_z(dst, mem); | 5104 lt_z(dst, mem); |
| 5182 #endif | 5105 #endif |
| 5183 } | 5106 } |
| 5184 | 5107 |
| 5108 // Load On Condition Pointer Sized (Reg <- Reg) |
| 5109 void MacroAssembler::LoadOnConditionP(Condition cond, Register dst, |
| 5110 Register src) { |
| 5111 #if V8_TARGET_ARCH_S390X |
| 5112 locgr(cond, dst, src); |
| 5113 #else |
| 5114 locr(cond, dst, src); |
| 5115 #endif |
| 5116 } |
| 5117 |
| 5185 // Load Double Precision (64-bit) Floating Point number from memory | 5118 // Load Double Precision (64-bit) Floating Point number from memory |
| 5186 void MacroAssembler::LoadDouble(DoubleRegister dst, const MemOperand& mem) { | 5119 void MacroAssembler::LoadDouble(DoubleRegister dst, const MemOperand& mem) { |
| 5187 // Both the 32-bit and 64-bit builds use 64-bit floating point regs | 5120 // Both the 32-bit and 64-bit builds use 64-bit floating point regs |
| 5188 if (is_uint12(mem.offset())) { | 5121 if (is_uint12(mem.offset())) { |
| 5189 ld(dst, mem); | 5122 ld(dst, mem); |
| 5190 } else { | 5123 } else { |
| 5191 ldy(dst, mem); | 5124 ldy(dst, mem); |
| 5192 } | 5125 } |
| 5193 } | 5126 } |
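LoadDouble chooses between the short-displacement LD form (12-bit unsigned offset) and the long-displacement LDY form (20-bit signed offset) depending on whether the offset fits. A trivial sketch of that fits-in-12-bits predicate, in plain C++ rather than the V8 helper itself:

```cpp
#include <cstdint>

// True when an offset fits the unsigned 12-bit displacement field of
// the short-form load/store encodings (0 .. 4095).
bool FitsUint12(int64_t offset) {
  return offset >= 0 && offset < (1 << 12);
}
```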
| 5194 | 5127 |
| (...skipping 378 matching lines...) | |
| 5573 } | 5506 } |
| 5574 if (mag.shift > 0) ShiftRightArith(result, result, Operand(mag.shift)); | 5507 if (mag.shift > 0) ShiftRightArith(result, result, Operand(mag.shift)); |
| 5575 ExtractBit(r0, dividend, 31); | 5508 ExtractBit(r0, dividend, 31); |
| 5576 AddP(result, r0); | 5509 AddP(result, r0); |
| 5577 } | 5510 } |
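The visible tail above appears to be the closing correction of a magic-number signed division (the multiply-high and any add/sub adjustments sit in the elided lines): the dividend's sign bit is added to the arithmetically shifted product so that negative dividends round toward zero instead of toward negative infinity. A hedged sketch of just that correction step (hypothetical helper name, plain C++):

```cpp
#include <cstdint>

// Rounding correction after a magic-number signed division: the
// arithmetic shift rounds toward -infinity, so adding the dividend's
// sign bit (1 when negative, 0 otherwise) truncates toward zero.
int32_t CorrectTowardZero(int32_t shifted_quotient, int32_t dividend) {
  int32_t sign_bit =
      static_cast<int32_t>(static_cast<uint32_t>(dividend) >> 31);
  return shifted_quotient + sign_bit;
}
```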
| 5578 | 5511 |
| 5579 } // namespace internal | 5512 } // namespace internal |
| 5580 } // namespace v8 | 5513 } // namespace v8 |
| 5581 | 5514 |
| 5582 #endif // V8_TARGET_ARCH_S390 | 5515 #endif // V8_TARGET_ARCH_S390 |