| OLD | NEW |
| 1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include <limits.h> // For LONG_MIN, LONG_MAX. | 5 #include <limits.h> // For LONG_MIN, LONG_MAX. |
| 6 | 6 |
| 7 #if V8_TARGET_ARCH_ARM | 7 #if V8_TARGET_ARCH_ARM |
| 8 | 8 |
| 9 #include "src/base/bits.h" | 9 #include "src/base/bits.h" |
| 10 #include "src/base/division-by-constant.h" | 10 #include "src/base/division-by-constant.h" |
| (...skipping 269 matching lines...) |
| 280 void MacroAssembler::And(Register dst, Register src1, const Operand& src2, | 280 void MacroAssembler::And(Register dst, Register src1, const Operand& src2, |
| 281 Condition cond) { | 281 Condition cond) { |
| 282 if (!src2.is_reg() && | 282 if (!src2.is_reg() && |
| 283 !src2.must_output_reloc_info(this) && | 283 !src2.must_output_reloc_info(this) && |
| 284 src2.immediate() == 0) { | 284 src2.immediate() == 0) { |
| 285 mov(dst, Operand::Zero(), LeaveCC, cond); | 285 mov(dst, Operand::Zero(), LeaveCC, cond); |
| 286 } else if (!(src2.instructions_required(this) == 1) && | 286 } else if (!(src2.instructions_required(this) == 1) && |
| 287 !src2.must_output_reloc_info(this) && | 287 !src2.must_output_reloc_info(this) && |
| 288 CpuFeatures::IsSupported(ARMv7) && | 288 CpuFeatures::IsSupported(ARMv7) && |
| 289 base::bits::IsPowerOfTwo32(src2.immediate() + 1)) { | 289 base::bits::IsPowerOfTwo32(src2.immediate() + 1)) { |
| | 290 CpuFeatureScope scope(this, ARMv7); |
| 290 ubfx(dst, src1, 0, | 291 ubfx(dst, src1, 0, |
| 291 WhichPowerOf2(static_cast<uint32_t>(src2.immediate()) + 1), cond); | 292 WhichPowerOf2(static_cast<uint32_t>(src2.immediate()) + 1), cond); |
| 292 } else { | 293 } else { |
| 293 and_(dst, src1, src2, LeaveCC, cond); | 294 and_(dst, src1, src2, LeaveCC, cond); |
| 294 } | 295 } |
| 295 } | 296 } |
| 296 | 297 |
| 297 | 298 |
| 298 void MacroAssembler::Ubfx(Register dst, Register src1, int lsb, int width, | 299 void MacroAssembler::Ubfx(Register dst, Register src1, int lsb, int width, |
| 299 Condition cond) { | 300 Condition cond) { |
| 300 DCHECK(lsb < 32); | 301 DCHECK(lsb < 32); |
| 301 if (!CpuFeatures::IsSupported(ARMv7) || predictable_code_size()) { | 302 if (!CpuFeatures::IsSupported(ARMv7) || predictable_code_size()) { |
| 302 int mask = (1 << (width + lsb)) - 1 - ((1 << lsb) - 1); | 303 int mask = (1 << (width + lsb)) - 1 - ((1 << lsb) - 1); |
| 303 and_(dst, src1, Operand(mask), LeaveCC, cond); | 304 and_(dst, src1, Operand(mask), LeaveCC, cond); |
| 304 if (lsb != 0) { | 305 if (lsb != 0) { |
| 305 mov(dst, Operand(dst, LSR, lsb), LeaveCC, cond); | 306 mov(dst, Operand(dst, LSR, lsb), LeaveCC, cond); |
| 306 } | 307 } |
| 307 } else { | 308 } else { |
| | 309 CpuFeatureScope scope(this, ARMv7); |
| 308 ubfx(dst, src1, lsb, width, cond); | 310 ubfx(dst, src1, lsb, width, cond); |
| 309 } | 311 } |
| 310 } | 312 } |
| 311 | 313 |
| 312 | 314 |
| 313 void MacroAssembler::Sbfx(Register dst, Register src1, int lsb, int width, | 315 void MacroAssembler::Sbfx(Register dst, Register src1, int lsb, int width, |
| 314 Condition cond) { | 316 Condition cond) { |
| 315 DCHECK(lsb < 32); | 317 DCHECK(lsb < 32); |
| 316 if (!CpuFeatures::IsSupported(ARMv7) || predictable_code_size()) { | 318 if (!CpuFeatures::IsSupported(ARMv7) || predictable_code_size()) { |
| 317 int mask = (1 << (width + lsb)) - 1 - ((1 << lsb) - 1); | 319 int mask = (1 << (width + lsb)) - 1 - ((1 << lsb) - 1); |
| 318 and_(dst, src1, Operand(mask), LeaveCC, cond); | 320 and_(dst, src1, Operand(mask), LeaveCC, cond); |
| 319 int shift_up = 32 - lsb - width; | 321 int shift_up = 32 - lsb - width; |
| 320 int shift_down = lsb + shift_up; | 322 int shift_down = lsb + shift_up; |
| 321 if (shift_up != 0) { | 323 if (shift_up != 0) { |
| 322 mov(dst, Operand(dst, LSL, shift_up), LeaveCC, cond); | 324 mov(dst, Operand(dst, LSL, shift_up), LeaveCC, cond); |
| 323 } | 325 } |
| 324 if (shift_down != 0) { | 326 if (shift_down != 0) { |
| 325 mov(dst, Operand(dst, ASR, shift_down), LeaveCC, cond); | 327 mov(dst, Operand(dst, ASR, shift_down), LeaveCC, cond); |
| 326 } | 328 } |
| 327 } else { | 329 } else { |
| | 330 CpuFeatureScope scope(this, ARMv7); |
| 328 sbfx(dst, src1, lsb, width, cond); | 331 sbfx(dst, src1, lsb, width, cond); |
| 329 } | 332 } |
| 330 } | 333 } |
| 331 | 334 |
| 332 | 335 |
| 333 void MacroAssembler::Bfi(Register dst, | 336 void MacroAssembler::Bfi(Register dst, |
| 334 Register src, | 337 Register src, |
| 335 Register scratch, | 338 Register scratch, |
| 336 int lsb, | 339 int lsb, |
| 337 int width, | 340 int width, |
| 338 Condition cond) { | 341 Condition cond) { |
| 339 DCHECK(0 <= lsb && lsb < 32); | 342 DCHECK(0 <= lsb && lsb < 32); |
| 340 DCHECK(0 <= width && width < 32); | 343 DCHECK(0 <= width && width < 32); |
| 341 DCHECK(lsb + width < 32); | 344 DCHECK(lsb + width < 32); |
| 342 DCHECK(!scratch.is(dst)); | 345 DCHECK(!scratch.is(dst)); |
| 343 if (width == 0) return; | 346 if (width == 0) return; |
| 344 if (!CpuFeatures::IsSupported(ARMv7) || predictable_code_size()) { | 347 if (!CpuFeatures::IsSupported(ARMv7) || predictable_code_size()) { |
| 345 int mask = (1 << (width + lsb)) - 1 - ((1 << lsb) - 1); | 348 int mask = (1 << (width + lsb)) - 1 - ((1 << lsb) - 1); |
| 346 bic(dst, dst, Operand(mask)); | 349 bic(dst, dst, Operand(mask)); |
| 347 and_(scratch, src, Operand((1 << width) - 1)); | 350 and_(scratch, src, Operand((1 << width) - 1)); |
| 348 mov(scratch, Operand(scratch, LSL, lsb)); | 351 mov(scratch, Operand(scratch, LSL, lsb)); |
| 349 orr(dst, dst, scratch); | 352 orr(dst, dst, scratch); |
| 350 } else { | 353 } else { |
| | 354 CpuFeatureScope scope(this, ARMv7); |
| 351 bfi(dst, src, lsb, width, cond); | 355 bfi(dst, src, lsb, width, cond); |
| 352 } | 356 } |
| 353 } | 357 } |
| 354 | 358 |
| 355 | 359 |
| 356 void MacroAssembler::Bfc(Register dst, Register src, int lsb, int width, | 360 void MacroAssembler::Bfc(Register dst, Register src, int lsb, int width, |
| 357 Condition cond) { | 361 Condition cond) { |
| 358 DCHECK(lsb < 32); | 362 DCHECK(lsb < 32); |
| 359 if (!CpuFeatures::IsSupported(ARMv7) || predictable_code_size()) { | 363 if (!CpuFeatures::IsSupported(ARMv7) || predictable_code_size()) { |
| 360 int mask = (1 << (width + lsb)) - 1 - ((1 << lsb) - 1); | 364 int mask = (1 << (width + lsb)) - 1 - ((1 << lsb) - 1); |
| 361 bic(dst, src, Operand(mask)); | 365 bic(dst, src, Operand(mask)); |
| 362 } else { | 366 } else { |
| | 367 CpuFeatureScope scope(this, ARMv7); |
| 363 Move(dst, src, cond); | 368 Move(dst, src, cond); |
| 364 bfc(dst, lsb, width, cond); | 369 bfc(dst, lsb, width, cond); |
| 365 } | 370 } |
| 366 } | 371 } |
| 367 | 372 |
| 368 | 373 |
| 369 void MacroAssembler::Load(Register dst, | 374 void MacroAssembler::Load(Register dst, |
| 370 const MemOperand& src, | 375 const MemOperand& src, |
| 371 Representation r) { | 376 Representation r) { |
| 372 DCHECK(!r.IsDouble()); | 377 DCHECK(!r.IsDouble()); |
| (...skipping 29 matching lines...) |
| 402 } | 407 } |
| 403 } | 408 } |
| 404 | 409 |
| 405 | 410 |
| 406 void MacroAssembler::LoadRoot(Register destination, | 411 void MacroAssembler::LoadRoot(Register destination, |
| 407 Heap::RootListIndex index, | 412 Heap::RootListIndex index, |
| 408 Condition cond) { | 413 Condition cond) { |
| 409 if (CpuFeatures::IsSupported(MOVW_MOVT_IMMEDIATE_LOADS) && | 414 if (CpuFeatures::IsSupported(MOVW_MOVT_IMMEDIATE_LOADS) && |
| 410 isolate()->heap()->RootCanBeTreatedAsConstant(index) && | 415 isolate()->heap()->RootCanBeTreatedAsConstant(index) && |
| 411 !predictable_code_size()) { | 416 !predictable_code_size()) { |
| | 417 CpuFeatureScope scope(this, MOVW_MOVT_IMMEDIATE_LOADS); |
| 412 // The CPU supports fast immediate values, and this root will never | 418 // The CPU supports fast immediate values, and this root will never |
| 413 // change. We will load it as a relocatable immediate value. | 419 // change. We will load it as a relocatable immediate value. |
| 414 Handle<Object> root = isolate()->heap()->root_handle(index); | 420 Handle<Object> root = isolate()->heap()->root_handle(index); |
| 415 mov(destination, Operand(root), LeaveCC, cond); | 421 mov(destination, Operand(root), LeaveCC, cond); |
| 416 return; | 422 return; |
| 417 } | 423 } |
| 418 ldr(destination, MemOperand(kRootRegister, index << kPointerSizeLog2), cond); | 424 ldr(destination, MemOperand(kRootRegister, index << kPointerSizeLog2), cond); |
| 419 } | 425 } |
| 420 | 426 |
| 421 | 427 |
| (...skipping 2222 matching lines...) |
| 2644 // that the constants for the maximum number of digits for an array index | 2650 // that the constants for the maximum number of digits for an array index |
| 2645 // cached in the hash field and the number of bits reserved for it does not | 2651 // cached in the hash field and the number of bits reserved for it does not |
| 2646 // conflict. | 2652 // conflict. |
| 2647 DCHECK(TenToThe(String::kMaxCachedArrayIndexLength) < | 2653 DCHECK(TenToThe(String::kMaxCachedArrayIndexLength) < |
| 2648 (1 << String::kArrayIndexValueBits)); | 2654 (1 << String::kArrayIndexValueBits)); |
| 2649 DecodeFieldToSmi<String::ArrayIndexValueBits>(index, hash); | 2655 DecodeFieldToSmi<String::ArrayIndexValueBits>(index, hash); |
| 2650 } | 2656 } |
| 2651 | 2657 |
| 2652 | 2658 |
| 2653 void MacroAssembler::SmiToDouble(LowDwVfpRegister value, Register smi) { | 2659 void MacroAssembler::SmiToDouble(LowDwVfpRegister value, Register smi) { |
| 2654 if (CpuFeatures::IsSupported(VFP3)) { | 2660 if (CpuFeatures::IsSupported(VFPv3)) { |
| | 2661 CpuFeatureScope scope(this, VFPv3); |
| 2655 vmov(value.low(), smi); | 2662 vmov(value.low(), smi); |
| 2656 vcvt_f64_s32(value, 1); | 2663 vcvt_f64_s32(value, 1); |
| 2657 } else { | 2664 } else { |
| 2658 SmiUntag(ip, smi); | 2665 SmiUntag(ip, smi); |
| 2659 vmov(value.low(), ip); | 2666 vmov(value.low(), ip); |
| 2660 vcvt_f64_s32(value, value.low()); | 2667 vcvt_f64_s32(value, value.low()); |
| 2661 } | 2668 } |
| 2662 } | 2669 } |
| 2663 | 2670 |
| 2664 | 2671 |
| (...skipping 136 matching lines...) |
| 2801 TruncateHeapNumberToI(result, object); | 2808 TruncateHeapNumberToI(result, object); |
| 2802 | 2809 |
| 2803 bind(&done); | 2810 bind(&done); |
| 2804 } | 2811 } |
| 2805 | 2812 |
| 2806 | 2813 |
| 2807 void MacroAssembler::GetLeastBitsFromSmi(Register dst, | 2814 void MacroAssembler::GetLeastBitsFromSmi(Register dst, |
| 2808 Register src, | 2815 Register src, |
| 2809 int num_least_bits) { | 2816 int num_least_bits) { |
| 2810 if (CpuFeatures::IsSupported(ARMv7) && !predictable_code_size()) { | 2817 if (CpuFeatures::IsSupported(ARMv7) && !predictable_code_size()) { |
| | 2818 CpuFeatureScope scope(this, ARMv7); |
| 2811 ubfx(dst, src, kSmiTagSize, num_least_bits); | 2819 ubfx(dst, src, kSmiTagSize, num_least_bits); |
| 2812 } else { | 2820 } else { |
| 2813 SmiUntag(dst, src); | 2821 SmiUntag(dst, src); |
| 2814 and_(dst, dst, Operand((1 << num_least_bits) - 1)); | 2822 and_(dst, dst, Operand((1 << num_least_bits) - 1)); |
| 2815 } | 2823 } |
| 2816 } | 2824 } |
| 2817 | 2825 |
| 2818 | 2826 |
| 2819 void MacroAssembler::GetLeastBitsFromInt32(Register dst, | 2827 void MacroAssembler::GetLeastBitsFromInt32(Register dst, |
| 2820 Register src, | 2828 Register src, |
| (...skipping 589 matching lines...) |
| 3410 | 3418 |
| 3411 | 3419 |
| 3412 void MacroAssembler::CheckFor32DRegs(Register scratch) { | 3420 void MacroAssembler::CheckFor32DRegs(Register scratch) { |
| 3413 mov(scratch, Operand(ExternalReference::cpu_features())); | 3421 mov(scratch, Operand(ExternalReference::cpu_features())); |
| 3414 ldr(scratch, MemOperand(scratch)); | 3422 ldr(scratch, MemOperand(scratch)); |
| 3415 tst(scratch, Operand(1u << VFP32DREGS)); | 3423 tst(scratch, Operand(1u << VFP32DREGS)); |
| 3416 } | 3424 } |
| 3417 | 3425 |
| 3418 | 3426 |
| 3419 void MacroAssembler::SaveFPRegs(Register location, Register scratch) { | 3427 void MacroAssembler::SaveFPRegs(Register location, Register scratch) { |
| | 3428 CpuFeatureScope scope(this, VFP32DREGS, CpuFeatureScope::kDontCheckSupported); |
| 3420 CheckFor32DRegs(scratch); | 3429 CheckFor32DRegs(scratch); |
| 3421 vstm(db_w, location, d16, d31, ne); | 3430 vstm(db_w, location, d16, d31, ne); |
| 3422 sub(location, location, Operand(16 * kDoubleSize), LeaveCC, eq); | 3431 sub(location, location, Operand(16 * kDoubleSize), LeaveCC, eq); |
| 3423 vstm(db_w, location, d0, d15); | 3432 vstm(db_w, location, d0, d15); |
| 3424 } | 3433 } |
| 3425 | 3434 |
| 3426 | 3435 |
| 3427 void MacroAssembler::RestoreFPRegs(Register location, Register scratch) { | 3436 void MacroAssembler::RestoreFPRegs(Register location, Register scratch) { |
| | 3437 CpuFeatureScope scope(this, VFP32DREGS, CpuFeatureScope::kDontCheckSupported); |
| 3428 CheckFor32DRegs(scratch); | 3438 CheckFor32DRegs(scratch); |
| 3429 vldm(ia_w, location, d0, d15); | 3439 vldm(ia_w, location, d0, d15); |
| 3430 vldm(ia_w, location, d16, d31, ne); | 3440 vldm(ia_w, location, d16, d31, ne); |
| 3431 add(location, location, Operand(16 * kDoubleSize), LeaveCC, eq); | 3441 add(location, location, Operand(16 * kDoubleSize), LeaveCC, eq); |
| 3432 } | 3442 } |
| 3433 | 3443 |
| 3434 template <typename T> | 3444 template <typename T> |
| 3435 void MacroAssembler::FloatMaxHelper(T result, T left, T right, | 3445 void MacroAssembler::FloatMaxHelper(T result, T left, T right, |
| 3436 Label* out_of_line) { | 3446 Label* out_of_line) { |
| 3437 // This trivial case is caught sooner, so that the out-of-line code can be | 3447 // This trivial case is caught sooner, so that the out-of-line code can be |
| (...skipping 729 matching lines...) |
| 4167 } | 4177 } |
| 4168 } | 4178 } |
| 4169 if (mag.shift > 0) mov(result, Operand(result, ASR, mag.shift)); | 4179 if (mag.shift > 0) mov(result, Operand(result, ASR, mag.shift)); |
| 4170 add(result, result, Operand(dividend, LSR, 31)); | 4180 add(result, result, Operand(dividend, LSR, 31)); |
| 4171 } | 4181 } |
| 4172 | 4182 |
| 4173 } // namespace internal | 4183 } // namespace internal |
| 4174 } // namespace v8 | 4184 } // namespace v8 |
| 4175 | 4185 |
| 4176 #endif // V8_TARGET_ARCH_ARM | 4186 #endif // V8_TARGET_ARCH_ARM |
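Pattern applied throughout this CL: each hunk adds "CpuFeatureScope scope(this, <feature>);" as the first statement of the branch that emits feature-gated instructions (ARMv7, VFPv3, MOVW_MOVT_IMMEDIATE_LOADS, VFP32DREGS), keeping the CpuFeatures::IsSupported(...) check lexically tied to the gated emission. The SaveFPRegs/RestoreFPRegs hunks pass CpuFeatureScope::kDontCheckSupported because they branch on d16-d31 availability at run time via CheckFor32DRegs rather than at code-generation time. Below is a minimal standalone sketch of the RAII idea, assuming nothing beyond what the diff shows; Assembler, IsSupported, and EmitBitfieldExtract are hypothetical stand-ins, not the real V8 types:

    // Sketch of an RAII feature scope; all names are illustrative stand-ins.
    #include <cassert>
    #include <cstdint>
    #include <cstdio>

    enum CpuFeature : uint32_t { ARMv7 = 1u << 0 };

    struct Assembler {
      uint32_t enabled = 0;  // features the emitter may currently use
      void ubfx() { assert(enabled & ARMv7); std::puts("ubfx"); }  // gated
      void and_() { std::puts("and"); }  // baseline instruction, always legal
    };

    // While alive, instructions gated on the given feature may be emitted.
    class CpuFeatureScope {
     public:
      CpuFeatureScope(Assembler* masm, CpuFeature f)
          : masm_(masm), saved_(masm->enabled) {
        masm_->enabled |= f;
      }
      ~CpuFeatureScope() { masm_->enabled = saved_; }  // restore on exit
     private:
      Assembler* masm_;
      uint32_t saved_;
    };

    bool IsSupported(CpuFeature) { return true; }  // pretend the host has ARMv7

    void EmitBitfieldExtract(Assembler* masm) {
      if (IsSupported(ARMv7)) {
        CpuFeatureScope scope(masm, ARMv7);  // ubfx is legal only in here
        masm->ubfx();
      } else {
        masm->and_();  // pre-ARMv7 fallback, as in MacroAssembler::Ubfx above
      }
    }

    int main() {
      Assembler masm;
      EmitBitfieldExtract(&masm);
    }

In the real assembler the scope additionally checks in debug builds that the feature is actually supported unless kDontCheckSupported is passed, which is why the run-time-checked SaveFPRegs/RestoreFPRegs opt out of that check.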