Chromium Code Reviews

| OLD | NEW |
|---|---|
| 1 // Copyright 2013 the V8 project authors. All rights reserved. | 1 // Copyright 2013 the V8 project authors. All rights reserved. |
| 2 // | 2 // |
| 3 // Redistribution and use in source and binary forms, with or without | 3 // Redistribution and use in source and binary forms, with or without |
| 4 // modification, are permitted provided that the following conditions are | 4 // modification, are permitted provided that the following conditions are |
| 5 // met: | 5 // met: |
| 6 // | 6 // |
| 7 // * Redistributions of source code must retain the above copyright | 7 // * Redistributions of source code must retain the above copyright |
| 8 // notice, this list of conditions and the following disclaimer. | 8 // notice, this list of conditions and the following disclaimer. |
| 9 // * Redistributions in binary form must reproduce the above | 9 // * Redistributions in binary form must reproduce the above |
| 10 // copyright notice, this list of conditions and the following | 10 // copyright notice, this list of conditions and the following |
| (...skipping 250 matching lines...) | |
| 261 match &= !reg3.IsValid() || reg3.IsSameSizeAndType(reg1); | 261 match &= !reg3.IsValid() || reg3.IsSameSizeAndType(reg1); |
| 262 match &= !reg4.IsValid() || reg4.IsSameSizeAndType(reg1); | 262 match &= !reg4.IsValid() || reg4.IsSameSizeAndType(reg1); |
| 263 match &= !reg5.IsValid() || reg5.IsSameSizeAndType(reg1); | 263 match &= !reg5.IsValid() || reg5.IsSameSizeAndType(reg1); |
| 264 match &= !reg6.IsValid() || reg6.IsSameSizeAndType(reg1); | 264 match &= !reg6.IsValid() || reg6.IsSameSizeAndType(reg1); |
| 265 match &= !reg7.IsValid() || reg7.IsSameSizeAndType(reg1); | 265 match &= !reg7.IsValid() || reg7.IsSameSizeAndType(reg1); |
| 266 match &= !reg8.IsValid() || reg8.IsSameSizeAndType(reg1); | 266 match &= !reg8.IsValid() || reg8.IsSameSizeAndType(reg1); |
| 267 return match; | 267 return match; |
| 268 } | 268 } |
| 269 | 269 |
| 270 | 270 |
| 271 void Operand::initialize_handle(Handle<Object> handle) { | 271 void Immediate::InitializeHandle(Handle<Object> handle) { |
| 272 AllowDeferredHandleDereference using_raw_address; | 272 AllowDeferredHandleDereference using_raw_address; |
| 273 | 273 |
| 274 // Verify all Objects referred to by code are NOT in new space. | 274 // Verify all Objects referred to by code are NOT in new space. |
| 275 Object* obj = *handle; | 275 Object* obj = *handle; |
| 276 if (obj->IsHeapObject()) { | 276 if (obj->IsHeapObject()) { |
| 277 ASSERT(!HeapObject::cast(obj)->GetHeap()->InNewSpace(obj)); | 277 ASSERT(!HeapObject::cast(obj)->GetHeap()->InNewSpace(obj)); |
| 278 immediate_ = reinterpret_cast<intptr_t>(handle.location()); | 278 value_ = reinterpret_cast<intptr_t>(handle.location()); |
| 279 rmode_ = RelocInfo::EMBEDDED_OBJECT; | 279 rmode_ = RelocInfo::EMBEDDED_OBJECT; |
| 280 } else { | 280 } else { |
| 281 STATIC_ASSERT(sizeof(intptr_t) == sizeof(int64_t)); | 281 STATIC_ASSERT(sizeof(intptr_t) == sizeof(int64_t)); |
| 282 immediate_ = reinterpret_cast<intptr_t>(obj); | 282 value_ = reinterpret_cast<intptr_t>(obj); |
| 283 rmode_ = RelocInfo::NONE64; | 283 rmode_ = RelocInfo::NONE64; |
| 284 } | 284 } |
| 285 } | 285 } |
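
For context, a standalone model (not V8 code; all names here are invented for illustration) of the distinction `Immediate::InitializeHandle` draws above: heap objects are recorded indirectly through their handle slot so the GC can still move the object and update the pointer, while smis are immutable bit patterns stored directly with no relocation entry.

```cpp
#include <cstdint>

enum class RelocMode { kEmbeddedObject, kNone64 };

struct ImmediateModel {
  intptr_t value;
  RelocMode rmode;
};

ImmediateModel MakeImmediate(void* handle_slot, bool is_heap_object,
                             intptr_t smi_bits) {
  if (is_heap_object) {
    // Indirect: record the address of the handle slot and mark it so the
    // relocator can rewrite it when the object moves.
    return {reinterpret_cast<intptr_t>(handle_slot),
            RelocMode::kEmbeddedObject};
  }
  // Direct: the smi bit pattern itself; nothing for the GC to update.
  static_assert(sizeof(intptr_t) == sizeof(int64_t), "64-bit target assumed");
  return {smi_bits, RelocMode::kNone64};
}
```
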
| 286 | 286 |
| 287 | 287 |
| 288 bool Operand::NeedsRelocation(const Assembler* assembler) const { | 288 bool Operand::NeedsRelocation(const Assembler* assembler) const { |
| 289 if (rmode_ == RelocInfo::EXTERNAL_REFERENCE) { | 289 RelocInfo::Mode rmode = immediate_.rmode(); |
| 290 | |
| 291 if (rmode == RelocInfo::EXTERNAL_REFERENCE) { | |
| 290 return assembler->serializer_enabled(); | 292 return assembler->serializer_enabled(); |
| 291 } | 293 } |
| 292 | 294 |
| 293 return !RelocInfo::IsNone(rmode_); | 295 return !RelocInfo::IsNone(rmode); |
| 294 } | 296 } |
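
The rewritten `NeedsRelocation` now reads the mode from the wrapped `Immediate` instead of a field on `Operand` itself. The decision it encodes is small enough to model standalone (hypothetical names, not the V8 API):

```cpp
enum class Mode { kNone, kExternalReference, kEmbeddedObject };

// External references need a reloc entry only while a snapshot is being
// serialized; any other non-trivial mode always needs one.
bool NeedsRelocation(Mode rmode, bool serializer_enabled) {
  if (rmode == Mode::kExternalReference) return serializer_enabled;
  return rmode != Mode::kNone;
}
```
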
| 295 | 297 |
| 296 | 298 |
| 297 // Assembler | 299 // Assembler |
| 298 | 300 |
| 299 Assembler::Assembler(Isolate* isolate, void* buffer, int buffer_size) | 301 Assembler::Assembler(Isolate* isolate, void* buffer, int buffer_size) |
| 300 : AssemblerBase(isolate, buffer, buffer_size), | 302 : AssemblerBase(isolate, buffer, buffer_size), |
| 301 recorded_ast_id_(TypeFeedbackId::None()), | 303 recorded_ast_id_(TypeFeedbackId::None()), |
| 302 unresolved_branches_(), | 304 unresolved_branches_(), |
| 303 positions_recorder_(this) { | 305 positions_recorder_(this) { |
| (...skipping 1162 matching lines...) | |
| 1466 LoadStore(rt, src, StoreOpFor(rt)); | 1468 LoadStore(rt, src, StoreOpFor(rt)); |
| 1467 } | 1469 } |
| 1468 | 1470 |
| 1469 | 1471 |
| 1470 void Assembler::ldrsw(const Register& rt, const MemOperand& src) { | 1472 void Assembler::ldrsw(const Register& rt, const MemOperand& src) { |
| 1471 ASSERT(rt.Is64Bits()); | 1473 ASSERT(rt.Is64Bits()); |
| 1472 LoadStore(rt, src, LDRSW_x); | 1474 LoadStore(rt, src, LDRSW_x); |
| 1473 } | 1475 } |
| 1474 | 1476 |
| 1475 | 1477 |
| 1476 void Assembler::ldr(const Register& rt, uint64_t imm) { | 1478 void Assembler::ldr_pcrel(const CPURegister& rt, int imm19) { |
| 1477 // TODO(all): Constant pool may be garbage collected. Hence we cannot store | 1479 // The pattern 'ldr xzr, #offset' is used to indicate the beginning of a |
| 1478 // arbitrary values in them. Manually move it for now. Fix | 1480 // constant pool. It should not be emitted. |
| 1479 // MacroAssembler::Fmov when this is implemented. | 1481 ASSERT(!rt.IsZero()); |
| 1480 UNIMPLEMENTED(); | 1482 Emit(LoadLiteralOpFor(rt) | ImmLLiteral(imm19) | Rt(rt)); |

> **rmcilroy** 2014/06/06 22:48:58: How about having the argument be a byte offset fro…
>
> **Rodolph Perfetta (ARM)** 2014/06/09 14:12:21: It is more natural I agree but up to now at the as…
>
> **rmcilroy** 2014/06/09 14:22:10: I prefer that it stays consistent with what's alre…

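The thread turns on the unit of `imm19`: a byte offset reads more naturally, but the assembler layer already works in instruction counts. In the A64 `LDR (literal)` encoding the 19-bit field is scaled by the 4-byte instruction size, so the two views convert trivially (a standalone sketch with made-up names, not V8 code):

```cpp
#include <cassert>
#include <cstdint>

constexpr int kInstrSize = 4;  // every A64 instruction is 4 bytes

// imm19 counts instructions; the hardware scales by 4, giving a
// pc-relative reach of roughly +/-1 MB.
constexpr int64_t Imm19ToByteOffset(int imm19) {
  return static_cast<int64_t>(imm19) * kInstrSize;
}

inline int ByteOffsetToImm19(int64_t byte_offset) {
  assert(byte_offset % kInstrSize == 0);  // must be 4-byte aligned
  int64_t imm19 = byte_offset / kInstrSize;
  assert(imm19 >= -(1 << 18) && imm19 < (1 << 18));  // signed 19-bit field
  return static_cast<int>(imm19);
}
```
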
| 1481 } | 1483 } |
| 1482 | 1484 |
| 1483 | 1485 |
| 1484 void Assembler::ldr(const FPRegister& ft, double imm) { | 1486 void Assembler::ldr(const CPURegister& rt, const Immediate& imm) { |
| 1485 // TODO(all): Constant pool may be garbage collected. Hence we cannot store | 1487 // Currently we only support 64-bit literals. |
| 1486 // arbitrary values in them. Manually move it for now. Fix | 1488 ASSERT(rt.Is64Bits()); |
| 1487 // MacroAssembler::Fmov when this is implemented. | 1489 |
| 1488 UNIMPLEMENTED(); | 1490 RecordRelocInfo(imm.rmode(), imm.value()); |
| 1491 BlockConstPoolFor(1); | |
| 1492 // The load will be patched when the constant pool is emitted; the | |
| 1493 // patching code expects a load literal with offset 0. | |
| 1494 ldr_pcrel(rt, 0); | |
| 1489 } | 1495 } |
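
The new `ldr(const CPURegister&, const Immediate&)` emits the load with a zero literal offset and defers the real offset until the constant pool is placed. The pattern, reduced to its bones (a toy model, not V8's actual constant-pool machinery):

```cpp
#include <cstdint>
#include <vector>

struct PendingLiteral { size_t load_index; uint64_t value; };

struct MiniAssembler {
  std::vector<uint32_t> code;           // emitted 32-bit instruction words
  std::vector<PendingLiteral> pending;  // loads awaiting the pool

  void EmitLiteralLoad(uint64_t value) {
    pending.push_back({code.size(), value});
    code.push_back(0);  // stands in for "ldr rt, pc+0"; patched below
  }

  void EmitConstPool() {
    for (const PendingLiteral& p : pending) {
      size_t slot = code.size();  // where this literal's data will live
      // Patch the load with its pc-relative distance, in instruction words.
      code[p.load_index] = static_cast<uint32_t>(slot - p.load_index);
      code.push_back(static_cast<uint32_t>(p.value));        // low 32 bits
      code.push_back(static_cast<uint32_t>(p.value >> 32));  // high 32 bits
    }
    pending.clear();
  }
};
```
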
| 1490 | 1496 |
| 1491 | 1497 |
| 1492 void Assembler::ldr(const FPRegister& ft, float imm) { | |
| 1493 // TODO(all): Constant pool may be garbage collected. Hence we cannot store | |
| 1494 // arbitrary values in them. Manually move it for now. Fix | |
| 1495 // MacroAssembler::Fmov when this is implemented. | |
| 1496 UNIMPLEMENTED(); | |
| 1497 } | |
| 1498 | |
| 1499 | |
| 1500 void Assembler::mov(const Register& rd, const Register& rm) { | 1498 void Assembler::mov(const Register& rd, const Register& rm) { |
| 1501 // Moves involving the stack pointer are encoded as add immediate with | 1499 // Moves involving the stack pointer are encoded as add immediate with |
| 1502 // second operand of zero. Otherwise, orr with first operand zr is | 1500 // second operand of zero. Otherwise, orr with first operand zr is |
| 1503 // used. | 1501 // used. |
| 1504 if (rd.IsSP() || rm.IsSP()) { | 1502 if (rd.IsSP() || rm.IsSP()) { |
| 1505 add(rd, rm, 0); | 1503 add(rd, rm, 0); |
| 1506 } else { | 1504 } else { |
| 1507 orr(rd, AppropriateZeroRegFor(rd), rm); | 1505 orr(rd, AppropriateZeroRegFor(rd), rm); |
| 1508 } | 1506 } |
| 1509 } | 1507 } |
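
The branch in `mov` exists because register number 31 is context-dependent in A64: `ADD (immediate)` decodes it as SP, while `ORR (shifted register)` decodes it as the zero register, so moves touching the stack pointer must go through `add rd, rm, #0`. A one-liner capturing the choice (illustrative names only):

```cpp
enum class MovForm { kAddImmZero, kOrrFromZr };

MovForm SelectMovForm(bool rd_is_sp, bool rm_is_sp) {
  return (rd_is_sp || rm_is_sp) ? MovForm::kAddImmZero : MovForm::kOrrFromZr;
}
```
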
| (...skipping 402 matching lines...) | |
| 1912 | 1910 |
| 1913 | 1911 |
| 1914 void Assembler::AddSub(const Register& rd, | 1912 void Assembler::AddSub(const Register& rd, |
| 1915 const Register& rn, | 1913 const Register& rn, |
| 1916 const Operand& operand, | 1914 const Operand& operand, |
| 1917 FlagsUpdate S, | 1915 FlagsUpdate S, |
| 1918 AddSubOp op) { | 1916 AddSubOp op) { |
| 1919 ASSERT(rd.SizeInBits() == rn.SizeInBits()); | 1917 ASSERT(rd.SizeInBits() == rn.SizeInBits()); |
| 1920 ASSERT(!operand.NeedsRelocation(this)); | 1918 ASSERT(!operand.NeedsRelocation(this)); |
| 1921 if (operand.IsImmediate()) { | 1919 if (operand.IsImmediate()) { |
| 1922 int64_t immediate = operand.immediate(); | 1920 int64_t immediate = operand.ImmediateValue(); |
| 1923 ASSERT(IsImmAddSub(immediate)); | 1921 ASSERT(IsImmAddSub(immediate)); |
| 1924 Instr dest_reg = (S == SetFlags) ? Rd(rd) : RdSP(rd); | 1922 Instr dest_reg = (S == SetFlags) ? Rd(rd) : RdSP(rd); |
| 1925 Emit(SF(rd) | AddSubImmediateFixed | op | Flags(S) | | 1923 Emit(SF(rd) | AddSubImmediateFixed | op | Flags(S) | |
| 1926 ImmAddSub(immediate) | dest_reg | RnSP(rn)); | 1924 ImmAddSub(immediate) | dest_reg | RnSP(rn)); |
| 1927 } else if (operand.IsShiftedRegister()) { | 1925 } else if (operand.IsShiftedRegister()) { |
| 1928 ASSERT(operand.reg().SizeInBits() == rd.SizeInBits()); | 1926 ASSERT(operand.reg().SizeInBits() == rd.SizeInBits()); |
| 1929 ASSERT(operand.shift() != ROR); | 1927 ASSERT(operand.shift() != ROR); |
| 1930 | 1928 |
| 1931 // For instructions of the form: | 1929 // For instructions of the form: |
| 1932 // add/sub wsp, <Wn>, <Wm> [, LSL #0-3 ] | 1930 // add/sub wsp, <Wn>, <Wm> [, LSL #0-3 ] |
| (...skipping 75 matching lines...) | |
| 2008 } | 2006 } |
| 2009 | 2007 |
| 2010 | 2008 |
| 2011 void Assembler::Logical(const Register& rd, | 2009 void Assembler::Logical(const Register& rd, |
| 2012 const Register& rn, | 2010 const Register& rn, |
| 2013 const Operand& operand, | 2011 const Operand& operand, |
| 2014 LogicalOp op) { | 2012 LogicalOp op) { |
| 2015 ASSERT(rd.SizeInBits() == rn.SizeInBits()); | 2013 ASSERT(rd.SizeInBits() == rn.SizeInBits()); |
| 2016 ASSERT(!operand.NeedsRelocation(this)); | 2014 ASSERT(!operand.NeedsRelocation(this)); |
| 2017 if (operand.IsImmediate()) { | 2015 if (operand.IsImmediate()) { |
| 2018 int64_t immediate = operand.immediate(); | 2016 int64_t immediate = operand.ImmediateValue(); |
| 2019 unsigned reg_size = rd.SizeInBits(); | 2017 unsigned reg_size = rd.SizeInBits(); |
| 2020 | 2018 |
| 2021 ASSERT(immediate != 0); | 2019 ASSERT(immediate != 0); |
| 2022 ASSERT(immediate != -1); | 2020 ASSERT(immediate != -1); |
| 2023 ASSERT(rd.Is64Bits() || is_uint32(immediate)); | 2021 ASSERT(rd.Is64Bits() || is_uint32(immediate)); |
| 2024 | 2022 |
| 2025 // If the operation is NOT, invert the operation and immediate. | 2023 // If the operation is NOT, invert the operation and immediate. |
| 2026 if ((op & NOT) == NOT) { | 2024 if ((op & NOT) == NOT) { |
| 2027 op = static_cast<LogicalOp>(op & ~NOT); | 2025 op = static_cast<LogicalOp>(op & ~NOT); |
| 2028 immediate = rd.Is64Bits() ? ~immediate : (~immediate & kWRegMask); | 2026 immediate = rd.Is64Bits() ? ~immediate : (~immediate & kWRegMask); |
| (...skipping 31 matching lines...) | |
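
The inversion step in `Logical` above rests on a simple identity: `bic rd, rn, #imm` is `and rd, rn, #~imm`, `orn` is `orr` with the mask inverted, and so on, with 32-bit operations keeping only the low word of the inverted mask. A quick standalone check of the idea (hypothetical helper name):

```cpp
#include <cstdint>

uint64_t InvertLogicalImmediate(uint64_t imm, bool is_64_bit) {
  // Mirrors the kWRegMask step above: W operations drop the high word.
  return is_64_bit ? ~imm : (~imm & 0xFFFFFFFFull);
}
// Example: bic w0, w1, #0xFF assembles as and w0, w1, #0xFFFFFF00.
```
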
| 2060 | 2058 |
| 2061 | 2059 |
| 2062 void Assembler::ConditionalCompare(const Register& rn, | 2060 void Assembler::ConditionalCompare(const Register& rn, |
| 2063 const Operand& operand, | 2061 const Operand& operand, |
| 2064 StatusFlags nzcv, | 2062 StatusFlags nzcv, |
| 2065 Condition cond, | 2063 Condition cond, |
| 2066 ConditionalCompareOp op) { | 2064 ConditionalCompareOp op) { |
| 2067 Instr ccmpop; | 2065 Instr ccmpop; |
| 2068 ASSERT(!operand.NeedsRelocation(this)); | 2066 ASSERT(!operand.NeedsRelocation(this)); |
| 2069 if (operand.IsImmediate()) { | 2067 if (operand.IsImmediate()) { |
| 2070 int64_t immediate = operand.immediate(); | 2068 int64_t immediate = operand.ImmediateValue(); |
| 2071 ASSERT(IsImmConditionalCompare(immediate)); | 2069 ASSERT(IsImmConditionalCompare(immediate)); |
| 2072 ccmpop = ConditionalCompareImmediateFixed | op | ImmCondCmp(immediate); | 2070 ccmpop = ConditionalCompareImmediateFixed | op | ImmCondCmp(immediate); |
| 2073 } else { | 2071 } else { |
| 2074 ASSERT(operand.IsShiftedRegister() && (operand.shift_amount() == 0)); | 2072 ASSERT(operand.IsShiftedRegister() && (operand.shift_amount() == 0)); |
| 2075 ccmpop = ConditionalCompareRegisterFixed | op | Rm(operand.reg()); | 2073 ccmpop = ConditionalCompareRegisterFixed | op | Rm(operand.reg()); |
| 2076 } | 2074 } |
| 2077 Emit(SF(rn) | ccmpop | Cond(cond) | Rn(rn) | Nzcv(nzcv)); | 2075 Emit(SF(rn) | ccmpop | Cond(cond) | Rn(rn) | Nzcv(nzcv)); |
| 2078 } | 2076 } |
| 2079 | 2077 |
| 2080 | 2078 |
| (...skipping 181 matching lines...) | |
| 2262 return is_int9(offset); | 2260 return is_int9(offset); |
| 2263 } | 2261 } |
| 2264 | 2262 |
| 2265 | 2263 |
| 2266 bool Assembler::IsImmLSScaled(ptrdiff_t offset, LSDataSize size) { | 2264 bool Assembler::IsImmLSScaled(ptrdiff_t offset, LSDataSize size) { |
| 2267 bool offset_is_size_multiple = (((offset >> size) << size) == offset); | 2265 bool offset_is_size_multiple = (((offset >> size) << size) == offset); |
| 2268 return offset_is_size_multiple && is_uint12(offset >> size); | 2266 return offset_is_size_multiple && is_uint12(offset >> size); |
| 2269 } | 2267 } |
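
`IsImmLSScaled` accepts exactly the offsets a scaled unsigned load/store can encode: a multiple of the access size (2^size bytes) whose scaled value fits in an unsigned 12-bit field. The same check, standalone:

```cpp
#include <cstdint>

bool IsScaledOffsetEncodable(int64_t offset, unsigned size_log2) {
  if (offset < 0) return false;
  int64_t scaled = offset >> size_log2;
  bool size_multiple = (scaled << size_log2) == offset;
  return size_multiple && scaled < (1 << 12);
}
// For 8-byte accesses (size_log2 == 3): 4096 -> true; 4 -> false
// (misaligned); 32768 -> false (scaled value 4096 overflows 12 bits).
```
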
| 2270 | 2268 |
| 2271 | 2269 |
| 2272 void Assembler::LoadLiteral(const CPURegister& rt, int offset_from_pc) { | |
| 2273 ASSERT((offset_from_pc & ((1 << kLiteralEntrySizeLog2) - 1)) == 0); | |
| 2274 // The pattern 'ldr xzr, #offset' is used to indicate the beginning of a | |
| 2275 // constant pool. It should not be emitted. | |
| 2276 ASSERT(!rt.Is(xzr)); | |
| 2277 Emit(LDR_x_lit | | |
| 2278 ImmLLiteral(offset_from_pc >> kLiteralEntrySizeLog2) | | |
| 2279 Rt(rt)); | |
| 2280 } | |
| 2281 | |
| 2282 | |
| 2283 void Assembler::LoadRelocatedValue(const CPURegister& rt, | |
| 2284 const Operand& operand, | |
| 2285 LoadLiteralOp op) { | |
| 2286 int64_t imm = operand.immediate(); | |
| 2287 ASSERT(is_int32(imm) || is_uint32(imm) || (rt.Is64Bits())); | |
| 2288 RecordRelocInfo(operand.rmode(), imm); | |
| 2289 BlockConstPoolFor(1); | |
| 2290 Emit(op | ImmLLiteral(0) | Rt(rt)); | |
| 2291 } | |
| 2292 | |
| 2293 | |
| 2294 // Test if a given value can be encoded in the immediate field of a logical | 2270 // Test if a given value can be encoded in the immediate field of a logical |
| 2295 // instruction. | 2271 // instruction. |
| 2296 // If it can be encoded, the function returns true, and values pointed to by n, | 2272 // If it can be encoded, the function returns true, and values pointed to by n, |
| 2297 // imm_s and imm_r are updated with immediates encoded in the format required | 2273 // imm_s and imm_r are updated with immediates encoded in the format required |
| 2298 // by the corresponding fields in the logical instruction. | 2274 // by the corresponding fields in the logical instruction. |
| 2299 // If it can not be encoded, the function returns false, and the values pointed | 2275 // If it can not be encoded, the function returns false, and the values pointed |
| 2300 // to by n, imm_s and imm_r are undefined. | 2276 // to by n, imm_s and imm_r are undefined. |
| 2301 bool Assembler::IsImmLogical(uint64_t value, | 2277 bool Assembler::IsImmLogical(uint64_t value, |
| 2302 unsigned width, | 2278 unsigned width, |
| 2303 unsigned* n, | 2279 unsigned* n, |
| (...skipping 603 matching lines...) | |
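
The body of `IsImmLogical` is elided above. For intuition, the property it tests: a logical immediate is a 2-, 4-, 8-, 16-, 32- or 64-bit element, itself some rotation of a contiguous run of ones, repeated to fill the register; all-zeros and all-ones are not encodable. A deliberately naive standalone checker of that property (not V8's algorithm, which computes the n/imm_s/imm_r fields directly):

```cpp
#include <cstdint>

// True if v is a single contiguous run of ones, possibly shifted up.
static bool IsShiftedMask(uint64_t v) {
  uint64_t filled = v | (v - 1);  // fill zeros below the lowest set bit
  return v != 0 && ((filled + 1) & filled) == 0;
}

bool IsLogicalImmediate64(uint64_t value) {
  if (value == 0 || value == ~uint64_t{0}) return false;  // excluded
  for (unsigned width = 2; width <= 64; width *= 2) {
    uint64_t mask = (width == 64) ? ~uint64_t{0} : (uint64_t{1} << width) - 1;
    uint64_t elem = value & mask;
    // The element must repeat across all 64 bits...
    bool repeats = true;
    for (unsigned i = width; i < 64; i += width) {
      if (((value >> i) & mask) != elem) { repeats = false; break; }
    }
    if (!repeats) continue;
    // ...and be some rotation of a contiguous run of ones.
    for (unsigned r = 0; r < width; ++r) {
      uint64_t rot =
          (r == 0) ? elem : (((elem >> r) | (elem << (width - r))) & mask);
      if (IsShiftedMask(rot)) return true;
    }
  }
  return false;
}
// 0x5555555555555555, 0x00FF00FF00FF00FF and 0x0000FFFF0000FFFF pass;
// 0x1234 does not.
```
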
| 2907 adr(rd, 0); | 2883 adr(rd, 0); |
| 2908 MovInt64(scratch, target_offset); | 2884 MovInt64(scratch, target_offset); |
| 2909 add(rd, rd, scratch); | 2885 add(rd, rd, scratch); |
| 2910 } | 2886 } |
| 2911 } | 2887 } |
| 2912 | 2888 |
| 2913 | 2889 |
| 2914 } } // namespace v8::internal | 2890 } } // namespace v8::internal |
| 2915 | 2891 |
| 2916 #endif // V8_TARGET_ARCH_ARM64 | 2892 #endif // V8_TARGET_ARCH_ARM64 |