OLD | NEW |
---|---|
1 // Copyright 2013 the V8 project authors. All rights reserved. | 1 // Copyright 2013 the V8 project authors. All rights reserved. |
2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
4 // met: | 4 // met: |
5 // | 5 // |
6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
(...skipping 194 matching lines...) | |
205 // V8-specific load/store helpers. | 205 // V8-specific load/store helpers. |
206 void Load(const Register& rt, const MemOperand& addr, Representation r); | 206 void Load(const Register& rt, const MemOperand& addr, Representation r); |
207 void Store(const Register& rt, const MemOperand& addr, Representation r); | 207 void Store(const Register& rt, const MemOperand& addr, Representation r); |
208 | 208 |
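As context for these helpers: the Representation argument selects the access width and signedness of the load or store. A minimal sketch of the kind of dispatch this implies, assuming Representation's usual predicates and the macro load instructions declared elsewhere in this class (the real Load/Store also handle 32-bit values and assertion details):

```cpp
// Illustrative sketch only; the actual MacroAssembler::Load may differ.
void MacroAssembler::Load(const Register& rt,
                          const MemOperand& addr,
                          Representation r) {
  if (r.IsInteger8()) {
    Ldrsb(rt, addr);   // sign-extending byte load
  } else if (r.IsUInteger8()) {
    Ldrb(rt, addr);    // zero-extending byte load
  } else if (r.IsInteger16()) {
    Ldrsh(rt, addr);   // sign-extending halfword load
  } else if (r.IsUInteger16()) {
    Ldrh(rt, addr);    // zero-extending halfword load
  } else {
    Ldr(rt, addr);     // full-width load for tagged/word values
  }
}
```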
209 // Remaining instructions are simple pass-through calls to the assembler. | 209 // Remaining instructions are simple pass-through calls to the assembler. |
210 inline void Adr(const Register& rd, Label* label); | 210 inline void Adr(const Register& rd, Label* label); |
211 inline void Asr(const Register& rd, const Register& rn, unsigned shift); | 211 inline void Asr(const Register& rd, const Register& rn, unsigned shift); |
212 inline void Asr(const Register& rd, const Register& rn, const Register& rm); | 212 inline void Asr(const Register& rd, const Register& rn, const Register& rm); |
213 inline void B(Label* label); | 213 inline void B(Label* label); |
214 inline void B(Condition cond, Label* label); | 214 inline void B(Condition cond, Label* label); |
215 inline void B(Label* label, Condition cond); | 215 void B(Label* label, Condition cond); |
216 inline void Bfi(const Register& rd, | 216 inline void Bfi(const Register& rd, |
217 const Register& rn, | 217 const Register& rn, |
218 unsigned lsb, | 218 unsigned lsb, |
219 unsigned width); | 219 unsigned width); |
220 inline void Bfxil(const Register& rd, | 220 inline void Bfxil(const Register& rd, |
221 const Register& rn, | 221 const Register& rn, |
222 unsigned lsb, | 222 unsigned lsb, |
223 unsigned width); | 223 unsigned width); |
224 inline void Bind(Label* label); | 224 inline void Bind(Label* label); |
225 inline void Bl(Label* label); | 225 inline void Bl(Label* label); |
226 inline void Blr(const Register& xn); | 226 inline void Blr(const Register& xn); |
227 inline void Br(const Register& xn); | 227 inline void Br(const Register& xn); |
228 inline void Brk(int code); | 228 inline void Brk(int code); |
229 inline void Cbnz(const Register& rt, Label* label); | 229 void Cbnz(const Register& rt, Label* label); |
230 inline void Cbz(const Register& rt, Label* label); | 230 void Cbz(const Register& rt, Label* label); |
231 inline void Cinc(const Register& rd, const Register& rn, Condition cond); | 231 inline void Cinc(const Register& rd, const Register& rn, Condition cond); |
232 inline void Cinv(const Register& rd, const Register& rn, Condition cond); | 232 inline void Cinv(const Register& rd, const Register& rn, Condition cond); |
233 inline void Cls(const Register& rd, const Register& rn); | 233 inline void Cls(const Register& rd, const Register& rn); |
234 inline void Clz(const Register& rd, const Register& rn); | 234 inline void Clz(const Register& rd, const Register& rn); |
235 inline void Cneg(const Register& rd, const Register& rn, Condition cond); | 235 inline void Cneg(const Register& rd, const Register& rn, Condition cond); |
236 inline void CzeroX(const Register& rd, Condition cond); | 236 inline void CzeroX(const Register& rd, Condition cond); |
237 inline void CmovX(const Register& rd, const Register& rn, Condition cond); | 237 inline void CmovX(const Register& rd, const Register& rn, Condition cond); |
238 inline void Cset(const Register& rd, Condition cond); | 238 inline void Cset(const Register& rd, Condition cond); |
239 inline void Csetm(const Register& rd, Condition cond); | 239 inline void Csetm(const Register& rd, Condition cond); |
240 inline void Csinc(const Register& rd, | 240 inline void Csinc(const Register& rd, |
(...skipping 152 matching lines...) | |
393 const Register& rm); | 393 const Register& rm); |
394 inline void Stnp(const CPURegister& rt, | 394 inline void Stnp(const CPURegister& rt, |
395 const CPURegister& rt2, | 395 const CPURegister& rt2, |
396 const MemOperand& dst); | 396 const MemOperand& dst); |
397 inline void Stp(const CPURegister& rt, | 397 inline void Stp(const CPURegister& rt, |
398 const CPURegister& rt2, | 398 const CPURegister& rt2, |
399 const MemOperand& dst); | 399 const MemOperand& dst); |
400 inline void Sxtb(const Register& rd, const Register& rn); | 400 inline void Sxtb(const Register& rd, const Register& rn); |
401 inline void Sxth(const Register& rd, const Register& rn); | 401 inline void Sxth(const Register& rd, const Register& rn); |
402 inline void Sxtw(const Register& rd, const Register& rn); | 402 inline void Sxtw(const Register& rd, const Register& rn); |
403 inline void Tbnz(const Register& rt, unsigned bit_pos, Label* label); | 403 void Tbnz(const Register& rt, unsigned bit_pos, Label* label); |
404 inline void Tbz(const Register& rt, unsigned bit_pos, Label* label); | 404 void Tbz(const Register& rt, unsigned bit_pos, Label* label); |
405 inline void Ubfiz(const Register& rd, | 405 inline void Ubfiz(const Register& rd, |
406 const Register& rn, | 406 const Register& rn, |
407 unsigned lsb, | 407 unsigned lsb, |
408 unsigned width); | 408 unsigned width); |
409 inline void Ubfx(const Register& rd, | 409 inline void Ubfx(const Register& rd, |
410 const Register& rn, | 410 const Register& rn, |
411 unsigned lsb, | 411 unsigned lsb, |
412 unsigned width); | 412 unsigned width); |
413 inline void Ucvtf(const FPRegister& fd, | 413 inline void Ucvtf(const FPRegister& fd, |
414 const Register& rn, | 414 const Register& rn, |
415 unsigned fbits = 0); | 415 unsigned fbits = 0); |
416 inline void Udiv(const Register& rd, const Register& rn, const Register& rm); | 416 inline void Udiv(const Register& rd, const Register& rn, const Register& rm); |
417 inline void Umaddl(const Register& rd, | 417 inline void Umaddl(const Register& rd, |
418 const Register& rn, | 418 const Register& rn, |
419 const Register& rm, | 419 const Register& rm, |
420 const Register& ra); | 420 const Register& ra); |
421 inline void Umsubl(const Register& rd, | 421 inline void Umsubl(const Register& rd, |
422 const Register& rn, | 422 const Register& rn, |
423 const Register& rm, | 423 const Register& rm, |
424 const Register& ra); | 424 const Register& ra); |
425 inline void Unreachable(); | |
426 inline void Uxtb(const Register& rd, const Register& rn); | 425 inline void Uxtb(const Register& rd, const Register& rn); |
427 inline void Uxth(const Register& rd, const Register& rn); | 426 inline void Uxth(const Register& rd, const Register& rn); |
428 inline void Uxtw(const Register& rd, const Register& rn); | 427 inline void Uxtw(const Register& rd, const Register& rn); |
429 | 428 |
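For reference, a "simple pass-through" wrapper does little more than check that macro instructions are allowed and forward to the corresponding assembler instruction. A minimal sketch follows; the real bodies live in the -inl.h file and the member name allow_macro_instructions_ is an assumption here. Note that in this change B(Label*, Condition), Cbnz, Cbz, Tbnz and Tbz stop being such pass-throughs, since they now take part in the far-branch resolution described further down:

```cpp
// Illustrative sketch of a pass-through macro instruction.
void MacroAssembler::Clz(const Register& rd, const Register& rn) {
  ASSERT(allow_macro_instructions_);  // Assumed debug-only guard.
  clz(rd, rn);                        // Forward directly to the assembler.
}
```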
430 // Pseudo-instructions ------------------------------------------------------ | 429 // Pseudo-instructions ------------------------------------------------------ |
431 | 430 |
432 // Compute rd = abs(rm). | 431 // Compute rd = abs(rm). |
433 // This function clobbers the condition flags. | 432 // This function clobbers the condition flags. |
434 // | 433 // |
435 // If rm is the minimum representable value, the result is not representable. | 434 // If rm is the minimum representable value, the result is not representable. |
(...skipping 1631 matching lines...) | |
2067 // Scratch registers used internally by the MacroAssembler. | 2066 // Scratch registers used internally by the MacroAssembler. |
2068 Register tmp0_; | 2067 Register tmp0_; |
2069 Register tmp1_; | 2068 Register tmp1_; |
2070 FPRegister fptmp0_; | 2069 FPRegister fptmp0_; |
2071 | 2070 |
2072 void InitializeNewString(Register string, | 2071 void InitializeNewString(Register string, |
2073 Register length, | 2072 Register length, |
2074 Heap::RootListIndex map_index, | 2073 Heap::RootListIndex map_index, |
2075 Register scratch1, | 2074 Register scratch1, |
2076 Register scratch2); | 2075 Register scratch2); |
2076 | |
2077 public: | |
2078 // Resolving far branches. | |
2079 // | |
2080 // The various classes of branch instructions with immediate offsets have | |
2081 // different ranges. While the Assembler will fail to assemble a branch | |
2082 // exceeding its range, the MacroAssembler offers a mechanism to resolve | |
2083 // branches to too distant targets, either by tweaking the generated code to | |
2084 // use branch instructions with wider ranges or generating veneers. | |
2085 // | |
2086 // Currently branches to distant targets are resolved using unconditional | |
2087 // branch instructions with a range of +-128MB. If that becomes too little | |
2088 // (!), the mechanism can be extended to generate special veneers for really | |
2089 // far targets. | |
2090 | |
2091 // Returns true if we should emit a veneer as soon as possible for a branch | |
2092 // which can at most reach the specified pc. | |
2093 bool ShouldEmitVeneer(int max_reachable_pc, | |
2094 int margin = kVeneerDistanceMargin); | |
2095 | |
2096 // The maximum code size generated for a veneer. Currently one branch | |
2097 // instruction. This is for code size checking purposes, and can be extended | |
2098 // in the future for example if we decide to add nops between the veneers. | |
2099 static const int kMaxVeneerCodeSize = 1 * kInstructionSize; | |
2100 | |
2101 // Emits veneers for branches that are approaching their maximum range. | |
2102 // If need_protection is true, the veneers are protected by a branch jumping | |
2103 // over the code. | |
2104 void EmitVeneers(bool need_protection); | |
2105 void EmitVeneersGuard(); | |
2106 // Checks whether veneers need to be emitted at this point. | |
2107 void CheckVeneers(bool need_protection); | |
2108 | |
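To make the need_protection flag concrete: the emitted pool must be skipped by normal fall-through execution, and only the retargeted short-range branches land on the veneers. A hedged sketch of that layout, using a hypothetical helper rather than the real EmitVeneers/EmitVeneersGuard split:

```cpp
// Hypothetical illustration of a protected veneer pool (not the actual code).
void EmitProtectedVeneer(MacroAssembler* masm,
                         Label* veneer,      // where the short-range branch now points
                         Label* far_target,  // the original, distant destination
                         bool need_protection) {
  Label end_of_pool;
  if (need_protection) {
    masm->B(&end_of_pool);  // Guard: fall-through execution jumps over the pool.
  }
  masm->Bind(veneer);
  masm->B(far_target);      // Unconditional branch with a +/-128MB range.
  masm->Bind(&end_of_pool);
}
```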
2109 // Helps resolve branching to labels potentially out of range. | |
2110 // If the label is not bound, it registers the information needed to emit a | |
2111 // veneer for this branch later, if necessary. | |
2112 // If the label is bound, it returns true if the label (or the previous link | |
2113 // in the label chain) is out of range. In that case the caller is responsible | |
2114 // for generating appropriate code. | |
2115 // Otherwise it returns false. | |
2116 // This function also checks whether veneers need to be emitted. | |
2117 bool NeedExtraInstructionsOrRegisterBranch(Label *label, | |
2118 ImmBranchType branch_type); | |
2119 | |
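This helper is the reason the compare-and-branch and test-and-branch macros above are no longer inline pass-throughs. A plausible shape for the new out-of-line Cbz, assuming the semantics documented above and an ImmBranchType value such as CompareBranchType (the actual implementation may differ in detail):

```cpp
// Sketch: resolving a possibly out-of-range cbz via an inverted branch.
void MacroAssembler::Cbz(const Register& rt, Label* label) {
  ASSERT(allow_macro_instructions_);  // Assumed debug-only guard.
  if (NeedExtraInstructionsOrRegisterBranch(label, CompareBranchType)) {
    // The +/-1MB cbz range may not reach the target: invert the test and
    // skip an unconditional branch, which reaches +/-128MB.
    Label done;
    cbnz(rt, &done);
    b(label);
    bind(&done);
  } else {
    cbz(rt, label);
  }
}
```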
2120 private: | |
2121 // We generate a veneer for a branch if we reach within this distance of the | |
2122 // limit of the range. | |
2123 static const int kVeneerDistanceMargin = 2 * KB; | |
ulan 2014/02/18 10:17:40: To check my understanding: if there are no branch…
Alexandre Rames 2014/02/18 11:59:20: Yes. This would fire a CHECK. My take was that we…
| |
2124 int unresolved_branches_first_limit() const { | |
2125 ASSERT(!unresolved_branches_.empty()); | |
2126 return unresolved_branches_.begin()->first; | |
2127 } | |
2077 }; | 2128 }; |
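Putting the private pieces together, the veneer decision reduces to a distance check against the earliest-expiring unresolved branch. A simplified sketch of how ShouldEmitVeneer and CheckVeneers could fit together, ignoring details such as accounting for every pending veneer (assumed, not the actual bodies):

```cpp
// Simplified sketch of the veneer emission check (assumed implementation).
bool MacroAssembler::ShouldEmitVeneer(int max_reachable_pc, int margin) {
  // Emit early enough to leave room for the veneer itself plus the margin.
  return pc_offset() > max_reachable_pc - margin - kMaxVeneerCodeSize;
}

void MacroAssembler::CheckVeneers(bool need_protection) {
  if (unresolved_branches_.empty()) return;
  if (ShouldEmitVeneer(unresolved_branches_first_limit())) {
    EmitVeneers(need_protection);
  }
}
```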
2078 | 2129 |
2079 | 2130 |
2080 // Use this scope when you need a one-to-one mapping between methods and | 2131 // Use this scope when you need a one-to-one mapping between methods and |
2081 // instructions. This scope prevents the MacroAssembler from being called and | 2132 // instructions. This scope prevents the MacroAssembler from being called and |
2082 // literal pools from being emitted. It also asserts the number of instructions | 2133 // literal pools from being emitted. It also asserts the number of instructions |
2083 // emitted is what you specified when creating the scope. | 2134 // emitted is what you specified when creating the scope. |
2084 class InstructionAccurateScope BASE_EMBEDDED { | 2135 class InstructionAccurateScope BASE_EMBEDDED { |
2085 public: | 2136 public: |
2086 explicit InstructionAccurateScope(MacroAssembler* masm) | 2137 InstructionAccurateScope(MacroAssembler* masm, size_t count = 0) |
2087 : masm_(masm), size_(0) { | 2138 : masm_(masm), size_(count * kInstructionSize) { |
2088 masm_->StartBlockConstPool(); | 2139 masm_->StartBlockConstPool(); |
2089 #ifdef DEBUG | 2140 #ifdef DEBUG |
2141 if (count != 0) { | |
2142 masm_->bind(&start_); | |
2143 } | |
2090 previous_allow_macro_instructions_ = masm_->allow_macro_instructions(); | 2144 previous_allow_macro_instructions_ = masm_->allow_macro_instructions(); |
2091 masm_->set_allow_macro_instructions(false); | 2145 masm_->set_allow_macro_instructions(false); |
2092 #endif | 2146 #endif |
2093 } | |
2094 | |
2095 InstructionAccurateScope(MacroAssembler* masm, size_t count) | |
2096 : masm_(masm), size_(count * kInstructionSize) { | |
2097 masm_->StartBlockConstPool(); | |
2098 #ifdef DEBUG | |
2099 masm_->bind(&start_); | |
2100 previous_allow_macro_instructions_ = masm_->allow_macro_instructions(); | |
2101 masm_->set_allow_macro_instructions(false); | |
2102 #endif | |
2103 } | 2147 } |
2104 | 2148 |
2105 ~InstructionAccurateScope() { | 2149 ~InstructionAccurateScope() { |
2106 masm_->EndBlockConstPool(); | 2150 masm_->EndBlockConstPool(); |
2107 #ifdef DEBUG | 2151 #ifdef DEBUG |
2108 if (start_.is_bound()) { | 2152 if (start_.is_bound()) { |
2109 ASSERT(masm_->SizeOfCodeGeneratedSince(&start_) == size_); | 2153 ASSERT(masm_->SizeOfCodeGeneratedSince(&start_) == size_); |
2110 } | 2154 } |
2111 masm_->set_allow_macro_instructions(previous_allow_macro_instructions_); | 2155 masm_->set_allow_macro_instructions(previous_allow_macro_instructions_); |
2112 #endif | 2156 #endif |
(...skipping 72 matching lines...) | |
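Regarding the constructor merge above: with a single constructor taking a default count, call sites opt into the exact-size assertion simply by passing the expected instruction count. A small usage sketch (the emitted instructions are arbitrary examples, and nop() is assumed to be a raw assembler instruction):

```cpp
// Usage sketch: assert that exactly two raw instructions are emitted.
void EmitTwoInstructions(MacroAssembler* masm) {
  InstructionAccurateScope scope(masm, 2);  // expects 2 * kInstructionSize bytes
  // Inside the scope, macro instructions are disallowed (in debug builds) and
  // constant pool emission is blocked; only raw assembler calls may be used.
  masm->nop();
  masm->nop();
}
```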
2185 #error "Unsupported option" | 2229 #error "Unsupported option" |
2186 #define CODE_COVERAGE_STRINGIFY(x) #x | 2230 #define CODE_COVERAGE_STRINGIFY(x) #x |
2187 #define CODE_COVERAGE_TOSTRING(x) CODE_COVERAGE_STRINGIFY(x) | 2231 #define CODE_COVERAGE_TOSTRING(x) CODE_COVERAGE_STRINGIFY(x) |
2188 #define __FILE_LINE__ __FILE__ ":" CODE_COVERAGE_TOSTRING(__LINE__) | 2232 #define __FILE_LINE__ __FILE__ ":" CODE_COVERAGE_TOSTRING(__LINE__) |
2189 #define ACCESS_MASM(masm) masm->stop(__FILE_LINE__); masm-> | 2233 #define ACCESS_MASM(masm) masm->stop(__FILE_LINE__); masm-> |
2190 #else | 2234 #else |
2191 #define ACCESS_MASM(masm) masm-> | 2235 #define ACCESS_MASM(masm) masm-> |
2192 #endif | 2236 #endif |
2193 | 2237 |
2194 #endif // V8_A64_MACRO_ASSEMBLER_A64_H_ | 2238 #endif // V8_A64_MACRO_ASSEMBLER_A64_H_ |