OLD | NEW |
1 // Copyright (c) 1994-2006 Sun Microsystems Inc. | 1 // Copyright (c) 1994-2006 Sun Microsystems Inc. |
2 // All Rights Reserved. | 2 // All Rights Reserved. |
3 // | 3 // |
4 // Redistribution and use in source and binary forms, with or without | 4 // Redistribution and use in source and binary forms, with or without |
5 // modification, are permitted provided that the following conditions are | 5 // modification, are permitted provided that the following conditions are |
6 // met: | 6 // met: |
7 // | 7 // |
8 // - Redistributions of source code must retain the above copyright notice, | 8 // - Redistributions of source code must retain the above copyright notice, |
9 // this list of conditions and the following disclaimer. | 9 // this list of conditions and the following disclaimer. |
10 // | 10 // |
(...skipping 23 matching lines...) Expand all Loading... |
34 | 34 |
35 | 35 |
36 #ifndef V8_MIPS_ASSEMBLER_MIPS_H_ | 36 #ifndef V8_MIPS_ASSEMBLER_MIPS_H_ |
37 #define V8_MIPS_ASSEMBLER_MIPS_H_ | 37 #define V8_MIPS_ASSEMBLER_MIPS_H_ |
38 | 38 |
39 #include <stdio.h> | 39 #include <stdio.h> |
40 #include "assembler.h" | 40 #include "assembler.h" |
41 #include "constants-mips.h" | 41 #include "constants-mips.h" |
42 #include "serialize.h" | 42 #include "serialize.h" |
43 | 43 |
44 using namespace assembler::mips; | |
45 | |
46 namespace v8 { | 44 namespace v8 { |
47 namespace internal { | 45 namespace internal { |
48 | 46 |
49 // CPU Registers. | 47 // CPU Registers. |
50 // | 48 // |
51 // 1) We would prefer to use an enum, but enum values are assignment- | 49 // 1) We would prefer to use an enum, but enum values are assignment- |
52 // compatible with int, which has caused code-generation bugs. | 50 // compatible with int, which has caused code-generation bugs. |
53 // | 51 // |
54 // 2) We would prefer to use a class instead of a struct but we don't like | 52 // 2) We would prefer to use a class instead of a struct but we don't like |
55 // the register initialization to depend on the particular initialization | 53 // the register initialization to depend on the particular initialization |
(...skipping 10 matching lines...) Expand all Loading... |
66 // such that we use an enum in optimized mode, and the struct in debug | 64 // such that we use an enum in optimized mode, and the struct in debug |
67 // mode. This way we get the compile-time error checking in debug mode | 65 // mode. This way we get the compile-time error checking in debug mode |
68 // and best performance in optimized code. | 66 // and best performance in optimized code. |
69 | 67 |
70 | 68 |
71 // ----------------------------------------------------------------------------- | 69 // ----------------------------------------------------------------------------- |
72 // Implementation of Register and FPURegister | 70 // Implementation of Register and FPURegister |
73 | 71 |
74 // Core register. | 72 // Core register. |
75 struct Register { | 73 struct Register { |
| 74 static const int kNumRegisters = v8::internal::kNumRegisters; |
| 75 static const int kNumAllocatableRegisters = 14; // v0 through t7 |
| 76 |
| 77 static int ToAllocationIndex(Register reg) { |
| 78 return reg.code() - 2; // zero_reg and 'at' are skipped. |
| 79 } |
| 80 |
| 81 static Register FromAllocationIndex(int index) { |
| 82 ASSERT(index >= 0 && index < kNumAllocatableRegisters); |
| 83 return from_code(index + 2); // zero_reg and 'at' are skipped. |
| 84 } |
| 85 |
| 86 static const char* AllocationIndexToString(int index) { |
| 87 ASSERT(index >= 0 && index < kNumAllocatableRegisters); |
| 88 const char* const names[] = { |
| 89 "v0", |
| 90 "v1", |
| 91 "a0", |
| 92 "a1", |
| 93 "a2", |
| 94 "a3", |
| 95 "t0", |
| 96 "t1", |
| 97 "t2", |
| 98 "t3", |
| 99 "t4", |
| 100 "t5", |
| 101 "t6", |
| 102 "t7", |
| 103 }; |
| 104 return names[index]; |
| 105 } |
| 106 |
| 107 static Register from_code(int code) { |
| 108 Register r = { code }; |
| 109 return r; |
| 110 } |
| 111 |
76 bool is_valid() const { return 0 <= code_ && code_ < kNumRegisters; } | 112 bool is_valid() const { return 0 <= code_ && code_ < kNumRegisters; } |
77 bool is(Register reg) const { return code_ == reg.code_; } | 113 bool is(Register reg) const { return code_ == reg.code_; } |
78 int code() const { | 114 int code() const { |
79 ASSERT(is_valid()); | 115 ASSERT(is_valid()); |
80 return code_; | 116 return code_; |
81 } | 117 } |
82 int bit() const { | 118 int bit() const { |
83 ASSERT(is_valid()); | 119 ASSERT(is_valid()); |
84 return 1 << code_; | 120 return 1 << code_; |
85 } | 121 } |
86 | 122 |
87 // Unfortunately we can't make this private in a struct. | 123 // Unfortunately we can't make this private in a struct. |
88 int code_; | 124 int code_; |
89 }; | 125 }; |
90 | 126 |
91 extern const Register no_reg; | 127 const Register no_reg = { -1 }; |
92 | 128 |
93 extern const Register zero_reg; | 129 const Register zero_reg = { 0 }; |
94 extern const Register at; | 130 const Register at = { 1 }; |
95 extern const Register v0; | 131 const Register v0 = { 2 }; |
96 extern const Register v1; | 132 const Register v1 = { 3 }; |
97 extern const Register a0; | 133 const Register a0 = { 4 }; |
98 extern const Register a1; | 134 const Register a1 = { 5 }; |
99 extern const Register a2; | 135 const Register a2 = { 6 }; |
100 extern const Register a3; | 136 const Register a3 = { 7 }; |
101 extern const Register t0; | 137 const Register t0 = { 8 }; |
102 extern const Register t1; | 138 const Register t1 = { 9 }; |
103 extern const Register t2; | 139 const Register t2 = { 10 }; |
104 extern const Register t3; | 140 const Register t3 = { 11 }; |
105 extern const Register t4; | 141 const Register t4 = { 12 }; |
106 extern const Register t5; | 142 const Register t5 = { 13 }; |
107 extern const Register t6; | 143 const Register t6 = { 14 }; |
108 extern const Register t7; | 144 const Register t7 = { 15 }; |
109 extern const Register s0; | 145 const Register s0 = { 16 }; |
110 extern const Register s1; | 146 const Register s1 = { 17 }; |
111 extern const Register s2; | 147 const Register s2 = { 18 }; |
112 extern const Register s3; | 148 const Register s3 = { 19 }; |
113 extern const Register s4; | 149 const Register s4 = { 20 }; |
114 extern const Register s5; | 150 const Register s5 = { 21 }; |
115 extern const Register s6; | 151 const Register s6 = { 22 }; |
116 extern const Register s7; | 152 const Register s7 = { 23 }; |
117 extern const Register t8; | 153 const Register t8 = { 24 }; |
118 extern const Register t9; | 154 const Register t9 = { 25 }; |
119 extern const Register k0; | 155 const Register k0 = { 26 }; |
120 extern const Register k1; | 156 const Register k1 = { 27 }; |
121 extern const Register gp; | 157 const Register gp = { 28 }; |
122 extern const Register sp; | 158 const Register sp = { 29 }; |
123 extern const Register s8_fp; | 159 const Register s8_fp = { 30 }; |
124 extern const Register ra; | 160 const Register ra = { 31 }; |
| 161 |
125 | 162 |
126 int ToNumber(Register reg); | 163 int ToNumber(Register reg); |
127 | 164 |
128 Register ToRegister(int num); | 165 Register ToRegister(int num); |
129 | 166 |
130 // Coprocessor register. | 167 // Coprocessor register. |
131 struct FPURegister { | 168 struct FPURegister { |
132 bool is_valid() const { return 0 <= code_ && code_ < kNumFPURegister ; } | 169 static const int kNumRegisters = v8::internal::kNumFPURegisters; |
| 170 // f0 has been excluded from allocation. This is following ia32 |
| 171 // where xmm0 is excluded. |
| 172 static const int kNumAllocatableRegisters = 15; |
| 173 |
| 174 static int ToAllocationIndex(FPURegister reg) { |
| 175 ASSERT(reg.code() != 0); |
| 176 ASSERT(reg.code() % 2 == 0); |
| 177 return (reg.code() / 2) - 1; |
| 178 } |
| 179 |
| 180 static FPURegister FromAllocationIndex(int index) { |
| 181 ASSERT(index >= 0 && index < kNumAllocatableRegisters); |
| 182 return from_code((index + 1) * 2); |
| 183 } |
| 184 |
| 185 static const char* AllocationIndexToString(int index) { |
| 186 ASSERT(index >= 0 && index < kNumAllocatableRegisters); |
| 187 const char* const names[] = { |
| 188 "f2", |
| 189 "f4", |
| 190 "f6", |
| 191 "f8", |
| 192 "f10", |
| 193 "f12", |
| 194 "f14", |
| 195 "f16", |
| 196 "f18", |
| 197 "f20", |
| 198 "f22", |
| 199 "f24", |
| 200 "f26", |
| 201 "f28", |
| 202 "f30" |
| 203 }; |
| 204 return names[index]; |
| 205 } |
| 206 |
| 207 static FPURegister from_code(int code) { |
| 208 FPURegister r = { code }; |
| 209 return r; |
| 210 } |
| 211 |
| 212 bool is_valid() const { return 0 <= code_ && code_ < kNumFPURegisters; }
133 bool is(FPURegister creg) const { return code_ == creg.code_; } | 213 bool is(FPURegister creg) const { return code_ == creg.code_; } |
134 int code() const { | 214 int code() const { |
135 ASSERT(is_valid()); | 215 ASSERT(is_valid()); |
136 return code_; | 216 return code_; |
137 } | 217 } |
138 int bit() const { | 218 int bit() const { |
139 ASSERT(is_valid()); | 219 ASSERT(is_valid()); |
140 return 1 << code_; | 220 return 1 << code_; |
141 } | 221 } |
142 | 222 void setcode(int f) { |
| 223 code_ = f; |
| 224 ASSERT(is_valid()); |
| 225 } |
143 // Unfortunately we can't make this private in a struct. | 226 // Unfortunately we can't make this private in a struct. |
144 int code_; | 227 int code_; |
145 }; | 228 }; |
146 | 229 |
147 extern const FPURegister no_creg; | 230 typedef FPURegister DoubleRegister; |
148 | 231 |
149 extern const FPURegister f0; | 232 const FPURegister no_creg = { -1 }; |
150 extern const FPURegister f1; | |
151 extern const FPURegister f2; | |
152 extern const FPURegister f3; | |
153 extern const FPURegister f4; | |
154 extern const FPURegister f5; | |
155 extern const FPURegister f6; | |
156 extern const FPURegister f7; | |
157 extern const FPURegister f8; | |
158 extern const FPURegister f9; | |
159 extern const FPURegister f10; | |
160 extern const FPURegister f11; | |
161 extern const FPURegister f12; // arg | |
162 extern const FPURegister f13; | |
163 extern const FPURegister f14; // arg | |
164 extern const FPURegister f15; | |
165 extern const FPURegister f16; | |
166 extern const FPURegister f17; | |
167 extern const FPURegister f18; | |
168 extern const FPURegister f19; | |
169 extern const FPURegister f20; | |
170 extern const FPURegister f21; | |
171 extern const FPURegister f22; | |
172 extern const FPURegister f23; | |
173 extern const FPURegister f24; | |
174 extern const FPURegister f25; | |
175 extern const FPURegister f26; | |
176 extern const FPURegister f27; | |
177 extern const FPURegister f28; | |
178 extern const FPURegister f29; | |
179 extern const FPURegister f30; | |
180 extern const FPURegister f31; | |
181 | 233 |
| 234 const FPURegister f0 = { 0 }; // Return value in hard float mode. |
| 235 const FPURegister f1 = { 1 }; |
| 236 const FPURegister f2 = { 2 }; |
| 237 const FPURegister f3 = { 3 }; |
| 238 const FPURegister f4 = { 4 }; |
| 239 const FPURegister f5 = { 5 }; |
| 240 const FPURegister f6 = { 6 }; |
| 241 const FPURegister f7 = { 7 }; |
| 242 const FPURegister f8 = { 8 }; |
| 243 const FPURegister f9 = { 9 }; |
| 244 const FPURegister f10 = { 10 }; |
| 245 const FPURegister f11 = { 11 }; |
| 246 const FPURegister f12 = { 12 }; // Arg 0 in hard float mode. |
| 247 const FPURegister f13 = { 13 }; |
| 248 const FPURegister f14 = { 14 }; // Arg 1 in hard float mode. |
| 249 const FPURegister f15 = { 15 }; |
| 250 const FPURegister f16 = { 16 }; |
| 251 const FPURegister f17 = { 17 }; |
| 252 const FPURegister f18 = { 18 }; |
| 253 const FPURegister f19 = { 19 }; |
| 254 const FPURegister f20 = { 20 }; |
| 255 const FPURegister f21 = { 21 }; |
| 256 const FPURegister f22 = { 22 }; |
| 257 const FPURegister f23 = { 23 }; |
| 258 const FPURegister f24 = { 24 }; |
| 259 const FPURegister f25 = { 25 }; |
| 260 const FPURegister f26 = { 26 }; |
| 261 const FPURegister f27 = { 27 }; |
| 262 const FPURegister f28 = { 28 }; |
| 263 const FPURegister f29 = { 29 }; |
| 264 const FPURegister f30 = { 30 }; |
| 265 const FPURegister f31 = { 31 }; |
182 | 266 |
183 // Returns the equivalent of !cc. | 267 // FPU (coprocessor 1) control registers. |
184 // Negation of the default no_condition (-1) results in a non-default | 268 // Currently only FCSR (#31) is implemented. |
185 // no_condition value (-2). As long as tests for no_condition check | 269 struct FPUControlRegister { |
186 // for condition < 0, this will work as expected. | 270 static const int kFCSRRegister = 31; |
187 inline Condition NegateCondition(Condition cc); | 271 static const int kInvalidFPUControlRegister = -1; |
188 | 272 |
189 inline Condition ReverseCondition(Condition cc) { | 273 bool is_valid() const { return code_ == kFCSRRegister; } |
190 switch (cc) { | 274 bool is(FPUControlRegister creg) const { return code_ == creg.code_; } |
191 case Uless: | 275 int code() const { |
192 return Ugreater; | 276 ASSERT(is_valid()); |
193 case Ugreater: | 277 return code_; |
194 return Uless; | 278 } |
195 case Ugreater_equal: | 279 int bit() const { |
196 return Uless_equal; | 280 ASSERT(is_valid()); |
197 case Uless_equal: | 281 return 1 << code_; |
198 return Ugreater_equal; | 282 } |
199 case less: | 283 void setcode(int f) { |
200 return greater; | 284 code_ = f; |
201 case greater: | 285 ASSERT(is_valid()); |
202 return less; | 286 } |
203 case greater_equal: | 287 // Unfortunately we can't make this private in a struct. |
204 return less_equal; | 288 int code_; |
205 case less_equal: | |
206 return greater_equal; | |
207 default: | |
208 return cc; | |
209 }; | |
210 } | |
211 | |
212 | |
213 enum Hint { | |
214 no_hint = 0 | |
215 }; | 289 }; |
216 | 290 |
217 inline Hint NegateHint(Hint hint) { | 291 const FPUControlRegister no_fpucreg = { -1 }; |
218 return no_hint; | 292 const FPUControlRegister FCSR = { kFCSRRegister }; |
219 } | |
220 | 293 |
221 | 294 |
222 // ----------------------------------------------------------------------------- | 295 // ----------------------------------------------------------------------------- |
223 // Machine instruction Operands. | 296 // Machine instruction Operands. |
224 | 297 |
225 // Class Operand represents a shifter operand in data processing instructions. | 298 // Class Operand represents a shifter operand in data processing instructions. |
226 class Operand BASE_EMBEDDED { | 299 class Operand BASE_EMBEDDED { |
227 public: | 300 public: |
228 // Immediate. | 301 // Immediate. |
229 INLINE(explicit Operand(int32_t immediate, | 302 INLINE(explicit Operand(int32_t immediate, |
(...skipping 21 matching lines...) Expand all Loading... |
251 friend class Assembler; | 324 friend class Assembler; |
252 friend class MacroAssembler; | 325 friend class MacroAssembler; |
253 }; | 326 }; |
254 | 327 |
255 | 328 |
256 // On MIPS we have only one adressing mode with base_reg + offset. | 329 // On MIPS we have only one addressing mode with base_reg + offset. |
257 // Class MemOperand represents a memory operand in load and store instructions. | 330 // Class MemOperand represents a memory operand in load and store instructions. |
258 class MemOperand : public Operand { | 331 class MemOperand : public Operand { |
259 public: | 332 public: |
260 | 333 |
261 explicit MemOperand(Register rn, int16_t offset = 0); | 334 explicit MemOperand(Register rn, int32_t offset = 0); |
262 | 335 |
263 private: | 336 private: |
264 int16_t offset_; | 337 int32_t offset_; |
265 | 338 |
266 friend class Assembler; | 339 friend class Assembler; |
267 }; | 340 }; |
268 | 341 |
269 | 342 |
270 class Assembler : public Malloced { | 343 // CpuFeatures keeps track of which features are supported by the target CPU. |
| 344 // Supported features must be enabled by a Scope before use. |
| 345 class CpuFeatures { |
| 346 public: |
| 347 // Detect features of the target CPU. Set safe defaults if the serializer |
| 348 // is enabled (snapshots must be portable). |
| 349 void Probe(bool portable); |
| 350 |
| 351 // Check whether a feature is supported by the target CPU. |
| 352 bool IsSupported(CpuFeature f) const { |
| 353 if (f == FPU && !FLAG_enable_fpu) return false; |
| 354 return (supported_ & (1u << f)) != 0; |
| 355 } |
| 356 |
| 357 // Check whether a feature is currently enabled. |
| 358 bool IsEnabled(CpuFeature f) const { |
| 359 return (enabled_ & (1u << f)) != 0; |
| 360 } |
| 361 |
| 362 // Enable a specified feature within a scope. |
| 363 class Scope BASE_EMBEDDED { |
| 364 #ifdef DEBUG |
| 365 public: |
| 366 explicit Scope(CpuFeature f) |
| 367 : cpu_features_(Isolate::Current()->cpu_features()), |
| 368 isolate_(Isolate::Current()) { |
| 369 ASSERT(cpu_features_->IsSupported(f)); |
| 370 ASSERT(!Serializer::enabled() || |
| 371 (cpu_features_->found_by_runtime_probing_ & (1u << f)) == 0); |
| 372 old_enabled_ = cpu_features_->enabled_; |
| 373 cpu_features_->enabled_ |= 1u << f; |
| 374 } |
| 375 ~Scope() { |
| 376 ASSERT_EQ(Isolate::Current(), isolate_); |
| 377 cpu_features_->enabled_ = old_enabled_; |
| 378 } |
| 379 private: |
| 380 unsigned old_enabled_; |
| 381 CpuFeatures* cpu_features_; |
| 382 Isolate* isolate_; |
| 383 #else |
| 384 public: |
| 385 explicit Scope(CpuFeature f) {} |
| 386 #endif |
| 387 }; |
| 388 |
| 389 private: |
| 390 CpuFeatures(); |
| 391 |
| 392 unsigned supported_; |
| 393 unsigned enabled_; |
| 394 unsigned found_by_runtime_probing_; |
| 395 |
| 396 friend class Isolate; |
| 397 |
| 398 DISALLOW_COPY_AND_ASSIGN(CpuFeatures); |
| 399 }; |
| 400 |
| 401 |
| 402 class Assembler : public AssemblerBase { |
271 public: | 403 public: |
272 // Create an assembler. Instructions and relocation information are emitted | 404 // Create an assembler. Instructions and relocation information are emitted |
273 // into a buffer, with the instructions starting from the beginning and the | 405 // into a buffer, with the instructions starting from the beginning and the |
274 // relocation information starting from the end of the buffer. See CodeDesc | 406 // relocation information starting from the end of the buffer. See CodeDesc |
275 // for a detailed comment on the layout (globals.h). | 407 // for a detailed comment on the layout (globals.h). |
276 // | 408 // |
277 // If the provided buffer is NULL, the assembler allocates and grows its own | 409 // If the provided buffer is NULL, the assembler allocates and grows its own |
278 // buffer, and buffer_size determines the initial buffer size. The buffer is | 410 // buffer, and buffer_size determines the initial buffer size. The buffer is |
279 // owned by the assembler and deallocated upon destruction of the assembler. | 411 // owned by the assembler and deallocated upon destruction of the assembler. |
280 // | 412 // |
281 // If the provided buffer is not NULL, the assembler uses the provided buffer | 413 // If the provided buffer is not NULL, the assembler uses the provided buffer |
282 // for code generation and assumes its size to be buffer_size. If the buffer | 414 // for code generation and assumes its size to be buffer_size. If the buffer |
283 // is too small, a fatal error occurs. No deallocation of the buffer is done | 415 // is too small, a fatal error occurs. No deallocation of the buffer is done |
284 // upon destruction of the assembler. | 416 // upon destruction of the assembler. |
285 Assembler(void* buffer, int buffer_size); | 417 Assembler(void* buffer, int buffer_size); |
286 ~Assembler(); | 418 ~Assembler(); |
287 | 419 |
| 420 // Overrides the default provided by FLAG_debug_code. |
| 421 void set_emit_debug_code(bool value) { emit_debug_code_ = value; } |
| 422 |
288 // GetCode emits any pending (non-emitted) code and fills the descriptor | 423 // GetCode emits any pending (non-emitted) code and fills the descriptor |
289 // desc. GetCode() is idempotent; it returns the same result if no other | 424 // desc. GetCode() is idempotent; it returns the same result if no other |
290 // Assembler functions are invoked in between GetCode() calls. | 425 // Assembler functions are invoked in between GetCode() calls. |
291 void GetCode(CodeDesc* desc); | 426 void GetCode(CodeDesc* desc); |
292 | 427 |
293 // Label operations & relative jumps (PPUM Appendix D). | 428 // Label operations & relative jumps (PPUM Appendix D). |
294 // | 429 // |
295 // Takes a branch opcode (cc) and a label (L) and generates | 430 // Takes a branch opcode (cc) and a label (L) and generates |
296 // either a backward branch or a forward branch and links it | 431 // either a backward branch or a forward branch and links it |
297 // to the label fixup chain. Usage: | 432 // to the label fixup chain. Usage: |
(...skipping 15 matching lines...) Expand all Loading... |
313 int32_t shifted_branch_offset(Label* L, bool jump_elimination_allowed) { | 448 int32_t shifted_branch_offset(Label* L, bool jump_elimination_allowed) { |
314 int32_t o = branch_offset(L, jump_elimination_allowed); | 449 int32_t o = branch_offset(L, jump_elimination_allowed); |
315 ASSERT((o & 3) == 0); // Assert the offset is aligned. | 450 ASSERT((o & 3) == 0); // Assert the offset is aligned. |
316 return o >> 2; | 451 return o >> 2; |
317 } | 452 } |
318 | 453 |
319 // Puts a labels target address at the given position. | 454 // Puts a label's target address at the given position. |
320 // The high 8 bits are set to zero. | 455 // The high 8 bits are set to zero. |
321 void label_at_put(Label* L, int at_offset); | 456 void label_at_put(Label* L, int at_offset); |
322 | 457 |
323 // Size of an instruction. | |
324 static const int kInstrSize = sizeof(Instr); | |
325 | |
326 // Difference between address of current opcode and target address offset. | |
327 static const int kBranchPCOffset = 4; | |
328 | |
329 // Read/Modify the code target address in the branch/call instruction at pc. | 458 // Read/Modify the code target address in the branch/call instruction at pc. |
330 static Address target_address_at(Address pc); | 459 static Address target_address_at(Address pc); |
331 static void set_target_address_at(Address pc, Address target); | 460 static void set_target_address_at(Address pc, Address target); |
332 | 461 |
333 // This sets the branch destination (which gets loaded at the call address). | 462 // This sets the branch destination (which gets loaded at the call address). |
334 // This is for calls and branches within generated code. | 463 // This is for calls and branches within generated code. |
335 inline static void set_target_at(Address instruction_payload, | 464 inline static void set_target_at(Address instruction_payload, |
336 Address target) { | 465 Address target) { |
337 set_target_address_at(instruction_payload, target); | 466 set_target_address_at(instruction_payload, target); |
338 } | 467 } |
339 | 468 |
340 // This sets the branch destination. | 469 // This sets the branch destination. |
341 // This is for calls and branches to runtime code. | 470 // This is for calls and branches to runtime code. |
342 inline static void set_external_target_at(Address instruction_payload, | 471 inline static void set_external_target_at(Address instruction_payload, |
343 Address target) { | 472 Address target) { |
344 set_target_address_at(instruction_payload, target); | 473 set_target_address_at(instruction_payload, target); |
345 } | 474 } |
346 | 475 |
347 static const int kCallTargetSize = 3 * kPointerSize; | 476 // Size of an instruction. |
348 static const int kExternalTargetSize = 3 * kPointerSize; | 477 static const int kInstrSize = sizeof(Instr); |
| 478 |
| 479 // Difference between address of current opcode and target address offset. |
| 480 static const int kBranchPCOffset = 4; |
| 481 |
| 482 // Here we are patching the address in the LUI/ORI instruction pair. |
| 483 // These values are used in the serialization process and must be zero for |
| 484 // MIPS platform, as Code, Embedded Object or External-reference pointers |
| 485 // are split across two consecutive instructions and don't exist separately |
| 486 // in the code, so the serializer should not step forwards in memory after |
| 487 // a target is resolved and written. |
| 488 static const int kCallTargetSize = 0 * kInstrSize; |
| 489 static const int kExternalTargetSize = 0 * kInstrSize; |
| 490 |
| 491 // Number of consecutive instructions used to store a 32-bit constant. |
| 492 // Used in RelocInfo::target_address_address() function to tell serializer |
| 493 // address of the instruction that follows LUI/ORI instruction pair. |
| 494 static const int kInstructionsFor32BitConstant = 2; |
349 | 495 |
350 // Distance between the instruction referring to the address of the call | 496 // Distance between the instruction referring to the address of the call |
351 // target and the return address. | 497 // target and the return address. |
352 static const int kCallTargetAddressOffset = 4 * kInstrSize; | 498 static const int kCallTargetAddressOffset = 4 * kInstrSize; |
353 | 499 |
354 // Distance between start of patched return sequence and the emitted address | 500 // Distance between start of patched return sequence and the emitted address |
355 // to jump to. | 501 // to jump to. |
356 static const int kPatchReturnSequenceAddressOffset = kInstrSize; | 502 static const int kPatchReturnSequenceAddressOffset = 0; |
357 | 503 |
358 // Distance between start of patched debug break slot and the emitted address | 504 // Distance between start of patched debug break slot and the emitted address |
359 // to jump to. | 505 // to jump to. |
360 static const int kPatchDebugBreakSlotAddressOffset = kInstrSize; | 506 static const int kPatchDebugBreakSlotAddressOffset = 0 * kInstrSize; |
| 507 |
| 508 // Difference between address of current opcode and value read from pc |
| 509 // register. |
| 510 static const int kPcLoadDelta = 4; |
| 511 |
| 512 // Number of instructions used for the JS return sequence. The constant is |
| 513 // used by the debugger to patch the JS return sequence. |
| 514 static const int kJSReturnSequenceInstructions = 7; |
| 515 static const int kDebugBreakSlotInstructions = 4; |
| 516 static const int kDebugBreakSlotLength = |
| 517 kDebugBreakSlotInstructions * kInstrSize; |
| 518 |
361 | 519 |
362 // --------------------------------------------------------------------------- | 520 // --------------------------------------------------------------------------- |
363 // Code generation. | 521 // Code generation. |
364 | 522 |
365 void nop() { sll(zero_reg, zero_reg, 0); } | 523 // Insert the smallest number of nop instructions |
| 524 // possible to align the pc offset to a multiple |
| 525 // of m. m must be a power of 2 (>= 4). |
| 526 void Align(int m); |
| 527 // Aligns code to something that's optimal for a jump target for the platform. |
| 528 void CodeTargetAlign(); |
| 529 |
| 530 // Different nop operations are used by the code generator to detect certain |
| 531 // states of the generated code. |
| 532 enum NopMarkerTypes { |
| 533 NON_MARKING_NOP = 0, |
| 534 DEBUG_BREAK_NOP, |
| 535 // IC markers. |
| 536 PROPERTY_ACCESS_INLINED, |
| 537 PROPERTY_ACCESS_INLINED_CONTEXT, |
| 538 PROPERTY_ACCESS_INLINED_CONTEXT_DONT_DELETE, |
| 539 // Helper values. |
| 540 LAST_CODE_MARKER, |
| 541 FIRST_IC_MARKER = PROPERTY_ACCESS_INLINED |
| 542 }; |
| 543 |
| 544 // type == 0 is the default non-marking type. |
| 545 void nop(unsigned int type = 0) { |
| 546 ASSERT(type < 32); |
| 547 sll(zero_reg, zero_reg, type, true); |
| 548 } |
366 | 549 |
367 | 550 |
368 //------- Branch and jump instructions -------- | 551 //------- Branch and jump instructions -------- |
369 // We don't use likely variant of instructions. | 552 // We don't use likely variant of instructions. |
370 void b(int16_t offset); | 553 void b(int16_t offset); |
371 void b(Label* L) { b(branch_offset(L, false)>>2); } | 554 void b(Label* L) { b(branch_offset(L, false)>>2); } |
372 void bal(int16_t offset); | 555 void bal(int16_t offset); |
373 void bal(Label* L) { bal(branch_offset(L, false)>>2); } | 556 void bal(Label* L) { bal(branch_offset(L, false)>>2); } |
374 | 557 |
375 void beq(Register rs, Register rt, int16_t offset); | 558 void beq(Register rs, Register rt, int16_t offset); |
(...skipping 17 matching lines...) Expand all Loading... |
393 // Jump targets must be in the current 256 MB-aligned region. ie 28 bits. | 576 // Jump targets must be in the current 256 MB-aligned region. ie 28 bits. |
394 void j(int32_t target); | 577 void j(int32_t target); |
395 void jal(int32_t target); | 578 void jal(int32_t target); |
396 void jalr(Register rs, Register rd = ra); | 579 void jalr(Register rs, Register rd = ra); |
397 void jr(Register target); | 580 void jr(Register target); |
398 | 581 |
399 | 582 |
400 //-------Data-processing-instructions--------- | 583 //-------Data-processing-instructions--------- |
401 | 584 |
402 // Arithmetic. | 585 // Arithmetic. |
403 void add(Register rd, Register rs, Register rt); | |
404 void addu(Register rd, Register rs, Register rt); | 586 void addu(Register rd, Register rs, Register rt); |
405 void sub(Register rd, Register rs, Register rt); | |
406 void subu(Register rd, Register rs, Register rt); | 587 void subu(Register rd, Register rs, Register rt); |
407 void mult(Register rs, Register rt); | 588 void mult(Register rs, Register rt); |
408 void multu(Register rs, Register rt); | 589 void multu(Register rs, Register rt); |
409 void div(Register rs, Register rt); | 590 void div(Register rs, Register rt); |
410 void divu(Register rs, Register rt); | 591 void divu(Register rs, Register rt); |
411 void mul(Register rd, Register rs, Register rt); | 592 void mul(Register rd, Register rs, Register rt); |
412 | 593 |
413 void addi(Register rd, Register rs, int32_t j); | |
414 void addiu(Register rd, Register rs, int32_t j); | 594 void addiu(Register rd, Register rs, int32_t j); |
415 | 595 |
416 // Logical. | 596 // Logical. |
417 void and_(Register rd, Register rs, Register rt); | 597 void and_(Register rd, Register rs, Register rt); |
418 void or_(Register rd, Register rs, Register rt); | 598 void or_(Register rd, Register rs, Register rt); |
419 void xor_(Register rd, Register rs, Register rt); | 599 void xor_(Register rd, Register rs, Register rt); |
420 void nor(Register rd, Register rs, Register rt); | 600 void nor(Register rd, Register rs, Register rt); |
421 | 601 |
422 void andi(Register rd, Register rs, int32_t j); | 602 void andi(Register rd, Register rs, int32_t j); |
423 void ori(Register rd, Register rs, int32_t j); | 603 void ori(Register rd, Register rs, int32_t j); |
424 void xori(Register rd, Register rs, int32_t j); | 604 void xori(Register rd, Register rs, int32_t j); |
425 void lui(Register rd, int32_t j); | 605 void lui(Register rd, int32_t j); |
426 | 606 |
427 // Shifts. | 607 // Shifts. |
428 void sll(Register rd, Register rt, uint16_t sa); | 608 // Please note: sll(zero_reg, zero_reg, x) instructions are reserved as nop |
| 609 // and may cause problems in normal code. coming_from_nop makes sure this |
| 610 // doesn't happen. |
| 611 void sll(Register rd, Register rt, uint16_t sa, bool coming_from_nop = false); |
429 void sllv(Register rd, Register rt, Register rs); | 612 void sllv(Register rd, Register rt, Register rs); |
430 void srl(Register rd, Register rt, uint16_t sa); | 613 void srl(Register rd, Register rt, uint16_t sa); |
431 void srlv(Register rd, Register rt, Register rs); | 614 void srlv(Register rd, Register rt, Register rs); |
432 void sra(Register rt, Register rd, uint16_t sa); | 615 void sra(Register rt, Register rd, uint16_t sa); |
433 void srav(Register rt, Register rd, Register rs); | 616 void srav(Register rt, Register rd, Register rs); |
| 617 void rotr(Register rd, Register rt, uint16_t sa); |
| 618 void rotrv(Register rd, Register rt, Register rs); |
434 | 619 |
435 | 620 |
436 //------------Memory-instructions------------- | 621 //------------Memory-instructions------------- |
437 | 622 |
438 void lb(Register rd, const MemOperand& rs); | 623 void lb(Register rd, const MemOperand& rs); |
439 void lbu(Register rd, const MemOperand& rs); | 624 void lbu(Register rd, const MemOperand& rs); |
| 625 void lh(Register rd, const MemOperand& rs); |
| 626 void lhu(Register rd, const MemOperand& rs); |
440 void lw(Register rd, const MemOperand& rs); | 627 void lw(Register rd, const MemOperand& rs); |
| 628 void lwl(Register rd, const MemOperand& rs); |
| 629 void lwr(Register rd, const MemOperand& rs); |
441 void sb(Register rd, const MemOperand& rs); | 630 void sb(Register rd, const MemOperand& rs); |
| 631 void sh(Register rd, const MemOperand& rs); |
442 void sw(Register rd, const MemOperand& rs); | 632 void sw(Register rd, const MemOperand& rs); |
| 633 void swl(Register rd, const MemOperand& rs); |
| 634 void swr(Register rd, const MemOperand& rs); |
443 | 635 |
444 | 636 |
445 //-------------Misc-instructions-------------- | 637 //-------------Misc-instructions-------------- |
446 | 638 |
447 // Break / Trap instructions. | 639 // Break / Trap instructions. |
448 void break_(uint32_t code); | 640 void break_(uint32_t code); |
449 void tge(Register rs, Register rt, uint16_t code); | 641 void tge(Register rs, Register rt, uint16_t code); |
450 void tgeu(Register rs, Register rt, uint16_t code); | 642 void tgeu(Register rs, Register rt, uint16_t code); |
451 void tlt(Register rs, Register rt, uint16_t code); | 643 void tlt(Register rs, Register rt, uint16_t code); |
452 void tltu(Register rs, Register rt, uint16_t code); | 644 void tltu(Register rs, Register rt, uint16_t code); |
453 void teq(Register rs, Register rt, uint16_t code); | 645 void teq(Register rs, Register rt, uint16_t code); |
454 void tne(Register rs, Register rt, uint16_t code); | 646 void tne(Register rs, Register rt, uint16_t code); |
455 | 647 |
456 // Move from HI/LO register. | 648 // Move from HI/LO register. |
457 void mfhi(Register rd); | 649 void mfhi(Register rd); |
458 void mflo(Register rd); | 650 void mflo(Register rd); |
459 | 651 |
460 // Set on less than. | 652 // Set on less than. |
461 void slt(Register rd, Register rs, Register rt); | 653 void slt(Register rd, Register rs, Register rt); |
462 void sltu(Register rd, Register rs, Register rt); | 654 void sltu(Register rd, Register rs, Register rt); |
463 void slti(Register rd, Register rs, int32_t j); | 655 void slti(Register rd, Register rs, int32_t j); |
464 void sltiu(Register rd, Register rs, int32_t j); | 656 void sltiu(Register rd, Register rs, int32_t j); |
465 | 657 |
| 658 // Conditional move. |
| 659 void movz(Register rd, Register rs, Register rt); |
| 660 void movn(Register rd, Register rs, Register rt); |
| 661 void movt(Register rd, Register rs, uint16_t cc = 0); |
| 662 void movf(Register rd, Register rs, uint16_t cc = 0); |
| 663 |
| 664 // Bit twiddling. |
| 665 void clz(Register rd, Register rs); |
| 666 void ins_(Register rt, Register rs, uint16_t pos, uint16_t size); |
| 667 void ext_(Register rt, Register rs, uint16_t pos, uint16_t size); |
466 | 668 |
467 //--------Coprocessor-instructions---------------- | 669 //--------Coprocessor-instructions---------------- |
468 | 670 |
469 // Load, store, and move. | 671 // Load, store, and move. |
470 void lwc1(FPURegister fd, const MemOperand& src); | 672 void lwc1(FPURegister fd, const MemOperand& src); |
471 void ldc1(FPURegister fd, const MemOperand& src); | 673 void ldc1(FPURegister fd, const MemOperand& src); |
472 | 674 |
473 void swc1(FPURegister fs, const MemOperand& dst); | 675 void swc1(FPURegister fs, const MemOperand& dst); |
474 void sdc1(FPURegister fs, const MemOperand& dst); | 676 void sdc1(FPURegister fs, const MemOperand& dst); |
475 | 677 |
476 // When paired with MTC1 to write a value to a 64-bit FPR, the MTC1 must be | 678 void mtc1(Register rt, FPURegister fs); |
477 // executed first, followed by the MTHC1. | 679 void mfc1(Register rt, FPURegister fs); |
478 void mtc1(FPURegister fs, Register rt); | 680 |
479 void mthc1(FPURegister fs, Register rt); | 681 void ctc1(Register rt, FPUControlRegister fs); |
480 void mfc1(FPURegister fs, Register rt); | 682 void cfc1(Register rt, FPUControlRegister fs); |
481 void mfhc1(FPURegister fs, Register rt); | 683 |
| 684 // Arithmetic. |
| 685 void add_d(FPURegister fd, FPURegister fs, FPURegister ft); |
| 686 void sub_d(FPURegister fd, FPURegister fs, FPURegister ft); |
| 687 void mul_d(FPURegister fd, FPURegister fs, FPURegister ft); |
| 688 void div_d(FPURegister fd, FPURegister fs, FPURegister ft); |
| 689 void abs_d(FPURegister fd, FPURegister fs); |
| 690 void mov_d(FPURegister fd, FPURegister fs); |
| 691 void neg_d(FPURegister fd, FPURegister fs); |
| 692 void sqrt_d(FPURegister fd, FPURegister fs); |
482 | 693 |
483 // Conversion. | 694 // Conversion. |
484 void cvt_w_s(FPURegister fd, FPURegister fs); | 695 void cvt_w_s(FPURegister fd, FPURegister fs); |
485 void cvt_w_d(FPURegister fd, FPURegister fs); | 696 void cvt_w_d(FPURegister fd, FPURegister fs); |
| 697 void trunc_w_s(FPURegister fd, FPURegister fs); |
| 698 void trunc_w_d(FPURegister fd, FPURegister fs); |
| 699 void round_w_s(FPURegister fd, FPURegister fs); |
| 700 void round_w_d(FPURegister fd, FPURegister fs); |
| 701 void floor_w_s(FPURegister fd, FPURegister fs); |
| 702 void floor_w_d(FPURegister fd, FPURegister fs); |
| 703 void ceil_w_s(FPURegister fd, FPURegister fs); |
| 704 void ceil_w_d(FPURegister fd, FPURegister fs); |
486 | 705 |
487 void cvt_l_s(FPURegister fd, FPURegister fs); | 706 void cvt_l_s(FPURegister fd, FPURegister fs); |
488 void cvt_l_d(FPURegister fd, FPURegister fs); | 707 void cvt_l_d(FPURegister fd, FPURegister fs); |
| 708 void trunc_l_s(FPURegister fd, FPURegister fs); |
| 709 void trunc_l_d(FPURegister fd, FPURegister fs); |
| 710 void round_l_s(FPURegister fd, FPURegister fs); |
| 711 void round_l_d(FPURegister fd, FPURegister fs); |
| 712 void floor_l_s(FPURegister fd, FPURegister fs); |
| 713 void floor_l_d(FPURegister fd, FPURegister fs); |
| 714 void ceil_l_s(FPURegister fd, FPURegister fs); |
| 715 void ceil_l_d(FPURegister fd, FPURegister fs); |
489 | 716 |
490 void cvt_s_w(FPURegister fd, FPURegister fs); | 717 void cvt_s_w(FPURegister fd, FPURegister fs); |
491 void cvt_s_l(FPURegister fd, FPURegister fs); | 718 void cvt_s_l(FPURegister fd, FPURegister fs); |
492 void cvt_s_d(FPURegister fd, FPURegister fs); | 719 void cvt_s_d(FPURegister fd, FPURegister fs); |
493 | 720 |
494 void cvt_d_w(FPURegister fd, FPURegister fs); | 721 void cvt_d_w(FPURegister fd, FPURegister fs); |
495 void cvt_d_l(FPURegister fd, FPURegister fs); | 722 void cvt_d_l(FPURegister fd, FPURegister fs); |
496 void cvt_d_s(FPURegister fd, FPURegister fs); | 723 void cvt_d_s(FPURegister fd, FPURegister fs); |
497 | 724 |
498 // Conditions and branches. | 725 // Conditions and branches. |
499 void c(FPUCondition cond, SecondaryField fmt, | 726 void c(FPUCondition cond, SecondaryField fmt, |
500 FPURegister ft, FPURegister fs, uint16_t cc = 0); | 727 FPURegister ft, FPURegister fs, uint16_t cc = 0); |
501 | 728 |
502 void bc1f(int16_t offset, uint16_t cc = 0); | 729 void bc1f(int16_t offset, uint16_t cc = 0); |
503 void bc1f(Label* L, uint16_t cc = 0) { bc1f(branch_offset(L, false)>>2, cc); } | 730 void bc1f(Label* L, uint16_t cc = 0) { bc1f(branch_offset(L, false)>>2, cc); } |
504 void bc1t(int16_t offset, uint16_t cc = 0); | 731 void bc1t(int16_t offset, uint16_t cc = 0); |
505 void bc1t(Label* L, uint16_t cc = 0) { bc1t(branch_offset(L, false)>>2, cc); } | 732 void bc1t(Label* L, uint16_t cc = 0) { bc1t(branch_offset(L, false)>>2, cc); } |
506 | 733 void fcmp(FPURegister src1, const double src2, FPUCondition cond); |
507 | 734 |
508 // Check the code size generated from label to here. | 735 // Check the code size generated from label to here. |
509 int InstructionsGeneratedSince(Label* l) { | 736 int InstructionsGeneratedSince(Label* l) { |
510 return (pc_offset() - l->pos()) / kInstrSize; | 737 return (pc_offset() - l->pos()) / kInstrSize; |
511 } | 738 } |
512 | 739 |
| 740 // Class for scoping postponing the trampoline pool generation. |
| 741 class BlockTrampolinePoolScope { |
| 742 public: |
| 743 explicit BlockTrampolinePoolScope(Assembler* assem) : assem_(assem) { |
| 744 assem_->StartBlockTrampolinePool(); |
| 745 } |
| 746 ~BlockTrampolinePoolScope() { |
| 747 assem_->EndBlockTrampolinePool(); |
| 748 } |
| 749 |
| 750 private: |
| 751 Assembler* assem_; |
| 752 |
| 753 DISALLOW_IMPLICIT_CONSTRUCTORS(BlockTrampolinePoolScope); |
| 754 }; |
| 755 |
513 // Debugging. | 756 // Debugging. |
514 | 757 |
515 // Mark address of the ExitJSFrame code. | 758 // Mark address of the ExitJSFrame code. |
516 void RecordJSReturn(); | 759 void RecordJSReturn(); |
517 | 760 |
| 761 // Mark address of a debug break slot. |
| 762 void RecordDebugBreakSlot(); |
| 763 |
518 // Record a comment relocation entry that can be used by a disassembler. | 764 // Record a comment relocation entry that can be used by a disassembler. |
519 // Use --debug_code to enable. | 765 // Use --code-comments to enable. |
520 void RecordComment(const char* msg); | 766 void RecordComment(const char* msg); |
521 | 767 |
522 void RecordPosition(int pos); | 768 // Writes a single byte or word of data in the code stream. Used for |
523 void RecordStatementPosition(int pos); | 769 // inline tables, e.g., jump-tables. |
524 bool WriteRecordedPositions(); | 770 void db(uint8_t data); |
| 771 void dd(uint32_t data); |
525 | 772 |
526 int32_t pc_offset() const { return pc_ - buffer_; } | 773 int32_t pc_offset() const { return pc_ - buffer_; } |
527 int32_t current_position() const { return current_position_; } | 774 |
528 int32_t current_statement_position() const { | 775 PositionsRecorder* positions_recorder() { return &positions_recorder_; } |
529 return current_statement_position_; | 776 |
| 777 bool can_peephole_optimize(int instructions) { |
| 778 if (!allow_peephole_optimization_) return false; |
| 779 if (last_bound_pos_ > pc_offset() - instructions * kInstrSize) return false; |
| 780 return reloc_info_writer.last_pc() <= pc_ - instructions * kInstrSize; |
530 } | 781 } |
531 | 782 |
| 783 // Postpone the generation of the trampoline pool for the specified number of |
| 784 // instructions. |
| 785 void BlockTrampolinePoolFor(int instructions); |
| 786 |
532 // Check if there is less than kGap bytes available in the buffer. | 787 // Check if there is less than kGap bytes available in the buffer. |
533 // If this is the case, we need to grow the buffer before emitting | 788 // If this is the case, we need to grow the buffer before emitting |
534 // an instruction or relocation information. | 789 // an instruction or relocation information. |
535 inline bool overflow() const { return pc_ >= reloc_info_writer.pos() - kGap; } | 790 inline bool overflow() const { return pc_ >= reloc_info_writer.pos() - kGap; } |
536 | 791 |
537 // Get the number of bytes available in the buffer. | 792 // Get the number of bytes available in the buffer. |
538 inline int available_space() const { return reloc_info_writer.pos() - pc_; } | 793 inline int available_space() const { return reloc_info_writer.pos() - pc_; } |
539 | 794 |
540 protected: | |
541 int32_t buffer_space() const { return reloc_info_writer.pos() - pc_; } | |
542 | |
543 // Read/patch instructions. | 795 // Read/patch instructions. |
544 static Instr instr_at(byte* pc) { return *reinterpret_cast<Instr*>(pc); } | 796 static Instr instr_at(byte* pc) { return *reinterpret_cast<Instr*>(pc); } |
545 void instr_at_put(byte* pc, Instr instr) { | 797 static void instr_at_put(byte* pc, Instr instr) { |
546 *reinterpret_cast<Instr*>(pc) = instr; | 798 *reinterpret_cast<Instr*>(pc) = instr; |
547 } | 799 } |
548 Instr instr_at(int pos) { return *reinterpret_cast<Instr*>(buffer_ + pos); } | 800 Instr instr_at(int pos) { return *reinterpret_cast<Instr*>(buffer_ + pos); } |
549 void instr_at_put(int pos, Instr instr) { | 801 void instr_at_put(int pos, Instr instr) { |
550 *reinterpret_cast<Instr*>(buffer_ + pos) = instr; | 802 *reinterpret_cast<Instr*>(buffer_ + pos) = instr; |
551 } | 803 } |
552 | 804 |
553 // Check if an instruction is a branch of some kind. | 805 // Check if an instruction is a branch of some kind. |
554 bool is_branch(Instr instr); | 806 static bool IsBranch(Instr instr); |
| 807 |
| 808 static bool IsNop(Instr instr, unsigned int type); |
| 809 static bool IsPop(Instr instr); |
| 810 static bool IsPush(Instr instr); |
| 811 static bool IsLwRegFpOffset(Instr instr); |
| 812 static bool IsSwRegFpOffset(Instr instr); |
| 813 static bool IsLwRegFpNegOffset(Instr instr); |
| 814 static bool IsSwRegFpNegOffset(Instr instr); |
| 815 |
| 816 static Register GetRt(Instr instr); |
| 817 |
| 818 static int32_t GetBranchOffset(Instr instr); |
| 819 static bool IsLw(Instr instr); |
| 820 static int16_t GetLwOffset(Instr instr); |
| 821 static Instr SetLwOffset(Instr instr, int16_t offset); |
| 822 |
| 823 static bool IsSw(Instr instr); |
| 824 static Instr SetSwOffset(Instr instr, int16_t offset); |
| 825 static bool IsAddImmediate(Instr instr); |
| 826 static Instr SetAddImmediateOffset(Instr instr, int16_t offset); |
| 827 |
| 828 void CheckTrampolinePool(bool force_emit = false); |
| 829 |
| 830 protected: |
| 831 bool emit_debug_code() const { return emit_debug_code_; } |
| 832 |
| 833 int32_t buffer_space() const { return reloc_info_writer.pos() - pc_; } |
555 | 834 |
556 // Decode branch instruction at pos and return branch target pos. | 835 // Decode branch instruction at pos and return branch target pos. |
557 int target_at(int32_t pos); | 836 int target_at(int32_t pos); |
558 | 837 |
559 // Patch branch instruction at pos to branch to given branch target pos. | 838 // Patch branch instruction at pos to branch to given branch target pos. |
560 void target_at_put(int32_t pos, int32_t target_pos); | 839 void target_at_put(int32_t pos, int32_t target_pos); |
561 | 840 |
562 // Say if we need to relocate with this mode. | 841 // Say if we need to relocate with this mode. |
563 bool MustUseAt(RelocInfo::Mode rmode); | 842 bool MustUseReg(RelocInfo::Mode rmode); |
564 | 843 |
565 // Record reloc info for current pc_. | 844 // Record reloc info for current pc_. |
566 void RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data = 0); | 845 void RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data = 0); |
567 | 846 |
| 847 // Block the emission of the trampoline pool before pc_offset. |
| 848 void BlockTrampolinePoolBefore(int pc_offset) { |
| 849 if (no_trampoline_pool_before_ < pc_offset) |
| 850 no_trampoline_pool_before_ = pc_offset; |
| 851 } |
| 852 |
| 853 void StartBlockTrampolinePool() { |
| 854 trampoline_pool_blocked_nesting_++; |
| 855 } |
| 856 void EndBlockTrampolinePool() { |
| 857 trampoline_pool_blocked_nesting_--; |
| 858 } |
| 859 |
| 860 bool is_trampoline_pool_blocked() const { |
| 861 return trampoline_pool_blocked_nesting_ > 0; |
| 862 } |
| 863 |
568 private: | 864 private: |
569 // Code buffer: | 865 // Code buffer: |
570 // The buffer into which code and relocation info are generated. | 866 // The buffer into which code and relocation info are generated. |
571 byte* buffer_; | 867 byte* buffer_; |
572 int buffer_size_; | 868 int buffer_size_; |
573 // True if the assembler owns the buffer, false if buffer is external. | 869 // True if the assembler owns the buffer, false if buffer is external. |
574 bool own_buffer_; | 870 bool own_buffer_; |
575 | 871 |
576 // Buffer size and constant pool distance are checked together at regular | 872 // Buffer size and constant pool distance are checked together at regular |
577 // intervals of kBufferCheckInterval emitted bytes. | 873 // intervals of kBufferCheckInterval emitted bytes. |
578 static const int kBufferCheckInterval = 1*KB/2; | 874 static const int kBufferCheckInterval = 1*KB/2; |
579 | 875 |
580 // Code generation. | 876 // Code generation. |
581 // The relocation writer's position is at least kGap bytes below the end of | 877 // The relocation writer's position is at least kGap bytes below the end of |
582 // the generated instructions. This is so that multi-instruction sequences do | 878 // the generated instructions. This is so that multi-instruction sequences do |
583 // not have to check for overflow. The same is true for writes of large | 879 // not have to check for overflow. The same is true for writes of large |
584 // relocation info entries. | 880 // relocation info entries. |
585 static const int kGap = 32; | 881 static const int kGap = 32; |
586 byte* pc_; // The program counter - moves forward. | 882 byte* pc_; // The program counter - moves forward. |
587 | 883 |
| 884 |
| 885 // Repeated checking whether the trampoline pool should be emitted is rather |
| 886 // expensive. By default we only check again once a number of instructions |
| 887 // has been generated. |
| 888 static const int kCheckConstIntervalInst = 32; |
| 889 static const int kCheckConstInterval = kCheckConstIntervalInst * kInstrSize; |
| 890 |
| 891 int next_buffer_check_; // pc offset of next buffer check. |
| 892 |
| 893 // Emission of the trampoline pool may be blocked in some code sequences. |
| 894 int trampoline_pool_blocked_nesting_; // Block emission if this is not zero. |
| 895 int no_trampoline_pool_before_; // Block emission before this pc offset. |
| 896 |
| 897 // Keep track of the last emitted pool to guarantee a maximal distance. |
| 898 int last_trampoline_pool_end_; // pc offset of the end of the last pool. |
| 899 |
588 // Relocation information generation. | 900 // Relocation information generation. |
589 // Each relocation is encoded as a variable size value. | 901 // Each relocation is encoded as a variable size value. |
590 static const int kMaxRelocSize = RelocInfoWriter::kMaxSize; | 902 static const int kMaxRelocSize = RelocInfoWriter::kMaxSize; |
591 RelocInfoWriter reloc_info_writer; | 903 RelocInfoWriter reloc_info_writer; |
592 | 904 |
593 // The bound position, before this we cannot do instruction elimination. | 905 // The bound position, before this we cannot do instruction elimination. |
594 int last_bound_pos_; | 906 int last_bound_pos_; |
595 | 907 |
596 // Source position information. | |
597 int current_position_; | |
598 int current_statement_position_; | |
599 int written_position_; | |
600 int written_statement_position_; | |
601 | |
602 // Code emission. | 908 // Code emission. |
603 inline void CheckBuffer(); | 909 inline void CheckBuffer(); |
604 void GrowBuffer(); | 910 void GrowBuffer(); |
605 inline void emit(Instr x); | 911 inline void emit(Instr x); |
| 912 inline void CheckTrampolinePoolQuick(); |
606 | 913 |
607 // Instruction generation. | 914 // Instruction generation. |
608 // We have 3 different kind of encoding layout on MIPS. | 915 // We have 3 different kind of encoding layout on MIPS. |
609 // However due to many different types of objects encoded in the same fields | 916 // However due to many different types of objects encoded in the same fields |
610 // we have quite a few aliases for each mode. | 917 // we have quite a few aliases for each mode. |
611 // Using the same structure to refer to Register and FPURegister would spare a | 918 // Using the same structure to refer to Register and FPURegister would spare a |
612 // few aliases, but mixing both does not look clean to me. | 919 // few aliases, but mixing both does not look clean to me. |
613 // Anyway we could surely implement this differently. | 920 // Anyway we could surely implement this differently. |
614 | 921 |
615 void GenInstrRegister(Opcode opcode, | 922 void GenInstrRegister(Opcode opcode, |
616 Register rs, | 923 Register rs, |
617 Register rt, | 924 Register rt, |
618 Register rd, | 925 Register rd, |
619 uint16_t sa = 0, | 926 uint16_t sa = 0, |
620 SecondaryField func = NULLSF); | 927 SecondaryField func = NULLSF); |
621 | 928 |
622 void GenInstrRegister(Opcode opcode, | 929 void GenInstrRegister(Opcode opcode, |
| 930 Register rs, |
| 931 Register rt, |
| 932 uint16_t msb, |
| 933 uint16_t lsb, |
| 934 SecondaryField func); |
| 935 |
| 936 void GenInstrRegister(Opcode opcode, |
623 SecondaryField fmt, | 937 SecondaryField fmt, |
624 FPURegister ft, | 938 FPURegister ft, |
625 FPURegister fs, | 939 FPURegister fs, |
626 FPURegister fd, | 940 FPURegister fd, |
627 SecondaryField func = NULLSF); | 941 SecondaryField func = NULLSF); |
628 | 942 |
629 void GenInstrRegister(Opcode opcode, | 943 void GenInstrRegister(Opcode opcode, |
630 SecondaryField fmt, | 944 SecondaryField fmt, |
631 Register rt, | 945 Register rt, |
632 FPURegister fs, | 946 FPURegister fs, |
633 FPURegister fd, | 947 FPURegister fd, |
634 SecondaryField func = NULLSF); | 948 SecondaryField func = NULLSF); |
635 | 949 |
| 950 void GenInstrRegister(Opcode opcode, |
| 951 SecondaryField fmt, |
| 952 Register rt, |
| 953 FPUControlRegister fs, |
| 954 SecondaryField func = NULLSF); |
| 955 |
636 | 956 |
637 void GenInstrImmediate(Opcode opcode, | 957 void GenInstrImmediate(Opcode opcode, |
638 Register rs, | 958 Register rs, |
639 Register rt, | 959 Register rt, |
640 int32_t j); | 960 int32_t j); |
641 void GenInstrImmediate(Opcode opcode, | 961 void GenInstrImmediate(Opcode opcode, |
642 Register rs, | 962 Register rs, |
643 SecondaryField SF, | 963 SecondaryField SF, |
644 int32_t j); | 964 int32_t j); |
645 void GenInstrImmediate(Opcode opcode, | 965 void GenInstrImmediate(Opcode opcode, |
646 Register r1, | 966 Register r1, |
647 FPURegister r2, | 967 FPURegister r2, |
648 int32_t j); | 968 int32_t j); |
649 | 969 |
650 | 970 |
651 void GenInstrJump(Opcode opcode, | 971 void GenInstrJump(Opcode opcode, |
652 uint32_t address); | 972 uint32_t address); |
653 | 973 |
| 974 // Helpers. |
| 975 void LoadRegPlusOffsetToAt(const MemOperand& src); |
654 | 976 |
655 // Labels. | 977 // Labels. |
656 void print(Label* L); | 978 void print(Label* L); |
657 void bind_to(Label* L, int pos); | 979 void bind_to(Label* L, int pos); |
658 void link_to(Label* L, Label* appendix); | 980 void link_to(Label* L, Label* appendix); |
659 void next(Label* L); | 981 void next(Label* L); |
660 | 982 |
| 983 // One trampoline consists of: |
| 984 // - space for trampoline slots, |
| 985 // - space for labels. |
| 986 // |
| 987 // Space for trampoline slots is equal to slot_count * 2 * kInstrSize. |
| 988 // Space for trampoline slots preceeds space for labels. Each label is of one |
| 989 // instruction size, so total amount for labels is equal to |
| 990 // label_count * kInstrSize. |
| 991 class Trampoline { |
| 992 public: |
| 993 Trampoline(int start, int slot_count, int label_count) { |
| 994 start_ = start; |
| 995 next_slot_ = start; |
| 996 free_slot_count_ = slot_count; |
| 997 next_label_ = start + slot_count * 2 * kInstrSize; |
| 998 free_label_count_ = label_count; |
| 999 end_ = next_label_ + (label_count - 1) * kInstrSize; |
| 1000 } |
| 1001 int start() { |
| 1002 return start_; |
| 1003 } |
| 1004 int end() { |
| 1005 return end_; |
| 1006 } |
| 1007 int take_slot() { |
| 1008 int trampoline_slot = next_slot_; |
| 1009 ASSERT(free_slot_count_ > 0); |
| 1010 free_slot_count_--; |
| 1011 next_slot_ += 2 * kInstrSize; |
| 1012 return trampoline_slot; |
| 1013 } |
| 1014 int take_label() { |
| 1015 int label_pos = next_label_; |
| 1016 ASSERT(free_label_count_ > 0); |
| 1017 free_label_count_--; |
| 1018 next_label_ += kInstrSize; |
| 1019 return label_pos; |
| 1020 } |
| 1021 private: |
| 1022 int start_; |
| 1023 int end_; |
| 1024 int next_slot_; |
| 1025 int free_slot_count_; |
| 1026 int next_label_; |
| 1027 int free_label_count_; |
| 1028 }; |
| 1029 |
| 1030 int32_t get_label_entry(int32_t pos, bool next_pool = true); |
| 1031 int32_t get_trampoline_entry(int32_t pos, bool next_pool = true); |
| 1032 |
| 1033 static const int kSlotsPerTrampoline = 2304; |
| 1034 static const int kLabelsPerTrampoline = 8; |
| 1035 static const int kTrampolineInst = |
| 1036 2 * kSlotsPerTrampoline + kLabelsPerTrampoline; |
| 1037 static const int kTrampolineSize = kTrampolineInst * kInstrSize; |
| 1038 static const int kMaxBranchOffset = (1 << (18 - 1)) - 1; |
| 1039 static const int kMaxDistBetweenPools = |
| 1040 kMaxBranchOffset - 2 * kTrampolineSize; |
| 1041 |
| 1042 List<Trampoline> trampolines_; |
| 1043 |
661 friend class RegExpMacroAssemblerMIPS; | 1044 friend class RegExpMacroAssemblerMIPS; |
662 friend class RelocInfo; | 1045 friend class RelocInfo; |
| 1046 friend class CodePatcher; |
| 1047 friend class BlockTrampolinePoolScope; |
| 1048 |
| 1049 PositionsRecorder positions_recorder_; |
| 1050 bool allow_peephole_optimization_; |
| 1051 bool emit_debug_code_; |
| 1052 friend class PositionsRecorder; |
| 1053 friend class EnsureSpace; |
| 1054 }; |
| 1055 |
| 1056 |
| 1057 class EnsureSpace BASE_EMBEDDED { |
| 1058 public: |
| 1059 explicit EnsureSpace(Assembler* assembler) { |
| 1060 assembler->CheckBuffer(); |
| 1061 } |
663 }; | 1062 }; |
664 | 1063 |
665 } } // namespace v8::internal | 1064 } } // namespace v8::internal |
666 | 1065 |
667 #endif // V8_ARM_ASSEMBLER_MIPS_H_ | 1066 #endif // V8_ARM_ASSEMBLER_MIPS_H_ |
OLD | NEW |