OLD | NEW |
1 // Copyright (c) 1994-2006 Sun Microsystems Inc. | 1 // Copyright (c) 1994-2006 Sun Microsystems Inc. |
2 // All Rights Reserved. | 2 // All Rights Reserved. |
3 // | 3 // |
4 // Redistribution and use in source and binary forms, with or without | 4 // Redistribution and use in source and binary forms, with or without |
5 // modification, are permitted provided that the following conditions are | 5 // modification, are permitted provided that the following conditions are |
6 // met: | 6 // met: |
7 // | 7 // |
8 // - Redistributions of source code must retain the above copyright notice, | 8 // - Redistributions of source code must retain the above copyright notice, |
9 // this list of conditions and the following disclaimer. | 9 // this list of conditions and the following disclaimer. |
10 // | 10 // |
(...skipping 22 matching lines...) |
33 // Copyright 2010 the V8 project authors. All rights reserved. | 33 // Copyright 2010 the V8 project authors. All rights reserved. |
34 | 34 |
35 | 35 |
36 #include "v8.h" | 36 #include "v8.h" |
37 | 37 |
38 #if defined(V8_TARGET_ARCH_MIPS) | 38 #if defined(V8_TARGET_ARCH_MIPS) |
39 | 39 |
40 #include "mips/assembler-mips-inl.h" | 40 #include "mips/assembler-mips-inl.h" |
41 #include "serialize.h" | 41 #include "serialize.h" |
42 | 42 |
43 | |
44 namespace v8 { | 43 namespace v8 { |
45 namespace internal { | 44 namespace internal { |
46 | 45 |
| 46 CpuFeatures::CpuFeatures() |
| 47 : supported_(0), |
| 48 enabled_(0), |
| 49 found_by_runtime_probing_(0) { |
| 50 } |
47 | 51 |
| 52 void CpuFeatures::Probe(bool portable) { |
| 53 // If the compiler is allowed to use the FPU then we can use it too in |
| 54 // our code generation. |
| 55 #if !defined(__mips__) |
| 56 // For the simulator=mips build, use FPU when FLAG_enable_fpu is enabled. |
| 57 if (FLAG_enable_fpu) { |
| 58 supported_ |= 1u << FPU; |
| 59 } |
| 60 #else |
| 61 if (portable && Serializer::enabled()) { |
| 62 supported_ |= OS::CpuFeaturesImpliedByPlatform(); |
| 63 return; // No features if we might serialize. |
| 64 } |
48 | 65 |
49 const Register no_reg = { -1 }; | 66 if (OS::MipsCpuHasFeature(FPU)) { |
| 67 // This implementation also sets the FPU flags if |
| 68 // runtime detection of FPU returns true. |
| 69 supported_ |= 1u << FPU; |
| 70 found_by_runtime_probing_ |= 1u << FPU; |
| 71 } |
50 | 72 |
51 const Register zero_reg = { 0 }; | 73 if (!portable) found_by_runtime_probing_ = 0; |
52 const Register at = { 1 }; | 74 #endif |
53 const Register v0 = { 2 }; | 75 } |
54 const Register v1 = { 3 }; | |
55 const Register a0 = { 4 }; | |
56 const Register a1 = { 5 }; | |
57 const Register a2 = { 6 }; | |
58 const Register a3 = { 7 }; | |
59 const Register t0 = { 8 }; | |
60 const Register t1 = { 9 }; | |
61 const Register t2 = { 10 }; | |
62 const Register t3 = { 11 }; | |
63 const Register t4 = { 12 }; | |
64 const Register t5 = { 13 }; | |
65 const Register t6 = { 14 }; | |
66 const Register t7 = { 15 }; | |
67 const Register s0 = { 16 }; | |
68 const Register s1 = { 17 }; | |
69 const Register s2 = { 18 }; | |
70 const Register s3 = { 19 }; | |
71 const Register s4 = { 20 }; | |
72 const Register s5 = { 21 }; | |
73 const Register s6 = { 22 }; | |
74 const Register s7 = { 23 }; | |
75 const Register t8 = { 24 }; | |
76 const Register t9 = { 25 }; | |
77 const Register k0 = { 26 }; | |
78 const Register k1 = { 27 }; | |
79 const Register gp = { 28 }; | |
80 const Register sp = { 29 }; | |
81 const Register s8_fp = { 30 }; | |
82 const Register ra = { 31 }; | |
83 | 76 |
84 | 77 |
85 const FPURegister no_creg = { -1 }; | |
86 | |
87 const FPURegister f0 = { 0 }; | |
88 const FPURegister f1 = { 1 }; | |
89 const FPURegister f2 = { 2 }; | |
90 const FPURegister f3 = { 3 }; | |
91 const FPURegister f4 = { 4 }; | |
92 const FPURegister f5 = { 5 }; | |
93 const FPURegister f6 = { 6 }; | |
94 const FPURegister f7 = { 7 }; | |
95 const FPURegister f8 = { 8 }; | |
96 const FPURegister f9 = { 9 }; | |
97 const FPURegister f10 = { 10 }; | |
98 const FPURegister f11 = { 11 }; | |
99 const FPURegister f12 = { 12 }; | |
100 const FPURegister f13 = { 13 }; | |
101 const FPURegister f14 = { 14 }; | |
102 const FPURegister f15 = { 15 }; | |
103 const FPURegister f16 = { 16 }; | |
104 const FPURegister f17 = { 17 }; | |
105 const FPURegister f18 = { 18 }; | |
106 const FPURegister f19 = { 19 }; | |
107 const FPURegister f20 = { 20 }; | |
108 const FPURegister f21 = { 21 }; | |
109 const FPURegister f22 = { 22 }; | |
110 const FPURegister f23 = { 23 }; | |
111 const FPURegister f24 = { 24 }; | |
112 const FPURegister f25 = { 25 }; | |
113 const FPURegister f26 = { 26 }; | |
114 const FPURegister f27 = { 27 }; | |
115 const FPURegister f28 = { 28 }; | |
116 const FPURegister f29 = { 29 }; | |
117 const FPURegister f30 = { 30 }; | |
118 const FPURegister f31 = { 31 }; | |
119 | |
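Aside: CpuFeatures::Probe() in the new column above records each detected feature as one bit of a 32-bit mask. A minimal sketch of that bit-mask convention (illustration only, not part of the patch):

    uint32_t supported = 0;
    supported |= 1u << FPU;                         // Record the feature, as Probe() does.
    bool has_fpu = (supported & (1u << FPU)) != 0;  // Query it back later.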
120 int ToNumber(Register reg) { | 78 int ToNumber(Register reg) { |
121 ASSERT(reg.is_valid()); | 79 ASSERT(reg.is_valid()); |
122 const int kNumbers[] = { | 80 const int kNumbers[] = { |
123 0, // zero_reg | 81 0, // zero_reg |
124 1, // at | 82 1, // at |
125 2, // v0 | 83 2, // v0 |
126 3, // v1 | 84 3, // v1 |
127 4, // a0 | 85 4, // a0 |
128 5, // a1 | 86 5, // a1 |
129 6, // a2 | 87 6, // a2 |
(...skipping 19 matching lines...) |
149 26, // k0 | 107 26, // k0 |
150 27, // k1 | 108 27, // k1 |
151 28, // gp | 109 28, // gp |
152 29, // sp | 110 29, // sp |
153 30, // s8_fp | 111 30, // s8_fp |
154 31, // ra | 112 31, // ra |
155 }; | 113 }; |
156 return kNumbers[reg.code()]; | 114 return kNumbers[reg.code()]; |
157 } | 115 } |
158 | 116 |
| 117 |
159 Register ToRegister(int num) { | 118 Register ToRegister(int num) { |
160 ASSERT(num >= 0 && num < kNumRegisters); | 119 ASSERT(num >= 0 && num < kNumRegisters); |
161 const Register kRegisters[] = { | 120 const Register kRegisters[] = { |
162 zero_reg, | 121 zero_reg, |
163 at, | 122 at, |
164 v0, v1, | 123 v0, v1, |
165 a0, a1, a2, a3, | 124 a0, a1, a2, a3, |
166 t0, t1, t2, t3, t4, t5, t6, t7, | 125 t0, t1, t2, t3, t4, t5, t6, t7, |
167 s0, s1, s2, s3, s4, s5, s6, s7, | 126 s0, s1, s2, s3, s4, s5, s6, s7, |
168 t8, t9, | 127 t8, t9, |
169 k0, k1, | 128 k0, k1, |
170 gp, | 129 gp, |
171 sp, | 130 sp, |
172 s8_fp, | 131 s8_fp, |
173 ra | 132 ra |
174 }; | 133 }; |
175 return kRegisters[num]; | 134 return kRegisters[num]; |
176 } | 135 } |
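Since kNumbers and kRegisters are inverse tables, conversion round-trips in either direction. A small sketch (illustration only):

    Register r = ToRegister(8);     // t0, per the kRegisters table.
    int n = ToNumber(r);            // 8, per the kNumbers table.
    ASSERT(n == 8 && r.is(t0));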
177 | 136 |
178 | 137 |
179 // ----------------------------------------------------------------------------- | 138 // ----------------------------------------------------------------------------- |
180 // Implementation of RelocInfo. | 139 // Implementation of RelocInfo. |
181 | 140 |
182 const int RelocInfo::kApplyMask = 0; | 141 const int RelocInfo::kApplyMask = 0; |
183 | 142 |
| 143 |
| 144 bool RelocInfo::IsCodedSpecially() { |
| 145 // The deserializer needs to know whether a pointer is specially coded. Being |
| 146 // specially coded on MIPS means that it is a lui/ori instruction, and that is |
| 147 // always the case inside code objects. |
| 148 return true; |
| 149 } |
| 150 |
| 151 |
184 // Patch the code at the current address with the supplied instructions. | 152 // Patch the code at the current address with the supplied instructions. |
185 void RelocInfo::PatchCode(byte* instructions, int instruction_count) { | 153 void RelocInfo::PatchCode(byte* instructions, int instruction_count) { |
186 Instr* pc = reinterpret_cast<Instr*>(pc_); | 154 Instr* pc = reinterpret_cast<Instr*>(pc_); |
187 Instr* instr = reinterpret_cast<Instr*>(instructions); | 155 Instr* instr = reinterpret_cast<Instr*>(instructions); |
188 for (int i = 0; i < instruction_count; i++) { | 156 for (int i = 0; i < instruction_count; i++) { |
189 *(pc + i) = *(instr + i); | 157 *(pc + i) = *(instr + i); |
190 } | 158 } |
191 | 159 |
192 // Indicate that code has changed. | 160 // Indicate that code has changed. |
193 CPU::FlushICache(pc_, instruction_count * Assembler::kInstrSize); | 161 CPU::FlushICache(pc_, instruction_count * Assembler::kInstrSize); |
194 } | 162 } |
195 | 163 |
196 | 164 |
197 // Patch the code at the current PC with a call to the target address. | 165 // Patch the code at the current PC with a call to the target address. |
198 // Additional guard instructions can be added if required. | 166 // Additional guard instructions can be added if required. |
199 void RelocInfo::PatchCodeWithCall(Address target, int guard_bytes) { | 167 void RelocInfo::PatchCodeWithCall(Address target, int guard_bytes) { |
200 // Patch the code at the current address with a call to the target. | 168 // Patch the code at the current address with a call to the target. |
201 UNIMPLEMENTED_MIPS(); | 169 UNIMPLEMENTED_MIPS(); |
202 } | 170 } |
203 | 171 |
204 | 172 |
205 // ----------------------------------------------------------------------------- | 173 // ----------------------------------------------------------------------------- |
206 // Implementation of Operand and MemOperand. | 174 // Implementation of Operand and MemOperand. |
207 // See assembler-mips-inl.h for inlined constructors. | 175 // See assembler-mips-inl.h for inlined constructors. |
208 | 176 |
209 Operand::Operand(Handle<Object> handle) { | 177 Operand::Operand(Handle<Object> handle) { |
210 rm_ = no_reg; | 178 rm_ = no_reg; |
211 // Verify all Objects referred to by code are NOT in new space. | 179 // Verify all Objects referred to by code are NOT in new space. |
212 Object* obj = *handle; | 180 Object* obj = *handle; |
213 ASSERT(!Heap::InNewSpace(obj)); | 181 ASSERT(!HEAP->InNewSpace(obj)); |
214 if (obj->IsHeapObject()) { | 182 if (obj->IsHeapObject()) { |
215 imm32_ = reinterpret_cast<intptr_t>(handle.location()); | 183 imm32_ = reinterpret_cast<intptr_t>(handle.location()); |
216 rmode_ = RelocInfo::EMBEDDED_OBJECT; | 184 rmode_ = RelocInfo::EMBEDDED_OBJECT; |
217 } else { | 185 } else { |
218 // No relocation needed. | 186 // No relocation needed. |
219 imm32_ = reinterpret_cast<intptr_t>(obj); | 187 imm32_ = reinterpret_cast<intptr_t>(obj); |
220 rmode_ = RelocInfo::NONE; | 188 rmode_ = RelocInfo::NONE; |
221 } | 189 } |
222 } | 190 } |
223 | 191 |
224 MemOperand::MemOperand(Register rm, int16_t offset) : Operand(rm) { | 192 |
| 193 MemOperand::MemOperand(Register rm, int32_t offset) : Operand(rm) { |
225 offset_ = offset; | 194 offset_ = offset; |
226 } | 195 } |
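Note the offset parameter widening from int16_t to int32_t here; offsets that no longer fit the 16-bit immediate field are split across lui/ori by the LoadRegPlusOffsetToAt() helper added further down. A usage sketch (illustration only):

    MemOperand small_off(s0, 16);      // Fits the 16-bit immediate directly.
    MemOperand big_off(s0, 1 << 20);   // Now legal; accesses go via lui/ori and at.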
227 | 196 |
228 | 197 |
229 // ----------------------------------------------------------------------------- | 198 // ----------------------------------------------------------------------------- |
230 // Implementation of Assembler. | 199 // Specific instructions, constants, and masks. |
231 | 200 |
232 static const int kMinimalBufferSize = 4*KB; | 201 static const int kNegOffset = 0x00008000; |
233 static byte* spare_buffer_ = NULL; | 202 // addiu(sp, sp, 4) aka Pop() operation or part of Pop(r) |
| 203 // operations as post-increment of sp. |
| 204 const Instr kPopInstruction = ADDIU | (sp.code() << kRsShift) |
| 205 | (sp.code() << kRtShift) | (kPointerSize & kImm16Mask); |
| 206 // addiu(sp, sp, -4) part of Push(r) operation as pre-decrement of sp. |
| 207 const Instr kPushInstruction = ADDIU | (sp.code() << kRsShift) |
| 208 | (sp.code() << kRtShift) | (-kPointerSize & kImm16Mask); |
| 209 // sw(r, MemOperand(sp, 0)) |
| 210 const Instr kPushRegPattern = SW | (sp.code() << kRsShift) |
| 211 | (0 & kImm16Mask); |
| 212 // lw(r, MemOperand(sp, 0)) |
| 213 const Instr kPopRegPattern = LW | (sp.code() << kRsShift) |
| 214 | (0 & kImm16Mask); |
234 | 215 |
235 Assembler::Assembler(void* buffer, int buffer_size) { | 216 const Instr kLwRegFpOffsetPattern = LW | (s8_fp.code() << kRsShift) |
| 217 | (0 & kImm16Mask); |
| 218 |
| 219 const Instr kSwRegFpOffsetPattern = SW | (s8_fp.code() << kRsShift) |
| 220 | (0 & kImm16Mask); |
| 221 |
| 222 const Instr kLwRegFpNegOffsetPattern = LW | (s8_fp.code() << kRsShift) |
| 223 | (kNegOffset & kImm16Mask); |
| 224 |
| 225 const Instr kSwRegFpNegOffsetPattern = SW | (s8_fp.code() << kRsShift) |
| 226 | (kNegOffset & kImm16Mask); |
| 227 // A mask for the Rt register for push, pop, lw, sw instructions. |
| 228 const Instr kRtMask = kRtFieldMask; |
| 229 const Instr kLwSwInstrTypeMask = 0xffe00000; |
| 230 const Instr kLwSwInstrArgumentMask = ~kLwSwInstrTypeMask; |
| 231 const Instr kLwSwOffsetMask = kImm16Mask; |
| 232 |
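For readers decoding these patterns: assuming the standard MIPS field layout (opcode in bits 31..26, so ADDIU = 9; kRsShift = 21; kRtShift = 16) and sp being register 29, the pop pattern works out to the familiar addiu sp, sp, 4 word:

    // (9 << 26) | (29 << 21) | (29 << 16) | (4 & 0xFFFF) == 0x27BD0004,
    // which is exactly what kPopInstruction above encodes.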
| 233 |
| 234 // Spare buffer. |
| 235 static const int kMinimalBufferSize = 4 * KB; |
| 236 |
| 237 |
| 238 Assembler::Assembler(void* buffer, int buffer_size) |
| 239 : AssemblerBase(Isolate::Current()), |
| 240 positions_recorder_(this), |
| 241 allow_peephole_optimization_(false) { |
| 242 // BUG(3245989): disable peephole optimization if crankshaft is enabled. |
| 243 allow_peephole_optimization_ = FLAG_peephole_optimization; |
236 if (buffer == NULL) { | 244 if (buffer == NULL) { |
237 // Do our own buffer management. | 245 // Do our own buffer management. |
238 if (buffer_size <= kMinimalBufferSize) { | 246 if (buffer_size <= kMinimalBufferSize) { |
239 buffer_size = kMinimalBufferSize; | 247 buffer_size = kMinimalBufferSize; |
240 | 248 |
241 if (spare_buffer_ != NULL) { | 249 if (isolate()->assembler_spare_buffer() != NULL) { |
242 buffer = spare_buffer_; | 250 buffer = isolate()->assembler_spare_buffer(); |
243 spare_buffer_ = NULL; | 251 isolate()->set_assembler_spare_buffer(NULL); |
244 } | 252 } |
245 } | 253 } |
246 if (buffer == NULL) { | 254 if (buffer == NULL) { |
247 buffer_ = NewArray<byte>(buffer_size); | 255 buffer_ = NewArray<byte>(buffer_size); |
248 } else { | 256 } else { |
249 buffer_ = static_cast<byte*>(buffer); | 257 buffer_ = static_cast<byte*>(buffer); |
250 } | 258 } |
251 buffer_size_ = buffer_size; | 259 buffer_size_ = buffer_size; |
252 own_buffer_ = true; | 260 own_buffer_ = true; |
253 | 261 |
254 } else { | 262 } else { |
255 // Use externally provided buffer instead. | 263 // Use externally provided buffer instead. |
256 ASSERT(buffer_size > 0); | 264 ASSERT(buffer_size > 0); |
257 buffer_ = static_cast<byte*>(buffer); | 265 buffer_ = static_cast<byte*>(buffer); |
258 buffer_size_ = buffer_size; | 266 buffer_size_ = buffer_size; |
259 own_buffer_ = false; | 267 own_buffer_ = false; |
260 } | 268 } |
261 | 269 |
262 // Setup buffer pointers. | 270 // Setup buffer pointers. |
263 ASSERT(buffer_ != NULL); | 271 ASSERT(buffer_ != NULL); |
264 pc_ = buffer_; | 272 pc_ = buffer_; |
265 reloc_info_writer.Reposition(buffer_ + buffer_size, pc_); | 273 reloc_info_writer.Reposition(buffer_ + buffer_size, pc_); |
266 current_statement_position_ = RelocInfo::kNoPosition; | 274 |
267 current_position_ = RelocInfo::kNoPosition; | 275 last_trampoline_pool_end_ = 0; |
268 written_statement_position_ = current_statement_position_; | 276 no_trampoline_pool_before_ = 0; |
269 written_position_ = current_position_; | 277 trampoline_pool_blocked_nesting_ = 0; |
| 278 next_buffer_check_ = kMaxBranchOffset - kTrampolineSize; |
270 } | 279 } |
271 | 280 |
272 | 281 |
273 Assembler::~Assembler() { | 282 Assembler::~Assembler() { |
274 if (own_buffer_) { | 283 if (own_buffer_) { |
275 if (spare_buffer_ == NULL && buffer_size_ == kMinimalBufferSize) { | 284 if (isolate()->assembler_spare_buffer() == NULL && |
276 spare_buffer_ = buffer_; | 285 buffer_size_ == kMinimalBufferSize) { |
| 286 isolate()->set_assembler_spare_buffer(buffer_); |
277 } else { | 287 } else { |
278 DeleteArray(buffer_); | 288 DeleteArray(buffer_); |
279 } | 289 } |
280 } | 290 } |
281 } | 291 } |
282 | 292 |
283 | 293 |
284 void Assembler::GetCode(CodeDesc* desc) { | 294 void Assembler::GetCode(CodeDesc* desc) { |
285 ASSERT(pc_ <= reloc_info_writer.pos()); // no overlap | 295 ASSERT(pc_ <= reloc_info_writer.pos()); // No overlap. |
286 // Setup code descriptor. | 296 // Setup code descriptor. |
287 desc->buffer = buffer_; | 297 desc->buffer = buffer_; |
288 desc->buffer_size = buffer_size_; | 298 desc->buffer_size = buffer_size_; |
289 desc->instr_size = pc_offset(); | 299 desc->instr_size = pc_offset(); |
290 desc->reloc_size = (buffer_ + buffer_size_) - reloc_info_writer.pos(); | 300 desc->reloc_size = (buffer_ + buffer_size_) - reloc_info_writer.pos(); |
291 } | 301 } |
292 | 302 |
293 | 303 |
| 304 void Assembler::Align(int m) { |
| 305 ASSERT(m >= 4 && IsPowerOf2(m)); |
| 306 while ((pc_offset() & (m - 1)) != 0) { |
| 307 nop(); |
| 308 } |
| 309 } |
| 310 |
| 311 |
| 312 void Assembler::CodeTargetAlign() { |
| 313 // No advantage to aligning branch/call targets to more than a |
| 314 // single instruction, as far as I am aware. |
| 315 Align(4); |
| 316 } |
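A quick usage sketch for Align(): it pads with single nops until pc_offset() reaches the requested multiple, so on MIPS32 (4-byte instructions) Align(8) emits at most one nop:

    // pc_offset() == 20: Align(8) emits one nop  -> pc_offset() == 24.
    // pc_offset() == 24: Align(8) emits nothing.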
| 317 |
| 318 |
| 319 Register Assembler::GetRt(Instr instr) { |
| 320 Register rt; |
| 321 rt.code_ = (instr & kRtMask) >> kRtShift; |
| 322 return rt; |
| 323 } |
| 324 |
| 325 |
| 326 bool Assembler::IsPop(Instr instr) { |
| 327 return (instr & ~kRtMask) == kPopRegPattern; |
| 328 } |
| 329 |
| 330 |
| 331 bool Assembler::IsPush(Instr instr) { |
| 332 return (instr & ~kRtMask) == kPushRegPattern; |
| 333 } |
| 334 |
| 335 |
| 336 bool Assembler::IsSwRegFpOffset(Instr instr) { |
| 337 return ((instr & kLwSwInstrTypeMask) == kSwRegFpOffsetPattern); |
| 338 } |
| 339 |
| 340 |
| 341 bool Assembler::IsLwRegFpOffset(Instr instr) { |
| 342 return ((instr & kLwSwInstrTypeMask) == kLwRegFpOffsetPattern); |
| 343 } |
| 344 |
| 345 |
| 346 bool Assembler::IsSwRegFpNegOffset(Instr instr) { |
| 347 return ((instr & (kLwSwInstrTypeMask | kNegOffset)) == |
| 348 kSwRegFpNegOffsetPattern); |
| 349 } |
| 350 |
| 351 |
| 352 bool Assembler::IsLwRegFpNegOffset(Instr instr) { |
| 353 return ((instr & (kLwSwInstrTypeMask | kNegOffset)) == |
| 354 kLwRegFpNegOffsetPattern); |
| 355 } |
| 356 |
| 357 |
294 // Labels refer to positions in the (to be) generated code. | 358 // Labels refer to positions in the (to be) generated code. |
295 // There are bound, linked, and unused labels. | 359 // There are bound, linked, and unused labels. |
296 // | 360 // |
297 // Bound labels refer to known positions in the already | 361 // Bound labels refer to known positions in the already |
298 // generated code. pos() is the position the label refers to. | 362 // generated code. pos() is the position the label refers to. |
299 // | 363 // |
300 // Linked labels refer to unknown positions in the code | 364 // Linked labels refer to unknown positions in the code |
301 // to be generated; pos() is the position of the last | 365 // to be generated; pos() is the position of the last |
302 // instruction using the label. | 366 // instruction using the label. |
303 | 367 |
| 368 // The link chain is terminated by a value of -1 in the instruction, |
| 369 // which is otherwise illegal (a branch offset of -1 is an infinite loop). |
| 370 // The instruction's 16-bit offset field addresses 32-bit words, but it is |
| 371 // converted in code to an 18-bit value addressing bytes, hence the -4. |
304 | 372 |
305 // The link chain is terminated by a negative code position (must be aligned). | |
306 const int kEndOfChain = -4; | 373 const int kEndOfChain = -4; |
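A worked decode of that word-to-byte conversion (sketch with a hypothetical helper name; it mirrors the shift trick target_at() uses below): shifting the 16-bit field to the top and arithmetic-shifting back by 14 sign-extends it and multiplies by 4 in one step.

    int32_t DecodeBranchByteOffset(Instr instr) {
      // <<16 moves imm16 to the top; >>14 sign-extends and scales to bytes.
      return ((instr & static_cast<int32_t>(kImm16Mask)) << 16) >> 14;
    }
    // For imm16 == 0xFFFF (i.e. -1), the result is -4 == kEndOfChain.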
307 | 374 |
308 bool Assembler::is_branch(Instr instr) { | 375 |
| 376 bool Assembler::IsBranch(Instr instr) { |
309 uint32_t opcode = ((instr & kOpcodeMask)); | 377 uint32_t opcode = ((instr & kOpcodeMask)); |
310 uint32_t rt_field = ((instr & kRtFieldMask)); | 378 uint32_t rt_field = ((instr & kRtFieldMask)); |
311 uint32_t rs_field = ((instr & kRsFieldMask)); | 379 uint32_t rs_field = ((instr & kRsFieldMask)); |
| 380 uint32_t label_constant = (instr & ~kImm16Mask); |
312 // Checks if the instruction is a branch. | 381 // Checks if the instruction is a branch. |
313 return opcode == BEQ || | 382 return opcode == BEQ || |
314 opcode == BNE || | 383 opcode == BNE || |
315 opcode == BLEZ || | 384 opcode == BLEZ || |
316 opcode == BGTZ || | 385 opcode == BGTZ || |
317 opcode == BEQL || | 386 opcode == BEQL || |
318 opcode == BNEL || | 387 opcode == BNEL || |
319 opcode == BLEZL || | 388 opcode == BLEZL || |
320 opcode == BGTZL || | 389 opcode == BGTZL || |
321 (opcode == REGIMM && (rt_field == BLTZ || rt_field == BGEZ || | 390 (opcode == REGIMM && (rt_field == BLTZ || rt_field == BGEZ || |
322 rt_field == BLTZAL || rt_field == BGEZAL)) || | 391 rt_field == BLTZAL || rt_field == BGEZAL)) || |
323 (opcode == COP1 && rs_field == BC1); // Coprocessor branch. | 392 (opcode == COP1 && rs_field == BC1) || // Coprocessor branch. |
| 393 label_constant == 0; // Emitted label const in reg-exp engine. |
324 } | 394 } |
325 | 395 |
326 | 396 |
| 397 bool Assembler::IsNop(Instr instr, unsigned int type) { |
| 398 // See Assembler::nop(type). |
| 399 ASSERT(type < 32); |
| 400 uint32_t opcode = ((instr & kOpcodeMask)); |
| 401 uint32_t rt = ((instr & kRtFieldMask) >> kRtShift); |
| 402 uint32_t rs = ((instr & kRsFieldMask) >> kRsShift); |
| 403 uint32_t sa = ((instr & kSaFieldMask) >> kSaShift); |
| 404 |
| 405 // nop(type) == sll(zero_reg, zero_reg, type); |
| 406 // Technically all these values will be 0 but |
| 407 // this makes more sense to the reader. |
| 408 |
| 409 bool ret = (opcode == SLL && |
| 410 rt == static_cast<uint32_t>(ToNumber(zero_reg)) && |
| 411 rs == static_cast<uint32_t>(ToNumber(zero_reg)) && |
| 412 sa == type); |
| 413 |
| 414 return ret; |
| 415 } |
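Because SLL, zero_reg, and an sa of 0 all encode as zero bits, a type-N marker nop is simply N shifted into the sa field. A round-trip sketch (assuming the usual kSaShift of 6):

    Instr marker = 1 << kSaShift;   // sll zero_reg, zero_reg, 1 == 0x00000040.
    // IsNop(marker, 1) is true; IsNop(marker, 0) is false.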
| 416 |
| 417 |
| 418 int32_t Assembler::GetBranchOffset(Instr instr) { |
| 419 ASSERT(IsBranch(instr)); |
| 420 return ((int16_t)(instr & kImm16Mask)) << 2; |
| 421 } |
| 422 |
| 423 |
| 424 bool Assembler::IsLw(Instr instr) { |
| 425 return ((instr & kOpcodeMask) == LW); |
| 426 } |
| 427 |
| 428 |
| 429 int16_t Assembler::GetLwOffset(Instr instr) { |
| 430 ASSERT(IsLw(instr)); |
| 431 return ((instr & kImm16Mask)); |
| 432 } |
| 433 |
| 434 |
| 435 Instr Assembler::SetLwOffset(Instr instr, int16_t offset) { |
| 436 ASSERT(IsLw(instr)); |
| 437 |
| 438 // We actually create a new lw instruction based on the original one. |
| 439 Instr temp_instr = LW | (instr & kRsFieldMask) | (instr & kRtFieldMask) |
| 440 | (offset & kImm16Mask); |
| 441 |
| 442 return temp_instr; |
| 443 } |
| 444 |
| 445 |
| 446 bool Assembler::IsSw(Instr instr) { |
| 447 return ((instr & kOpcodeMask) == SW); |
| 448 } |
| 449 |
| 450 |
| 451 Instr Assembler::SetSwOffset(Instr instr, int16_t offset) { |
| 452 ASSERT(IsSw(instr)); |
| 453 return ((instr & ~kImm16Mask) | (offset & kImm16Mask)); |
| 454 } |
| 455 |
| 456 |
| 457 bool Assembler::IsAddImmediate(Instr instr) { |
| 458 return ((instr & kOpcodeMask) == ADDIU); |
| 459 } |
| 460 |
| 461 |
| 462 Instr Assembler::SetAddImmediateOffset(Instr instr, int16_t offset) { |
| 463 ASSERT(IsAddImmediate(instr)); |
| 464 return ((instr & ~kImm16Mask) | (offset & kImm16Mask)); |
| 465 } |
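These Get/Set helpers all follow one pattern: mask out the 16-bit immediate field and OR in the replacement. For instance (sketch, reusing a constant defined above):

    // kPushRegPattern is an sw word with offset 0; retarget it to offset 8.
    Instr moved = SetSwOffset(kPushRegPattern, 8);
    ASSERT((moved & kImm16Mask) == 8);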
| 466 |
| 467 |
327 int Assembler::target_at(int32_t pos) { | 468 int Assembler::target_at(int32_t pos) { |
328 Instr instr = instr_at(pos); | 469 Instr instr = instr_at(pos); |
329 if ((instr & ~kImm16Mask) == 0) { | 470 if ((instr & ~kImm16Mask) == 0) { |
330 // Emitted label constant, not part of a branch. | 471 // Emitted label constant, not part of a branch. |
331 return instr - (Code::kHeaderSize - kHeapObjectTag); | 472 if (instr == 0) { |
| 473 return kEndOfChain; |
| 474 } else { |
| 475 int32_t imm18 = ((instr & static_cast<int32_t>(kImm16Mask)) << 16) >> 14; |
| 476 return (imm18 + pos); |
| 477 } |
332 } | 478 } |
333 // Check we have a branch instruction. | 479 // Check we have a branch instruction. |
334 ASSERT(is_branch(instr)); | 480 ASSERT(IsBranch(instr)); |
335 // Do NOT change this to <<2. We rely on arithmetic shifts here, assuming | 481 // Do NOT change this to <<2. We rely on arithmetic shifts here, assuming |
336 // the compiler uses arithmetic shifts for signed integers. | 482 // the compiler uses arithmetic shifts for signed integers. |
337 int32_t imm18 = ((instr & | 483 int32_t imm18 = ((instr & static_cast<int32_t>(kImm16Mask)) << 16) >> 14; |
338 static_cast<int32_t>(kImm16Mask)) << 16) >> 14; | |
339 | 484 |
340 return pos + kBranchPCOffset + imm18; | 485 if (imm18 == kEndOfChain) { |
| 486 // EndOfChain sentinel is returned directly, not relative to pc or pos. |
| 487 return kEndOfChain; |
| 488 } else { |
| 489 return pos + kBranchPCOffset + imm18; |
| 490 } |
341 } | 491 } |
342 | 492 |
343 | 493 |
344 void Assembler::target_at_put(int32_t pos, int32_t target_pos) { | 494 void Assembler::target_at_put(int32_t pos, int32_t target_pos) { |
345 Instr instr = instr_at(pos); | 495 Instr instr = instr_at(pos); |
346 if ((instr & ~kImm16Mask) == 0) { | 496 if ((instr & ~kImm16Mask) == 0) { |
347 ASSERT(target_pos == kEndOfChain || target_pos >= 0); | 497 ASSERT(target_pos == kEndOfChain || target_pos >= 0); |
348 // Emitted label constant, not part of a branch. | 498 // Emitted label constant, not part of a branch. |
349 // Make label relative to Code* of generated Code object. | 499 // Make label relative to Code* of generated Code object. |
350 instr_at_put(pos, target_pos + (Code::kHeaderSize - kHeapObjectTag)); | 500 instr_at_put(pos, target_pos + (Code::kHeaderSize - kHeapObjectTag)); |
351 return; | 501 return; |
352 } | 502 } |
353 | 503 |
354 ASSERT(is_branch(instr)); | 504 ASSERT(IsBranch(instr)); |
355 int32_t imm18 = target_pos - (pos + kBranchPCOffset); | 505 int32_t imm18 = target_pos - (pos + kBranchPCOffset); |
356 ASSERT((imm18 & 3) == 0); | 506 ASSERT((imm18 & 3) == 0); |
357 | 507 |
358 instr &= ~kImm16Mask; | 508 instr &= ~kImm16Mask; |
359 int32_t imm16 = imm18 >> 2; | 509 int32_t imm16 = imm18 >> 2; |
360 ASSERT(is_int16(imm16)); | 510 ASSERT(is_int16(imm16)); |
361 | 511 |
362 instr_at_put(pos, instr | (imm16 & kImm16Mask)); | 512 instr_at_put(pos, instr | (imm16 & kImm16Mask)); |
363 } | 513 } |
364 | 514 |
(...skipping 16 matching lines...) |
381 } | 531 } |
382 next(&l); | 532 next(&l); |
383 } | 533 } |
384 } else { | 534 } else { |
385 PrintF("label in inconsistent state (pos = %d)\n", L->pos_); | 535 PrintF("label in inconsistent state (pos = %d)\n", L->pos_); |
386 } | 536 } |
387 } | 537 } |
388 | 538 |
389 | 539 |
390 void Assembler::bind_to(Label* L, int pos) { | 540 void Assembler::bind_to(Label* L, int pos) { |
391 ASSERT(0 <= pos && pos <= pc_offset()); // must have a valid binding position | 541 ASSERT(0 <= pos && pos <= pc_offset()); // Must have valid binding position. |
392 while (L->is_linked()) { | 542 while (L->is_linked()) { |
393 int32_t fixup_pos = L->pos(); | 543 int32_t fixup_pos = L->pos(); |
394 next(L); // call next before overwriting link with target at fixup_pos | 544 int32_t dist = pos - fixup_pos; |
| 545 next(L); // Call next before overwriting link with target at fixup_pos. |
| 546 if (dist > kMaxBranchOffset) { |
| 547 do { |
| 548 int32_t trampoline_pos = get_trampoline_entry(fixup_pos); |
| 549 ASSERT((trampoline_pos - fixup_pos) <= kMaxBranchOffset); |
| 550 target_at_put(fixup_pos, trampoline_pos); |
| 551 fixup_pos = trampoline_pos; |
| 552 dist = pos - fixup_pos; |
| 553 } while (dist > kMaxBranchOffset); |
| 554 } else if (dist < -kMaxBranchOffset) { |
| 555 do { |
| 556 int32_t trampoline_pos = get_trampoline_entry(fixup_pos, false); |
| 557 ASSERT((trampoline_pos - fixup_pos) >= -kMaxBranchOffset); |
| 558 target_at_put(fixup_pos, trampoline_pos); |
| 559 fixup_pos = trampoline_pos; |
| 560 dist = pos - fixup_pos; |
| 561 } while (dist < -kMaxBranchOffset); |
| 562 } |
395 target_at_put(fixup_pos, pos); | 563 target_at_put(fixup_pos, pos); |
396 } | 564 } |
397 L->bind_to(pos); | 565 L->bind_to(pos); |
398 | 566 |
399 // Keep track of the last bound label so we don't eliminate any instructions | 567 // Keep track of the last bound label so we don't eliminate any instructions |
400 // before a bound label. | 568 // before a bound label. |
401 if (pos > last_bound_pos_) | 569 if (pos > last_bound_pos_) |
402 last_bound_pos_ = pos; | 570 last_bound_pos_ = pos; |
403 } | 571 } |
404 | 572 |
405 | 573 |
406 void Assembler::link_to(Label* L, Label* appendix) { | 574 void Assembler::link_to(Label* L, Label* appendix) { |
407 if (appendix->is_linked()) { | 575 if (appendix->is_linked()) { |
408 if (L->is_linked()) { | 576 if (L->is_linked()) { |
409 // Append appendix to L's list. | 577 // Append appendix to L's list. |
410 int fixup_pos; | 578 int fixup_pos; |
411 int link = L->pos(); | 579 int link = L->pos(); |
412 do { | 580 do { |
413 fixup_pos = link; | 581 fixup_pos = link; |
414 link = target_at(fixup_pos); | 582 link = target_at(fixup_pos); |
415 } while (link > 0); | 583 } while (link > 0); |
416 ASSERT(link == kEndOfChain); | 584 ASSERT(link == kEndOfChain); |
417 target_at_put(fixup_pos, appendix->pos()); | 585 target_at_put(fixup_pos, appendix->pos()); |
418 } else { | 586 } else { |
419 // L is empty, simply use appendix | 587 // L is empty, simply use appendix. |
420 *L = *appendix; | 588 *L = *appendix; |
421 } | 589 } |
422 } | 590 } |
423 appendix->Unuse(); // appendix should not be used anymore | 591 appendix->Unuse(); // Appendix should not be used anymore. |
424 } | 592 } |
425 | 593 |
426 | 594 |
427 void Assembler::bind(Label* L) { | 595 void Assembler::bind(Label* L) { |
428 ASSERT(!L->is_bound()); // label can only be bound once | 596 ASSERT(!L->is_bound()); // Label can only be bound once. |
429 bind_to(L, pc_offset()); | 597 bind_to(L, pc_offset()); |
430 } | 598 } |
431 | 599 |
432 | 600 |
433 void Assembler::next(Label* L) { | 601 void Assembler::next(Label* L) { |
434 ASSERT(L->is_linked()); | 602 ASSERT(L->is_linked()); |
435 int link = target_at(L->pos()); | 603 int link = target_at(L->pos()); |
436 if (link > 0) { | 604 ASSERT(link > 0 || link == kEndOfChain); |
| 605 if (link == kEndOfChain) { |
| 606 L->Unuse(); |
| 607 } else if (link > 0) { |
437 L->link_to(link); | 608 L->link_to(link); |
438 } else { | |
439 ASSERT(link == kEndOfChain); | |
440 L->Unuse(); | |
441 } | 609 } |
442 } | 610 } |
443 | 611 |
444 | 612 |
445 // We have to use a temporary register for things that can be relocated even | 613 // We have to use a temporary register for things that can be relocated even |
446 // if they can be encoded in the MIPS's 16 bits of immediate-offset instruction | 614 // if they can be encoded in the MIPS's 16 bits of immediate-offset instruction |
447 // space. There is no guarantee that the relocated location can be similarly | 615 // space. There is no guarantee that the relocated location can be similarly |
448 // encoded. | 616 // encoded. |
449 bool Assembler::MustUseAt(RelocInfo::Mode rmode) { | 617 bool Assembler::MustUseReg(RelocInfo::Mode rmode) { |
450 if (rmode == RelocInfo::EXTERNAL_REFERENCE) { | 618 return rmode != RelocInfo::NONE; |
451 return Serializer::enabled(); | |
452 } else if (rmode == RelocInfo::NONE) { | |
453 return false; | |
454 } | |
455 return true; | |
456 } | 619 } |
457 | 620 |
458 | 621 |
459 void Assembler::GenInstrRegister(Opcode opcode, | 622 void Assembler::GenInstrRegister(Opcode opcode, |
460 Register rs, | 623 Register rs, |
461 Register rt, | 624 Register rt, |
462 Register rd, | 625 Register rd, |
463 uint16_t sa, | 626 uint16_t sa, |
464 SecondaryField func) { | 627 SecondaryField func) { |
465 ASSERT(rd.is_valid() && rs.is_valid() && rt.is_valid() && is_uint5(sa)); | 628 ASSERT(rd.is_valid() && rs.is_valid() && rt.is_valid() && is_uint5(sa)); |
466 Instr instr = opcode | (rs.code() << kRsShift) | (rt.code() << kRtShift) | 629 Instr instr = opcode | (rs.code() << kRsShift) | (rt.code() << kRtShift) |
467 | (rd.code() << kRdShift) | (sa << kSaShift) | func; | 630 | (rd.code() << kRdShift) | (sa << kSaShift) | func; |
468 emit(instr); | 631 emit(instr); |
469 } | 632 } |
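As a worked example of this encoding path (illustration only): addu(t0, t1, t2) arrives here with opcode SPECIAL (0) and func ADDU, and with t1 = 9, t2 = 10, t0 = 8 the emitted word is the standard MIPS encoding of addu t0, t1, t2:

    // (0 << 26) | (9 << kRsShift) | (10 << kRtShift) | (8 << kRdShift)
    //     | (0 << kSaShift) | ADDU  ==  0x012a4021.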
470 | 633 |
471 | 634 |
472 void Assembler::GenInstrRegister(Opcode opcode, | 635 void Assembler::GenInstrRegister(Opcode opcode, |
| 636 Register rs, |
| 637 Register rt, |
| 638 uint16_t msb, |
| 639 uint16_t lsb, |
| 640 SecondaryField func) { |
| 641 ASSERT(rs.is_valid() && rt.is_valid() && is_uint5(msb) && is_uint5(lsb)); |
| 642 Instr instr = opcode | (rs.code() << kRsShift) | (rt.code() << kRtShift) |
| 643 | (msb << kRdShift) | (lsb << kSaShift) | func; |
| 644 emit(instr); |
| 645 } |
| 646 |
| 647 |
| 648 void Assembler::GenInstrRegister(Opcode opcode, |
473 SecondaryField fmt, | 649 SecondaryField fmt, |
474 FPURegister ft, | 650 FPURegister ft, |
475 FPURegister fs, | 651 FPURegister fs, |
476 FPURegister fd, | 652 FPURegister fd, |
477 SecondaryField func) { | 653 SecondaryField func) { |
478 ASSERT(fd.is_valid() && fs.is_valid() && ft.is_valid()); | 654 ASSERT(fd.is_valid() && fs.is_valid() && ft.is_valid()); |
479 Instr instr = opcode | fmt | (ft.code() << 16) | (fs.code() << kFsShift) | 655 ASSERT(isolate()->cpu_features()->IsEnabled(FPU)); |
480 | (fd.code() << 6) | func; | 656 Instr instr = opcode | fmt | (ft.code() << kFtShift) | (fs.code() << kFsShift) |
| 657 | (fd.code() << kFdShift) | func; |
481 emit(instr); | 658 emit(instr); |
482 } | 659 } |
483 | 660 |
484 | 661 |
485 void Assembler::GenInstrRegister(Opcode opcode, | 662 void Assembler::GenInstrRegister(Opcode opcode, |
486 SecondaryField fmt, | 663 SecondaryField fmt, |
487 Register rt, | 664 Register rt, |
488 FPURegister fs, | 665 FPURegister fs, |
489 FPURegister fd, | 666 FPURegister fd, |
490 SecondaryField func) { | 667 SecondaryField func) { |
491 ASSERT(fd.is_valid() && fs.is_valid() && rt.is_valid()); | 668 ASSERT(fd.is_valid() && fs.is_valid() && rt.is_valid()); |
| 669 ASSERT(isolate()->cpu_features()->IsEnabled(FPU)); |
492 Instr instr = opcode | fmt | (rt.code() << kRtShift) | 670 Instr instr = opcode | fmt | (rt.code() << kRtShift) |
493 | (fs.code() << kFsShift) | (fd.code() << 6) | func; | 671 | (fs.code() << kFsShift) | (fd.code() << kFdShift) | func; |
494 emit(instr); | 672 emit(instr); |
495 } | 673 } |
496 | 674 |
| 675 |
| 676 void Assembler::GenInstrRegister(Opcode opcode, |
| 677 SecondaryField fmt, |
| 678 Register rt, |
| 679 FPUControlRegister fs, |
| 680 SecondaryField func) { |
| 681 ASSERT(fs.is_valid() && rt.is_valid()); |
| 682 ASSERT(isolate()->cpu_features()->IsEnabled(FPU)); |
| 683 Instr instr = |
| 684 opcode | fmt | (rt.code() << kRtShift) | (fs.code() << kFsShift) | func; |
| 685 emit(instr); |
| 686 } |
| 687 |
497 | 688 |
498 // Instructions with immediate value. | 689 // Instructions with immediate value. |
499 // Registers are in the order of the instruction encoding, from left to right. | 690 // Registers are in the order of the instruction encoding, from left to right. |
500 void Assembler::GenInstrImmediate(Opcode opcode, | 691 void Assembler::GenInstrImmediate(Opcode opcode, |
501 Register rs, | 692 Register rs, |
502 Register rt, | 693 Register rt, |
503 int32_t j) { | 694 int32_t j) { |
504 ASSERT(rs.is_valid() && rt.is_valid() && (is_int16(j) || is_uint16(j))); | 695 ASSERT(rs.is_valid() && rt.is_valid() && (is_int16(j) || is_uint16(j))); |
505 Instr instr = opcode | (rs.code() << kRsShift) | (rt.code() << kRtShift) | 696 Instr instr = opcode | (rs.code() << kRsShift) | (rt.code() << kRtShift) |
506 | (j & kImm16Mask); | 697 | (j & kImm16Mask); |
507 emit(instr); | 698 emit(instr); |
508 } | 699 } |
509 | 700 |
510 | 701 |
511 void Assembler::GenInstrImmediate(Opcode opcode, | 702 void Assembler::GenInstrImmediate(Opcode opcode, |
512 Register rs, | 703 Register rs, |
513 SecondaryField SF, | 704 SecondaryField SF, |
514 int32_t j) { | 705 int32_t j) { |
515 ASSERT(rs.is_valid() && (is_int16(j) || is_uint16(j))); | 706 ASSERT(rs.is_valid() && (is_int16(j) || is_uint16(j))); |
516 Instr instr = opcode | (rs.code() << kRsShift) | SF | (j & kImm16Mask); | 707 Instr instr = opcode | (rs.code() << kRsShift) | SF | (j & kImm16Mask); |
517 emit(instr); | 708 emit(instr); |
518 } | 709 } |
519 | 710 |
520 | 711 |
521 void Assembler::GenInstrImmediate(Opcode opcode, | 712 void Assembler::GenInstrImmediate(Opcode opcode, |
522 Register rs, | 713 Register rs, |
523 FPURegister ft, | 714 FPURegister ft, |
524 int32_t j) { | 715 int32_t j) { |
525 ASSERT(rs.is_valid() && ft.is_valid() && (is_int16(j) || is_uint16(j))); | 716 ASSERT(rs.is_valid() && ft.is_valid() && (is_int16(j) || is_uint16(j))); |
| 717 ASSERT(isolate()->cpu_features()->IsEnabled(FPU)); |
526 Instr instr = opcode | (rs.code() << kRsShift) | (ft.code() << kFtShift) | 718 Instr instr = opcode | (rs.code() << kRsShift) | (ft.code() << kFtShift) |
527 | (j & kImm16Mask); | 719 | (j & kImm16Mask); |
528 emit(instr); | 720 emit(instr); |
529 } | 721 } |
530 | 722 |
531 | 723 |
532 // Registers are in the order of the instruction encoding, from left to right. | 724 // Registers are in the order of the instruction encoding, from left to right. |
533 void Assembler::GenInstrJump(Opcode opcode, | 725 void Assembler::GenInstrJump(Opcode opcode, |
534 uint32_t address) { | 726 uint32_t address) { |
| 727 BlockTrampolinePoolScope block_trampoline_pool(this); |
535 ASSERT(is_uint26(address)); | 728 ASSERT(is_uint26(address)); |
536 Instr instr = opcode | address; | 729 Instr instr = opcode | address; |
537 emit(instr); | 730 emit(instr); |
| 731 BlockTrampolinePoolFor(1); // For associated delay slot. |
| 732 } |
| 733 |
| 734 |
| 735 // Returns the next free label entry from the next trampoline pool. |
| 736 int32_t Assembler::get_label_entry(int32_t pos, bool next_pool) { |
| 737 int trampoline_count = trampolines_.length(); |
| 738 int32_t label_entry = 0; |
| 739 ASSERT(trampoline_count > 0); |
| 740 |
| 741 if (next_pool) { |
| 742 for (int i = 0; i < trampoline_count; i++) { |
| 743 if (trampolines_[i].start() > pos) { |
| 744 label_entry = trampolines_[i].take_label(); |
| 745 break; |
| 746 } |
| 747 } |
| 748 } else { // Caller needs a label entry from the previous pool. |
| 749 for (int i = trampoline_count-1; i >= 0; i--) { |
| 750 if (trampolines_[i].end() < pos) { |
| 751 label_entry = trampolines_[i].take_label(); |
| 752 break; |
| 753 } |
| 754 } |
| 755 } |
| 756 return label_entry; |
| 757 } |
| 758 |
| 759 |
| 760 // Returns the next free trampoline entry from the next trampoline pool. |
| 761 int32_t Assembler::get_trampoline_entry(int32_t pos, bool next_pool) { |
| 762 int trampoline_count = trampolines_.length(); |
| 763 int32_t trampoline_entry = 0; |
| 764 ASSERT(trampoline_count > 0); |
| 765 |
| 766 if (next_pool) { |
| 767 for (int i = 0; i < trampoline_count; i++) { |
| 768 if (trampolines_[i].start() > pos) { |
| 769 trampoline_entry = trampolines_[i].take_slot(); |
| 770 break; |
| 771 } |
| 772 } |
| 773 } else { // Caller needs a trampoline entry from the previous pool. |
| 774 for (int i = trampoline_count-1; i >= 0; i--) { |
| 775 if (trampolines_[i].end() < pos) { |
| 776 trampoline_entry = trampolines_[i].take_slot(); |
| 777 break; |
| 778 } |
| 779 } |
| 780 } |
| 781 return trampoline_entry; |
538 } | 782 } |
539 | 783 |
540 | 784 |
541 int32_t Assembler::branch_offset(Label* L, bool jump_elimination_allowed) { | 785 int32_t Assembler::branch_offset(Label* L, bool jump_elimination_allowed) { |
542 int32_t target_pos; | 786 int32_t target_pos; |
| 787 int32_t pc_offset_v = pc_offset(); |
| 788 |
543 if (L->is_bound()) { | 789 if (L->is_bound()) { |
544 target_pos = L->pos(); | 790 target_pos = L->pos(); |
| 791 int32_t dist = pc_offset_v - target_pos; |
| 792 if (dist > kMaxBranchOffset) { |
| 793 do { |
| 794 int32_t trampoline_pos = get_trampoline_entry(target_pos); |
| 795 ASSERT((trampoline_pos - target_pos) > 0); |
| 796 ASSERT((trampoline_pos - target_pos) <= kMaxBranchOffset); |
| 797 target_at_put(trampoline_pos, target_pos); |
| 798 target_pos = trampoline_pos; |
| 799 dist = pc_offset_v - target_pos; |
| 800 } while (dist > kMaxBranchOffset); |
| 801 } else if (dist < -kMaxBranchOffset) { |
| 802 do { |
| 803 int32_t trampoline_pos = get_trampoline_entry(target_pos, false); |
| 804 ASSERT((target_pos - trampoline_pos) > 0); |
| 805 ASSERT((target_pos - trampoline_pos) <= kMaxBranchOffset); |
| 806 target_at_put(trampoline_pos, target_pos); |
| 807 target_pos = trampoline_pos; |
| 808 dist = pc_offset_v - target_pos; |
| 809 } while (dist < -kMaxBranchOffset); |
| 810 } |
545 } else { | 811 } else { |
546 if (L->is_linked()) { | 812 if (L->is_linked()) { |
547 target_pos = L->pos(); // L's link | 813 target_pos = L->pos(); // L's link. |
| 814 int32_t dist = pc_offset_v - target_pos; |
| 815 if (dist > kMaxBranchOffset) { |
| 816 do { |
| 817 int32_t label_pos = get_label_entry(target_pos); |
| 818 ASSERT((label_pos - target_pos) < kMaxBranchOffset); |
| 819 label_at_put(L, label_pos); |
| 820 target_pos = label_pos; |
| 821 dist = pc_offset_v - target_pos; |
| 822 } while (dist > kMaxBranchOffset); |
| 823 } else if (dist < -kMaxBranchOffset) { |
| 824 do { |
| 825 int32_t label_pos = get_label_entry(target_pos, false); |
| 826 ASSERT((label_pos - target_pos) > -kMaxBranchOffset); |
| 827 label_at_put(L, label_pos); |
| 828 target_pos = label_pos; |
| 829 dist = pc_offset_v - target_pos; |
| 830 } while (dist < -kMaxBranchOffset); |
| 831 } |
| 832 L->link_to(pc_offset()); |
548 } else { | 833 } else { |
549 target_pos = kEndOfChain; | 834 L->link_to(pc_offset()); |
| 835 return kEndOfChain; |
550 } | 836 } |
551 L->link_to(pc_offset()); | |
552 } | 837 } |
553 | 838 |
554 int32_t offset = target_pos - (pc_offset() + kBranchPCOffset); | 839 int32_t offset = target_pos - (pc_offset() + kBranchPCOffset); |
| 840 ASSERT((offset & 3) == 0); |
| 841 ASSERT(is_int16(offset >> 2)); |
| 842 |
555 return offset; | 843 return offset; |
556 } | 844 } |
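The retargeting loops above implement a hop chain: when a label is farther away than one 16-bit branch can reach, the branch is pointed at a nearby trampoline slot, and slots forward to one another until the real target is in range. Conceptually (sketch only):

    // branch @pos --> slot1   (within kMaxBranchOffset of pos)
    // slot1       --> slot2   (within kMaxBranchOffset of slot1)
    // slot2       --> target
    // Each target_at_put() call above rewrites one hop of this chain.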
557 | 845 |
558 | 846 |
559 void Assembler::label_at_put(Label* L, int at_offset) { | 847 void Assembler::label_at_put(Label* L, int at_offset) { |
560 int target_pos; | 848 int target_pos; |
561 if (L->is_bound()) { | 849 if (L->is_bound()) { |
562 target_pos = L->pos(); | 850 target_pos = L->pos(); |
| 851 instr_at_put(at_offset, target_pos + (Code::kHeaderSize - kHeapObjectTag)); |
563 } else { | 852 } else { |
564 if (L->is_linked()) { | 853 if (L->is_linked()) { |
565 target_pos = L->pos(); // L's link | 854 target_pos = L->pos(); // L's link. |
| 855 int32_t imm18 = target_pos - at_offset; |
| 856 ASSERT((imm18 & 3) == 0); |
| 857 int32_t imm16 = imm18 >> 2; |
| 858 ASSERT(is_int16(imm16)); |
| 859 instr_at_put(at_offset, (imm16 & kImm16Mask)); |
566 } else { | 860 } else { |
567 target_pos = kEndOfChain; | 861 target_pos = kEndOfChain; |
| 862 instr_at_put(at_offset, 0); |
568 } | 863 } |
569 L->link_to(at_offset); | 864 L->link_to(at_offset); |
570 instr_at_put(at_offset, target_pos + (Code::kHeaderSize - kHeapObjectTag)); | |
571 } | 865 } |
572 } | 866 } |
573 | 867 |
574 | 868 |
575 //------- Branch and jump instructions -------- | 869 //------- Branch and jump instructions -------- |
576 | 870 |
577 void Assembler::b(int16_t offset) { | 871 void Assembler::b(int16_t offset) { |
578 beq(zero_reg, zero_reg, offset); | 872 beq(zero_reg, zero_reg, offset); |
579 } | 873 } |
580 | 874 |
581 | 875 |
582 void Assembler::bal(int16_t offset) { | 876 void Assembler::bal(int16_t offset) { |
| 877 positions_recorder()->WriteRecordedPositions(); |
583 bgezal(zero_reg, offset); | 878 bgezal(zero_reg, offset); |
584 } | 879 } |
585 | 880 |
586 | 881 |
587 void Assembler::beq(Register rs, Register rt, int16_t offset) { | 882 void Assembler::beq(Register rs, Register rt, int16_t offset) { |
| 883 BlockTrampolinePoolScope block_trampoline_pool(this); |
588 GenInstrImmediate(BEQ, rs, rt, offset); | 884 GenInstrImmediate(BEQ, rs, rt, offset); |
| 885 BlockTrampolinePoolFor(1); // For associated delay slot. |
589 } | 886 } |
590 | 887 |
591 | 888 |
592 void Assembler::bgez(Register rs, int16_t offset) { | 889 void Assembler::bgez(Register rs, int16_t offset) { |
| 890 BlockTrampolinePoolScope block_trampoline_pool(this); |
593 GenInstrImmediate(REGIMM, rs, BGEZ, offset); | 891 GenInstrImmediate(REGIMM, rs, BGEZ, offset); |
| 892 BlockTrampolinePoolFor(1); // For associated delay slot. |
594 } | 893 } |
595 | 894 |
596 | 895 |
597 void Assembler::bgezal(Register rs, int16_t offset) { | 896 void Assembler::bgezal(Register rs, int16_t offset) { |
| 897 BlockTrampolinePoolScope block_trampoline_pool(this); |
| 898 positions_recorder()->WriteRecordedPositions(); |
598 GenInstrImmediate(REGIMM, rs, BGEZAL, offset); | 899 GenInstrImmediate(REGIMM, rs, BGEZAL, offset); |
| 900 BlockTrampolinePoolFor(1); // For associated delay slot. |
599 } | 901 } |
600 | 902 |
601 | 903 |
602 void Assembler::bgtz(Register rs, int16_t offset) { | 904 void Assembler::bgtz(Register rs, int16_t offset) { |
| 905 BlockTrampolinePoolScope block_trampoline_pool(this); |
603 GenInstrImmediate(BGTZ, rs, zero_reg, offset); | 906 GenInstrImmediate(BGTZ, rs, zero_reg, offset); |
| 907 BlockTrampolinePoolFor(1); // For associated delay slot. |
604 } | 908 } |
605 | 909 |
606 | 910 |
607 void Assembler::blez(Register rs, int16_t offset) { | 911 void Assembler::blez(Register rs, int16_t offset) { |
| 912 BlockTrampolinePoolScope block_trampoline_pool(this); |
608 GenInstrImmediate(BLEZ, rs, zero_reg, offset); | 913 GenInstrImmediate(BLEZ, rs, zero_reg, offset); |
| 914 BlockTrampolinePoolFor(1); // For associated delay slot. |
609 } | 915 } |
610 | 916 |
611 | 917 |
612 void Assembler::bltz(Register rs, int16_t offset) { | 918 void Assembler::bltz(Register rs, int16_t offset) { |
| 919 BlockTrampolinePoolScope block_trampoline_pool(this); |
613 GenInstrImmediate(REGIMM, rs, BLTZ, offset); | 920 GenInstrImmediate(REGIMM, rs, BLTZ, offset); |
| 921 BlockTrampolinePoolFor(1); // For associated delay slot. |
614 } | 922 } |
615 | 923 |
616 | 924 |
617 void Assembler::bltzal(Register rs, int16_t offset) { | 925 void Assembler::bltzal(Register rs, int16_t offset) { |
| 926 BlockTrampolinePoolScope block_trampoline_pool(this); |
| 927 positions_recorder()->WriteRecordedPositions(); |
618 GenInstrImmediate(REGIMM, rs, BLTZAL, offset); | 928 GenInstrImmediate(REGIMM, rs, BLTZAL, offset); |
| 929 BlockTrampolinePoolFor(1); // For associated delay slot. |
619 } | 930 } |
620 | 931 |
621 | 932 |
622 void Assembler::bne(Register rs, Register rt, int16_t offset) { | 933 void Assembler::bne(Register rs, Register rt, int16_t offset) { |
| 934 BlockTrampolinePoolScope block_trampoline_pool(this); |
623 GenInstrImmediate(BNE, rs, rt, offset); | 935 GenInstrImmediate(BNE, rs, rt, offset); |
| 936 BlockTrampolinePoolFor(1); // For associated delay slot. |
624 } | 937 } |
625 | 938 |
626 | 939 |
627 void Assembler::j(int32_t target) { | 940 void Assembler::j(int32_t target) { |
628 ASSERT(is_uint28(target) && ((target & 3) == 0)); | 941 ASSERT(is_uint28(target) && ((target & 3) == 0)); |
629 GenInstrJump(J, target >> 2); | 942 GenInstrJump(J, target >> 2); |
630 } | 943 } |
631 | 944 |
632 | 945 |
633 void Assembler::jr(Register rs) { | 946 void Assembler::jr(Register rs) { |
| 947 BlockTrampolinePoolScope block_trampoline_pool(this); |
| 948 if (rs.is(ra)) { |
| 949 positions_recorder()->WriteRecordedPositions(); |
| 950 } |
634 GenInstrRegister(SPECIAL, rs, zero_reg, zero_reg, 0, JR); | 951 GenInstrRegister(SPECIAL, rs, zero_reg, zero_reg, 0, JR); |
| 952 BlockTrampolinePoolFor(1); // For associated delay slot. |
635 } | 953 } |
636 | 954 |
637 | 955 |
638 void Assembler::jal(int32_t target) { | 956 void Assembler::jal(int32_t target) { |
| 957 positions_recorder()->WriteRecordedPositions(); |
639 ASSERT(is_uint28(target) && ((target & 3) == 0)); | 958 ASSERT(is_uint28(target) && ((target & 3) == 0)); |
640 GenInstrJump(JAL, target >> 2); | 959 GenInstrJump(JAL, target >> 2); |
641 } | 960 } |
642 | 961 |
643 | 962 |
644 void Assembler::jalr(Register rs, Register rd) { | 963 void Assembler::jalr(Register rs, Register rd) { |
| 964 BlockTrampolinePoolScope block_trampoline_pool(this); |
| 965 positions_recorder()->WriteRecordedPositions(); |
645 GenInstrRegister(SPECIAL, rs, zero_reg, rd, 0, JALR); | 966 GenInstrRegister(SPECIAL, rs, zero_reg, rd, 0, JALR); |
| 967 BlockTrampolinePoolFor(1); // For associated delay slot. |
646 } | 968 } |
647 | 969 |
648 | 970 |
649 //-------Data-processing-instructions--------- | 971 //-------Data-processing-instructions--------- |
650 | 972 |
651 // Arithmetic. | 973 // Arithmetic. |
652 | 974 |
653 void Assembler::add(Register rd, Register rs, Register rt) { | |
654 GenInstrRegister(SPECIAL, rs, rt, rd, 0, ADD); | |
655 } | |
656 | |
657 | |
658 void Assembler::addu(Register rd, Register rs, Register rt) { | 975 void Assembler::addu(Register rd, Register rs, Register rt) { |
659 GenInstrRegister(SPECIAL, rs, rt, rd, 0, ADDU); | 976 GenInstrRegister(SPECIAL, rs, rt, rd, 0, ADDU); |
660 } | 977 } |
661 | 978 |
662 | 979 |
663 void Assembler::addi(Register rd, Register rs, int32_t j) { | 980 void Assembler::addiu(Register rd, Register rs, int32_t j) { |
664 GenInstrImmediate(ADDI, rs, rd, j); | 981 GenInstrImmediate(ADDIU, rs, rd, j); |
| 982 |
| 983 // Eliminate pattern: push(r), pop(). |
| 984 // addiu(sp, sp, Operand(-kPointerSize)); |
| 985 // sw(src, MemOperand(sp, 0)); |
| 986 // addiu(sp, sp, Operand(kPointerSize)); |
| 987 // All three instructions can be eliminated. |
| 988 if (can_peephole_optimize(3) && |
| 989 // Pattern. |
| 990 instr_at(pc_ - 1 * kInstrSize) == kPopInstruction && |
| 991 (instr_at(pc_ - 2 * kInstrSize) & ~kRtMask) == kPushRegPattern && |
| 992 (instr_at(pc_ - 3 * kInstrSize)) == kPushInstruction) { |
| 993 pc_ -= 3 * kInstrSize; |
| 994 if (FLAG_print_peephole_optimization) { |
| 995 PrintF("%x push(reg)/pop() eliminated\n", pc_offset()); |
| 996 } |
| 997 } |
| 998 |
| 999 // Eliminate pattern: push(ry), pop(rx). |
| 1000 // addiu(sp, sp, -kPointerSize) |
| 1001 // sw(ry, MemOperand(sp, 0)) |
| 1002 // lw(rx, MemOperand(sp, 0)) |
| 1003 // addiu(sp, sp, kPointerSize); |
| 1004 // All four instructions can be eliminated if ry == rx. |
| 1005 // If ry != rx, a register copy from ry to rx is inserted |
| 1006 // after eliminating the push and the pop instructions. |
| 1007 if (can_peephole_optimize(4)) { |
| 1008 Instr pre_push_sp_set = instr_at(pc_ - 4 * kInstrSize); |
| 1009 Instr push_instr = instr_at(pc_ - 3 * kInstrSize); |
| 1010 Instr pop_instr = instr_at(pc_ - 2 * kInstrSize); |
| 1011 Instr post_pop_sp_set = instr_at(pc_ - 1 * kInstrSize); |
| 1012 |
| 1013 if (IsPush(push_instr) && |
| 1014 IsPop(pop_instr) && pre_push_sp_set == kPushInstruction && |
| 1015 post_pop_sp_set == kPopInstruction) { |
| 1016 if ((pop_instr & kRtMask) != (push_instr & kRtMask)) { |
| 1017 // For consecutive push and pop on different registers, |
| 1018 // we delete both the push & pop and insert a register move. |
| 1019 // push ry, pop rx --> mov rx, ry. |
| 1020 Register reg_pushed, reg_popped; |
| 1021 reg_pushed = GetRt(push_instr); |
| 1022 reg_popped = GetRt(pop_instr); |
| 1023 pc_ -= 4 * kInstrSize; |
| 1024 // Insert a mov instruction, which is better than a pair of push & pop. |
| 1025 or_(reg_popped, reg_pushed, zero_reg); |
| 1026 if (FLAG_print_peephole_optimization) { |
| 1027 PrintF("%x push/pop (diff reg) replaced by a reg move\n", |
| 1028 pc_offset()); |
| 1029 } |
| 1030 } else { |
| 1031 // For consecutive push and pop on the same register, |
| 1032 // both the push and the pop can be deleted. |
| 1033 pc_ -= 4 * kInstrSize; |
| 1034 if (FLAG_print_peephole_optimization) { |
| 1035 PrintF("%x push/pop (same reg) eliminated\n", pc_offset()); |
| 1036 } |
| 1037 } |
| 1038 } |
| 1039 } |
| 1040 |
| 1041 if (can_peephole_optimize(5)) { |
| 1042 Instr pre_push_sp_set = instr_at(pc_ - 5 * kInstrSize); |
| 1043 Instr mem_write_instr = instr_at(pc_ - 4 * kInstrSize); |
| 1044 Instr lw_instr = instr_at(pc_ - 3 * kInstrSize); |
| 1045 Instr mem_read_instr = instr_at(pc_ - 2 * kInstrSize); |
| 1046 Instr post_pop_sp_set = instr_at(pc_ - 1 * kInstrSize); |
| 1047 |
| 1048 if (IsPush(mem_write_instr) && |
| 1049 pre_push_sp_set == kPushInstruction && |
| 1050 IsPop(mem_read_instr) && |
| 1051 post_pop_sp_set == kPopInstruction) { |
| 1052 if ((IsLwRegFpOffset(lw_instr) || |
| 1053 IsLwRegFpNegOffset(lw_instr))) { |
| 1054 if ((mem_write_instr & kRtMask) == |
| 1055 (mem_read_instr & kRtMask)) { |
| 1056 // Pattern: push & pop from/to same register, |
| 1057 // with a fp+offset lw in between. |
| 1058 // |
| 1059 // The following: |
| 1060 // addiu sp, sp, -4 |
| 1061 // sw rx, [sp, #0] |
| 1062 // lw rz, [fp, #-24] |
| 1063 // lw rx, [sp, 0] |
| 1064 // addiu sp, sp, 4 |
| 1065 // |
| 1066 // Becomes: |
| 1067 // if(rx == rz) |
| 1068 // delete all |
| 1069 // else |
| 1070 // lw rz, [fp, #-24] |
| 1071 |
| 1072 if ((mem_write_instr & kRtMask) == (lw_instr & kRtMask)) { |
| 1073 pc_ -= 5 * kInstrSize; |
| 1074 } else { |
| 1075 pc_ -= 5 * kInstrSize; |
| 1076 // Reinsert back the lw rz. |
| 1077 emit(lw_instr); |
| 1078 } |
| 1079 if (FLAG_print_peephole_optimization) { |
| 1080 PrintF("%x push/pop -dead ldr fp+offset in middle\n", pc_offset()); |
| 1081 } |
| 1082 } else { |
| 1083 // Pattern: push & pop from/to different registers |
| 1084 // with a fp + offset lw in between. |
| 1085 // |
| 1086 // The following: |
| 1087 // addiu sp, sp ,-4 |
| 1088 // sw rx, [sp, 0] |
| 1089 // lw rz, [fp, #-24] |
| 1090 // lw ry, [sp, 0] |
| 1091 // addiu sp, sp, 4 |
| 1092 // |
| 1093 // Becomes: |
| 1094 // if(ry == rz) |
| 1095 // mov ry, rx; |
| 1096 // else if(rx != rz) |
| 1097 // lw rz, [fp, #-24] |
| 1098 // mov ry, rx |
| 1099 // else if ((ry != rz) || (rx == rz)) |
| 1100 // mov ry, rx |
| 1101 // lw rz, [fp, #-24] |
| 1102 |
| 1103 Register reg_pushed, reg_popped; |
| 1104 if ((mem_read_instr & kRtMask) == (lw_instr & kRtMask)) { |
| 1105 reg_pushed = GetRt(mem_write_instr); |
| 1106 reg_popped = GetRt(mem_read_instr); |
| 1107 pc_ -= 5 * kInstrSize; |
| 1108 or_(reg_popped, reg_pushed, zero_reg); // Move instruction. |
| 1109 } else if ((mem_write_instr & kRtMask) |
| 1110 != (lw_instr & kRtMask)) { |
| 1111 reg_pushed = GetRt(mem_write_instr); |
| 1112 reg_popped = GetRt(mem_read_instr); |
| 1113 pc_ -= 5 * kInstrSize; |
| 1114 emit(lw_instr); |
| 1115 or_(reg_popped, reg_pushed, zero_reg); // Move instruction. |
| 1116 } else if (((mem_read_instr & kRtMask) |
| 1117 != (lw_instr & kRtMask)) || |
| 1118 ((mem_write_instr & kRtMask) |
| 1119 == (lw_instr & kRtMask))) { |
| 1120 reg_pushed = GetRt(mem_write_instr); |
| 1121 reg_popped = GetRt(mem_read_instr); |
| 1122 pc_ -= 5 * kInstrSize; |
| 1123 or_(reg_popped, reg_pushed, zero_reg); // Move instruction. |
| 1124 emit(lw_instr); |
| 1125 } |
| 1126 if (FLAG_print_peephole_optimization) { |
| 1127 PrintF("%x push/pop (ldr fp+off in middle)\n", pc_offset()); |
| 1128 } |
| 1129 } |
| 1130 } |
| 1131 } |
| 1132 } |
665 } | 1133 } |
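To make the peephole rewrite concrete, this is the kind of sequence the final addiu erases (hypothetical caller code, written with the usual __ macro-assembler shorthand):

    __ addiu(sp, sp, -kPointerSize);   // kPushInstruction pattern.
    __ sw(t0, MemOperand(sp, 0));      // kPushRegPattern.
    __ lw(t0, MemOperand(sp, 0));      // kPopRegPattern.
    __ addiu(sp, sp, kPointerSize);    // Triggers the peephole: all four
                                       // instructions vanish (same register),
                                       // or become a single or_ move.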
666 | 1134 |
667 | 1135 |
668 void Assembler::addiu(Register rd, Register rs, int32_t j) { | |
669 GenInstrImmediate(ADDIU, rs, rd, j); | |
670 } | |
671 | |
672 | |
673 void Assembler::sub(Register rd, Register rs, Register rt) { | |
674 GenInstrRegister(SPECIAL, rs, rt, rd, 0, SUB); | |
675 } | |
676 | |
677 | |
678 void Assembler::subu(Register rd, Register rs, Register rt) { | 1136 void Assembler::subu(Register rd, Register rs, Register rt) { |
679 GenInstrRegister(SPECIAL, rs, rt, rd, 0, SUBU); | 1137 GenInstrRegister(SPECIAL, rs, rt, rd, 0, SUBU); |
680 } | 1138 } |
681 | 1139 |
682 | 1140 |
683 void Assembler::mul(Register rd, Register rs, Register rt) { | 1141 void Assembler::mul(Register rd, Register rs, Register rt) { |
684 GenInstrRegister(SPECIAL2, rs, rt, rd, 0, MUL); | 1142 GenInstrRegister(SPECIAL2, rs, rt, rd, 0, MUL); |
685 } | 1143 } |
686 | 1144 |
687 | 1145 |
(...skipping 48 matching lines...) |
736 GenInstrImmediate(XORI, rs, rt, j); | 1194 GenInstrImmediate(XORI, rs, rt, j); |
737 } | 1195 } |
738 | 1196 |
739 | 1197 |
740 void Assembler::nor(Register rd, Register rs, Register rt) { | 1198 void Assembler::nor(Register rd, Register rs, Register rt) { |
741 GenInstrRegister(SPECIAL, rs, rt, rd, 0, NOR); | 1199 GenInstrRegister(SPECIAL, rs, rt, rd, 0, NOR); |
742 } | 1200 } |
743 | 1201 |
744 | 1202 |
745 // Shifts. | 1203 // Shifts. |
746 void Assembler::sll(Register rd, Register rt, uint16_t sa) { | 1204 void Assembler::sll(Register rd, |
| 1205 Register rt, |
| 1206 uint16_t sa, |
| 1207 bool coming_from_nop) { |
| 1208 // Don't allow nop instructions in the form sll zero_reg, zero_reg to be |
| 1209 // generated using the sll instruction. They must be generated using |
| 1210 // nop(int/NopMarkerTypes) or MarkCode(int/NopMarkerTypes) pseudo |
| 1211 // instructions. |
| 1212 ASSERT(coming_from_nop || !(rd.is(zero_reg) && rt.is(zero_reg))); |
747 GenInstrRegister(SPECIAL, zero_reg, rt, rd, sa, SLL); | 1213 GenInstrRegister(SPECIAL, zero_reg, rt, rd, sa, SLL); |
748 } | 1214 } |
749 | 1215 |
750 | 1216 |
751 void Assembler::sllv(Register rd, Register rt, Register rs) { | 1217 void Assembler::sllv(Register rd, Register rt, Register rs) { |
752 GenInstrRegister(SPECIAL, rs, rt, rd, 0, SLLV); | 1218 GenInstrRegister(SPECIAL, rs, rt, rd, 0, SLLV); |
753 } | 1219 } |
754 | 1220 |
755 | 1221 |
756 void Assembler::srl(Register rd, Register rt, uint16_t sa) { | 1222 void Assembler::srl(Register rd, Register rt, uint16_t sa) { |
757 GenInstrRegister(SPECIAL, zero_reg, rt, rd, sa, SRL); | 1223 GenInstrRegister(SPECIAL, zero_reg, rt, rd, sa, SRL); |
758 } | 1224 } |
759 | 1225 |
760 | 1226 |
761 void Assembler::srlv(Register rd, Register rt, Register rs) { | 1227 void Assembler::srlv(Register rd, Register rt, Register rs) { |
762 GenInstrRegister(SPECIAL, rs, rt, rd, 0, SRLV); | 1228 GenInstrRegister(SPECIAL, rs, rt, rd, 0, SRLV); |
763 } | 1229 } |
764 | 1230 |
765 | 1231 |
766 void Assembler::sra(Register rd, Register rt, uint16_t sa) { | 1232 void Assembler::sra(Register rd, Register rt, uint16_t sa) { |
767 GenInstrRegister(SPECIAL, zero_reg, rt, rd, sa, SRA); | 1233 GenInstrRegister(SPECIAL, zero_reg, rt, rd, sa, SRA); |
768 } | 1234 } |
769 | 1235 |
770 | 1236 |
771 void Assembler::srav(Register rd, Register rt, Register rs) { | 1237 void Assembler::srav(Register rd, Register rt, Register rs) { |
772 GenInstrRegister(SPECIAL, rs, rt, rd, 0, SRAV); | 1238 GenInstrRegister(SPECIAL, rs, rt, rd, 0, SRAV); |
773 } | 1239 } |
774 | 1240 |
775 | 1241 |
| 1242 void Assembler::rotr(Register rd, Register rt, uint16_t sa) { |
| 1243 // Should be called via MacroAssembler::Ror. |
| 1244 ASSERT(rd.is_valid() && rt.is_valid() && is_uint5(sa)); |
| 1245 ASSERT(mips32r2); |
| 1246 Instr instr = SPECIAL | (1 << kRsShift) | (rt.code() << kRtShift) |
| 1247 | (rd.code() << kRdShift) | (sa << kSaShift) | SRL; |
| 1248 emit(instr); |
| 1249 } |
| 1250 |
| 1251 |
| 1252 void Assembler::rotrv(Register rd, Register rt, Register rs) { |
| 1253 // Should be called via MacroAssembler::Ror. |
| 1254 ASSERT(rd.is_valid() && rt.is_valid() && rs.is_valid()); |
| 1255 ASSERT(mips32r2); |
| 1256 Instr instr = SPECIAL | (rs.code() << kRsShift) | (rt.code() << kRtShift) |
| 1257 | (rd.code() << kRdShift) | (1 << kSaShift) | SRLV; |
| 1258 emit(instr); |
| 1259 } |
| 1260 |
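For reference, rotr and rotrv encode a plain 32-bit rotate right (by an immediate sa, or by the low five bits of rs, respectively). A sketch of the semantics, my own illustration rather than V8 code:

#include <cstdint>

uint32_t RotateRight(uint32_t x, uint32_t amount) {
  amount &= 31;  // MIPS uses the amount modulo 32.
  return amount == 0 ? x : (x >> amount) | (x << (32 - amount));
}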
| 1261 |
776 //------------Memory-instructions------------- | 1262 //------------Memory-instructions------------- |
777 | 1263 |
| 1264 // Helper for base-reg + offset, when offset is larger than int16. |
| 1265 void Assembler::LoadRegPlusOffsetToAt(const MemOperand& src) { |
| 1266 ASSERT(!src.rm().is(at)); |
| 1267 lui(at, src.offset_ >> kLuiShift); |
| 1268 ori(at, at, src.offset_ & kImm16Mask); // Load 32-bit offset. |
| 1269 addu(at, at, src.rm()); // Add base register. |
| 1270 } |
| 1271 |
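The lui/ori split above is exact for every 32-bit offset, including negative ones: lui fills the upper halfword and ori ORs in the lower halfword with no sign extension. A standalone check of the arithmetic (assuming the conventional values kLuiShift == 16 and kImm16Mask == 0xFFFF):

#include <cassert>
#include <cstdint>

int main() {
  int32_t offset = -0x12344;  // Any offset that does not fit in int16.
  uint16_t hi = static_cast<uint16_t>(offset >> 16);     // lui immediate.
  uint16_t lo = static_cast<uint16_t>(offset & 0xFFFF);  // ori immediate.
  uint32_t rebuilt = (static_cast<uint32_t>(hi) << 16) | lo;
  assert(static_cast<int32_t>(rebuilt) == offset);       // Round-trips exactly.
  return 0;
}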
| 1272 |
778 void Assembler::lb(Register rd, const MemOperand& rs) { | 1273 void Assembler::lb(Register rd, const MemOperand& rs) { |
779 GenInstrImmediate(LB, rs.rm(), rd, rs.offset_); | 1274 if (is_int16(rs.offset_)) { |
| 1275 GenInstrImmediate(LB, rs.rm(), rd, rs.offset_); |
| 1276 } else { // Offset > 16 bits, use multiple instructions to load. |
| 1277 LoadRegPlusOffsetToAt(rs); |
| 1278 GenInstrImmediate(LB, at, rd, 0); // Equiv to lb(rd, MemOperand(at, 0)); |
| 1279 } |
780 } | 1280 } |
781 | 1281 |
782 | 1282 |
783 void Assembler::lbu(Register rd, const MemOperand& rs) { | 1283 void Assembler::lbu(Register rd, const MemOperand& rs) { |
784 GenInstrImmediate(LBU, rs.rm(), rd, rs.offset_); | 1284 if (is_int16(rs.offset_)) { |
| 1285 GenInstrImmediate(LBU, rs.rm(), rd, rs.offset_); |
| 1286 } else { // Offset > 16 bits, use multiple instructions to load. |
| 1287 LoadRegPlusOffsetToAt(rs); |
| 1288 GenInstrImmediate(LBU, at, rd, 0); // Equiv to lbu(rd, MemOperand(at, 0)); |
| 1289 } |
| 1290 } |
| 1291 |
| 1292 |
| 1293 void Assembler::lh(Register rd, const MemOperand& rs) { |
| 1294 if (is_int16(rs.offset_)) { |
| 1295 GenInstrImmediate(LH, rs.rm(), rd, rs.offset_); |
| 1296 } else { // Offset > 16 bits, use multiple instructions to load. |
| 1297 LoadRegPlusOffsetToAt(rs); |
| 1298 GenInstrImmediate(LH, at, rd, 0); // Equiv to lh(rd, MemOperand(at, 0)); |
| 1299 } |
| 1300 } |
| 1301 |
| 1302 |
| 1303 void Assembler::lhu(Register rd, const MemOperand& rs) { |
| 1304 if (is_int16(rs.offset_)) { |
| 1305 GenInstrImmediate(LHU, rs.rm(), rd, rs.offset_); |
| 1306 } else { // Offset > 16 bits, use multiple instructions to load. |
| 1307 LoadRegPlusOffsetToAt(rs); |
| 1308 GenInstrImmediate(LHU, at, rd, 0); // Equiv to lhu(rd, MemOperand(at, 0)); |
| 1309 } |
785 } | 1310 } |
786 | 1311 |
787 | 1312 |
788 void Assembler::lw(Register rd, const MemOperand& rs) { | 1313 void Assembler::lw(Register rd, const MemOperand& rs) { |
789 GenInstrImmediate(LW, rs.rm(), rd, rs.offset_); | 1314 if (is_int16(rs.offset_)) { |
| 1315 GenInstrImmediate(LW, rs.rm(), rd, rs.offset_); |
| 1316 } else { // Offset > 16 bits, use multiple instructions to load. |
| 1317 LoadRegPlusOffsetToAt(rs); |
| 1318 GenInstrImmediate(LW, at, rd, 0); // Equiv to lw(rd, MemOperand(at, 0)); |
| 1319 } |
| 1320 |
| 1321 if (can_peephole_optimize(2)) { |
| 1322 Instr sw_instr = instr_at(pc_ - 2 * kInstrSize); |
| 1323 Instr lw_instr = instr_at(pc_ - 1 * kInstrSize); |
| 1324 |
| 1325 if ((IsSwRegFpOffset(sw_instr) && |
| 1326 IsLwRegFpOffset(lw_instr)) || |
| 1327 (IsSwRegFpNegOffset(sw_instr) && |
| 1328 IsLwRegFpNegOffset(lw_instr))) { |
| 1329 if ((lw_instr & kLwSwInstrArgumentMask) == |
| 1330 (sw_instr & kLwSwInstrArgumentMask)) { |
| 1331 // Pattern: Lw/sw same fp+offset, same register. |
| 1332 // |
| 1333 // The following: |
| 1334 // sw rx, [fp, #-12] |
| 1335 // lw rx, [fp, #-12] |
| 1336 // |
| 1337 // Becomes: |
| 1338 // sw rx, [fp, #-12] |
| 1339 |
| 1340 pc_ -= 1 * kInstrSize; |
| 1341 if (FLAG_print_peephole_optimization) { |
| 1342 PrintF("%x sw/lw (fp + same offset), same reg\n", pc_offset()); |
| 1343 } |
| 1344 } else if ((lw_instr & kLwSwOffsetMask) == |
| 1345 (sw_instr & kLwSwOffsetMask)) { |
| 1346 // Pattern: Lw/sw same fp+offset, different register. |
| 1347 // |
| 1348 // The following: |
| 1349 // sw rx, [fp, #-12] |
| 1350 // lw ry, [fp, #-12] |
| 1351 // |
| 1352 // Becomes: |
| 1353 // sw rx, [fp, #-12] |
| 1354 // mov ry, rx |
| 1355 |
| 1356 Register reg_stored, reg_loaded; |
| 1357 reg_stored = GetRt(sw_instr); |
| 1358 reg_loaded = GetRt(lw_instr); |
| 1359 pc_ -= 1 * kInstrSize; |
| 1360 // Insert a mov instruction, which is better than lw. |
| 1361 or_(reg_loaded, reg_stored, zero_reg); // Move instruction. |
| 1362 if (FLAG_print_peephole_optimization) { |
| 1363 PrintF("%x sw/lw (fp + same offset), diff reg\n", pc_offset()); |
| 1364 } |
| 1365 } |
| 1366 } |
| 1367 } |
| 1368 } |
| 1369 |
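The mask comparisons above follow from the MIPS I-type layout: base register in bits 25:21, rt in bits 20:16, 16-bit offset in bits 15:0. Two lw/sw instructions hit the same stack slot when base and offset agree, and move the same register when rt agrees as well. An illustrative sketch with masks derived from that layout (V8's actual kLwSw* constants are not reproduced here):

#include <cstdint>

const uint32_t kBasePlusOffset = (0x1Fu << 21) | 0xFFFFu;  // rs + imm16 fields.
const uint32_t kRtField = 0x1Fu << 16;                     // rt field.

bool SameSlot(uint32_t a, uint32_t b) {
  return (a & kBasePlusOffset) == (b & kBasePlusOffset);
}

bool SameRegister(uint32_t a, uint32_t b) {
  return (a & kRtField) == (b & kRtField);
}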
| 1370 |
| 1371 void Assembler::lwl(Register rd, const MemOperand& rs) { |
| 1372 GenInstrImmediate(LWL, rs.rm(), rd, rs.offset_); |
| 1373 } |
| 1374 |
| 1375 |
| 1376 void Assembler::lwr(Register rd, const MemOperand& rs) { |
| 1377 GenInstrImmediate(LWR, rs.rm(), rd, rs.offset_); |
790 } | 1378 } |
791 | 1379 |
792 | 1380 |
793 void Assembler::sb(Register rd, const MemOperand& rs) { | 1381 void Assembler::sb(Register rd, const MemOperand& rs) { |
794 GenInstrImmediate(SB, rs.rm(), rd, rs.offset_); | 1382 if (is_int16(rs.offset_)) { |
| 1383 GenInstrImmediate(SB, rs.rm(), rd, rs.offset_); |
| 1384 } else { // Offset > 16 bits, use multiple instructions to store. |
| 1385 LoadRegPlusOffsetToAt(rs); |
| 1386 GenInstrImmediate(SB, at, rd, 0); // Equiv to sb(rd, MemOperand(at, 0)); |
| 1387 } |
| 1388 } |
| 1389 |
| 1390 |
| 1391 void Assembler::sh(Register rd, const MemOperand& rs) { |
| 1392 if (is_int16(rs.offset_)) { |
| 1393 GenInstrImmediate(SH, rs.rm(), rd, rs.offset_); |
| 1394 } else { // Offset > 16 bits, use multiple instructions to store. |
| 1395 LoadRegPlusOffsetToAt(rs); |
| 1396 GenInstrImmediate(SH, at, rd, 0); // Equiv to sh(rd, MemOperand(at, 0)); |
| 1397 } |
795 } | 1398 } |
796 | 1399 |
797 | 1400 |
798 void Assembler::sw(Register rd, const MemOperand& rs) { | 1401 void Assembler::sw(Register rd, const MemOperand& rs) { |
799 GenInstrImmediate(SW, rs.rm(), rd, rs.offset_); | 1402 if (is_int16(rs.offset_)) { |
| 1403 GenInstrImmediate(SW, rs.rm(), rd, rs.offset_); |
| 1404 } else { // Offset > 16 bits, use multiple instructions to store. |
| 1405 LoadRegPlusOffsetToAt(rs); |
| 1406 GenInstrImmediate(SW, at, rd, 0); // Equiv to sw(rd, MemOperand(at, 0)); |
| 1407 } |
| 1408 |
| 1409 // Eliminate pattern: pop(), push(r). |
| 1410 // addiu sp, sp, Operand(kPointerSize); |
| 1411 // addiu sp, sp, Operand(-kPointerSize); |
| 1412 // -> sw r, MemOperand(sp, 0); |
| 1413 if (can_peephole_optimize(3) && |
| 1414 // Pattern. |
| 1415 instr_at(pc_ - 1 * kInstrSize) == |
| 1416 (kPushRegPattern | (rd.code() << kRtShift)) && |
| 1417 instr_at(pc_ - 2 * kInstrSize) == kPushInstruction && |
| 1418 instr_at(pc_ - 3 * kInstrSize) == kPopInstruction) { |
| 1419 pc_ -= 3 * kInstrSize; |
| 1420 GenInstrImmediate(SW, rs.rm(), rd, rs.offset_); |
| 1421 if (FLAG_print_peephole_optimization) { |
| 1422 PrintF("%x pop()/push(reg) eliminated\n", pc_offset()); |
| 1423 } |
| 1424 } |
800 } | 1425 } |
801 | 1426 |
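The rewrite is sound because pop() followed by push(r) leaves sp exactly where it started and merely overwrites the slot at the top; only the final store is observable. A toy model of the stack effect (my own sketch, not V8 code):

#include <cassert>

struct ToyStack { int sp; int mem[16]; };

void PopThenPush(ToyStack* s, int r) {
  s->sp += 1;         // addiu sp, sp, kPointerSize   (the pop).
  s->sp -= 1;         // addiu sp, sp, -kPointerSize  (the push slot).
  s->mem[s->sp] = r;  // sw r, 0(sp).
}

void Peepholed(ToyStack* s, int r) {
  s->mem[s->sp] = r;  // The single sw left after the rewrite.
}

int main() {
  ToyStack a = {4, {0}};
  ToyStack b = {4, {0}};
  PopThenPush(&a, 42);
  Peepholed(&b, 42);
  assert(a.sp == b.sp && a.mem[4] == b.mem[4]);
  return 0;
}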
802 | 1427 |
| 1428 void Assembler::swl(Register rd, const MemOperand& rs) { |
| 1429 GenInstrImmediate(SWL, rs.rm(), rd, rs.offset_); |
| 1430 } |
| 1431 |
| 1432 |
| 1433 void Assembler::swr(Register rd, const MemOperand& rs) { |
| 1434 GenInstrImmediate(SWR, rs.rm(), rd, rs.offset_); |
| 1435 } |
| 1436 |
| 1437 |
803 void Assembler::lui(Register rd, int32_t j) { | 1438 void Assembler::lui(Register rd, int32_t j) { |
804 GenInstrImmediate(LUI, zero_reg, rd, j); | 1439 GenInstrImmediate(LUI, zero_reg, rd, j); |
805 } | 1440 } |
806 | 1441 |
807 | 1442 |
808 //-------------Misc-instructions-------------- | 1443 //-------------Misc-instructions-------------- |
809 | 1444 |
810 // Break / Trap instructions. | 1445 // Break / Trap instructions. |
811 void Assembler::break_(uint32_t code) { | 1446 void Assembler::break_(uint32_t code) { |
812 ASSERT((code & ~0xfffff) == 0); | 1447 ASSERT((code & ~0xfffff) == 0); |
(...skipping 21 matching lines...)
834 void Assembler::tlt(Register rs, Register rt, uint16_t code) { | 1469 void Assembler::tlt(Register rs, Register rt, uint16_t code) { |
835 ASSERT(is_uint10(code)); | 1470 ASSERT(is_uint10(code)); |
836 Instr instr = | 1471 Instr instr = |
837 SPECIAL | TLT | rs.code() << kRsShift | rt.code() << kRtShift | code << 6; | 1472 SPECIAL | TLT | rs.code() << kRsShift | rt.code() << kRtShift | code << 6; |
838 emit(instr); | 1473 emit(instr); |
839 } | 1474 } |
840 | 1475 |
841 | 1476 |
842 void Assembler::tltu(Register rs, Register rt, uint16_t code) { | 1477 void Assembler::tltu(Register rs, Register rt, uint16_t code) { |
843 ASSERT(is_uint10(code)); | 1478 ASSERT(is_uint10(code)); |
844 Instr instr = SPECIAL | TLTU | rs.code() << kRsShift | 1479 Instr instr = |
| 1480 SPECIAL | TLTU | rs.code() << kRsShift |
845 | rt.code() << kRtShift | code << 6; | 1481 | rt.code() << kRtShift | code << 6; |
846 emit(instr); | 1482 emit(instr); |
847 } | 1483 } |
848 | 1484 |
849 | 1485 |
850 void Assembler::teq(Register rs, Register rt, uint16_t code) { | 1486 void Assembler::teq(Register rs, Register rt, uint16_t code) { |
851 ASSERT(is_uint10(code)); | 1487 ASSERT(is_uint10(code)); |
852 Instr instr = | 1488 Instr instr = |
853 SPECIAL | TEQ | rs.code() << kRsShift | rt.code() << kRtShift | code << 6; | 1489 SPECIAL | TEQ | rs.code() << kRsShift | rt.code() << kRtShift | code << 6; |
854 emit(instr); | 1490 emit(instr); |
(...skipping 34 matching lines...)
889 void Assembler::slti(Register rt, Register rs, int32_t j) { | 1525 void Assembler::slti(Register rt, Register rs, int32_t j) { |
890 GenInstrImmediate(SLTI, rs, rt, j); | 1526 GenInstrImmediate(SLTI, rs, rt, j); |
891 } | 1527 } |
892 | 1528 |
893 | 1529 |
894 void Assembler::sltiu(Register rt, Register rs, int32_t j) { | 1530 void Assembler::sltiu(Register rt, Register rs, int32_t j) { |
895 GenInstrImmediate(SLTIU, rs, rt, j); | 1531 GenInstrImmediate(SLTIU, rs, rt, j); |
896 } | 1532 } |
897 | 1533 |
898 | 1534 |
| 1535 // Conditional move. |
| 1536 void Assembler::movz(Register rd, Register rs, Register rt) { |
| 1537 GenInstrRegister(SPECIAL, rs, rt, rd, 0, MOVZ); |
| 1538 } |
| 1539 |
| 1540 |
| 1541 void Assembler::movn(Register rd, Register rs, Register rt) { |
| 1542 GenInstrRegister(SPECIAL, rs, rt, rd, 0, MOVN); |
| 1543 } |
| 1544 |
| 1545 |
| 1546 void Assembler::movt(Register rd, Register rs, uint16_t cc) { |
| 1547 Register rt; |
| 1548 rt.code_ = (cc & 0x0003) << 2 | 1; |
| 1549 GenInstrRegister(SPECIAL, rs, rt, rd, 0, MOVCI); |
| 1550 } |
| 1551 |
| 1552 |
| 1553 void Assembler::movf(Register rd, Register rs, uint16_t cc) { |
| 1554 Register rt; |
| 1555 rt.code_ = (cc & 0x0003) << 2 | 0; |
| 1556 GenInstrRegister(SPECIAL, rs, rt, rd, 0, MOVCI); |
| 1557 } |
| 1558 |
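movt and movf differ only in the low bit packed into the rt slot: the two-bit FPU condition-code number sits above a true/false selector bit. The packing used above, pulled out as a one-liner:

int PackMovciRt(int cc, bool on_true) {
  return ((cc & 0x3) << 2) | (on_true ? 1 : 0);  // movt sets 1, movf sets 0.
}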
| 1559 |
| 1560 // Bit twiddling. |
| 1561 void Assembler::clz(Register rd, Register rs) { |
| 1562 // Clz instr requires same GPR number in 'rd' and 'rt' fields. |
| 1563 GenInstrRegister(SPECIAL2, rs, rd, rd, 0, CLZ); |
| 1564 } |
| 1565 |
| 1566 |
| 1567 void Assembler::ins_(Register rt, Register rs, uint16_t pos, uint16_t size) { |
| 1568 // Should be called via MacroAssembler::Ins. |
| 1569 // Ins instr has 'rt' field as dest, and two uint5: msb, lsb. |
| 1570 ASSERT(mips32r2); |
| 1571 GenInstrRegister(SPECIAL3, rs, rt, pos + size - 1, pos, INS); |
| 1572 } |
| 1573 |
| 1574 |
| 1575 void Assembler::ext_(Register rt, Register rs, uint16_t pos, uint16_t size) { |
| 1576 // Should be called via MacroAssembler::Ext. |
| 1577 // Ext instr has 'rt' field as dest, and two uint5: msb, lsb. |
| 1578 ASSERT(mips32r2); |
| 1579 GenInstrRegister(SPECIAL3, rs, rt, size - 1, pos, EXT); |
| 1580 } |
| 1581 |
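The msb/lsb fields written above carry the standard MIPS32R2 bit-field semantics: ext copies size bits of rs starting at pos into the low bits of rt, and ins replaces bits pos through pos+size-1 of rt with the low size bits of rs. A behavioral sketch (assumes 0 < size < 32 to keep the shifts defined):

#include <cstdint>

uint32_t ExtBits(uint32_t rs, int pos, int size) {
  return (rs >> pos) & ((1u << size) - 1);
}

uint32_t InsBits(uint32_t rt, uint32_t rs, int pos, int size) {
  uint32_t mask = ((1u << size) - 1) << pos;
  return (rt & ~mask) | ((rs << pos) & mask);
}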
| 1582 |
899 //--------Coprocessor-instructions---------------- | 1583 //--------Coprocessor-instructions---------------- |
900 | 1584 |
901 // Load, store, move. | 1585 // Load, store, move. |
902 void Assembler::lwc1(FPURegister fd, const MemOperand& src) { | 1586 void Assembler::lwc1(FPURegister fd, const MemOperand& src) { |
903 GenInstrImmediate(LWC1, src.rm(), fd, src.offset_); | 1587 GenInstrImmediate(LWC1, src.rm(), fd, src.offset_); |
904 } | 1588 } |
905 | 1589 |
906 | 1590 |
907 void Assembler::ldc1(FPURegister fd, const MemOperand& src) { | 1591 void Assembler::ldc1(FPURegister fd, const MemOperand& src) { |
908 GenInstrImmediate(LDC1, src.rm(), fd, src.offset_); | 1592 // Workaround for non-8-byte alignment of HeapNumber: convert the 64-bit |
| 1593 // load into two 32-bit loads. |
| 1594 GenInstrImmediate(LWC1, src.rm(), fd, src.offset_); |
| 1595 FPURegister nextfpreg; |
| 1596 nextfpreg.setcode(fd.code() + 1); |
| 1597 GenInstrImmediate(LWC1, src.rm(), nextfpreg, src.offset_ + 4); |
909 } | 1598 } |
910 | 1599 |
911 | 1600 |
912 void Assembler::swc1(FPURegister fd, const MemOperand& src) { | 1601 void Assembler::swc1(FPURegister fd, const MemOperand& src) { |
913 GenInstrImmediate(SWC1, src.rm(), fd, src.offset_); | 1602 GenInstrImmediate(SWC1, src.rm(), fd, src.offset_); |
914 } | 1603 } |
915 | 1604 |
916 | 1605 |
917 void Assembler::sdc1(FPURegister fd, const MemOperand& src) { | 1606 void Assembler::sdc1(FPURegister fd, const MemOperand& src) { |
918 GenInstrImmediate(SDC1, src.rm(), fd, src.offset_); | 1607 // Workaround for non-8-byte alignment of HeapNumber: convert the 64-bit |
| 1608 // store into two 32-bit stores. |
| 1609 GenInstrImmediate(SWC1, src.rm(), fd, src.offset_); |
| 1610 FPURegister nextfpreg; |
| 1611 nextfpreg.setcode(fd.code() + 1); |
| 1612 GenInstrImmediate(SWC1, src.rm(), nextfpreg, src.offset_ + 4); |
919 } | 1613 } |
920 | 1614 |
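Both workarounds rely on the little-endian layout of a double across an FPU register pair: the low word travels through fd at the original offset, the high word through fd + 1 at offset + 4. A host-side sketch of the split (assumes a little-endian host, matching the targets this code serves):

#include <cstdint>
#include <cstring>

void SplitDouble(double d, uint32_t* lo, uint32_t* hi) {
  uint32_t words[2];
  std::memcpy(words, &d, sizeof(d));  // Little-endian: words[0] is the low word.
  *lo = words[0];  // Moved via fd     at offset.
  *hi = words[1];  // Moved via fd + 1 at offset + 4.
}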
921 | 1615 |
922 void Assembler::mtc1(FPURegister fs, Register rt) { | 1616 void Assembler::mtc1(Register rt, FPURegister fs) { |
923 GenInstrRegister(COP1, MTC1, rt, fs, f0); | 1617 GenInstrRegister(COP1, MTC1, rt, fs, f0); |
924 } | 1618 } |
925 | 1619 |
926 | 1620 |
927 void Assembler::mthc1(FPURegister fs, Register rt) { | 1621 void Assembler::mfc1(Register rt, FPURegister fs) { |
928 GenInstrRegister(COP1, MTHC1, rt, fs, f0); | |
929 } | |
930 | |
931 | |
932 void Assembler::mfc1(FPURegister fs, Register rt) { | |
933 GenInstrRegister(COP1, MFC1, rt, fs, f0); | 1622 GenInstrRegister(COP1, MFC1, rt, fs, f0); |
934 } | 1623 } |
935 | 1624 |
936 | 1625 |
937 void Assembler::mfhc1(FPURegister fs, Register rt) { | 1626 void Assembler::ctc1(Register rt, FPUControlRegister fs) { |
938 GenInstrRegister(COP1, MFHC1, rt, fs, f0); | 1627 GenInstrRegister(COP1, CTC1, rt, fs); |
939 } | 1628 } |
940 | 1629 |
941 | 1630 |
| 1631 void Assembler::cfc1(Register rt, FPUControlRegister fs) { |
| 1632 GenInstrRegister(COP1, CFC1, rt, fs); |
| 1633 } |
| 1634 |
| 1635 |
| 1636 // Arithmetic. |
| 1637 |
| 1638 void Assembler::add_d(FPURegister fd, FPURegister fs, FPURegister ft) { |
| 1639 GenInstrRegister(COP1, D, ft, fs, fd, ADD_D); |
| 1640 } |
| 1641 |
| 1642 |
| 1643 void Assembler::sub_d(FPURegister fd, FPURegister fs, FPURegister ft) { |
| 1644 GenInstrRegister(COP1, D, ft, fs, fd, SUB_D); |
| 1645 } |
| 1646 |
| 1647 |
| 1648 void Assembler::mul_d(FPURegister fd, FPURegister fs, FPURegister ft) { |
| 1649 GenInstrRegister(COP1, D, ft, fs, fd, MUL_D); |
| 1650 } |
| 1651 |
| 1652 |
| 1653 void Assembler::div_d(FPURegister fd, FPURegister fs, FPURegister ft) { |
| 1654 GenInstrRegister(COP1, D, ft, fs, fd, DIV_D); |
| 1655 } |
| 1656 |
| 1657 |
| 1658 void Assembler::abs_d(FPURegister fd, FPURegister fs) { |
| 1659 GenInstrRegister(COP1, D, f0, fs, fd, ABS_D); |
| 1660 } |
| 1661 |
| 1662 |
| 1663 void Assembler::mov_d(FPURegister fd, FPURegister fs) { |
| 1664 GenInstrRegister(COP1, D, f0, fs, fd, MOV_D); |
| 1665 } |
| 1666 |
| 1667 |
| 1668 void Assembler::neg_d(FPURegister fd, FPURegister fs) { |
| 1669 GenInstrRegister(COP1, D, f0, fs, fd, NEG_D); |
| 1670 } |
| 1671 |
| 1672 |
| 1673 void Assembler::sqrt_d(FPURegister fd, FPURegister fs) { |
| 1674 GenInstrRegister(COP1, D, f0, fs, fd, SQRT_D); |
| 1675 } |
| 1676 |
| 1677 |
942 // Conversions. | 1678 // Conversions. |
943 | 1679 |
944 void Assembler::cvt_w_s(FPURegister fd, FPURegister fs) { | 1680 void Assembler::cvt_w_s(FPURegister fd, FPURegister fs) { |
945 GenInstrRegister(COP1, S, f0, fs, fd, CVT_W_S); | 1681 GenInstrRegister(COP1, S, f0, fs, fd, CVT_W_S); |
946 } | 1682 } |
947 | 1683 |
948 | 1684 |
949 void Assembler::cvt_w_d(FPURegister fd, FPURegister fs) { | 1685 void Assembler::cvt_w_d(FPURegister fd, FPURegister fs) { |
950 GenInstrRegister(COP1, D, f0, fs, fd, CVT_W_D); | 1686 GenInstrRegister(COP1, D, f0, fs, fd, CVT_W_D); |
951 } | 1687 } |
952 | 1688 |
953 | 1689 |
| 1690 void Assembler::trunc_w_s(FPURegister fd, FPURegister fs) { |
| 1691 GenInstrRegister(COP1, S, f0, fs, fd, TRUNC_W_S); |
| 1692 } |
| 1693 |
| 1694 |
| 1695 void Assembler::trunc_w_d(FPURegister fd, FPURegister fs) { |
| 1696 GenInstrRegister(COP1, D, f0, fs, fd, TRUNC_W_D); |
| 1697 } |
| 1698 |
| 1699 |
| 1700 void Assembler::round_w_s(FPURegister fd, FPURegister fs) { |
| 1701 GenInstrRegister(COP1, S, f0, fs, fd, ROUND_W_S); |
| 1702 } |
| 1703 |
| 1704 |
| 1705 void Assembler::round_w_d(FPURegister fd, FPURegister fs) { |
| 1706 GenInstrRegister(COP1, D, f0, fs, fd, ROUND_W_D); |
| 1707 } |
| 1708 |
| 1709 |
| 1710 void Assembler::floor_w_s(FPURegister fd, FPURegister fs) { |
| 1711 GenInstrRegister(COP1, S, f0, fs, fd, FLOOR_W_S); |
| 1712 } |
| 1713 |
| 1714 |
| 1715 void Assembler::floor_w_d(FPURegister fd, FPURegister fs) { |
| 1716 GenInstrRegister(COP1, D, f0, fs, fd, FLOOR_W_D); |
| 1717 } |
| 1718 |
| 1719 |
| 1720 void Assembler::ceil_w_s(FPURegister fd, FPURegister fs) { |
| 1721 GenInstrRegister(COP1, S, f0, fs, fd, CEIL_W_S); |
| 1722 } |
| 1723 |
| 1724 |
| 1725 void Assembler::ceil_w_d(FPURegister fd, FPURegister fs) { |
| 1726 GenInstrRegister(COP1, D, f0, fs, fd, CEIL_W_D); |
| 1727 } |
| 1728 |
| 1729 |
954 void Assembler::cvt_l_s(FPURegister fd, FPURegister fs) { | 1730 void Assembler::cvt_l_s(FPURegister fd, FPURegister fs) { |
| 1731 ASSERT(mips32r2); |
955 GenInstrRegister(COP1, S, f0, fs, fd, CVT_L_S); | 1732 GenInstrRegister(COP1, S, f0, fs, fd, CVT_L_S); |
956 } | 1733 } |
957 | 1734 |
958 | 1735 |
959 void Assembler::cvt_l_d(FPURegister fd, FPURegister fs) { | 1736 void Assembler::cvt_l_d(FPURegister fd, FPURegister fs) { |
| 1737 ASSERT(mips32r2); |
960 GenInstrRegister(COP1, D, f0, fs, fd, CVT_L_D); | 1738 GenInstrRegister(COP1, D, f0, fs, fd, CVT_L_D); |
961 } | 1739 } |
962 | 1740 |
963 | 1741 |
| 1742 void Assembler::trunc_l_s(FPURegister fd, FPURegister fs) { |
| 1743 ASSERT(mips32r2); |
| 1744 GenInstrRegister(COP1, S, f0, fs, fd, TRUNC_L_S); |
| 1745 } |
| 1746 |
| 1747 |
| 1748 void Assembler::trunc_l_d(FPURegister fd, FPURegister fs) { |
| 1749 ASSERT(mips32r2); |
| 1750 GenInstrRegister(COP1, D, f0, fs, fd, TRUNC_L_D); |
| 1751 } |
| 1752 |
| 1753 |
| 1754 void Assembler::round_l_s(FPURegister fd, FPURegister fs) { |
| 1755 GenInstrRegister(COP1, S, f0, fs, fd, ROUND_L_S); |
| 1756 } |
| 1757 |
| 1758 |
| 1759 void Assembler::round_l_d(FPURegister fd, FPURegister fs) { |
| 1760 GenInstrRegister(COP1, D, f0, fs, fd, ROUND_L_D); |
| 1761 } |
| 1762 |
| 1763 |
| 1764 void Assembler::floor_l_s(FPURegister fd, FPURegister fs) { |
| 1765 GenInstrRegister(COP1, S, f0, fs, fd, FLOOR_L_S); |
| 1766 } |
| 1767 |
| 1768 |
| 1769 void Assembler::floor_l_d(FPURegister fd, FPURegister fs) { |
| 1770 GenInstrRegister(COP1, D, f0, fs, fd, FLOOR_L_D); |
| 1771 } |
| 1772 |
| 1773 |
| 1774 void Assembler::ceil_l_s(FPURegister fd, FPURegister fs) { |
| 1775 GenInstrRegister(COP1, S, f0, fs, fd, CEIL_L_S); |
| 1776 } |
| 1777 |
| 1778 |
| 1779 void Assembler::ceil_l_d(FPURegister fd, FPURegister fs) { |
| 1780 GenInstrRegister(COP1, D, f0, fs, fd, CEIL_L_D); |
| 1781 } |
| 1782 |
| 1783 |
964 void Assembler::cvt_s_w(FPURegister fd, FPURegister fs) { | 1784 void Assembler::cvt_s_w(FPURegister fd, FPURegister fs) { |
965 GenInstrRegister(COP1, W, f0, fs, fd, CVT_S_W); | 1785 GenInstrRegister(COP1, W, f0, fs, fd, CVT_S_W); |
966 } | 1786 } |
967 | 1787 |
968 | 1788 |
969 void Assembler::cvt_s_l(FPURegister fd, FPURegister fs) { | 1789 void Assembler::cvt_s_l(FPURegister fd, FPURegister fs) { |
| 1790 ASSERT(mips32r2); |
970 GenInstrRegister(COP1, L, f0, fs, fd, CVT_S_L); | 1791 GenInstrRegister(COP1, L, f0, fs, fd, CVT_S_L); |
971 } | 1792 } |
972 | 1793 |
973 | 1794 |
974 void Assembler::cvt_s_d(FPURegister fd, FPURegister fs) { | 1795 void Assembler::cvt_s_d(FPURegister fd, FPURegister fs) { |
975 GenInstrRegister(COP1, D, f0, fs, fd, CVT_S_D); | 1796 GenInstrRegister(COP1, D, f0, fs, fd, CVT_S_D); |
976 } | 1797 } |
977 | 1798 |
978 | 1799 |
979 void Assembler::cvt_d_w(FPURegister fd, FPURegister fs) { | 1800 void Assembler::cvt_d_w(FPURegister fd, FPURegister fs) { |
980 GenInstrRegister(COP1, W, f0, fs, fd, CVT_D_W); | 1801 GenInstrRegister(COP1, W, f0, fs, fd, CVT_D_W); |
981 } | 1802 } |
982 | 1803 |
983 | 1804 |
984 void Assembler::cvt_d_l(FPURegister fd, FPURegister fs) { | 1805 void Assembler::cvt_d_l(FPURegister fd, FPURegister fs) { |
| 1806 ASSERT(mips32r2); |
985 GenInstrRegister(COP1, L, f0, fs, fd, CVT_D_L); | 1807 GenInstrRegister(COP1, L, f0, fs, fd, CVT_D_L); |
986 } | 1808 } |
987 | 1809 |
988 | 1810 |
989 void Assembler::cvt_d_s(FPURegister fd, FPURegister fs) { | 1811 void Assembler::cvt_d_s(FPURegister fd, FPURegister fs) { |
990 GenInstrRegister(COP1, S, f0, fs, fd, CVT_D_S); | 1812 GenInstrRegister(COP1, S, f0, fs, fd, CVT_D_S); |
991 } | 1813 } |
992 | 1814 |
993 | 1815 |
994 // Conditions. | 1816 // Conditions. |
995 void Assembler::c(FPUCondition cond, SecondaryField fmt, | 1817 void Assembler::c(FPUCondition cond, SecondaryField fmt, |
996 FPURegister ft, FPURegister fs, uint16_t cc) { | 1818 FPURegister fs, FPURegister ft, uint16_t cc) { |
| 1819 ASSERT(isolate()->cpu_features()->IsEnabled(FPU)); |
997 ASSERT(is_uint3(cc)); | 1820 ASSERT(is_uint3(cc)); |
998 ASSERT((fmt & ~(31 << kRsShift)) == 0); | 1821 ASSERT((fmt & ~(31 << kRsShift)) == 0); |
999 Instr instr = COP1 | fmt | ft.code() << 16 | fs.code() << kFsShift | 1822 Instr instr = COP1 | fmt | ft.code() << 16 | fs.code() << kFsShift |
1000 | cc << 8 | 3 << 4 | cond; | 1823 | cc << 8 | 3 << 4 | cond; |
1001 emit(instr); | 1824 emit(instr); |
1002 } | 1825 } |
1003 | 1826 |
1004 | 1827 |
| 1828 void Assembler::fcmp(FPURegister src1, const double src2, |
| 1829 FPUCondition cond) { |
| 1830 ASSERT(isolate()->cpu_features()->IsSupported(FPU)); |
| 1831 ASSERT(src2 == 0.0); |
| 1832 mtc1(zero_reg, f14); |
| 1833 cvt_d_w(f14, f14); |
| 1834 c(cond, D, src1, f14, 0); |
| 1835 } |
| 1836 |
| 1837 |
1005 void Assembler::bc1f(int16_t offset, uint16_t cc) { | 1838 void Assembler::bc1f(int16_t offset, uint16_t cc) { |
| 1839 ASSERT(isolate()->cpu_features()->IsEnabled(FPU)); |
1006 ASSERT(is_uint3(cc)); | 1840 ASSERT(is_uint3(cc)); |
1007 Instr instr = COP1 | BC1 | cc << 18 | 0 << 16 | (offset & kImm16Mask); | 1841 Instr instr = COP1 | BC1 | cc << 18 | 0 << 16 | (offset & kImm16Mask); |
1008 emit(instr); | 1842 emit(instr); |
1009 } | 1843 } |
1010 | 1844 |
1011 | 1845 |
1012 void Assembler::bc1t(int16_t offset, uint16_t cc) { | 1846 void Assembler::bc1t(int16_t offset, uint16_t cc) { |
| 1847 ASSERT(isolate()->cpu_features()->IsEnabled(FPU)); |
1013 ASSERT(is_uint3(cc)); | 1848 ASSERT(is_uint3(cc)); |
1014 Instr instr = COP1 | BC1 | cc << 18 | 1 << 16 | (offset & kImm16Mask); | 1849 Instr instr = COP1 | BC1 | cc << 18 | 1 << 16 | (offset & kImm16Mask); |
1015 emit(instr); | 1850 emit(instr); |
1016 } | 1851 } |
1017 | 1852 |
1018 | 1853 |
1019 // Debugging. | 1854 // Debugging. |
1020 void Assembler::RecordJSReturn() { | 1855 void Assembler::RecordJSReturn() { |
1021 WriteRecordedPositions(); | 1856 positions_recorder()->WriteRecordedPositions(); |
1022 CheckBuffer(); | 1857 CheckBuffer(); |
1023 RecordRelocInfo(RelocInfo::JS_RETURN); | 1858 RecordRelocInfo(RelocInfo::JS_RETURN); |
1024 } | 1859 } |
1025 | 1860 |
1026 | 1861 |
| 1862 void Assembler::RecordDebugBreakSlot() { |
| 1863 positions_recorder()->WriteRecordedPositions(); |
| 1864 CheckBuffer(); |
| 1865 RecordRelocInfo(RelocInfo::DEBUG_BREAK_SLOT); |
| 1866 } |
| 1867 |
| 1868 |
1027 void Assembler::RecordComment(const char* msg) { | 1869 void Assembler::RecordComment(const char* msg) { |
1028 if (FLAG_debug_code) { | 1870 if (FLAG_code_comments) { |
1029 CheckBuffer(); | 1871 CheckBuffer(); |
1030 RecordRelocInfo(RelocInfo::COMMENT, reinterpret_cast<intptr_t>(msg)); | 1872 RecordRelocInfo(RelocInfo::COMMENT, reinterpret_cast<intptr_t>(msg)); |
1031 } | 1873 } |
1032 } | 1874 } |
1033 | 1875 |
1034 | 1876 |
1035 void Assembler::RecordPosition(int pos) { | |
1036 if (pos == RelocInfo::kNoPosition) return; | |
1037 ASSERT(pos >= 0); | |
1038 current_position_ = pos; | |
1039 } | |
1040 | |
1041 | |
1042 void Assembler::RecordStatementPosition(int pos) { | |
1043 if (pos == RelocInfo::kNoPosition) return; | |
1044 ASSERT(pos >= 0); | |
1045 current_statement_position_ = pos; | |
1046 } | |
1047 | |
1048 | |
1049 bool Assembler::WriteRecordedPositions() { | |
1050 bool written = false; | |
1051 | |
1052 // Write the statement position if it is different from what was written last | |
1053 // time. | |
1054 if (current_statement_position_ != written_statement_position_) { | |
1055 CheckBuffer(); | |
1056 RecordRelocInfo(RelocInfo::STATEMENT_POSITION, current_statement_position_); | |
1057 written_statement_position_ = current_statement_position_; | |
1058 written = true; | |
1059 } | |
1060 | |
1061 // Write the position if it is different from what was written last time and | |
1062 // also different from the written statement position. | |
1063 if (current_position_ != written_position_ && | |
1064 current_position_ != written_statement_position_) { | |
1065 CheckBuffer(); | |
1066 RecordRelocInfo(RelocInfo::POSITION, current_position_); | |
1067 written_position_ = current_position_; | |
1068 written = true; | |
1069 } | |
1070 | |
1071 // Return whether something was written. | |
1072 return written; | |
1073 } | |
1074 | |
1075 | |
1076 void Assembler::GrowBuffer() { | 1877 void Assembler::GrowBuffer() { |
1077 if (!own_buffer_) FATAL("external code buffer is too small"); | 1878 if (!own_buffer_) FATAL("external code buffer is too small"); |
1078 | 1879 |
1079 // Compute new buffer size. | 1880 // Compute new buffer size. |
1080 CodeDesc desc; // the new buffer | 1881 CodeDesc desc; // The new buffer. |
1081 if (buffer_size_ < 4*KB) { | 1882 if (buffer_size_ < 4*KB) { |
1082 desc.buffer_size = 4*KB; | 1883 desc.buffer_size = 4*KB; |
1083 } else if (buffer_size_ < 1*MB) { | 1884 } else if (buffer_size_ < 1*MB) { |
1084 desc.buffer_size = 2*buffer_size_; | 1885 desc.buffer_size = 2*buffer_size_; |
1085 } else { | 1886 } else { |
1086 desc.buffer_size = buffer_size_ + 1*MB; | 1887 desc.buffer_size = buffer_size_ + 1*MB; |
1087 } | 1888 } |
1088 CHECK_GT(desc.buffer_size, 0); // no overflow | 1889 CHECK_GT(desc.buffer_size, 0); // No overflow. |
1089 | 1890 |
1090 // Setup new buffer. | 1891 // Setup new buffer. |
1091 desc.buffer = NewArray<byte>(desc.buffer_size); | 1892 desc.buffer = NewArray<byte>(desc.buffer_size); |
1092 | 1893 |
1093 desc.instr_size = pc_offset(); | 1894 desc.instr_size = pc_offset(); |
1094 desc.reloc_size = (buffer_ + buffer_size_) - reloc_info_writer.pos(); | 1895 desc.reloc_size = (buffer_ + buffer_size_) - reloc_info_writer.pos(); |
1095 | 1896 |
1096 // Copy the data. | 1897 // Copy the data. |
1097 int pc_delta = desc.buffer - buffer_; | 1898 int pc_delta = desc.buffer - buffer_; |
1098 int rc_delta = (desc.buffer + desc.buffer_size) - (buffer_ + buffer_size_); | 1899 int rc_delta = (desc.buffer + desc.buffer_size) - (buffer_ + buffer_size_); |
1099 memmove(desc.buffer, buffer_, desc.instr_size); | 1900 memmove(desc.buffer, buffer_, desc.instr_size); |
1100 memmove(reloc_info_writer.pos() + rc_delta, | 1901 memmove(reloc_info_writer.pos() + rc_delta, |
1101 reloc_info_writer.pos(), desc.reloc_size); | 1902 reloc_info_writer.pos(), desc.reloc_size); |
1102 | 1903 |
1103 // Switch buffers. | 1904 // Switch buffers. |
1104 DeleteArray(buffer_); | 1905 DeleteArray(buffer_); |
1105 buffer_ = desc.buffer; | 1906 buffer_ = desc.buffer; |
1106 buffer_size_ = desc.buffer_size; | 1907 buffer_size_ = desc.buffer_size; |
1107 pc_ += pc_delta; | 1908 pc_ += pc_delta; |
1108 reloc_info_writer.Reposition(reloc_info_writer.pos() + rc_delta, | 1909 reloc_info_writer.Reposition(reloc_info_writer.pos() + rc_delta, |
1109 reloc_info_writer.last_pc() + pc_delta); | 1910 reloc_info_writer.last_pc() + pc_delta); |
1110 | 1911 |
1111 | |
1112 // On ia32 and ARM pc relative addressing is used, and we thus need to apply a | 1912 // On ia32 and ARM pc relative addressing is used, and we thus need to apply a |
1113 // shift by pc_delta. But on MIPS the target address it directly loaded, so | 1913 // shift by pc_delta. But on MIPS the target address it directly loaded, so |
1114 // we do not need to relocate here. | 1914 // we do not need to relocate here. |
1115 | 1915 |
1116 ASSERT(!overflow()); | 1916 ASSERT(!overflow()); |
1117 } | 1917 } |
1118 | 1918 |
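The sizing policy above in isolation: start at 4 KB, double while under 1 MB, then grow linearly by 1 MB per step. A sketch mirroring the branches (not V8 code):

int NextBufferSize(int current_size) {
  const int KB = 1024;
  const int MB = KB * KB;
  if (current_size < 4 * KB) return 4 * KB;            // Minimum allocation.
  if (current_size < 1 * MB) return 2 * current_size;  // Geometric phase.
  return current_size + 1 * MB;                        // Linear phase.
}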
1119 | 1919 |
| 1920 void Assembler::db(uint8_t data) { |
| 1921 CheckBuffer(); |
| 1922 *reinterpret_cast<uint8_t*>(pc_) = data; |
| 1923 pc_ += sizeof(uint8_t); |
| 1924 } |
| 1925 |
| 1926 |
| 1927 void Assembler::dd(uint32_t data) { |
| 1928 CheckBuffer(); |
| 1929 *reinterpret_cast<uint32_t*>(pc_) = data; |
| 1930 pc_ += sizeof(uint32_t); |
| 1931 } |
| 1932 |
| 1933 |
1120 void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) { | 1934 void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) { |
1121 RelocInfo rinfo(pc_, rmode, data); // we do not try to reuse pool constants | 1935 RelocInfo rinfo(pc_, rmode, data); // We do not try to reuse pool constants. |
1122 if (rmode >= RelocInfo::JS_RETURN && rmode <= RelocInfo::STATEMENT_POSITION) { | 1936 if (rmode >= RelocInfo::JS_RETURN && rmode <= RelocInfo::DEBUG_BREAK_SLOT) { |
1123 // Adjust code for new modes. | 1937 // Adjust code for new modes. |
1124 ASSERT(RelocInfo::IsJSReturn(rmode) | 1938 ASSERT(RelocInfo::IsDebugBreakSlot(rmode) |
| 1939 || RelocInfo::IsJSReturn(rmode) |
1125 || RelocInfo::IsComment(rmode) | 1940 || RelocInfo::IsComment(rmode) |
1126 || RelocInfo::IsPosition(rmode)); | 1941 || RelocInfo::IsPosition(rmode)); |
1127 // These modes do not need an entry in the constant pool. | 1942 // These modes do not need an entry in the constant pool. |
1128 } | 1943 } |
1129 if (rinfo.rmode() != RelocInfo::NONE) { | 1944 if (rinfo.rmode() != RelocInfo::NONE) { |
1130 // Don't record external references unless the heap will be serialized. | 1945 // Don't record external references unless the heap will be serialized. |
1131 if (rmode == RelocInfo::EXTERNAL_REFERENCE && | 1946 if (rmode == RelocInfo::EXTERNAL_REFERENCE && |
1132 !Serializer::enabled() && | 1947 !Serializer::enabled() && |
1133 !FLAG_debug_code) { | 1948 !FLAG_debug_code) { |
1134 return; | 1949 return; |
1135 } | 1950 } |
1136 ASSERT(buffer_space() >= kMaxRelocSize); // too late to grow buffer here | 1951 ASSERT(buffer_space() >= kMaxRelocSize); // Too late to grow buffer here. |
1137 reloc_info_writer.Write(&rinfo); | 1952 reloc_info_writer.Write(&rinfo); |
1138 } | 1953 } |
1139 } | 1954 } |
1140 | 1955 |
1141 | 1956 |
| 1957 void Assembler::BlockTrampolinePoolFor(int instructions) { |
| 1958 BlockTrampolinePoolBefore(pc_offset() + instructions * kInstrSize); |
| 1959 } |
| 1960 |
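A hypothetical use of the call above, written against the emitters defined in this file: a branch and its delay slot must never be split by a pool, so emission is blocked for exactly those two instructions.

// Sketch only: EmitBranchWithDelaySlot is not a V8 function.
void EmitBranchWithDelaySlot(Assembler* masm, Label* target) {
  masm->BlockTrampolinePoolFor(2);  // Covers the next two instructions.
  masm->b(target);                  // The branch.
  masm->nop();                      // Its delay slot stays attached.
}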
| 1961 |
| 1962 void Assembler::CheckTrampolinePool(bool force_emit) { |
| 1963 // Calculate the offset of the next check. |
| 1964 next_buffer_check_ = pc_offset() + kCheckConstInterval; |
| 1965 |
| 1966 int dist = pc_offset() - last_trampoline_pool_end_; |
| 1967 |
| 1968 if (dist <= kMaxDistBetweenPools && !force_emit) { |
| 1969 return; |
| 1970 } |
| 1971 |
| 1972 // Some small sequences of instructions must not be broken up by the |
| 1973 // insertion of a trampoline pool; such sequences are protected by setting |
| 1974 // either trampoline_pool_blocked_nesting_ or no_trampoline_pool_before_, |
| 1975 // which are both checked here. Also, recursive calls to CheckTrampolinePool |
| 1976 // are blocked by trampoline_pool_blocked_nesting_. |
| 1977 if ((trampoline_pool_blocked_nesting_ > 0) || |
| 1978 (pc_offset() < no_trampoline_pool_before_)) { |
| 1979 // Emission is currently blocked; make sure we try again as soon as |
| 1980 // possible. |
| 1981 if (trampoline_pool_blocked_nesting_ > 0) { |
| 1982 next_buffer_check_ = pc_offset() + kInstrSize; |
| 1983 } else { |
| 1984 next_buffer_check_ = no_trampoline_pool_before_; |
| 1985 } |
| 1986 return; |
| 1987 } |
| 1988 |
| 1989 // First we emit jump (2 instructions), then we emit trampoline pool. |
| 1990 { BlockTrampolinePoolScope block_trampoline_pool(this); |
| 1991 Label after_pool; |
| 1992 b(&after_pool); |
| 1993 nop(); |
| 1994 |
| 1995 int pool_start = pc_offset(); |
| 1996 for (int i = 0; i < kSlotsPerTrampoline; i++) { |
| 1997 b(&after_pool); |
| 1998 nop(); |
| 1999 } |
| 2000 for (int i = 0; i < kLabelsPerTrampoline; i++) { |
| 2001 emit(0); |
| 2002 } |
| 2003 last_trampoline_pool_end_ = pc_offset() - kInstrSize; |
| 2004 bind(&after_pool); |
| 2005 trampolines_.Add(Trampoline(pool_start, |
| 2006 kSlotsPerTrampoline, |
| 2007 kLabelsPerTrampoline)); |
| 2008 |
| 2009 // Since a trampoline pool was just emitted, |
| 2010 // move the check offset forward by the standard interval. |
| 2011 next_buffer_check_ = last_trampoline_pool_end_ + kMaxDistBetweenPools; |
| 2012 } |
| 2013 return; |
| 2014 } |
| 2015 |
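In code-space terms the pool emitted above has a fixed footprint: the two-instruction jump over it, a branch-plus-nop pair per slot, and one data word per label entry. A sketch of that count, under the layout shown in the block above:

int TrampolinePoolWords(int slots, int labels) {
  return 2 + 2 * slots + labels;  // Jump over pool + slot pairs + label words.
}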
| 2016 |
1142 Address Assembler::target_address_at(Address pc) { | 2017 Address Assembler::target_address_at(Address pc) { |
1143 Instr instr1 = instr_at(pc); | 2018 Instr instr1 = instr_at(pc); |
1144 Instr instr2 = instr_at(pc + kInstrSize); | 2019 Instr instr2 = instr_at(pc + kInstrSize); |
1145 // Check we have 2 instructions generated by li. | 2020 // Check we have 2 instructions generated by li. |
1146 ASSERT(((instr1 & kOpcodeMask) == LUI && (instr2 & kOpcodeMask) == ORI) || | 2021 ASSERT(((instr1 & kOpcodeMask) == LUI && (instr2 & kOpcodeMask) == ORI) || |
1147 ((instr1 == nopInstr) && ((instr2 & kOpcodeMask) == ADDI || | 2022 ((instr1 == nopInstr) && ((instr2 & kOpcodeMask) == ADDI || |
1148 (instr2 & kOpcodeMask) == ORI || | 2023 (instr2 & kOpcodeMask) == ORI || |
1149 (instr2 & kOpcodeMask) == LUI))); | 2024 (instr2 & kOpcodeMask) == LUI))); |
1150 // Interpret these 2 instructions. | 2025 // Interpret these 2 instructions. |
1151 if (instr1 == nopInstr) { | 2026 if (instr1 == nopInstr) { |
1152 if ((instr2 & kOpcodeMask) == ADDI) { | 2027 if ((instr2 & kOpcodeMask) == ADDI) { |
1153 return reinterpret_cast<Address>(((instr2 & kImm16Mask) << 16) >> 16); | 2028 return reinterpret_cast<Address>(((instr2 & kImm16Mask) << 16) >> 16); |
1154 } else if ((instr2 & kOpcodeMask) == ORI) { | 2029 } else if ((instr2 & kOpcodeMask) == ORI) { |
1155 return reinterpret_cast<Address>(instr2 & kImm16Mask); | 2030 return reinterpret_cast<Address>(instr2 & kImm16Mask); |
1156 } else if ((instr2 & kOpcodeMask) == LUI) { | 2031 } else if ((instr2 & kOpcodeMask) == LUI) { |
1157 return reinterpret_cast<Address>((instr2 & kImm16Mask) << 16); | 2032 return reinterpret_cast<Address>((instr2 & kImm16Mask) << 16); |
1158 } | 2033 } |
1159 } else if ((instr1 & kOpcodeMask) == LUI && (instr2 & kOpcodeMask) == ORI) { | 2034 } else if ((instr1 & kOpcodeMask) == LUI && (instr2 & kOpcodeMask) == ORI) { |
1160 // 32 bits value. | 2035 // 32 bit value. |
1161 return reinterpret_cast<Address>( | 2036 return reinterpret_cast<Address>( |
1162 (instr1 & kImm16Mask) << 16 | (instr2 & kImm16Mask)); | 2037 (instr1 & kImm16Mask) << 16 | (instr2 & kImm16Mask)); |
1163 } | 2038 } |
1164 | 2039 |
1165 // We should never get here. | 2040 // We should never get here. |
1166 UNREACHABLE(); | 2041 UNREACHABLE(); |
1167 return (Address)0x0; | 2042 return (Address)0x0; |
1168 } | 2043 } |
1169 | 2044 |
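The common case above, a lui/ori pair, reconstructs the full 32-bit address with one line of bit surgery. A sketch (assumes the conventional kImm16Mask value of 0xFFFF):

#include <cstdint>

uint32_t DecodeLuiOri(uint32_t lui_instr, uint32_t ori_instr) {
  return ((lui_instr & 0xFFFFu) << 16) | (ori_instr & 0xFFFFu);
}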
1170 | 2045 |
1171 void Assembler::set_target_address_at(Address pc, Address target) { | 2046 void Assembler::set_target_address_at(Address pc, Address target) { |
1172 // On MIPS we need to patch the generated code in place. | 2047 // On MIPS we need to patch the generated code in place. |
1173 | 2048 |
1174 // First check we have a li. | 2049 // First check we have a li. |
1175 Instr instr2 = instr_at(pc + kInstrSize); | 2050 Instr instr2 = instr_at(pc + kInstrSize); |
1176 #ifdef DEBUG | 2051 #ifdef DEBUG |
1177 Instr instr1 = instr_at(pc); | 2052 Instr instr1 = instr_at(pc); |
1178 | 2053 |
1179 // Check we have indeed the result from a li with MustUseAt true. | 2054 // Check we have indeed the result from a li with MustUseReg true. |
1180 CHECK(((instr1 & kOpcodeMask) == LUI && (instr2 & kOpcodeMask) == ORI) || | 2055 CHECK(((instr1 & kOpcodeMask) == LUI && (instr2 & kOpcodeMask) == ORI) || |
1181 ((instr1 == 0) && ((instr2 & kOpcodeMask)== ADDIU || | 2056 ((instr1 == 0) && ((instr2 & kOpcodeMask)== ADDIU || |
1182 (instr2 & kOpcodeMask)== ORI || | 2057 (instr2 & kOpcodeMask)== ORI || |
1183 (instr2 & kOpcodeMask)== LUI))); | 2058 (instr2 & kOpcodeMask)== LUI))); |
1184 #endif | 2059 #endif |
1185 | 2060 |
1186 | |
1187 uint32_t rt_code = (instr2 & kRtFieldMask); | 2061 uint32_t rt_code = (instr2 & kRtFieldMask); |
1188 uint32_t* p = reinterpret_cast<uint32_t*>(pc); | 2062 uint32_t* p = reinterpret_cast<uint32_t*>(pc); |
1189 uint32_t itarget = reinterpret_cast<uint32_t>(target); | 2063 uint32_t itarget = reinterpret_cast<uint32_t>(target); |
1190 | 2064 |
1191 if (is_int16(itarget)) { | 2065 if (is_int16(itarget)) { |
1192 // nop | 2066 // nop. |
1193 // addiu rt zero_reg j | 2067 // addiu rt zero_reg j. |
1194 *p = nopInstr; | 2068 *p = nopInstr; |
1195 *(p+1) = ADDIU | rt_code | (itarget & LOMask); | 2069 *(p+1) = ADDIU | rt_code | (itarget & kImm16Mask); |
1196 } else if (!(itarget & HIMask)) { | 2070 } else if (!(itarget & kHiMask)) { |
1197 // nop | 2071 // nop. |
1198 // ori rt zero_reg j | 2072 // ori rt zero_reg j. |
1199 *p = nopInstr; | 2073 *p = nopInstr; |
1200 *(p+1) = ORI | rt_code | (itarget & LOMask); | 2074 *(p+1) = ORI | rt_code | (itarget & kImm16Mask); |
1201 } else if (!(itarget & LOMask)) { | 2075 } else if (!(itarget & kImm16Mask)) { |
1202 // nop | 2076 // nop. |
1203 // lui rt (HIMask & itarget)>>16 | 2077 // lui rt (kHiMask & itarget) >> kLuiShift. |
1204 *p = nopInstr; | 2078 *p = nopInstr; |
1205 *(p+1) = LUI | rt_code | ((itarget & HIMask)>>16); | 2079 *(p+1) = LUI | rt_code | ((itarget & kHiMask) >> kLuiShift); |
1206 } else { | 2080 } else { |
1207 // lui rt (HIMask & itarget)>>16 | 2081 // lui rt (kHiMask & itarget) >> kLuiShift. |
1208 // ori rt rt, (LOMask & itarget) | 2082 // ori rt rt, (kImm16Mask & itarget). |
1209 *p = LUI | rt_code | ((itarget & HIMask)>>16); | 2083 *p = LUI | rt_code | ((itarget & kHiMask) >> kLuiShift); |
1210 *(p+1) = ORI | rt_code | (rt_code << 5) | (itarget & LOMask); | 2084 *(p+1) = ORI | rt_code | (rt_code << 5) | (itarget & kImm16Mask); |
1211 } | 2085 } |
1212 | 2086 |
1213 CPU::FlushICache(pc, 2 * sizeof(int32_t)); | 2087 CPU::FlushICache(pc, 2 * sizeof(int32_t)); |
1214 } | 2088 } |
1215 | 2089 |
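The case analysis in set_target_address_at, pulled out as a standalone sketch (constants assumed: kHiMask == 0xFFFF0000, kImm16Mask == 0xFFFF):

#include <cstdint>

enum LiForm { kNopAddiu, kNopOri, kNopLui, kLuiOri };

LiForm ChooseLiForm(uint32_t itarget) {
  int32_t s = static_cast<int32_t>(itarget);
  if (s >= -32768 && s < 32768) return kNopAddiu;    // Fits in int16.
  if ((itarget & 0xFFFF0000u) == 0) return kNopOri;  // No high bits set.
  if ((itarget & 0x0000FFFFu) == 0) return kNopLui;  // No low bits set.
  return kLuiOri;                                    // Full 32-bit form.
}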
1216 | 2090 |
1217 } } // namespace v8::internal | 2091 } } // namespace v8::internal |
1218 | 2092 |
1219 #endif // V8_TARGET_ARCH_MIPS | 2093 #endif // V8_TARGET_ARCH_MIPS |