Chromium Code Reviews

Side by Side Diff: src/sh4/assembler-sh4.h

Issue 11275184: First draft of the sh4 port Base URL: http://github.com/v8/v8.git@master
Patch Set: Use GYP and fix some typos Created 8 years, 1 month ago
1 // Copyright 2011-2012 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are
4 // met:
5 //
6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided
11 // with the distribution.
12 // * Neither the name of Google Inc. nor the names of its
13 // contributors may be used to endorse or promote products derived
14 // from this software without specific prior written permission.
15 //
16 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27
28 // A light-weight SH4 Assembler.
29
30 #ifndef V8_SH4_ASSEMBLER_SH4_H_
31 #define V8_SH4_ASSEMBLER_SH4_H_
32
33 #include "assembler.h"
34 #include "serialize.h"
35
36 namespace v8 {
37 namespace internal {
38
39 // CPU Registers.
40 //
41 // 1) We would prefer to use an enum, but enum values are assignment-
42 // compatible with int, which has caused code-generation bugs.
43 //
44 // 2) We would prefer to use a class instead of a struct but we don't like
45 // the register initialization to depend on the particular initialization
46 // order (which appears to be different on OS X, Linux, and Windows for the
47 // installed versions of C++ we tried). Using a struct permits C-style
48 // "initialization". Also, the Register objects cannot be const as this
49 // forces initialization stubs in MSVC, making us dependent on initialization
50 // order.
51 //
52 // 3) By not using an enum, we are possibly preventing the compiler from
53 // doing certain constant folds, which may significantly reduce the
54 // code generated for some assembly instructions (because they boil down
55 // to a few constants). If this is a problem, we could change the code
56 // such that we use an enum in optimized mode, and the struct in debug
57 // mode. This way we get the compile-time error checking in debug mode
58 // and best performance in optimized code.
59
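// For instance (an editorial sketch, not part of this patch), the struct
// keeps int and Register from mixing silently, which an enum would allow:
//
//   Register r = { 3 };   // C-style initialization still works
//   Register bad = 3;     // does not compile: no implicit conversion from int
//   int n = r;            // does not compile either; use r.code() instead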
60 // Core register
61 struct Register {
62 static const int kNumRegisters = 16;
63 static const int kNumAllocatableRegisters = 8;
64 static const int kSizeInBytes = 4;
65
66 static int ToAllocationIndex(Register reg) {
67 ASSERT(reg.code() < kNumAllocatableRegisters);
68 return reg.code();
69 }
70
71 static Register FromAllocationIndex(int index) {
72 ASSERT(index >= 0 && index < kNumAllocatableRegisters);
73 return from_code(index);
74 }
75
76 static const char* AllocationIndexToString(int index) {
77 ASSERT(index >= 0 && index < kNumAllocatableRegisters);
78 const char* const names[] = {
79 "r0",
80 "r1",
81 "r2",
82 "r3",
83 "r4",
84 "r5",
85 "r6",
86 "r7",
87 };
88 return names[index];
89 }
90
91 static Register from_code(int code) {
92 Register r = { code };
93 return r;
94 }
95
96 bool is_valid() const { return 0 <= code_ && code_ < kNumRegisters; }
97 bool is(Register reg) const { return code_ == reg.code_; }
98 int code() const {
99 ASSERT(is_valid());
100 return code_;
101 }
102 int bit() const {
103 ASSERT(is_valid());
104 return 1 << code_;
105 }
106
107 void set_code(int code) {
108 code_ = code;
109 ASSERT(is_valid());
110 }
111
112 // Unfortunately we can't make this private in a struct.
113 int code_;
114 };
115
116
117 #define REGISTER(N, C) \
118 const int kRegister_ ## N ## _Code = C; \
119 const Register N = { C }
120
121 REGISTER(no_reg, -1);
122 REGISTER(r0, 0); // ABI caller saved/return, idem for JIT
123 REGISTER(r1, 1); // ABI caller saved/return, idem for JIT
124 REGISTER(r2, 2); // ABI caller saved/return, idem for JIT
125 REGISTER(r3, 3); // ABI caller saved/return, idem for JIT
126 REGISTER(r4, 4); // ABI caller saved/param, idem for JIT
127 REGISTER(r5, 5); // ABI caller saved/param, idem for JIT
128 REGISTER(r6, 6); // ABI caller saved/param, idem for JIT
129 REGISTER(r7, 7); // ABI caller saved/param, idem for JIT
130 REGISTER(r8, 8); // ABI callee saved, idem for JIT
131 REGISTER(r9, 9); // ABI callee saved, idem for JIT
132 REGISTER(r10, 10); // ABI callee saved, idem for JIT
133 REGISTER(r11, 11); // ABI callee saved, idem for JIT
134 REGISTER(roots, 12); // ABI GP, root table pointer for JIT
135 REGISTER(cp, 13); // ABI callee saved, context pointer for JIT
136 REGISTER(fp, 14); // ABI FP, idem for JIT
137 REGISTER(sp, 15); // ABI SP, idem for JIT
138
139 const Register pr = { -2 }; // Link register
140
141 const Register sh4_r0 = r0;
142 const Register sh4_r1 = r1;
143 const Register sh4_r2 = r2;
144 const Register sh4_r3 = r3;
145 const Register sh4_r4 = r4;
146 const Register sh4_r5 = r5;
147 const Register sh4_r6 = r6;
148 const Register sh4_r7 = r7;
149 const Register sh4_r8 = r8;
150 const Register sh4_r9 = r9;
151 const Register sh4_r10 = r10;
152 const Register sh4_r11 = r11;
153 const Register sh4_r12 = roots;
154 const Register sh4_r13 = cp;
155 const Register sh4_rtmp = r11; // Used for low-level code (assembler-sh4.cc)
156 const Register sh4_ip = r10; // Used as additional scratch in JS code
157
158
159 // Single word VFP register.
160 struct SwVfpRegister {
161 bool is_valid() const { return 0 <= code_ && code_ < 16; }
162 bool is(SwVfpRegister reg) const { return code_ == reg.code_; }
163 int code() const {
164 ASSERT(is_valid());
165 return code_;
166 }
167 int bit() const {
168 ASSERT(is_valid());
169 return 1 << code_;
170 }
171 static SwVfpRegister from_code(int code) {
172 SwVfpRegister r = { code };
173 return r;
174 }
175 void split_code(int* vm, int* m) const {
176 ASSERT(is_valid());
177 *m = code_ & 0x1;
178 *vm = code_ >> 1;
179 }
180
181 int code_;
182 };
183
184
185 // Double word VFP register.
186 struct DwVfpRegister {
187 static const int kNumRegisters = 8;
188 static const int kNumAllocatableRegisters = 8;
189
190 static int ToAllocationIndex(DwVfpRegister reg) {
191 ASSERT(reg.code() != 0);
192 return reg.code();
193 }
194
195 static DwVfpRegister FromAllocationIndex(int index) {
196 ASSERT(index >= 0 && index < kNumAllocatableRegisters);
197 return from_code(index);
198 }
199
200 static const char* AllocationIndexToString(int index) {
201 ASSERT(index >= 0 && index < kNumAllocatableRegisters);
202 const char* const names[] = {
203 "dr0",
204 "dr2",
205 "dr4",
206 "dr6",
207 "dr8",
208 "dr10",
209 "dr12",
210 "dr14",
211 };
212 return names[index];
213 }
214
215 static DwVfpRegister from_code(int code) {
216 DwVfpRegister r = { code };
217 return r;
218 }
219
220 // Supporting dr0 to dr14 (codes 0 through 14 are valid)
221 bool is_valid() const { return 0 <= code_ && code_ < kNumRegisters * 2 - 1; }
222 bool is(DwVfpRegister reg) const { return code_ == reg.code_; }
223 SwVfpRegister low() const {
224 SwVfpRegister reg;
225 reg.code_ = code_;
226
227 ASSERT(reg.is_valid());
228 return reg;
229 }
230 SwVfpRegister high() const {
231 SwVfpRegister reg;
232 reg.code_ = code_ + 1;
233
234 ASSERT(reg.is_valid());
235 return reg;
236 }
237 int code() const {
238 ASSERT(is_valid());
239 return code_;
240 }
241 int bit() const {
242 ASSERT(is_valid());
243 return 1 << code_;
244 }
245 void split_code(int* vm, int* m) const {
246 ASSERT(is_valid());
247 *m = (code_ & 0x10) >> 4;
248 *vm = code_ & 0x0F;
249 }
250
251 int code_;
252 };
253
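// As noted with the register definitions below, "fr(N):fr(N+1)" is the same
// as "dr(N)", so low()/high() recover the two single halves of a double
// register. A hypothetical example:
//
//   DwVfpRegister d = DwVfpRegister::from_code(2);  // dr2
//   SwVfpRegister lo = d.low();                     // fr2
//   SwVfpRegister hi = d.high();                    // fr3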
254 typedef DwVfpRegister DoubleRegister;
255
256 // Support for the VFP registers fr0 to fr15 (dr0 to dr7).
257 // Note that "fr(N):fr(N+1)" is the same as "dr(N)".
258 const SwVfpRegister no_freg = { -1 };
259 const SwVfpRegister fr0 = { 0 };
260 const SwVfpRegister fr1 = { 1 };
261 const SwVfpRegister fr2 = { 2 };
262 const SwVfpRegister fr3 = { 3 };
263 const SwVfpRegister fr4 = { 4 };
264 const SwVfpRegister fr5 = { 5 };
265 const SwVfpRegister fr6 = { 6 };
266 const SwVfpRegister fr7 = { 7 };
267 const SwVfpRegister fr8 = { 8 };
268 const SwVfpRegister fr9 = { 9 };
269 const SwVfpRegister fr10 = { 10 };
270 const SwVfpRegister fr11 = { 11 };
271 // Callee saved registers
272 // Using these registers is forbidden for the moment as we do not save/restore
273 // them across ABI boundaries.
274 // const SwVfpRegister fr12 = { 12 };
275 // const SwVfpRegister fr13 = { 13 };
276 // const SwVfpRegister fr14 = { 14 };
277 // const SwVfpRegister fr15 = { 15 };
278
279
280 // Caller saved registers
281 const DwVfpRegister no_dreg = { -1 };
282 const DwVfpRegister dr0 = { 0 };
283 const DwVfpRegister dr2 = { 2 };
284 const DwVfpRegister dr4 = { 4 };
285 const DwVfpRegister dr6 = { 6 };
286 const DwVfpRegister dr8 = { 8 };
287 const DwVfpRegister dr10 = { 10 };
288 // Callee saved registers.
289 // The use of these registers is forbidden for the moment as we do not
290 // save/restore them across ABI boundaries.
291 // const DwVfpRegister dr12 = { 12 };
292 // const DwVfpRegister dr14 = { 14 };
293
294 enum Condition {
295 // any value < 0 is considered no_condition
296 kNoCondition = -1,
297
298 eq = 0 << 28, // equal
299 ne = 1 << 28, // not equal
300 gt = 2 << 28, // greater
301 ge = 3 << 28, // greater or equal
302 hi = 4 << 28, // unsigned higher
303 hs = 5 << 28, // unsigned higher or equal
304 lt = 6 << 28, // less than
305 le = 7 << 28, // less than or equal
306 ui = 8 << 28, // unsigned lower
307 us = 9 << 28, // unsigned lower or equal
308 pl = 10 << 28, // positive
309 pz = 11 << 28, // positive or zero
310 ql = 12 << 28, // negative
311 qz = 13 << 28, // negative or zero
312 al = 14 << 28, // Always
313
314 // Aliases
315 t = eq, // branch if the T bit is set; after SH4 cmpeq/cmp, this is eq
316 f = ne // branch if the T bit is clear; after SH4 cmpeq/cmp, this is ne
317 };
318
319 enum AddrMode {
320 PreIndex,
321 PostIndex,
322 Offset
323 };
324
325
326 // We use the cause fields because they are set to 1 or 0 depending on the
327 // action, so they do not need to be reset, but they must be used immediately.
328 static const uint32_t kFPUExceptionMask = 0x1f << 12;
329 static const uint32_t kFPUInexactExceptionBit = 1 << 12;
330 static const uint32_t kFPUUnderflowExceptionBit = 1 << 13;
331 static const uint32_t kFPUOverflowExceptionBit = 1 << 14;
332 static const uint32_t kFPUDividezeroExceptionBit = 1 << 15;
333 static const uint32_t kFPUInvalidExceptionBit = 1 << 16;
334
335 // FPU rounding modes.
336 enum FPURoundingMode {
337 RN = 0, // Round to Nearest.
338 RZ = 1, // Round towards zero.
339
340 // Aliases.
341 kRoundToNearest = RN,
342 kRoundToZero = RZ
343 };
344
345 static const uint32_t kFPURoundingModeMask = 1;
346
347 enum CheckForInexactConversion {
348 kCheckForInexactConversion,
349 kDontCheckForInexactConversion
350 };
351
352
353 // Returns the equivalent of !cc.
354 // Negation of the default no_condition (-1) results in a non-default
355 // no_condition value (-2). As long as tests for no_condition check
356 // for condition < 0, this will work as expected.
357 inline Condition NegateCondition(Condition cc) {
358 switch (cc) {
359 case eq:
360 return ne;
361 case ne:
362 return eq;
363 case gt:
364 return le;
365 case ge:
366 return lt;
367 case hi:
368 return us;
369 case hs:
370 return ui;
371 case lt:
372 return ge;
373 case le:
374 return gt;
375 case ui:
376 return hs;
377 case us:
378 return hi;
379 case pl:
380 return qz;
381 case pz:
382 return ql;
383 case ql:
384 return pz;
385 case qz:
386 return pl;
387 default:
388 return cc;
389 }
390 }
391
392
393 // Corresponds to transposing the operands of a comparison.
394 inline Condition ReverseCondition(Condition cc) {
395 UNIMPLEMENTED();
396 return ne;
397 }
398
399
400 // -----------------------------------------------------------------------------
401 // Machine instruction Operands
402
403 enum ScaleFactor {
404 times_1 = 0,
405 times_2 = 1,
406 times_4 = 2,
407 times_8 = 3,
408 times_int_size = times_4,
409 times_half_pointer_size = times_2,
410 times_pointer_size = times_4,
411 times_twice_pointer_size = times_8
412 };
413
414
415 class Operand BASE_EMBEDDED {
416 public:
417 inline explicit Operand(int32_t immediate,
418 RelocInfo::Mode rmode = RelocInfo::NONE);
419 inline explicit Operand(const ExternalReference& f);
420 inline explicit Operand(Smi* value);
421 inline static Operand Zero() {
422 return Operand(static_cast<int32_t>(0));
423 }
424 explicit Operand(Handle<Object> handle);
425
426 bool is_int8() const {
427 return -128 <= imm32_ && imm32_ < 128 && rmode_ == RelocInfo::NONE;
428 }
429
430 private:
431 int32_t imm32_;
432 RelocInfo::Mode rmode_;
433
434 friend class Assembler;
435 };
436
437
438 class MemOperand BASE_EMBEDDED {
439 public:
440 INLINE(explicit MemOperand(Register Rd, int32_t offset = 0,
441 AddrMode mode = Offset));
442 INLINE(explicit MemOperand(Register Rd, Register offset));
443
444 void set_offset(int32_t offset) {
445 ASSERT(rn_.is(no_reg));
446 offset_ = offset;
447 }
448
449 uint32_t offset() const {
450 ASSERT(rn_.is(no_reg));
451 return offset_;
452 }
453
454 Register rn() const { return rn_; }
455 Register rm() const { return rm_; }
456
457 private:
458 Register rm_;
459 Register rn_;
460 int32_t offset_;
461 AddrMode mode_;
462
463 friend class Assembler;
464 };
465
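// A sketch of how these operand types combine with the mov() wrappers
// declared further below on the Assembler (hypothetical snippet; "masm" is
// an assumed Assembler* name):
//
//   masm->mov(r0, Operand(42));          // load an immediate
//   masm->mov(r1, MemOperand(sp, 4));    // load from sp + 4
//   masm->mov(MemOperand(r0, r2), r1);   // store with register offset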
466 class CpuFeatures : public AllStatic {
467 public:
468 // Detect features of the target CPU. Set safe defaults if the serializer
469 // is enabled (snapshots must be portable).
470 static void Probe();
471
472 // Check whether a feature is supported by the target CPU.
473 static bool IsSupported(CpuFeature f) {
474 ASSERT(initialized_);
475 if (f == FPU && !FLAG_enable_fpu) return false;
476 return (supported_ & (1u << f)) != 0;
477 }
478
479 #ifdef DEBUG
480 // Check whether a feature is currently enabled.
481 static bool IsEnabled(CpuFeature f) {
482 ASSERT(initialized_);
483 Isolate* isolate = Isolate::UncheckedCurrent();
484 if (isolate == NULL) {
485 // When no isolate is available, work as if we're running in
486 // release mode.
487 return IsSupported(f);
488 }
489 unsigned enabled = static_cast<unsigned>(isolate->enabled_cpu_features());
490 return (enabled & (1u << f)) != 0;
491 }
492 #endif
493
494 // Enable a specified feature within a scope.
495 class Scope BASE_EMBEDDED {
496 #ifdef DEBUG
497
498 public:
499 explicit Scope(CpuFeature f) {
500 unsigned mask = 1u << f;
501 ASSERT(CpuFeatures::IsSupported(f));
502 ASSERT(!Serializer::enabled() ||
503 (CpuFeatures::found_by_runtime_probing_ & mask) == 0);
504 isolate_ = Isolate::UncheckedCurrent();
505 old_enabled_ = 0;
506 if (isolate_ != NULL) {
507 old_enabled_ = static_cast<unsigned>(isolate_->enabled_cpu_features());
508 isolate_->set_enabled_cpu_features(old_enabled_ | mask);
509 }
510 }
511 ~Scope() {
512 ASSERT_EQ(Isolate::UncheckedCurrent(), isolate_);
513 if (isolate_ != NULL) {
514 isolate_->set_enabled_cpu_features(old_enabled_);
515 }
516 }
517
518 private:
519 Isolate* isolate_;
520 unsigned old_enabled_;
521 #else
522
523 public:
524 explicit Scope(CpuFeature f) {}
525 #endif
526 };
527
528 class TryForceFeatureScope BASE_EMBEDDED {
529 public:
530 explicit TryForceFeatureScope(CpuFeature f)
531 : old_supported_(CpuFeatures::supported_) {
532 if (CanForce()) {
533 CpuFeatures::supported_ |= (1u << f);
534 }
535 }
536
537 ~TryForceFeatureScope() {
538 if (CanForce()) {
539 CpuFeatures::supported_ = old_supported_;
540 }
541 }
542
543 private:
544 static bool CanForce() {
545 // It's only safe to temporarily force support of CPU features
546 // when there's only a single isolate, which is guaranteed when
547 // the serializer is enabled.
548 return Serializer::enabled();
549 }
550
551 const unsigned old_supported_;
552 };
553
554 private:
555 #ifdef DEBUG
556 static bool initialized_;
557 #endif
558 static unsigned supported_;
559 static unsigned found_by_runtime_probing_;
560
561 DISALLOW_COPY_AND_ASSIGN(CpuFeatures);
562 };
563
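// Typical usage of the feature scope, following the pattern used on the
// other ports (a sketch, not code from this patch):
//
//   if (CpuFeatures::IsSupported(FPU)) {
//     CpuFeatures::Scope scope(FPU);  // marks FPU as enabled in debug mode
//     // ... emit FPU instructions such as fadd/dldr here ...
//   } else {
//     // ... emit an integer fallback sequence ...
//   }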
564
565 typedef uint16_t Instr;
566
567 // Use a target-specific value instead of kZapValue
568 const int kSH4ZapValue = 0xbadbaffe;
569
570
571 class Assembler : public AssemblerBase {
572 public:
573 // Create an assembler. Instructions and relocation information are emitted
574 // into a buffer, with the instructions starting from the beginning and the
575 // relocation information starting from the end of the buffer. See CodeDesc
576 // for a detailed comment on the layout (globals.h).
577 //
578 // If the provided buffer is NULL, the assembler allocates and grows its own
579 // buffer, and buffer_size determines the initial buffer size. The buffer is
580 // owned by the assembler and deallocated upon destruction of the assembler.
581 //
582 // If the provided buffer is not NULL, the assembler uses the provided buffer
583 // for code generation and assumes its size to be buffer_size. If the buffer
584 // is too small, a fatal error occurs. No deallocation of the buffer is done
585 // upon destruction of the assembler.
586 Assembler(Isolate* isolate, void* buffer, int buffer_size);
587 ~Assembler();
588
589 // Overrides the default provided by FLAG_debug_code.
590 void set_emit_debug_code(bool value) { emit_debug_code_ = value; }
591
592 // Avoids using instructions that vary in size in unpredictable ways between
593 // the snapshot and the running VM. This is needed by the full compiler so
594 // that it can recompile code with debug support and fix the PC.
595 void set_predictable_code_size(bool value) { predictable_code_size_ = value; }
596
597 // GetCode emits any pending (non-emitted) code and fills the descriptor
598 // desc. GetCode() is idempotent; it returns the same result if no other
599 // Assembler functions are invoked in between GetCode() calls.
600 void GetCode(CodeDesc* desc);
601
602 // Label operations & relative jumps (PPUM Appendix D)
603 //
604 // Takes a branch opcode (cc) and a label (L) and generates
605 // either a backward branch or a forward branch and links it
606 // to the label fixup chain. Usage:
607 //
608 // Label L; // unbound label
609 // j(cc, &L); // forward branch to unbound label
610 // bind(&L); // bind label to the current pc
611 // j(cc, &L); // backward branch to bound label
612 // bind(&L); // illegal: a label may be bound only once
613 //
614 // Note: The same Label can be used for forward and backward branches
615 // but it may be bound only once.
616
617 // binds an unbound label L to the current code position
618 void bind(Label* L);
619
620 // Puts a label's target address at the given position.
621 // This function is only used with unbound labels.
622 void load_label(Label* L);
623
624 // Return the address in the constant pool of the code target address used by
625 // the branch/call instruction at pc, or the object in a mov.
626 INLINE(static Address target_pointer_address_at(Address pc));
627
628 // Read/Modify the pointer in the branch/call/move instruction at pc.
629 INLINE(static Address target_pointer_at(Address pc));
630 INLINE(static void set_target_pointer_at(Address pc, Address target));
631
632 // Read/Modify the code target address in the branch/call instruction at pc.
633 INLINE(static Address target_address_at(Address pc));
634 INLINE(static void set_target_address_at(Address pc, Address target));
635
636 // Return the code target address at a call site from the return address
637 // of that call in the instruction stream.
638 INLINE(static Address target_address_from_return_address(Address pc));
639
640 // Given the address of the beginning of a call, return the address
641 // in the instruction stream that the call will return from.
642 INLINE(static Address return_address_from_call_start(Address pc));
643
644 // This sets the branch destination (which is in the constant pool on ARM).
645 // This is for calls and branches within generated code.
646 inline static void deserialization_set_special_target_at(
647 Address constant_pool_entry, Address target) {
648 // When serializing, the object visitor resolves the target_address_address
649 // and stops processing there to recursively serialize another object (or
650 // a reference to it).
651 // Thus when deserializing, the rewriting of targets directly uses the
652 // constant pool address. (same as on ARM)
653 Memory::Address_at(constant_pool_entry) = target;
654 }
655
656 // This sets the branch destination (which is in the constant pool on SH4).
657 // This is for calls and branches to runtime code.
658 inline static void set_external_target_at(Address constant_pool_entry,
659 Address target) {
660 // Same as above; this function is currently not used anywhere.
661 UNREACHABLE();
662 deserialization_set_special_target_at(constant_pool_entry, target);
663 }
664
665 // Here we are patching the address in the constant pool, not the actual call
666 // instruction. The address in the constant pool is the same size as a
667 // pointer.
668 static const int kSpecialTargetSize = kPointerSize;
669
670 // Size of an instruction.
671 static const int kInstrSize = sizeof(Instr);
672
673 // Distance between the instruction referring to the address of the call
674 // target and the return address.
675 // The call sequence is:
676 // mov.l const_pool, rx @ call sequence start address
677 // nop
678 // bra skip
679 // nop
680 // const_pool:
681 // .long call_address
682 // skip:
683 // jsr rx
684 // nop
685 // ... @ return address (put in pr by the jsr)
686 static const int kCallTargetAddressOffset = 2 * kInstrSize + 4 +
687 4 * kInstrSize;
688
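// With kInstrSize == 2 this constant adds up as expected from the sequence
// above: four 2-byte instructions (mov.l, nop, bra, nop) before the pool,
// the 4-byte constant itself, and two 2-byte instructions (jsr, nop) after
// it, i.e. 8 + 4 + 4 = 16 bytes from the call sequence start to the return
// address.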
689 // Distance between start of patched return sequence and the emitted address
690 // to jump to.
691 static const int kPatchReturnSequenceAddressOffset = 0 * kInstrSize;
692
693 // Distance between start of patched debug break slot and the emitted address
694 // to jump to.
695 static const int kPatchDebugBreakSlotAddressOffset = 0 * kInstrSize;
696
697 // The debug break slot must be able to contain a call instruction.
698 static const int kDebugBreakSlotLength = kInstrSize;
699
700
701 // branch type
702 enum branch_type {
703 branch_true = 1 << 1,
704 branch_false = 1 << 2,
705 branch_unconditional = 1 << 3,
706 branch_subroutine = 1 << 4
707 };
708
709 static const RegList kAllRegisters = 0xffffffff;
710
711 // Negative pc_offset value (aligned value)
712 static const int kEndOfChain = -4;
713
714
715 // ---------------------------------------------------------------------------
716 // Wrappers around the code generators
717 void add(Register Rd, const Operand& imm, Register rtmp = sh4_rtmp);
718 void add(Register Rd, Register Rs, const Operand& imm,
719 Register rtmp = sh4_rtmp);
720 void add(Register Rd, Register Rs, Register Rt);
721
722 void bt(Label* L, Register rtmp = sh4_rtmp)
723 { ASSERT(!L->is_near_linked());
724 branch(L, rtmp, branch_true); }
725 void bt_near(Label* L, Register rtmp = sh4_rtmp)
726 { ASSERT(L->is_bound() || L->is_unused() || L->is_near_linked());
727 branch(L, rtmp, branch_true, Label::kNear); }
728
729 void bf(Label* L, Register rtmp = sh4_rtmp)
730 { ASSERT(!L->is_near_linked());
731 branch(L, rtmp, branch_false); }
732 void bf_near(Label* L, Register rtmp = sh4_rtmp)
733 { ASSERT(L->is_bound() || L->is_unused() || L->is_near_linked());
734 branch(L, rtmp, branch_false, Label::kNear); }
735
736 void jmp(Label* L, Register rtmp = sh4_rtmp)
737 { ASSERT(!L->is_near_linked());
738 branch(L, rtmp, branch_unconditional); }
739 void jmp_near(Label* L, Register rtmp = sh4_rtmp)
740 { ASSERT(L->is_bound() || L->is_unused() || L->is_near_linked());
741 branch(L, rtmp, branch_unconditional, Label::kNear); }
742
743 void b(Label* L, Register rtmp = sh4_rtmp)
744 { jmp(L, rtmp); }
745 void b_near(Label* L, Register rtmp = sh4_rtmp)
746 { jmp_near(L, rtmp); }
747
748 void b(Condition cond, Label* L, Label::Distance distance = Label::kFar,
749 Register rtmp = sh4_rtmp) {
750 ASSERT((distance == Label::kNear &&
751 (L->is_bound() || L->is_unused() || L->is_near_linked())) ||
752 (distance == Label::kFar && (!L->is_near_linked())));
753 ASSERT(cond == ne || cond == eq);
754 branch(L, rtmp, cond == eq ? branch_true: branch_false, distance);
755 }
756
757 void jsr(Label* L, Register rtmp = sh4_rtmp)
758 { branch(L, rtmp, branch_subroutine); }
759
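// A minimal sketch of the label/branch wrappers in use (hypothetical;
// "masm" is an assumed Assembler*; cmpeq sets the T bit, which bt() tests):
//
//   Label skip;
//   masm->cmpeq(r0, r1);            // T := (r0 == r1)
//   masm->bt(&skip);                // branch if the T bit is set
//   masm->add(r0, r0, Operand(1));  // only executed when r0 != r1
//   masm->bind(&skip);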
760
761 // Check the code size generated from label to here.
762 int SizeOfCodeGeneratedSince(Label* label) {
763 return pc_offset() - label->pos();
764 }
765
766 // Check the number of instructions generated from label to here.
767 int InstructionsGeneratedSince(Label* label) {
768 return SizeOfCodeGeneratedSince(label) / kInstrSize;
769 }
770
771 void jmp(Register Rd);
772 void jsr(Register Rd);
773 void jmp(Handle<Code> code, RelocInfo::Mode rmode, Register rtmp = sh4_rtmp);
774 void jsr(Handle<Code> code, RelocInfo::Mode rmode, Register rtmp = sh4_rtmp);
775
776 void cmpeq(Register Rd, Register Rs) { ASSERT(!Rs.is(Rd));
777 cmpeq_(Rs, Rd); }
778 void cmpgt(Register Rd, Register Rs) { ASSERT(!Rs.is(Rd));
779 cmpgt_(Rs, Rd); } // is Rd > Rs ?
780 void cmpge(Register Rd, Register Rs) { ASSERT(!Rs.is(Rd));
781 cmpge_(Rs, Rd); } // is Rd >= Rs ?
782 void cmphi(Register Rd, Register Rs) { ASSERT(!Rs.is(Rd));
783 cmphi_(Rs, Rd); } // is Rd u> Rs ?
784 void cmphs(Register Rd, Register Rs) { ASSERT(!Rs.is(Rd));
785 cmphs_(Rs, Rd); } // is Rd u>= Rs ?
786
787 inline void cmpeq(Register Rd, const Operand& imm,
788 Register rtmp = sh4_rtmp);
789 inline void cmpgt(Register Rd, const Operand& imm,
790 Register rtmp = sh4_rtmp);
791 inline void cmpge(Register Rd, const Operand& imm,
792 Register rtmp = sh4_rtmp);
793 inline void cmphi(Register Rd, const Operand& imm,
794 Register rtmp = sh4_rtmp);
795 inline void cmphs(Register Rd, const Operand& imm,
796 Register rtmp = sh4_rtmp);
797
798 // Aliases for cmpeq
799 void cmp(Register Rd, Register Rs) { cmpeq_(Rs, Rd); }
800 void cmp(Register Rd, const Operand& imm, Register rtmp = sh4_rtmp)
801 { cmpeq(Rd, imm, rtmp); }
802
803 inline void cmp(Condition *cond, Register Rd, Register Rs);
804 void cmp(Condition *cond, Register Rd, const Operand& imm,
805 Register rtmp = sh4_rtmp) {
806 mov(rtmp, imm);
807 cmp(cond, Rd, rtmp);
808 }
809
810 void cmpeq_r0_unsigned_imm(int imm) {
811 ASSERT(is_uint8(imm));
812 cmpeq_imm_R0_((int8_t)imm); }
813 bool fits_cmp_unsigned_imm(int imm) { return is_uint8(imm); }
814
815 void dt(Register Rd) { dt_(Rd); }
816
817 // FPU support
818 // Load float
819 void fldr(SwVfpRegister dst, const MemOperand& src, Register rtmp = sh4_rtmp);
820 // Load double
821 void dldr(DwVfpRegister dst, const MemOperand& src, Register rtmp = sh4_rtmp);
822 // Store float
823 void fstr(SwVfpRegister src, const MemOperand& dst, Register rtmp = sh4_rtmp);
824 // Store double
825 void dstr(DwVfpRegister src, const MemOperand& dst, Register rtmp = sh4_rtmp);
826
827 // Double conversion from register: Dd = (double)Rs
828 void dfloat(DwVfpRegister Dd, Register Rs);
829 // Double conversion from int operand: Dd = (double)imm
830 void dfloat(DwVfpRegister Dd, const Operand &imm, Register rtmp = sh4_rtmp);
831
832 // Double conversion from unsigned int register: Dd = (double)Rs(unsigned)
833 void dufloat(DwVfpRegister Dd, Register Rs, DwVfpRegister drtmp,
834 Register rtmp);
835
836 // Integer conversion from double: Rd = (int)Ds
837 void idouble(Register Rd, DwVfpRegister Ds, Register fpscr = no_reg);
838 // Integer conversion from single: Rd = (int)Frs
839 void isingle(Register Rd, SwVfpRegister Frs);
840
841 // Conversion from single to double
842 void fcnvsd(DwVfpRegister Dd, SwVfpRegister Fs) { flds_FPUL_(Fs);
843 fcnvsd_FPUL_double_(Dd); }
844 // Conversion from double to single
845 void fcnvds(SwVfpRegister Fd, DwVfpRegister Ds) { fcnvds_double_FPUL_(Ds);
846 fsts_FPUL_(Fd); }
847
848 // Double comparisons
849 void dcmpeq(DwVfpRegister Dd, DwVfpRegister Ds) { fcmpeq_double_(Ds, Dd); }
850 void dcmpgt(DwVfpRegister Dd, DwVfpRegister Ds) { fcmpgt_double_(Ds, Dd); }
851
852 // FPU operations
853 void fadd(DwVfpRegister Dd, DwVfpRegister Ds) { fadd_double_(Ds, Dd); }
854 void fsub(DwVfpRegister Dd, DwVfpRegister Ds) { fsub_double_(Ds, Dd); }
855 void fmul(DwVfpRegister Dd, DwVfpRegister Ds) { fmul_double_(Ds, Dd); }
856 void fdiv(DwVfpRegister Dd, DwVfpRegister Ds) { fdiv_double_(Ds, Dd); }
857
858 // Read/patch instructions
859 static Instr instr_at(byte* pc)
860 { return *reinterpret_cast<Instr*>(pc); }
861 static void instr_at_put(byte* pc, Instr instr)
862 { *reinterpret_cast<Instr*>(pc) = instr; }
863 static Condition GetCondition(Instr instr);
864 static bool IsBranch(Instr instr);
865 static Register GetRn(Instr instr);
866 static Register GetRm(Instr instr);
867 static bool IsCmpRegister(Instr instr);
868 static bool IsCmpImmediate(Instr instr);
869 static Register GetCmpImmediateRegister(Instr instr);
870 static int GetCmpImmediateAsUnsigned(Instr instr);
871 static bool IsMovImmediate(Instr instr);
872 static bool IsMovlPcRelative(Instr instr)
873 { return (instr & (0xf << 12)) == 0xd000; }
874
875 void sub(Register Rd, Register Rs, const Operand& imm,
876 Register rtmp = sh4_rtmp);
877 void sub(Register Rd, Register Rs, Register Rt);
878
879 // Reverse sub: imm - Rs
880 inline void rsb(Register Rd, Register Rs, const Operand& imm,
881 Register rtmp = sh4_rtmp);
882 // Reverse sub: Rt - Rs
883 inline void rsb(Register Rd, Register Rs, Register Rt);
884 inline void rsb(Register Rd, Register Rs, const Operand& imm,
885 Condition cond, Register rtmp = sh4_rtmp);
886
887 void addv(Register Rd, Register Rs, Register Rt);
888 void addv(Register Rd, Register Rs, const Operand& imm,
889 Register rtmp = sh4_rtmp);
890 void subv(Register Rd, Register Rs, Register Rt, Register rtmp = sh4_rtmp);
891
892 void addc(Register Rd, Register Rs, Register Rt);
893 void subc(Register Rd, Register Rs, Register Rt, Register rtmp = sh4_rtmp);
894
895 // Note for shifts with the shift amount in a register:
896 // the default behavior matches the ARM behavior, which flushes the result
897 // to zero when the shift amount is >= 32. The implementation is quite slow
898 // in this case, as SH4 shifts do not flush.
899 // When the register shift amount is known to be in the range [0,31], the
900 // shift methods can be called with the in_range parameter set to true; this
901 // generates faster code.
902 // In addition, for lsl (but not asr or lsr) the SH4 semantics is to wrap
903 // (i.e. only the least-significant 5 bits of the amount are extracted before
904 // doing the shift), so in this case the boolean parameter is called wrap and
905 // this semantics can be used on purpose whatever the shift amount.
906
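// For example (hypothetical snippet illustrating the parameters above;
// "masm" is an assumed Assembler*):
//
//   masm->lsl(r0, r1, r2);        // ARM-like: flushes when r2 >= 32
//   masm->lsl(r0, r1, r2, true);  // wrap: only the low 5 bits of r2 are used
//   masm->asr(r0, r1, r2, true);  // in_range: caller guarantees r2 in [0,31]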
907 // arithmetic shift right
908 void asr(Register Rd, Register Rs, Register Rt, bool in_range = false,
909 Register rtmp = sh4_rtmp);
910 void asr(Register Rd, Register Rs, const Operand& imm,
911 Register rtmp = sh4_rtmp);
912 // arithmetic shift left
913 void asl(Register Rd, Register Rs, const Operand& imm,
914 Register rtmp = sh4_rtmp);
915
916 void lsl(Register Rd, Register Rs, const Operand& imm,
917 Register rtmp = sh4_rtmp);
918 void lsl(Register Rd, Register Rs, Register Rt, bool wrap = false,
919 Register rtmp = sh4_rtmp);
920 void lsr(Register Rd, Register Rs, const Operand& imm,
921 Register rtmp = sh4_rtmp);
922 void lsr(Register Rd, Register Rs, Register Rt, bool in_range = false,
923 Register rtmp = sh4_rtmp);
924
925 void land(Register Rd, Register Rs, const Operand& imm,
926 Register rtmp = sh4_rtmp);
927 void land(Register Rd, Register Rs, Register Rt);
928
929 // bit clear
930 void bic(Register Rd, Register Rs, const Operand& imm,
931 Register rtmp = sh4_rtmp)
932 { land(Rd, Rs, Operand(~imm.imm32_), rtmp); }
933 void bic(Register Rd, Register Rs, Register Rt, Register rtmp = sh4_rtmp) {
934 lnot(rtmp, Rt);
935 land(Rd, Rs, rtmp);
936 }
937
938 void lnot(Register Rd, Register Rs) { not_(Rs, Rd); }
939 void mvn(Register Rd, Register Rs) { lnot(Rd, Rs); } // Alias for lnot()
940
941 void lor(Register Rd, Register Rs, const Operand& imm,
942 Register rtmp = sh4_rtmp);
943 void lor(Register Rd, Register Rs, Register Rt);
944 void lor(Register Rd, Register Rs, const Operand& imm, Condition cond,
945 Register rtmp = sh4_rtmp);
946 void lor(Register Rd, Register Rs, Register Rt, Condition cond);
947
948 void lxor(Register Rd, Register Rs, const Operand& imm,
949 Register rtmp = sh4_rtmp);
950 void lxor(Register Rd, Register Rs, Register Rt);
951
952 // Aliases for lxor
953 void eor(Register Rd, Register Rs, const Operand& imm,
954 Register rtmp = sh4_rtmp) { lxor(Rd, Rs, imm, rtmp); }
955 void eor(Register Rd, Register Rs, Register Rt) { lxor(Rd, Rs, Rt); }
956
957 // Aliases for lor
958 void orr(Register Rd, Register Rs, const Operand& imm,
959 Register rtmp = sh4_rtmp) { lor(Rd, Rs, imm, rtmp); }
960 void orr(Register Rd, Register Rs, Register Rt) { lor(Rd, Rs, Rt); }
961 void orr(Register Rd, Register Rs, const Operand& imm,
962 Condition cond, Register rtmp = sh4_rtmp)
963 { lor(Rd, Rs, imm, cond, rtmp); }
964 void orr(Register Rd, Register Rs, Register Rt, Condition cond)
965 { lor(Rd, Rs, Rt, cond); }
966
967 void tst(Register Rd, Register Rs) { tst_(Rs, Rd); }
968 void tst(Register Rd, const Operand& imm, Register rtmp = sh4_rtmp);
969
970 void teq(Register Rd, const Operand& imm, Register rtmp = sh4_rtmp) {
971 lxor(rtmp, Rd, imm);
972 tst(rtmp, rtmp);
973 }
974 void teq(Register Rd, Register Rs, Register rtmp = sh4_rtmp) {
975 lxor(rtmp, Rd, Rs);
976 tst(rtmp, rtmp);
977 }
978
979 // Moves and conditional moves.
980 // This one allows pr as src or dst.
981 void mov(Register Rd, Register Rs, Condition cond = al);
982 void mov(Register Rd, const Operand& src, bool force = false);
983 void mov(Register Rd, const Operand& imm, Condition cond);
984
985 // load op.
986 void mov(Register Rd, const MemOperand& src, Register rtmp = sh4_rtmp);
987 // unsigned 8 bit load op.
988 void movb(Register Rd, const MemOperand& src, Register rtmp = sh4_rtmp);
989 // unsigned 16 bit load op.
990 void movw(Register Rd, const MemOperand& src, Register rtmp = sh4_rtmp);
991 // store op.
992 void mov(const MemOperand& dst, Register Rd, Register rtmp = sh4_rtmp);
993 // store 8 bits op.
994 void movb(const MemOperand& dst, Register Rd, Register rtmp = sh4_rtmp);
995 // store 16 bits op.
996 void movw(const MemOperand& dst, Register Rd, Register rtmp = sh4_rtmp);
997
998 void movd(DwVfpRegister Dd, Register Rs1, Register Rs2);
999 void movd(Register Rd1, Register Rd2, DwVfpRegister Ds);
1000
1001 inline void ldr(Register Rd, const MemOperand& src,
1002 Register rtmp = sh4_rtmp);
1003 void ldrb(Register Rd, const MemOperand& src, Register rtmp = sh4_rtmp)
1004 { movb(Rd, src, rtmp); }
1005 void ldrh(Register Rd, const MemOperand& src, Register rtmp = sh4_rtmp)
1006 { movw(Rd, src, rtmp); }
1007 // signed 8 bit load op.
1008 void ldrsb(Register Rd, const MemOperand& src, Register rtmp = sh4_rtmp);
1009 // signed 16 bit load op.
1010 void ldrsh(Register Rd, const MemOperand& src, Register rtmp = sh4_rtmp);
1011
1012 inline void str(Register Rs, const MemOperand& dst,
1013 Register rtmp = sh4_rtmp);
1014 void strh(Register Rs, const MemOperand& dst, Register rtmp = sh4_rtmp)
1015 { movw(dst, Rs, rtmp); }
1016 void strb(Register Rs, const MemOperand& dst, Register rtmp = sh4_rtmp)
1017 { movb(dst, Rs, rtmp); }
1018
1019 void ldrpr(Register Rd) { lds_PR_(Rd); }
1020 void strpr(Register Rs) { sts_PR_(Rs); }
1021
1022 void ldr_fpscr(Register Rs) { lds_FPSCR_(Rs); }
1023 void str_fpscr(Register Rd) { sts_FPSCR_(Rd); }
1024
1025 void mul(Register Rd, Register Rs, Register Rt);
1026 void dmuls(Register dstL, Register dstH, Register src1, Register src2);
1027
1028 void nop() { nop_(); }
1029
1030 void push(Register src);
1031 void push(DwVfpRegister src);
1032 // Push an immediate on the stack, using the rtmp register.
1033 void push(const Operand& op, Register rtmp = sh4_rtmp);
1034 void pushm(RegList src, bool doubles = false);
1035
1036 void pop(Register dst);
1037 void pop() { add(sp, sp, Operand(kPointerSize)); }
1038 void pop(DwVfpRegister dst);
1039 void popm(RegList dst, bool doubles = false);
1040
1041 inline void rts();
1042
1043 // Exception-generating instructions and debugging support
1044 void stop(const char* msg);
1045 void bkpt();
1046
1047 // Align the code
1048 inline int align();
1049 inline int misalign();
1050
1051 // Copy some bytes.
1052 // The count argument is clobbered.
1053 void memcpy(Register dst, Register src, Register count,
1054 Register scratch1, Register scratch2,
1055 Register scratch3, Register scratch4);
1056
1057 // Compare some bytes using a loop
1058 void memcmp(Register left, Register right, Register length,
1059 Register scratch1, Register scratch2, Label *not_equal);
1060
1061 bool predictable_code_size() const { return predictable_code_size_; }
1062
1063 // Insert the smallest number of nop instructions
1064 // possible to align the pc offset to a multiple
1065 // of m. m must be a power of 2 (>= 4).
1066 void Align(int m);
1067
1068 void call(Label* L);
1069
1070 inline void emit(Instr x);
1071
1072 // Mark address of the ExitJSFrame code.
1073 void RecordJSReturn();
1074
1075 // Record the AST id of the CallIC being compiled, so that it can be placed
1076 // in the relocation information.
1077 void SetRecordedAstId(TypeFeedbackId ast_id) {
1078 ASSERT(recorded_ast_id_.IsNone());
1079 recorded_ast_id_ = ast_id;
1080 }
1081
1082 TypeFeedbackId RecordedAstId() {
1083 ASSERT(!recorded_ast_id_.IsNone());
1084 return recorded_ast_id_;
1085 }
1086
1087 void ClearRecordedAstId() { recorded_ast_id_ = TypeFeedbackId::None(); }
1088
1089 // Use --code-comments to enable, or pass "force = true" to always
1090 // write a comment.
1091 void RecordComment(const char* msg, bool force = false);
1092
1093 // Writes a single byte or word of data in the code stream. Used for
1094 // inline tables, e.g., jump-tables.
1095 void db(uint8_t data);
1096 void dw(uint16_t data);
1097 void dd(uint32_t data);
1098
1099 int pc_offset() const { return pc_ - buffer_; }
1100
1101 // Return in Rd the value of pc_after + offset,
1102 // where pc_after is the pc after this operation.
1103 // It clobbers pr, which must always be passed in the Pr parameter.
1104 void addpc(Register Rd, int offset, Register Pr);
1105
1106 // Check if there is less than kGap bytes available in the buffer.
1107 // If this is the case, we need to grow the buffer before emitting
1108 // an instruction or relocation information.
1109 inline bool overflow() const { return pc_ >= reloc_info_writer.pos() - kGap; }
1110
1111 // Get the number of bytes available in the buffer.
1112 inline int available_space() const { return reloc_info_writer.pos() - pc_; }
1113
1114 PositionsRecorder* positions_recorder() { return &positions_recorder_; }
1115
1116
1117 protected:
1118 // Relocation for a type-recording IC has the AST id added to it. This
1119 // member variable is a way to pass the information from the call site to
1120 // the relocation info.
1121 TypeFeedbackId recorded_ast_id_;
1122
1123 bool emit_debug_code() const { return emit_debug_code_; }
1124
1125 int buffer_space() const { return reloc_info_writer.pos() - pc_; }
1126
1127
1128 private:
1129 // code generation wrappers
1130 void branch(Label* L, Register rtmp, branch_type type,
1131 Label::Distance distance = Label::kFar);
1132 void branch(int offset, Register rtmp, branch_type type,
1133 Label::Distance distance, bool patched_later);
1134 void conditional_branch(int offset, Register rtmp, Label::Distance distance,
1135 bool patched_later, bool type);
1136 void jmp(int offset, Register rtmp, Label::Distance distance,
1137 bool patched_later);
1138 void jsr(int offset, Register rtmp, bool patched_later);
1139
1140 void writeBranchTag(int nop_count, branch_type type);
1141 void patchBranchOffset(int fixup_pos, uint16_t *p_pos, int is_near_linked);
1142
1143 // The bound position; before this we cannot do instruction elimination.
1144 int last_bound_pos_;
1145
1146 // Code emission
1147 inline void CheckBuffer();
1148 void GrowBuffer();
1149
1150 void next(Label *L, Label::Distance distance = Label::kFar);
1151
1152 // record reloc info for current pc_
1153 void RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data = 0);
1154
1155 friend class CodePatcher;
1156 friend class EnsureSpace;
1157
1158 // Code buffer:
1159 // The buffer into which code and relocation info are generated.
1160 byte* buffer_;
1161 int buffer_size_;
1162 // True if the assembler owns the buffer, false if buffer is external.
1163 bool own_buffer_;
1164
1165 // Code generation
1166 // The relocation writer's position is at least kGap bytes below the end of
1167 // the generated instructions. This is so that multi-instruction sequences do
1168 // not have to check for overflow. The same is true for writes of large
1169 // relocation info entries.
1170 static const int kGap = 32;
1171 byte* pc_; // the program counter; moves forward
1172 RelocInfoWriter reloc_info_writer;
1173
1174 PositionsRecorder positions_recorder_;
1175
1176 bool emit_debug_code_;
1177 bool predictable_code_size_;
1178
1179 friend class PositionsRecorder;
1180
1181
1182 // ---------------------------------------------------------------------------
1183 // low level code generation (opcodes)
1184 #include "opcodes-sh4.h"
1185 };
1186
1187
1188 // Helper class that ensures that there is enough space for generating
1189 // instructions and relocation information. The constructor makes
1190 // sure that there is enough space and (in debug mode) the destructor
1191 // checks that we did not generate too much.
1192 class EnsureSpace BASE_EMBEDDED {
1193 public:
1194 explicit EnsureSpace(Assembler* assembler) : assembler_(assembler) {
1195 if (assembler_->overflow()) assembler_->GrowBuffer();
1196 #ifdef DEBUG
1197 space_before_ = assembler_->available_space();
1198 #endif
1199 }
1200
1201 #ifdef DEBUG
1202 ~EnsureSpace() {
1203 int bytes_generated = space_before_ - assembler_->available_space();
1204 ASSERT(bytes_generated < assembler_->kGap);
1205 }
1206 #endif
1207
1208 private:
1209 Assembler* assembler_;
1210 #ifdef DEBUG
1211 int space_before_;
1212 #endif
1213 };
1214
1215 } } // namespace v8::internal
1216
1217 #endif // V8_SH4_ASSEMBLER_SH4_H_