Chromium Code Reviews

Side by Side Diff: src/DartARM32/assembler_arm.h

Issue 1394613002: Create local copy of Dart assembler code. (Closed)
Base URL: https://chromium.googlesource.com/native_client/pnacl-subzero.git@master
Patch Set: Blacklist ARM32 Dart files. Created 5 years, 2 months ago
1 // Copyright (c) 2013, the Dart project authors. Please see the AUTHORS file
2 // for details. All rights reserved. Use of this source code is governed by a
3 // BSD-style license that can be found in the LICENSE file.
4 //
5 // This is forked from Dart revision df52deea9f25690eb8b66c5995da92b70f7ac1fe
6 // Please update the (git) revision if we merge changes from Dart.
7 // https://code.google.com/p/dart/wiki/GettingTheSource
8
9 #ifndef VM_ASSEMBLER_ARM_H_
10 #define VM_ASSEMBLER_ARM_H_
11
12 #ifndef VM_ASSEMBLER_H_
13 #error Do not include assembler_arm.h directly; use assembler.h instead.
14 #endif
15
16 #include "platform/assert.h"
17 #include "platform/utils.h"
18 #include "vm/constants_arm.h"
19 #include "vm/cpu.h"
20 #include "vm/hash_map.h"
21 #include "vm/object.h"
22 #include "vm/simulator.h"
23
24 namespace dart {
25
26 // Forward declarations.
27 class RuntimeEntry;
28 class StubEntry;
29
30
31 // Instruction encoding bits.
32 enum {
33 H = 1 << 5, // halfword (or byte)
34 L = 1 << 20, // load (or store)
35 S = 1 << 20, // set condition code (or leave unchanged)
36 W = 1 << 21, // writeback base register (or leave unchanged)
37 A = 1 << 21, // accumulate in multiply instruction (or not)
38 B = 1 << 22, // unsigned byte (or word)
39 D = 1 << 22, // high/lo bit of start of s/d register range
40 N = 1 << 22, // long (or short)
41 U = 1 << 23, // positive (or negative) offset/index
42 P = 1 << 24, // offset/pre-indexed addressing (or post-indexed addressing)
43 I = 1 << 25, // immediate shifter operand (or not)
44
45 B0 = 1,
46 B1 = 1 << 1,
47 B2 = 1 << 2,
48 B3 = 1 << 3,
49 B4 = 1 << 4,
50 B5 = 1 << 5,
51 B6 = 1 << 6,
52 B7 = 1 << 7,
53 B8 = 1 << 8,
54 B9 = 1 << 9,
55 B10 = 1 << 10,
56 B11 = 1 << 11,
57 B12 = 1 << 12,
58 B16 = 1 << 16,
59 B17 = 1 << 17,
60 B18 = 1 << 18,
61 B19 = 1 << 19,
62 B20 = 1 << 20,
63 B21 = 1 << 21,
64 B22 = 1 << 22,
65 B23 = 1 << 23,
66 B24 = 1 << 24,
67 B25 = 1 << 25,
68 B26 = 1 << 26,
69 B27 = 1 << 27,
70 };
71
72
73 class Label : public ValueObject {
74 public:
75 Label() : position_(0) { }
76
77 ~Label() {
78 // Assert if label is being destroyed with unresolved branches pending.
79 ASSERT(!IsLinked());
80 }
81
82 // Returns the position for bound and linked labels. Cannot be used
83 // for unused labels.
84 intptr_t Position() const {
85 ASSERT(!IsUnused());
86 return IsBound() ? -position_ - kWordSize : position_ - kWordSize;
87 }
88
89 bool IsBound() const { return position_ < 0; }
90 bool IsUnused() const { return position_ == 0; }
91 bool IsLinked() const { return position_ > 0; }
92
93 private:
94 intptr_t position_;
95
96 void Reinitialize() {
97 position_ = 0;
98 }
99
100 void BindTo(intptr_t position) {
101 ASSERT(!IsBound());
102 position_ = -position - kWordSize;
103 ASSERT(IsBound());
104 }
105
106 void LinkTo(intptr_t position) {
107 ASSERT(!IsBound());
108 position_ = position + kWordSize;
109 ASSERT(IsLinked());
110 }
111
112 friend class Assembler;
113 DISALLOW_COPY_AND_ASSIGN(Label);
114 };
115
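// Illustrative example (editorial addition, not from the forked Dart source):
// how the single position_ field encodes the three label states, assuming
// kWordSize == 4 on ARM32.
//
//   Label l;      // position_ == 0                    -> IsUnused()
//   LinkTo(8)  stores  8 + kWordSize == 12  (> 0)      -> IsLinked(); Position() == 12 - 4 == 8
//   BindTo(8)  stores -8 - kWordSize == -12 (< 0)      -> IsBound();  Position() == -(-12) - 4 == 8
//
// The kWordSize bias keeps a label at position 0 distinguishable from the
// unused state.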
116
117 // Encodes Addressing Mode 1 - Data-processing operands.
118 class Operand : public ValueObject {
119 public:
120 // Data-processing operands - Uninitialized.
121 Operand() : type_(-1), encoding_(-1) { }
122
123 // Data-processing operands - Copy constructor.
124 Operand(const Operand& other)
125 : ValueObject(), type_(other.type_), encoding_(other.encoding_) { }
126
127 // Data-processing operands - Assignment operator.
128 Operand& operator=(const Operand& other) {
129 type_ = other.type_;
130 encoding_ = other.encoding_;
131 return *this;
132 }
133
134 // Data-processing operands - Immediate.
135 explicit Operand(uint32_t immediate) {
136 ASSERT(immediate < (1 << kImmed8Bits));
137 type_ = 1;
138 encoding_ = immediate;
139 }
140
141 // Data-processing operands - Rotated immediate.
142 Operand(uint32_t rotate, uint32_t immed8) {
143 ASSERT((rotate < (1 << kRotateBits)) && (immed8 < (1 << kImmed8Bits)));
144 type_ = 1;
145 encoding_ = (rotate << kRotateShift) | (immed8 << kImmed8Shift);
146 }
147
148 // Data-processing operands - Register.
149 explicit Operand(Register rm) {
150 type_ = 0;
151 encoding_ = static_cast<uint32_t>(rm);
152 }
153
154 // Data-processing operands - Logical shift/rotate by immediate.
155 Operand(Register rm, Shift shift, uint32_t shift_imm) {
156 ASSERT(shift_imm < (1 << kShiftImmBits));
157 type_ = 0;
158 encoding_ = shift_imm << kShiftImmShift |
159 static_cast<uint32_t>(shift) << kShiftShift |
160 static_cast<uint32_t>(rm);
161 }
162
163 // Data-processing operands - Logical shift/rotate by register.
164 Operand(Register rm, Shift shift, Register rs) {
165 type_ = 0;
166 encoding_ = static_cast<uint32_t>(rs) << kShiftRegisterShift |
167 static_cast<uint32_t>(shift) << kShiftShift | (1 << 4) |
168 static_cast<uint32_t>(rm);
169 }
170
171 static bool CanHold(uint32_t immediate, Operand* o) {
172 // Avoid the more expensive test for frequent small immediate values.
173 if (immediate < (1 << kImmed8Bits)) {
174 o->type_ = 1;
175 o->encoding_ = (0 << kRotateShift) | (immediate << kImmed8Shift);
176 return true;
177 }
178 // Note that immediate must be unsigned for the test to work correctly.
179 for (int rot = 0; rot < 16; rot++) {
180 uint32_t imm8 = (immediate << 2*rot) | (immediate >> (32 - 2*rot));
181 if (imm8 < (1 << kImmed8Bits)) {
182 o->type_ = 1;
183 o->encoding_ = (rot << kRotateShift) | (imm8 << kImmed8Shift);
184 return true;
185 }
186 }
187 return false;
188 }
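// Illustrative example (editorial addition, not from the forked Dart source):
// the loop above rotates 'immediate' left by even amounts until the result
// fits in 8 bits; the hardware later reconstructs the value by rotating imm8
// right by 2 * rotate (the ARM rotated-immediate form). For instance:
//
//   Operand op;
//   Operand::CanHold(0x000000FFu, &op);  // true: rotate == 0,  imm8 == 0xFF
//   Operand::CanHold(0x0003F000u, &op);  // true: rotate == 10, imm8 == 0x3F
//   Operand::CanHold(0x00000101u, &op);  // false: the set bits cannot be
//                                        // covered by any rotated 8-bit window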
189
190 private:
191 bool is_valid() const { return (type_ == 0) || (type_ == 1); }
192
193 uint32_t type() const {
194 ASSERT(is_valid());
195 return type_;
196 }
197
198 uint32_t encoding() const {
199 ASSERT(is_valid());
200 return encoding_;
201 }
202
203 uint32_t type_; // Encodes the type field (bits 27-25) in the instruction.
204 uint32_t encoding_;
205
206 friend class Assembler;
207 friend class Address;
208 };
209
210
211 enum OperandSize {
212 kByte,
213 kUnsignedByte,
214 kHalfword,
215 kUnsignedHalfword,
216 kWord,
217 kUnsignedWord,
218 kWordPair,
219 kSWord,
220 kDWord,
221 kRegList,
222 };
223
224
225 // Load/store multiple addressing mode.
226 enum BlockAddressMode {
227 // bit encoding P U W
228 DA = (0|0|0) << 21, // decrement after
229 IA = (0|4|0) << 21, // increment after
230 DB = (8|0|0) << 21, // decrement before
231 IB = (8|4|0) << 21, // increment before
232 DA_W = (0|0|1) << 21, // decrement after with writeback to base
233 IA_W = (0|4|1) << 21, // increment after with writeback to base
234 DB_W = (8|0|1) << 21, // decrement before with writeback to base
235 IB_W = (8|4|1) << 21 // increment before with writeback to base
236 };
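// Editorial note (illustration only): the 8/4/1 values above land exactly in
// the P (bit 24), U (bit 23) and W (bit 21) positions of the encoding enum
// earlier in this file, e.g.
//   IA_W == (B23 | B21)  // increment after + writeback sets U and W
//   DB   ==  B24         // decrement before sets only P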
237
238
239 class Address : public ValueObject {
240 public:
241 enum OffsetKind {
242 Immediate,
243 IndexRegister,
244 ScaledIndexRegister,
245 };
246
247 // Memory operand addressing mode
248 enum Mode {
249 kModeMask = (8|4|1) << 21,
250 // bit encoding P U W
251 Offset = (8|4|0) << 21, // offset (w/o writeback to base)
252 PreIndex = (8|4|1) << 21, // pre-indexed addressing with writeback
253 PostIndex = (0|4|0) << 21, // post-indexed addressing with writeback
254 NegOffset = (8|0|0) << 21, // negative offset (w/o writeback to base)
255 NegPreIndex = (8|0|1) << 21, // negative pre-indexed with writeback
256 NegPostIndex = (0|0|0) << 21 // negative post-indexed with writeback
257 };
258
259 Address(const Address& other)
260 : ValueObject(), encoding_(other.encoding_), kind_(other.kind_) {
261 }
262
263 Address& operator=(const Address& other) {
264 encoding_ = other.encoding_;
265 kind_ = other.kind_;
266 return *this;
267 }
268
269 bool Equals(const Address& other) const {
270 return (encoding_ == other.encoding_) && (kind_ == other.kind_);
271 }
272
273 explicit Address(Register rn, int32_t offset = 0, Mode am = Offset) {
274 ASSERT(Utils::IsAbsoluteUint(12, offset));
275 kind_ = Immediate;
276 if (offset < 0) {
277 encoding_ = (am ^ (1 << kUShift)) | -offset; // Flip U to adjust sign.
278 } else {
279 encoding_ = am | offset;
280 }
281 encoding_ |= static_cast<uint32_t>(rn) << kRnShift;
282 }
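// Illustrative example (editorial addition, not from the forked Dart source),
// assuming kUShift == 23 so that (1 << kUShift) is the U bit defined earlier:
//
//   Address(R0, 16);   // encoding_ == Offset    | 16 | (R0 << kRnShift)
//   Address(R0, -16);  // encoding_ == NegOffset | 16 | (R0 << kRnShift),
//                      // because flipping U in Offset yields NegOffset and
//                      // the magnitude 16 is stored instead of a negative
//                      // immediate.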
283
284 // There is no register offset mode unless Mode is Offset, in which case the
285 // shifted register case below should be used.
286 Address(Register rn, Register r, Mode am);
287
288 Address(Register rn, Register rm,
289 Shift shift = LSL, uint32_t shift_imm = 0, Mode am = Offset) {
290 Operand o(rm, shift, shift_imm);
291
292 if ((shift == LSL) && (shift_imm == 0)) {
293 kind_ = IndexRegister;
294 } else {
295 kind_ = ScaledIndexRegister;
296 }
297 encoding_ = o.encoding() | am | (static_cast<uint32_t>(rn) << kRnShift);
298 }
299
300 // There is no shifted register mode with a register shift.
301 Address(Register rn, Register rm, Shift shift, Register r, Mode am = Offset);
302
303 static OperandSize OperandSizeFor(intptr_t cid);
304
305 static bool CanHoldLoadOffset(OperandSize size,
306 int32_t offset,
307 int32_t* offset_mask);
308 static bool CanHoldStoreOffset(OperandSize size,
309 int32_t offset,
310 int32_t* offset_mask);
311 static bool CanHoldImmediateOffset(bool is_load,
312 intptr_t cid,
313 int64_t offset);
314
315 private:
316 Register rn() const {
317 return Instr::At(reinterpret_cast<uword>(&encoding_))->RnField();
318 }
319
320 Register rm() const {
321 return ((kind() == IndexRegister) || (kind() == ScaledIndexRegister)) ?
322 Instr::At(reinterpret_cast<uword>(&encoding_))->RmField() :
323 kNoRegister;
324 }
325
326 Mode mode() const { return static_cast<Mode>(encoding() & kModeMask); }
327
328 uint32_t encoding() const { return encoding_; }
329
330 // Encoding for addressing mode 3.
331 uint32_t encoding3() const;
332
333 // Encoding for vfp load/store addressing.
334 uint32_t vencoding() const;
335
336 OffsetKind kind() const { return kind_; }
337
338 uint32_t encoding_;
339
340 OffsetKind kind_;
341
342 friend class Assembler;
343 };
344
345
346 class FieldAddress : public Address {
347 public:
348 FieldAddress(Register base, int32_t disp)
349 : Address(base, disp - kHeapObjectTag) { }
350
351 // This addressing mode does not exist.
352 FieldAddress(Register base, Register r);
353
354 FieldAddress(const FieldAddress& other) : Address(other) { }
355
356 FieldAddress& operator=(const FieldAddress& other) {
357 Address::operator=(other);
358 return *this;
359 }
360 };
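// Editorial note (illustration only): assuming kHeapObjectTag == 1, as in the
// Dart VM tagging scheme, FieldAddress(R0, 8) computes R0 + 7, i.e. the field
// at offset 8 of the object referenced by the tagged pointer in R0.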
361
362
363 class Assembler : public ValueObject {
364 public:
365 explicit Assembler(bool use_far_branches = false)
366 : buffer_(),
367 prologue_offset_(-1),
368 use_far_branches_(use_far_branches),
369 comments_(),
370 constant_pool_allowed_(false) { }
371
372 ~Assembler() { }
373
374 void PopRegister(Register r) { Pop(r); }
375
376 void Bind(Label* label);
377 void Jump(Label* label) { b(label); }
378
379 // Misc. functionality
380 intptr_t CodeSize() const { return buffer_.Size(); }
381 intptr_t prologue_offset() const { return prologue_offset_; }
382
383 // Count the fixups that produce a pointer offset, without processing
384 // the fixups. On ARM there are no pointers in code.
385 intptr_t CountPointerOffsets() const { return 0; }
386
387 const ZoneGrowableArray<intptr_t>& GetPointerOffsets() const {
388 ASSERT(buffer_.pointer_offsets().length() == 0); // No pointers in code.
389 return buffer_.pointer_offsets();
390 }
391
392 ObjectPoolWrapper& object_pool_wrapper() { return object_pool_wrapper_; }
393
394 RawObjectPool* MakeObjectPool() {
395 return object_pool_wrapper_.MakeObjectPool();
396 }
397
398 bool use_far_branches() const {
399 return FLAG_use_far_branches || use_far_branches_;
400 }
401
402 #if defined(TESTING) || defined(DEBUG)
403 // Used in unit tests and to ensure predictable verification code size in
404 // FlowGraphCompiler::EmitEdgeCounter.
405 void set_use_far_branches(bool b) {
406 use_far_branches_ = b;
407 }
408 #endif // TESTING || DEBUG
409
410 void FinalizeInstructions(const MemoryRegion& region) {
411 buffer_.FinalizeInstructions(region);
412 }
413
414 // Debugging and bringup support.
415 void Stop(const char* message);
416 void Unimplemented(const char* message);
417 void Untested(const char* message);
418 void Unreachable(const char* message);
419
420 static void InitializeMemoryWithBreakpoints(uword data, intptr_t length);
421
422 void Comment(const char* format, ...) PRINTF_ATTRIBUTE(2, 3);
423 static bool EmittingComments();
424
425 const Code::Comments& GetCodeComments() const;
426
427 static const char* RegisterName(Register reg);
428
429 static const char* FpuRegisterName(FpuRegister reg);
430
431 // Data-processing instructions.
432 void and_(Register rd, Register rn, Operand o, Condition cond = AL);
433
434 void eor(Register rd, Register rn, Operand o, Condition cond = AL);
435
436 void sub(Register rd, Register rn, Operand o, Condition cond = AL);
437 void subs(Register rd, Register rn, Operand o, Condition cond = AL);
438
439 void rsb(Register rd, Register rn, Operand o, Condition cond = AL);
440 void rsbs(Register rd, Register rn, Operand o, Condition cond = AL);
441
442 void add(Register rd, Register rn, Operand o, Condition cond = AL);
443
444 void adds(Register rd, Register rn, Operand o, Condition cond = AL);
445
446 void adc(Register rd, Register rn, Operand o, Condition cond = AL);
447
448 void adcs(Register rd, Register rn, Operand o, Condition cond = AL);
449
450 void sbc(Register rd, Register rn, Operand o, Condition cond = AL);
451
452 void sbcs(Register rd, Register rn, Operand o, Condition cond = AL);
453
454 void rsc(Register rd, Register rn, Operand o, Condition cond = AL);
455
456 void tst(Register rn, Operand o, Condition cond = AL);
457
458 void teq(Register rn, Operand o, Condition cond = AL);
459
460 void cmp(Register rn, Operand o, Condition cond = AL);
461
462 void cmn(Register rn, Operand o, Condition cond = AL);
463
464 void orr(Register rd, Register rn, Operand o, Condition cond = AL);
465 void orrs(Register rd, Register rn, Operand o, Condition cond = AL);
466
467 void mov(Register rd, Operand o, Condition cond = AL);
468 void movs(Register rd, Operand o, Condition cond = AL);
469
470 void bic(Register rd, Register rn, Operand o, Condition cond = AL);
471 void bics(Register rd, Register rn, Operand o, Condition cond = AL);
472
473 void mvn(Register rd, Operand o, Condition cond = AL);
474 void mvns(Register rd, Operand o, Condition cond = AL);
475
476 // Miscellaneous data-processing instructions.
477 void clz(Register rd, Register rm, Condition cond = AL);
478
479 // Multiply instructions.
480 void mul(Register rd, Register rn, Register rm, Condition cond = AL);
481 void muls(Register rd, Register rn, Register rm, Condition cond = AL);
482 void mla(Register rd, Register rn, Register rm, Register ra,
483 Condition cond = AL);
484 void mls(Register rd, Register rn, Register rm, Register ra,
485 Condition cond = AL);
486 void smull(Register rd_lo, Register rd_hi, Register rn, Register rm,
487 Condition cond = AL);
488 void umull(Register rd_lo, Register rd_hi, Register rn, Register rm,
489 Condition cond = AL);
490 void smlal(Register rd_lo, Register rd_hi, Register rn, Register rm,
491 Condition cond = AL);
492 void umlal(Register rd_lo, Register rd_hi, Register rn, Register rm,
493 Condition cond = AL);
494
495 // Emulation of this instruction uses IP and the condition codes. Therefore,
496 // none of the registers can be IP, and the instruction can only be used
497 // unconditionally.
498 void umaal(Register rd_lo, Register rd_hi, Register rn, Register rm);
499
500 // Division instructions.
501 void sdiv(Register rd, Register rn, Register rm, Condition cond = AL);
502 void udiv(Register rd, Register rn, Register rm, Condition cond = AL);
503
504 // Load/store instructions.
505 void ldr(Register rd, Address ad, Condition cond = AL);
506 void str(Register rd, Address ad, Condition cond = AL);
507
508 void ldrb(Register rd, Address ad, Condition cond = AL);
509 void strb(Register rd, Address ad, Condition cond = AL);
510
511 void ldrh(Register rd, Address ad, Condition cond = AL);
512 void strh(Register rd, Address ad, Condition cond = AL);
513
514 void ldrsb(Register rd, Address ad, Condition cond = AL);
515 void ldrsh(Register rd, Address ad, Condition cond = AL);
516
517 // ldrd and strd actually support the full range of addressing modes, but
518 // we don't use them, and we need to split them up into two instructions for
519 // ARMv5TE, so we only support the base + offset mode.
520 void ldrd(Register rd, Register rn, int32_t offset, Condition cond = AL);
521 void strd(Register rd, Register rn, int32_t offset, Condition cond = AL);
522
523 void ldm(BlockAddressMode am, Register base,
524 RegList regs, Condition cond = AL);
525 void stm(BlockAddressMode am, Register base,
526 RegList regs, Condition cond = AL);
527
528 void ldrex(Register rd, Register rn, Condition cond = AL);
529 void strex(Register rd, Register rt, Register rn, Condition cond = AL);
530
531 // Miscellaneous instructions.
532 void clrex();
533 void nop(Condition cond = AL);
534
535 // Note that gdb sets breakpoints using the undefined instruction 0xe7f001f0.
536 void bkpt(uint16_t imm16);
537
538 static int32_t BkptEncoding(uint16_t imm16) {
539 // bkpt requires that the cond field is AL.
540 return (AL << kConditionShift) | B24 | B21 |
541 ((imm16 >> 4) << 8) | B6 | B5 | B4 | (imm16 & 0xf);
542 }
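// Illustrative example (editorial addition, not from the forked Dart source),
// assuming the usual ARM values AL == 0xE and kConditionShift == 28:
//
//   BkptEncoding(0) == 0xE1200070  // cond == AL, B24 | B21, B6 | B5 | B4,
//                                  // imm16 == 0 split around bits 7..4 --
//                                  // i.e. the BKPT #0 instruction.
//
// Note that this differs from the undefined instruction 0xe7f001f0 that gdb
// uses (see the comment above bkpt()).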
543
544 static uword GetBreakInstructionFiller() {
545 return BkptEncoding(0);
546 }
547
548 // Floating point instructions (VFPv3-D16 and VFPv3-D32 profiles).
549 void vmovsr(SRegister sn, Register rt, Condition cond = AL);
550 void vmovrs(Register rt, SRegister sn, Condition cond = AL);
551 void vmovsrr(SRegister sm, Register rt, Register rt2, Condition cond = AL);
552 void vmovrrs(Register rt, Register rt2, SRegister sm, Condition cond = AL);
553 void vmovdrr(DRegister dm, Register rt, Register rt2, Condition cond = AL);
554 void vmovrrd(Register rt, Register rt2, DRegister dm, Condition cond = AL);
555 void vmovdr(DRegister dd, int i, Register rt, Condition cond = AL);
556 void vmovs(SRegister sd, SRegister sm, Condition cond = AL);
557 void vmovd(DRegister dd, DRegister dm, Condition cond = AL);
558 void vmovq(QRegister qd, QRegister qm);
559
560 // Returns false if the immediate cannot be encoded.
561 bool vmovs(SRegister sd, float s_imm, Condition cond = AL);
562 bool vmovd(DRegister dd, double d_imm, Condition cond = AL);
563
564 void vldrs(SRegister sd, Address ad, Condition cond = AL);
565 void vstrs(SRegister sd, Address ad, Condition cond = AL);
566 void vldrd(DRegister dd, Address ad, Condition cond = AL);
567 void vstrd(DRegister dd, Address ad, Condition cond = AL);
568
569 void vldms(BlockAddressMode am, Register base,
570 SRegister first, SRegister last, Condition cond = AL);
571 void vstms(BlockAddressMode am, Register base,
572 SRegister first, SRegister last, Condition cond = AL);
573
574 void vldmd(BlockAddressMode am, Register base,
575 DRegister first, intptr_t count, Condition cond = AL);
576 void vstmd(BlockAddressMode am, Register base,
577 DRegister first, intptr_t count, Condition cond = AL);
578
579 void vadds(SRegister sd, SRegister sn, SRegister sm, Condition cond = AL);
580 void vaddd(DRegister dd, DRegister dn, DRegister dm, Condition cond = AL);
581 void vaddqi(OperandSize sz, QRegister qd, QRegister qn, QRegister qm);
582 void vaddqs(QRegister qd, QRegister qn, QRegister qm);
583 void vsubs(SRegister sd, SRegister sn, SRegister sm, Condition cond = AL);
584 void vsubd(DRegister dd, DRegister dn, DRegister dm, Condition cond = AL);
585 void vsubqi(OperandSize sz, QRegister qd, QRegister qn, QRegister qm);
586 void vsubqs(QRegister qd, QRegister qn, QRegister qm);
587 void vmuls(SRegister sd, SRegister sn, SRegister sm, Condition cond = AL);
588 void vmuld(DRegister dd, DRegister dn, DRegister dm, Condition cond = AL);
589 void vmulqi(OperandSize sz, QRegister qd, QRegister qn, QRegister qm);
590 void vmulqs(QRegister qd, QRegister qn, QRegister qm);
591 void vshlqi(OperandSize sz, QRegister qd, QRegister qm, QRegister qn);
592 void vshlqu(OperandSize sz, QRegister qd, QRegister qm, QRegister qn);
593 void vmlas(SRegister sd, SRegister sn, SRegister sm, Condition cond = AL);
594 void vmlad(DRegister dd, DRegister dn, DRegister dm, Condition cond = AL);
595 void vmlss(SRegister sd, SRegister sn, SRegister sm, Condition cond = AL);
596 void vmlsd(DRegister dd, DRegister dn, DRegister dm, Condition cond = AL);
597 void vdivs(SRegister sd, SRegister sn, SRegister sm, Condition cond = AL);
598 void vdivd(DRegister dd, DRegister dn, DRegister dm, Condition cond = AL);
599 void vminqs(QRegister qd, QRegister qn, QRegister qm);
600 void vmaxqs(QRegister qd, QRegister qn, QRegister qm);
601 void vrecpeqs(QRegister qd, QRegister qm);
602 void vrecpsqs(QRegister qd, QRegister qn, QRegister qm);
603 void vrsqrteqs(QRegister qd, QRegister qm);
604 void vrsqrtsqs(QRegister qd, QRegister qn, QRegister qm);
605
606 void veorq(QRegister qd, QRegister qn, QRegister qm);
607 void vorrq(QRegister qd, QRegister qn, QRegister qm);
608 void vornq(QRegister qd, QRegister qn, QRegister qm);
609 void vandq(QRegister qd, QRegister qn, QRegister qm);
610 void vmvnq(QRegister qd, QRegister qm);
611
612 void vceqqi(OperandSize sz, QRegister qd, QRegister qn, QRegister qm);
613 void vceqqs(QRegister qd, QRegister qn, QRegister qm);
614 void vcgeqi(OperandSize sz, QRegister qd, QRegister qn, QRegister qm);
615 void vcugeqi(OperandSize sz, QRegister qd, QRegister qn, QRegister qm);
616 void vcgeqs(QRegister qd, QRegister qn, QRegister qm);
617 void vcgtqi(OperandSize sz, QRegister qd, QRegister qn, QRegister qm);
618 void vcugtqi(OperandSize sz, QRegister qd, QRegister qn, QRegister qm);
619 void vcgtqs(QRegister qd, QRegister qn, QRegister qm);
620
621 void vabss(SRegister sd, SRegister sm, Condition cond = AL);
622 void vabsd(DRegister dd, DRegister dm, Condition cond = AL);
623 void vabsqs(QRegister qd, QRegister qm);
624 void vnegs(SRegister sd, SRegister sm, Condition cond = AL);
625 void vnegd(DRegister dd, DRegister dm, Condition cond = AL);
626 void vnegqs(QRegister qd, QRegister qm);
627 void vsqrts(SRegister sd, SRegister sm, Condition cond = AL);
628 void vsqrtd(DRegister dd, DRegister dm, Condition cond = AL);
629
630 void vcvtsd(SRegister sd, DRegister dm, Condition cond = AL);
631 void vcvtds(DRegister dd, SRegister sm, Condition cond = AL);
632 void vcvtis(SRegister sd, SRegister sm, Condition cond = AL);
633 void vcvtid(SRegister sd, DRegister dm, Condition cond = AL);
634 void vcvtsi(SRegister sd, SRegister sm, Condition cond = AL);
635 void vcvtdi(DRegister dd, SRegister sm, Condition cond = AL);
636 void vcvtus(SRegister sd, SRegister sm, Condition cond = AL);
637 void vcvtud(SRegister sd, DRegister dm, Condition cond = AL);
638 void vcvtsu(SRegister sd, SRegister sm, Condition cond = AL);
639 void vcvtdu(DRegister dd, SRegister sm, Condition cond = AL);
640
641 void vcmps(SRegister sd, SRegister sm, Condition cond = AL);
642 void vcmpd(DRegister dd, DRegister dm, Condition cond = AL);
643 void vcmpsz(SRegister sd, Condition cond = AL);
644 void vcmpdz(DRegister dd, Condition cond = AL);
645 void vmrs(Register rd, Condition cond = AL);
646 void vmstat(Condition cond = AL);
647
648 // Duplicates the operand of size sz at index idx from dm to all elements of
649 // qd. This is a special case of vtbl.
650 void vdup(OperandSize sz, QRegister qd, DRegister dm, int idx);
651
652 // Each byte of dm is an index into the table of bytes formed by concatenating
653 // a list of 'length' registers starting with dn. The result is placed in dd.
654 void vtbl(DRegister dd, DRegister dn, int length, DRegister dm);
655
656 // The words of qd and qm are interleaved with the low words of the result
657 // in qd and the high words in qm.
658 void vzipqw(QRegister qd, QRegister qm);
659
660 // Branch instructions.
661 void b(Label* label, Condition cond = AL);
662 void bl(Label* label, Condition cond = AL);
663 void bx(Register rm, Condition cond = AL);
664 void blx(Register rm, Condition cond = AL);
665
666 void Branch(const StubEntry& stub_entry,
667 Patchability patchable = kNotPatchable,
668 Register pp = PP,
669 Condition cond = AL);
670
671 void BranchLink(const StubEntry& stub_entry,
672 Patchability patchable = kNotPatchable);
673 void BranchLink(const Code& code, Patchability patchable);
674
675 // Branch and link to an entry address. Call sequence can be patched.
676 void BranchLinkPatchable(const StubEntry& stub_entry);
677 void BranchLinkPatchable(const Code& code);
678
679 // Branch and link to [base + offset]. Call sequence is never patched.
680 void BranchLinkOffset(Register base, int32_t offset);
681
682 // Add signed immediate value to rd. May clobber IP.
683 void AddImmediate(Register rd, int32_t value, Condition cond = AL);
684 void AddImmediate(Register rd, Register rn, int32_t value,
685 Condition cond = AL);
686 void AddImmediateSetFlags(Register rd, Register rn, int32_t value,
687 Condition cond = AL);
688 void SubImmediateSetFlags(Register rd, Register rn, int32_t value,
689 Condition cond = AL);
690 void AndImmediate(Register rd, Register rs, int32_t imm, Condition cond = AL);
691
692 // Test rn and immediate. May clobber IP.
693 void TestImmediate(Register rn, int32_t imm, Condition cond = AL);
694
695 // Compare rn with signed immediate value. May clobber IP.
696 void CompareImmediate(Register rn, int32_t value, Condition cond = AL);
697
698
699 // Signed integer division of left by right. Checks to see if integer
700 // division is supported. If not, uses the FPU for division with
701 // temporary registers tmpl and tmpr. tmpl and tmpr must be different
702 // registers.
703 void IntegerDivide(Register result, Register left, Register right,
704 DRegister tmpl, DRegister tmpr);
705
706 // Load and Store.
707 // These three do not clobber IP.
708 void LoadPatchableImmediate(Register rd, int32_t value, Condition cond = AL);
709 void LoadDecodableImmediate(Register rd, int32_t value, Condition cond = AL);
710 void LoadImmediate(Register rd, int32_t value, Condition cond = AL);
711 // These two may clobber IP.
712 void LoadSImmediate(SRegister sd, float value, Condition cond = AL);
713 void LoadDImmediate(DRegister dd, double value,
714 Register scratch, Condition cond = AL);
715
716 void MarkExceptionHandler(Label* label);
717
718 void Drop(intptr_t stack_elements);
719
720 void RestoreCodePointer();
721 void LoadPoolPointer(Register reg = PP);
722
723 void LoadIsolate(Register rd);
724
725 void LoadObject(Register rd, const Object& object, Condition cond = AL);
726 void LoadUniqueObject(Register rd, const Object& object, Condition cond = AL);
727 void LoadFunctionFromCalleePool(Register dst,
728 const Function& function,
729 Register new_pp);
730 void LoadNativeEntry(Register dst,
731 const ExternalLabel* label,
732 Patchability patchable,
733 Condition cond = AL);
734 void PushObject(const Object& object);
735 void CompareObject(Register rn, const Object& object);
736
737 // When storing into a heap object field, knowledge of the previous content
738 // is expressed through these constants.
739 enum FieldContent {
740 kEmptyOrSmiOrNull, // Empty = garbage/zapped in release/debug mode.
741 kHeapObjectOrSmi,
742 kOnlySmi,
743 };
744
745 void StoreIntoObject(Register object, // Object we are storing into.
746 const Address& dest, // Where we are storing into.
747 Register value, // Value we are storing.
748 bool can_value_be_smi = true);
749 void StoreIntoObjectOffset(Register object,
750 int32_t offset,
751 Register value,
752 bool can_value_be_smi = true);
753
754 void StoreIntoObjectNoBarrier(Register object,
755 const Address& dest,
756 Register value,
757 FieldContent old_content = kHeapObjectOrSmi);
758 void InitializeFieldNoBarrier(Register object,
759 const Address& dest,
760 Register value) {
761 StoreIntoObjectNoBarrier(object, dest, value, kEmptyOrSmiOrNull);
762 }
763 void StoreIntoObjectNoBarrierOffset(
764 Register object,
765 int32_t offset,
766 Register value,
767 FieldContent old_content = kHeapObjectOrSmi);
768 void StoreIntoObjectNoBarrier(Register object,
769 const Address& dest,
770 const Object& value,
771 FieldContent old_content = kHeapObjectOrSmi);
772 void StoreIntoObjectNoBarrierOffset(
773 Register object,
774 int32_t offset,
775 const Object& value,
776 FieldContent old_content = kHeapObjectOrSmi);
777
778 // Store value_even, value_odd, value_even, ... into the words in the address
779 // range [begin, end), assumed to be uninitialized fields in object (tagged).
780 // The stores must not need a generational store barrier (e.g., smi/null),
781 // and (value_even, value_odd) must be a valid register pair.
782 // Destroys register 'begin'.
783 void InitializeFieldsNoBarrier(Register object,
784 Register begin,
785 Register end,
786 Register value_even,
787 Register value_odd);
788 // Like above, for the range [base+begin_offset, base+end_offset), unrolled.
789 void InitializeFieldsNoBarrierUnrolled(Register object,
790 Register base,
791 intptr_t begin_offset,
792 intptr_t end_offset,
793 Register value_even,
794 Register value_odd);
795
796 // Stores a Smi value into a heap object field that always contains a Smi.
797 void StoreIntoSmiField(const Address& dest, Register value);
798
799 void LoadClassId(Register result, Register object, Condition cond = AL);
800 void LoadClassById(Register result, Register class_id);
801 void LoadClass(Register result, Register object, Register scratch);
802 void CompareClassId(Register object, intptr_t class_id, Register scratch);
803 void LoadClassIdMayBeSmi(Register result, Register object);
804 void LoadTaggedClassIdMayBeSmi(Register result, Register object);
805
806 void ComputeRange(Register result,
807 Register value,
808 Register scratch,
809 Label* miss);
810
811 void UpdateRangeFeedback(Register value,
812 intptr_t idx,
813 Register ic_data,
814 Register scratch1,
815 Register scratch2,
816 Label* miss);
817
818 intptr_t FindImmediate(int32_t imm);
819 bool CanLoadFromObjectPool(const Object& object) const;
820 void LoadFromOffset(OperandSize type,
821 Register reg,
822 Register base,
823 int32_t offset,
824 Condition cond = AL);
825 void LoadFieldFromOffset(OperandSize type,
826 Register reg,
827 Register base,
828 int32_t offset,
829 Condition cond = AL) {
830 LoadFromOffset(type, reg, base, offset - kHeapObjectTag, cond);
831 }
832 void StoreToOffset(OperandSize type,
833 Register reg,
834 Register base,
835 int32_t offset,
836 Condition cond = AL);
837 void LoadSFromOffset(SRegister reg,
838 Register base,
839 int32_t offset,
840 Condition cond = AL);
841 void StoreSToOffset(SRegister reg,
842 Register base,
843 int32_t offset,
844 Condition cond = AL);
845 void LoadDFromOffset(DRegister reg,
846 Register base,
847 int32_t offset,
848 Condition cond = AL);
849 void StoreDToOffset(DRegister reg,
850 Register base,
851 int32_t offset,
852 Condition cond = AL);
853
854 void LoadMultipleDFromOffset(DRegister first,
855 intptr_t count,
856 Register base,
857 int32_t offset);
858 void StoreMultipleDToOffset(DRegister first,
859 intptr_t count,
860 Register base,
861 int32_t offset);
862
863 void CopyDoubleField(Register dst, Register src,
864 Register tmp1, Register tmp2, DRegister dtmp);
865 void CopyFloat32x4Field(Register dst, Register src,
866 Register tmp1, Register tmp2, DRegister dtmp);
867 void CopyFloat64x2Field(Register dst, Register src,
868 Register tmp1, Register tmp2, DRegister dtmp);
869
870 void Push(Register rd, Condition cond = AL);
871 void Pop(Register rd, Condition cond = AL);
872
873 void PushList(RegList regs, Condition cond = AL);
874 void PopList(RegList regs, Condition cond = AL);
875
876 void MoveRegister(Register rd, Register rm, Condition cond = AL);
877
878 // Convenience shift instructions. Use mov instruction with shifter operand
879 // for variants setting the status flags.
880 void Lsl(Register rd, Register rm, const Operand& shift_imm,
881 Condition cond = AL);
882 void Lsl(Register rd, Register rm, Register rs, Condition cond = AL);
883 void Lsr(Register rd, Register rm, const Operand& shift_imm,
884 Condition cond = AL);
885 void Lsr(Register rd, Register rm, Register rs, Condition cond = AL);
886 void Asr(Register rd, Register rm, const Operand& shift_imm,
887 Condition cond = AL);
888 void Asr(Register rd, Register rm, Register rs, Condition cond = AL);
889 void Asrs(Register rd, Register rm, const Operand& shift_imm,
890 Condition cond = AL);
891 void Ror(Register rd, Register rm, const Operand& shift_imm,
892 Condition cond = AL);
893 void Ror(Register rd, Register rm, Register rs, Condition cond = AL);
894 void Rrx(Register rd, Register rm, Condition cond = AL);
895
896 // Fill rd with the sign of rm.
897 void SignFill(Register rd, Register rm, Condition cond = AL);
898
899 void Vreciprocalqs(QRegister qd, QRegister qm);
900 void VreciprocalSqrtqs(QRegister qd, QRegister qm);
901 // If qm must be preserved, then provide a (non-QTMP) temporary.
902 void Vsqrtqs(QRegister qd, QRegister qm, QRegister temp);
903 void Vdivqs(QRegister qd, QRegister qn, QRegister qm);
904
905 void SmiTag(Register reg, Condition cond = AL) {
906 Lsl(reg, reg, Operand(kSmiTagSize), cond);
907 }
908
909 void SmiTag(Register dst, Register src, Condition cond = AL) {
910 Lsl(dst, src, Operand(kSmiTagSize), cond);
911 }
912
913 void SmiUntag(Register reg, Condition cond = AL) {
914 Asr(reg, reg, Operand(kSmiTagSize), cond);
915 }
916
917 void SmiUntag(Register dst, Register src, Condition cond = AL) {
918 Asr(dst, src, Operand(kSmiTagSize), cond);
919 }
920
921 // Untag the value in the register assuming it is a smi.
922 // Untagging shifts tag bit into the carry flag - if carry is clear
923 // assumption was correct. In this case jump to the is_smi label.
924 // Otherwise fall-through.
925 void SmiUntag(Register dst, Register src, Label* is_smi) {
926 ASSERT(kSmiTagSize == 1);
927 Asrs(dst, src, Operand(kSmiTagSize));
928 b(is_smi, CC);
929 }
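// Illustrative example (editorial addition, not from the forked Dart source):
// with kSmiTagSize == 1 (asserted above) and a Smi tag value of 0, tagging is
// a left shift and untagging an arithmetic right shift:
//
//   assembler->SmiTag(R0);            // 5  -> 10 (low bit 0 marks a Smi)
//   assembler->SmiUntag(R0);          // 10 -> 5
//   assembler->SmiUntag(R1, R0, &is_smi);
//       // Asrs shifts R0's low bit into the carry flag; b(is_smi, CC) is
//       // taken only when that bit was 0, i.e. when R0 really held a Smi.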
930
931 void CheckCodePointer();
932
933 // Function frame setup and tear down.
934 void EnterFrame(RegList regs, intptr_t frame_space);
935 void LeaveFrame(RegList regs);
936 void Ret();
937 void ReserveAlignedFrameSpace(intptr_t frame_space);
938
939 // Create a frame for calling into runtime that preserves all volatile
940 // registers. Frame's SP is guaranteed to be correctly aligned and
941 // frame_space bytes are reserved under it.
942 void EnterCallRuntimeFrame(intptr_t frame_space);
943 void LeaveCallRuntimeFrame();
944
945 void CallRuntime(const RuntimeEntry& entry, intptr_t argument_count);
946
947 // Set up a Dart frame on entry with a frame pointer and PC information to
948 // enable easy access to the RawInstruction object of code corresponding
949 // to this frame.
950 void EnterDartFrame(intptr_t frame_size);
951 void LeaveDartFrame(RestorePP restore_pp = kRestoreCallerPP);
952
953 // Set up a Dart frame for a function compiled for on-stack replacement.
954 // The frame layout is a normal Dart frame, but the frame is partially set
955 // up on entry (it is the frame of the unoptimized code).
956 void EnterOsrFrame(intptr_t extra_size);
957
958 // Set up a stub frame so that the stack traversal code can easily identify
959 // a stub frame.
960 void EnterStubFrame();
961 void LeaveStubFrame();
962
963 // The register into which the allocation stats table is loaded with
964 // LoadAllocationStatsAddress should be passed to
965 // IncrementAllocationStats(WithSize) as stats_addr_reg to update the
966 // allocation stats. These are separate assembler macros so we can
967 // avoid a dependent load too close to the load of the table address.
968 void LoadAllocationStatsAddress(Register dest,
969 intptr_t cid,
970 bool inline_isolate = true);
971 void IncrementAllocationStats(Register stats_addr,
972 intptr_t cid,
973 Heap::Space space);
974 void IncrementAllocationStatsWithSize(Register stats_addr_reg,
975 Register size_reg,
976 Heap::Space space);
977
978 Address ElementAddressForIntIndex(bool is_load,
979 bool is_external,
980 intptr_t cid,
981 intptr_t index_scale,
982 Register array,
983 intptr_t index,
984 Register temp);
985
986 Address ElementAddressForRegIndex(bool is_load,
987 bool is_external,
988 intptr_t cid,
989 intptr_t index_scale,
990 Register array,
991 Register index);
992
993 // If allocation tracing for |cid| is enabled, will jump to |trace| label,
994 // which will allocate in the runtime where tracing occurs.
995 void MaybeTraceAllocation(intptr_t cid,
996 Register temp_reg,
997 Label* trace,
998 bool inline_isolate = true);
999
1000 // Inlined allocation of an instance of class 'cls', code has no runtime
1001 // calls. Jump to 'failure' if the instance cannot be allocated here.
1002 // Allocated instance is returned in 'instance_reg'.
1003 // Only the tags field of the object is initialized.
1004 void TryAllocate(const Class& cls,
1005 Label* failure,
1006 Register instance_reg,
1007 Register temp_reg);
1008
1009 void TryAllocateArray(intptr_t cid,
1010 intptr_t instance_size,
1011 Label* failure,
1012 Register instance,
1013 Register end_address,
1014 Register temp1,
1015 Register temp2);
1016
1017 // Emit data (e.g., an encoded instruction or immediate) in the instruction stream.
1018 void Emit(int32_t value);
1019
1020 // On some other platforms, we draw a distinction between safe and unsafe
1021 // smis.
1022 static bool IsSafe(const Object& object) { return true; }
1023 static bool IsSafeSmi(const Object& object) { return object.IsSmi(); }
1024
1025 bool constant_pool_allowed() const {
1026 return constant_pool_allowed_;
1027 }
1028 void set_constant_pool_allowed(bool b) {
1029 constant_pool_allowed_ = b;
1030 }
1031
1032 private:
1033 AssemblerBuffer buffer_; // Contains position independent code.
1034 ObjectPoolWrapper object_pool_wrapper_;
1035
1036 int32_t prologue_offset_;
1037
1038 bool use_far_branches_;
1039
1040 // If you are thinking of using one or both of these instructions directly,
1041 // you should probably use LoadImmediate instead.
1042 void movw(Register rd, uint16_t imm16, Condition cond = AL);
1043 void movt(Register rd, uint16_t imm16, Condition cond = AL);
1044
1045 void BindARMv6(Label* label);
1046 void BindARMv7(Label* label);
1047
1048 void LoadWordFromPoolOffset(Register rd,
1049 int32_t offset,
1050 Register pp,
1051 Condition cond);
1052
1053 void BranchLink(const ExternalLabel* label);
1054
1055 class CodeComment : public ZoneAllocated {
1056 public:
1057 CodeComment(intptr_t pc_offset, const String& comment)
1058 : pc_offset_(pc_offset), comment_(comment) { }
1059
1060 intptr_t pc_offset() const { return pc_offset_; }
1061 const String& comment() const { return comment_; }
1062
1063 private:
1064 intptr_t pc_offset_;
1065 const String& comment_;
1066
1067 DISALLOW_COPY_AND_ASSIGN(CodeComment);
1068 };
1069
1070 GrowableArray<CodeComment*> comments_;
1071
1072 bool constant_pool_allowed_;
1073
1074 void LoadObjectHelper(Register rd,
1075 const Object& object,
1076 Condition cond,
1077 bool is_unique,
1078 Register pp);
1079
1080 void EmitType01(Condition cond,
1081 int type,
1082 Opcode opcode,
1083 int set_cc,
1084 Register rn,
1085 Register rd,
1086 Operand o);
1087
1088 void EmitType5(Condition cond, int32_t offset, bool link);
1089
1090 void EmitMemOp(Condition cond,
1091 bool load,
1092 bool byte,
1093 Register rd,
1094 Address ad);
1095
1096 void EmitMemOpAddressMode3(Condition cond,
1097 int32_t mode,
1098 Register rd,
1099 Address ad);
1100
1101 void EmitMultiMemOp(Condition cond,
1102 BlockAddressMode am,
1103 bool load,
1104 Register base,
1105 RegList regs);
1106
1107 void EmitShiftImmediate(Condition cond,
1108 Shift opcode,
1109 Register rd,
1110 Register rm,
1111 Operand o);
1112
1113 void EmitShiftRegister(Condition cond,
1114 Shift opcode,
1115 Register rd,
1116 Register rm,
1117 Operand o);
1118
1119 void EmitMulOp(Condition cond,
1120 int32_t opcode,
1121 Register rd,
1122 Register rn,
1123 Register rm,
1124 Register rs);
1125
1126 void EmitDivOp(Condition cond,
1127 int32_t opcode,
1128 Register rd,
1129 Register rn,
1130 Register rm);
1131
1132 void EmitMultiVSMemOp(Condition cond,
1133 BlockAddressMode am,
1134 bool load,
1135 Register base,
1136 SRegister start,
1137 uint32_t count);
1138
1139 void EmitMultiVDMemOp(Condition cond,
1140 BlockAddressMode am,
1141 bool load,
1142 Register base,
1143 DRegister start,
1144 int32_t count);
1145
1146 void EmitVFPsss(Condition cond,
1147 int32_t opcode,
1148 SRegister sd,
1149 SRegister sn,
1150 SRegister sm);
1151
1152 void EmitVFPddd(Condition cond,
1153 int32_t opcode,
1154 DRegister dd,
1155 DRegister dn,
1156 DRegister dm);
1157
1158 void EmitVFPsd(Condition cond,
1159 int32_t opcode,
1160 SRegister sd,
1161 DRegister dm);
1162
1163 void EmitVFPds(Condition cond,
1164 int32_t opcode,
1165 DRegister dd,
1166 SRegister sm);
1167
1168 void EmitSIMDqqq(int32_t opcode, OperandSize sz,
1169 QRegister qd, QRegister qn, QRegister qm);
1170
1171 void EmitSIMDddd(int32_t opcode, OperandSize sz,
1172 DRegister dd, DRegister dn, DRegister dm);
1173
1174 void EmitFarBranch(Condition cond, int32_t offset, bool link);
1175 void EmitBranch(Condition cond, Label* label, bool link);
1176 int32_t EncodeBranchOffset(int32_t offset, int32_t inst);
1177 static int32_t DecodeBranchOffset(int32_t inst);
1178 int32_t EncodeTstOffset(int32_t offset, int32_t inst);
1179 int32_t DecodeTstOffset(int32_t inst);
1180
1181 void StoreIntoObjectFilter(Register object, Register value, Label* no_update);
1182
1183 // Shorter filtering sequence that assumes that value is not a smi.
1184 void StoreIntoObjectFilterNoSmi(Register object,
1185 Register value,
1186 Label* no_update);
1187
1188 // Helpers for write-barrier verification.
1189
1190 // Returns VerifiedMemory::offset() as an Operand.
1191 Operand GetVerifiedMemoryShadow();
1192 // Writes value to [base + offset] and also its shadow location, if enabled.
1193 void WriteShadowedField(Register base,
1194 intptr_t offset,
1195 Register value,
1196 Condition cond = AL);
1197 void WriteShadowedFieldPair(Register base,
1198 intptr_t offset,
1199 Register value_even,
1200 Register value_odd,
1201 Condition cond = AL);
1202 // Writes new_value to address and its shadow location, if enabled, after
1203 // verifying that its old value matches its shadow.
1204 void VerifiedWrite(const Address& address,
1205 Register new_value,
1206 FieldContent old_content);
1207
1208 DISALLOW_ALLOCATION();
1209 DISALLOW_COPY_AND_ASSIGN(Assembler);
1210 };
1211
1212 } // namespace dart
1213
1214 #endif // VM_ASSEMBLER_ARM_H_