Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(100)

Side by Side Diff: src/arm/assembler-arm.cc

Issue 6529032: Merge 6168:6800 from bleeding_edge to experimental/gc branch. (Closed) Base URL: http://v8.googlecode.com/svn/branches/experimental/gc/
Patch Set: Created 9 years, 10 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch | Annotate | Revision Log
« no previous file with comments | « src/arm/assembler-arm.h ('k') | src/arm/assembler-arm-inl.h » ('j') | no next file with comments »
Toggle Intra-line Diffs ('i') | Expand Comments ('e') | Collapse Comments ('c') | Show/Hide Comments ('s')
OLDNEW
1 // Copyright (c) 1994-2006 Sun Microsystems Inc. 1 // Copyright (c) 1994-2006 Sun Microsystems Inc.
2 // All Rights Reserved. 2 // All Rights Reserved.
3 // 3 //
4 // Redistribution and use in source and binary forms, with or without 4 // Redistribution and use in source and binary forms, with or without
5 // modification, are permitted provided that the following conditions 5 // modification, are permitted provided that the following conditions
6 // are met: 6 // are met:
7 // 7 //
8 // - Redistributions of source code must retain the above copyright notice, 8 // - Redistributions of source code must retain the above copyright notice,
9 // this list of conditions and the following disclaimer. 9 // this list of conditions and the following disclaimer.
10 // 10 //
(...skipping 195 matching lines...) Expand 10 before | Expand all | Expand 10 after
206 ASSERT(is_uint5(shift_imm)); 206 ASSERT(is_uint5(shift_imm));
207 rn_ = rn; 207 rn_ = rn;
208 rm_ = rm; 208 rm_ = rm;
209 shift_op_ = shift_op; 209 shift_op_ = shift_op;
210 shift_imm_ = shift_imm & 31; 210 shift_imm_ = shift_imm & 31;
211 am_ = am; 211 am_ = am;
212 } 212 }
213 213
214 214
// -----------------------------------------------------------------------------
// Specific instructions, constants, and masks.
//
// NOTE(review): the instruction-encoding-bit enum (H, S6, L, S, W, A, B, N,
// U, P, I, B4..B27, RdMask, CondMask, CoprocessorMask, OpCodeMask,
// Imm24Mask, Off12Mask, nv) that previously lived here was removed by this
// patch — presumably relocated to a shared constants header, since the code
// below now uses k-prefixed names (kCondMask, kImm24Mask, ...); verify
// against src/arm/constants-arm.h.
262 217
263 // add(sp, sp, 4) instruction (aka Pop()) 218 // add(sp, sp, 4) instruction (aka Pop())
264 static const Instr kPopInstruction = 219 const Instr kPopInstruction =
265 al | 4 * B21 | 4 | LeaveCC | I | sp.code() * B16 | sp.code() * B12; 220 al | PostIndex | 4 | LeaveCC | I | sp.code() * B16 | sp.code() * B12;
266 // str(r, MemOperand(sp, 4, NegPreIndex), al) instruction (aka push(r)) 221 // str(r, MemOperand(sp, 4, NegPreIndex), al) instruction (aka push(r))
267 // register r is not encoded. 222 // register r is not encoded.
268 static const Instr kPushRegPattern = 223 const Instr kPushRegPattern =
269 al | B26 | 4 | NegPreIndex | sp.code() * B16; 224 al | B26 | 4 | NegPreIndex | sp.code() * B16;
270 // ldr(r, MemOperand(sp, 4, PostIndex), al) instruction (aka pop(r)) 225 // ldr(r, MemOperand(sp, 4, PostIndex), al) instruction (aka pop(r))
271 // register r is not encoded. 226 // register r is not encoded.
272 static const Instr kPopRegPattern = 227 const Instr kPopRegPattern =
273 al | B26 | L | 4 | PostIndex | sp.code() * B16; 228 al | B26 | L | 4 | PostIndex | sp.code() * B16;
274 // mov lr, pc 229 // mov lr, pc
275 const Instr kMovLrPc = al | 13*B21 | pc.code() | lr.code() * B12; 230 const Instr kMovLrPc = al | MOV | pc.code() | lr.code() * B12;
276 // ldr rd, [pc, #offset] 231 // ldr rd, [pc, #offset]
277 const Instr kLdrPCMask = CondMask | 15 * B24 | 7 * B20 | 15 * B16; 232 const Instr kLdrPCMask = kCondMask | 15 * B24 | 7 * B20 | 15 * B16;
278 const Instr kLdrPCPattern = al | 5 * B24 | L | pc.code() * B16; 233 const Instr kLdrPCPattern = al | 5 * B24 | L | pc.code() * B16;
279 // blxcc rm 234 // blxcc rm
280 const Instr kBlxRegMask = 235 const Instr kBlxRegMask =
281 15 * B24 | 15 * B20 | 15 * B16 | 15 * B12 | 15 * B8 | 15 * B4; 236 15 * B24 | 15 * B20 | 15 * B16 | 15 * B12 | 15 * B8 | 15 * B4;
282 const Instr kBlxRegPattern = 237 const Instr kBlxRegPattern =
283 B24 | B21 | 15 * B16 | 15 * B12 | 15 * B8 | 3 * B4; 238 B24 | B21 | 15 * B16 | 15 * B12 | 15 * B8 | BLX;
284 const Instr kMovMvnMask = 0x6d * B21 | 0xf * B16; 239 const Instr kMovMvnMask = 0x6d * B21 | 0xf * B16;
285 const Instr kMovMvnPattern = 0xd * B21; 240 const Instr kMovMvnPattern = 0xd * B21;
286 const Instr kMovMvnFlip = B22; 241 const Instr kMovMvnFlip = B22;
287 const Instr kMovLeaveCCMask = 0xdff * B16; 242 const Instr kMovLeaveCCMask = 0xdff * B16;
288 const Instr kMovLeaveCCPattern = 0x1a0 * B16; 243 const Instr kMovLeaveCCPattern = 0x1a0 * B16;
289 const Instr kMovwMask = 0xff * B20; 244 const Instr kMovwMask = 0xff * B20;
290 const Instr kMovwPattern = 0x30 * B20; 245 const Instr kMovwPattern = 0x30 * B20;
291 const Instr kMovwLeaveCCFlip = 0x5 * B21; 246 const Instr kMovwLeaveCCFlip = 0x5 * B21;
292 const Instr kCmpCmnMask = 0xdd * B20 | 0xf * B12; 247 const Instr kCmpCmnMask = 0xdd * B20 | 0xf * B12;
293 const Instr kCmpCmnPattern = 0x15 * B20; 248 const Instr kCmpCmnPattern = 0x15 * B20;
294 const Instr kCmpCmnFlip = B21; 249 const Instr kCmpCmnFlip = B21;
295 const Instr kALUMask = 0x6f * B21;
296 const Instr kAddPattern = 0x4 * B21;
297 const Instr kSubPattern = 0x2 * B21;
298 const Instr kBicPattern = 0xe * B21;
299 const Instr kAndPattern = 0x0 * B21;
300 const Instr kAddSubFlip = 0x6 * B21; 250 const Instr kAddSubFlip = 0x6 * B21;
301 const Instr kAndBicFlip = 0xe * B21; 251 const Instr kAndBicFlip = 0xe * B21;
302 252
303 // A mask for the Rd register for push, pop, ldr, str instructions. 253 // A mask for the Rd register for push, pop, ldr, str instructions.
304 const Instr kRdMask = 0x0000f000; 254 const Instr kLdrRegFpOffsetPattern =
305 static const int kRdShift = 12;
306 static const Instr kLdrRegFpOffsetPattern =
307 al | B26 | L | Offset | fp.code() * B16; 255 al | B26 | L | Offset | fp.code() * B16;
308 static const Instr kStrRegFpOffsetPattern = 256 const Instr kStrRegFpOffsetPattern =
309 al | B26 | Offset | fp.code() * B16; 257 al | B26 | Offset | fp.code() * B16;
310 static const Instr kLdrRegFpNegOffsetPattern = 258 const Instr kLdrRegFpNegOffsetPattern =
311 al | B26 | L | NegOffset | fp.code() * B16; 259 al | B26 | L | NegOffset | fp.code() * B16;
312 static const Instr kStrRegFpNegOffsetPattern = 260 const Instr kStrRegFpNegOffsetPattern =
313 al | B26 | NegOffset | fp.code() * B16; 261 al | B26 | NegOffset | fp.code() * B16;
314 static const Instr kLdrStrInstrTypeMask = 0xffff0000; 262 const Instr kLdrStrInstrTypeMask = 0xffff0000;
315 static const Instr kLdrStrInstrArgumentMask = 0x0000ffff; 263 const Instr kLdrStrInstrArgumentMask = 0x0000ffff;
316 static const Instr kLdrStrOffsetMask = 0x00000fff; 264 const Instr kLdrStrOffsetMask = 0x00000fff;
265
317 266
318 // Spare buffer. 267 // Spare buffer.
319 static const int kMinimalBufferSize = 4*KB; 268 static const int kMinimalBufferSize = 4*KB;
320 static byte* spare_buffer_ = NULL; 269 static byte* spare_buffer_ = NULL;
321 270
271
322 Assembler::Assembler(void* buffer, int buffer_size) 272 Assembler::Assembler(void* buffer, int buffer_size)
323 : positions_recorder_(this), 273 : positions_recorder_(this),
324 allow_peephole_optimization_(false) { 274 allow_peephole_optimization_(false) {
325 // BUG(3245989): disable peephole optimization if crankshaft is enabled.
326 allow_peephole_optimization_ = FLAG_peephole_optimization; 275 allow_peephole_optimization_ = FLAG_peephole_optimization;
327 if (buffer == NULL) { 276 if (buffer == NULL) {
328 // Do our own buffer management. 277 // Do our own buffer management.
329 if (buffer_size <= kMinimalBufferSize) { 278 if (buffer_size <= kMinimalBufferSize) {
330 buffer_size = kMinimalBufferSize; 279 buffer_size = kMinimalBufferSize;
331 280
332 if (spare_buffer_ != NULL) { 281 if (spare_buffer_ != NULL) {
333 buffer = spare_buffer_; 282 buffer = spare_buffer_;
334 spare_buffer_ = NULL; 283 spare_buffer_ = NULL;
335 } 284 }
(...skipping 59 matching lines...) Expand 10 before | Expand all | Expand 10 after
395 } 344 }
396 } 345 }
397 346
398 347
399 void Assembler::CodeTargetAlign() { 348 void Assembler::CodeTargetAlign() {
400 // Preferred alignment of jump targets on some ARM chips. 349 // Preferred alignment of jump targets on some ARM chips.
401 Align(8); 350 Align(8);
402 } 351 }
403 352
404 353
354 Condition Assembler::GetCondition(Instr instr) {
355 return Instruction::ConditionField(instr);
356 }
357
358
405 bool Assembler::IsBranch(Instr instr) { 359 bool Assembler::IsBranch(Instr instr) {
406 return (instr & (B27 | B25)) == (B27 | B25); 360 return (instr & (B27 | B25)) == (B27 | B25);
407 } 361 }
408 362
409 363
410 int Assembler::GetBranchOffset(Instr instr) { 364 int Assembler::GetBranchOffset(Instr instr) {
411 ASSERT(IsBranch(instr)); 365 ASSERT(IsBranch(instr));
412 // Take the jump offset in the lower 24 bits, sign extend it and multiply it 366 // Take the jump offset in the lower 24 bits, sign extend it and multiply it
413 // with 4 to get the offset in bytes. 367 // with 4 to get the offset in bytes.
414 return ((instr & Imm24Mask) << 8) >> 6; 368 return ((instr & kImm24Mask) << 8) >> 6;
415 } 369 }
416 370
417 371
418 bool Assembler::IsLdrRegisterImmediate(Instr instr) { 372 bool Assembler::IsLdrRegisterImmediate(Instr instr) {
419 return (instr & (B27 | B26 | B25 | B22 | B20)) == (B26 | B20); 373 return (instr & (B27 | B26 | B25 | B22 | B20)) == (B26 | B20);
420 } 374 }
421 375
422 376
423 int Assembler::GetLdrRegisterImmediateOffset(Instr instr) { 377 int Assembler::GetLdrRegisterImmediateOffset(Instr instr) {
424 ASSERT(IsLdrRegisterImmediate(instr)); 378 ASSERT(IsLdrRegisterImmediate(instr));
425 bool positive = (instr & B23) == B23; 379 bool positive = (instr & B23) == B23;
426 int offset = instr & Off12Mask; // Zero extended offset. 380 int offset = instr & kOff12Mask; // Zero extended offset.
427 return positive ? offset : -offset; 381 return positive ? offset : -offset;
428 } 382 }
429 383
430 384
431 Instr Assembler::SetLdrRegisterImmediateOffset(Instr instr, int offset) { 385 Instr Assembler::SetLdrRegisterImmediateOffset(Instr instr, int offset) {
432 ASSERT(IsLdrRegisterImmediate(instr)); 386 ASSERT(IsLdrRegisterImmediate(instr));
433 bool positive = offset >= 0; 387 bool positive = offset >= 0;
434 if (!positive) offset = -offset; 388 if (!positive) offset = -offset;
435 ASSERT(is_uint12(offset)); 389 ASSERT(is_uint12(offset));
436 // Set bit indicating whether the offset should be added. 390 // Set bit indicating whether the offset should be added.
437 instr = (instr & ~B23) | (positive ? B23 : 0); 391 instr = (instr & ~B23) | (positive ? B23 : 0);
438 // Set the actual offset. 392 // Set the actual offset.
439 return (instr & ~Off12Mask) | offset; 393 return (instr & ~kOff12Mask) | offset;
440 } 394 }
441 395
442 396
443 bool Assembler::IsStrRegisterImmediate(Instr instr) { 397 bool Assembler::IsStrRegisterImmediate(Instr instr) {
444 return (instr & (B27 | B26 | B25 | B22 | B20)) == B26; 398 return (instr & (B27 | B26 | B25 | B22 | B20)) == B26;
445 } 399 }
446 400
447 401
448 Instr Assembler::SetStrRegisterImmediateOffset(Instr instr, int offset) { 402 Instr Assembler::SetStrRegisterImmediateOffset(Instr instr, int offset) {
449 ASSERT(IsStrRegisterImmediate(instr)); 403 ASSERT(IsStrRegisterImmediate(instr));
450 bool positive = offset >= 0; 404 bool positive = offset >= 0;
451 if (!positive) offset = -offset; 405 if (!positive) offset = -offset;
452 ASSERT(is_uint12(offset)); 406 ASSERT(is_uint12(offset));
453 // Set bit indicating whether the offset should be added. 407 // Set bit indicating whether the offset should be added.
454 instr = (instr & ~B23) | (positive ? B23 : 0); 408 instr = (instr & ~B23) | (positive ? B23 : 0);
455 // Set the actual offset. 409 // Set the actual offset.
456 return (instr & ~Off12Mask) | offset; 410 return (instr & ~kOff12Mask) | offset;
457 } 411 }
458 412
459 413
460 bool Assembler::IsAddRegisterImmediate(Instr instr) { 414 bool Assembler::IsAddRegisterImmediate(Instr instr) {
461 return (instr & (B27 | B26 | B25 | B24 | B23 | B22 | B21)) == (B25 | B23); 415 return (instr & (B27 | B26 | B25 | B24 | B23 | B22 | B21)) == (B25 | B23);
462 } 416 }
463 417
464 418
465 Instr Assembler::SetAddRegisterImmediateOffset(Instr instr, int offset) { 419 Instr Assembler::SetAddRegisterImmediateOffset(Instr instr, int offset) {
466 ASSERT(IsAddRegisterImmediate(instr)); 420 ASSERT(IsAddRegisterImmediate(instr));
467 ASSERT(offset >= 0); 421 ASSERT(offset >= 0);
468 ASSERT(is_uint12(offset)); 422 ASSERT(is_uint12(offset));
469 // Set the offset. 423 // Set the offset.
470 return (instr & ~Off12Mask) | offset; 424 return (instr & ~kOff12Mask) | offset;
471 } 425 }
472 426
473 427
474 Register Assembler::GetRd(Instr instr) { 428 Register Assembler::GetRd(Instr instr) {
475 Register reg; 429 Register reg;
476 reg.code_ = ((instr & kRdMask) >> kRdShift); 430 reg.code_ = Instruction::RdValue(instr);
431 return reg;
432 }
433
434
435 Register Assembler::GetRn(Instr instr) {
436 Register reg;
437 reg.code_ = Instruction::RnValue(instr);
438 return reg;
439 }
440
441
442 Register Assembler::GetRm(Instr instr) {
443 Register reg;
444 reg.code_ = Instruction::RmValue(instr);
477 return reg; 445 return reg;
478 } 446 }
479 447
480 448
481 bool Assembler::IsPush(Instr instr) { 449 bool Assembler::IsPush(Instr instr) {
482 return ((instr & ~kRdMask) == kPushRegPattern); 450 return ((instr & ~kRdMask) == kPushRegPattern);
483 } 451 }
484 452
485 453
486 bool Assembler::IsPop(Instr instr) { 454 bool Assembler::IsPop(Instr instr) {
(...skipping 17 matching lines...) Expand all
504 472
505 473
506 bool Assembler::IsLdrRegFpNegOffset(Instr instr) { 474 bool Assembler::IsLdrRegFpNegOffset(Instr instr) {
507 return ((instr & kLdrStrInstrTypeMask) == kLdrRegFpNegOffsetPattern); 475 return ((instr & kLdrStrInstrTypeMask) == kLdrRegFpNegOffsetPattern);
508 } 476 }
509 477
510 478
511 bool Assembler::IsLdrPcImmediateOffset(Instr instr) { 479 bool Assembler::IsLdrPcImmediateOffset(Instr instr) {
512 // Check the instruction is indeed a 480 // Check the instruction is indeed a
513 // ldr<cond> <Rd>, [pc +/- offset_12]. 481 // ldr<cond> <Rd>, [pc +/- offset_12].
514 return (instr & 0x0f7f0000) == 0x051f0000; 482 return (instr & (kLdrPCMask & ~kCondMask)) == 0x051f0000;
515 } 483 }
516 484
517 485
486 bool Assembler::IsTstImmediate(Instr instr) {
487 return (instr & (B27 | B26 | I | kOpCodeMask | S | kRdMask)) ==
488 (I | TST | S);
489 }
490
491
492 bool Assembler::IsCmpRegister(Instr instr) {
493 return (instr & (B27 | B26 | I | kOpCodeMask | S | kRdMask | B4)) ==
494 (CMP | S);
495 }
496
497
498 bool Assembler::IsCmpImmediate(Instr instr) {
499 return (instr & (B27 | B26 | I | kOpCodeMask | S | kRdMask)) ==
500 (I | CMP | S);
501 }
502
503
504 Register Assembler::GetCmpImmediateRegister(Instr instr) {
505 ASSERT(IsCmpImmediate(instr));
506 return GetRn(instr);
507 }
508
509
510 int Assembler::GetCmpImmediateRawImmediate(Instr instr) {
511 ASSERT(IsCmpImmediate(instr));
512 return instr & kOff12Mask;
513 }
514
518 // Labels refer to positions in the (to be) generated code. 515 // Labels refer to positions in the (to be) generated code.
519 // There are bound, linked, and unused labels. 516 // There are bound, linked, and unused labels.
520 // 517 //
521 // Bound labels refer to known positions in the already 518 // Bound labels refer to known positions in the already
522 // generated code. pos() is the position the label refers to. 519 // generated code. pos() is the position the label refers to.
523 // 520 //
524 // Linked labels refer to unknown positions in the code 521 // Linked labels refer to unknown positions in the code
525 // to be generated; pos() is the position of the last 522 // to be generated; pos() is the position of the last
526 // instruction using the label. 523 // instruction using the label.
527 524
528 525
// The link chain is terminated by a negative code position (must be aligned)
const int kEndOfChain = -4;
532 529
533 int Assembler::target_at(int pos) { 530 int Assembler::target_at(int pos) {
534 Instr instr = instr_at(pos); 531 Instr instr = instr_at(pos);
535 if ((instr & ~Imm24Mask) == 0) { 532 if ((instr & ~kImm24Mask) == 0) {
536 // Emitted label constant, not part of a branch. 533 // Emitted label constant, not part of a branch.
537 return instr - (Code::kHeaderSize - kHeapObjectTag); 534 return instr - (Code::kHeaderSize - kHeapObjectTag);
538 } 535 }
539 ASSERT((instr & 7*B25) == 5*B25); // b, bl, or blx imm24 536 ASSERT((instr & 7*B25) == 5*B25); // b, bl, or blx imm24
540 int imm26 = ((instr & Imm24Mask) << 8) >> 6; 537 int imm26 = ((instr & kImm24Mask) << 8) >> 6;
541 if ((instr & CondMask) == nv && (instr & B24) != 0) { 538 if ((Instruction::ConditionField(instr) == kSpecialCondition) &&
539 ((instr & B24) != 0)) {
542 // blx uses bit 24 to encode bit 2 of imm26 540 // blx uses bit 24 to encode bit 2 of imm26
543 imm26 += 2; 541 imm26 += 2;
544 } 542 }
545 return pos + kPcLoadDelta + imm26; 543 return pos + kPcLoadDelta + imm26;
546 } 544 }
547 545
548 546
549 void Assembler::target_at_put(int pos, int target_pos) { 547 void Assembler::target_at_put(int pos, int target_pos) {
550 Instr instr = instr_at(pos); 548 Instr instr = instr_at(pos);
551 if ((instr & ~Imm24Mask) == 0) { 549 if ((instr & ~kImm24Mask) == 0) {
552 ASSERT(target_pos == kEndOfChain || target_pos >= 0); 550 ASSERT(target_pos == kEndOfChain || target_pos >= 0);
553 // Emitted label constant, not part of a branch. 551 // Emitted label constant, not part of a branch.
554 // Make label relative to Code* of generated Code object. 552 // Make label relative to Code* of generated Code object.
555 instr_at_put(pos, target_pos + (Code::kHeaderSize - kHeapObjectTag)); 553 instr_at_put(pos, target_pos + (Code::kHeaderSize - kHeapObjectTag));
556 return; 554 return;
557 } 555 }
558 int imm26 = target_pos - (pos + kPcLoadDelta); 556 int imm26 = target_pos - (pos + kPcLoadDelta);
559 ASSERT((instr & 7*B25) == 5*B25); // b, bl, or blx imm24 557 ASSERT((instr & 7*B25) == 5*B25); // b, bl, or blx imm24
560 if ((instr & CondMask) == nv) { 558 if (Instruction::ConditionField(instr) == kSpecialCondition) {
561 // blx uses bit 24 to encode bit 2 of imm26 559 // blx uses bit 24 to encode bit 2 of imm26
562 ASSERT((imm26 & 1) == 0); 560 ASSERT((imm26 & 1) == 0);
563 instr = (instr & ~(B24 | Imm24Mask)) | ((imm26 & 2) >> 1)*B24; 561 instr = (instr & ~(B24 | kImm24Mask)) | ((imm26 & 2) >> 1)*B24;
564 } else { 562 } else {
565 ASSERT((imm26 & 3) == 0); 563 ASSERT((imm26 & 3) == 0);
566 instr &= ~Imm24Mask; 564 instr &= ~kImm24Mask;
567 } 565 }
568 int imm24 = imm26 >> 2; 566 int imm24 = imm26 >> 2;
569 ASSERT(is_int24(imm24)); 567 ASSERT(is_int24(imm24));
570 instr_at_put(pos, instr | (imm24 & Imm24Mask)); 568 instr_at_put(pos, instr | (imm24 & kImm24Mask));
571 } 569 }
572 570
573 571
574 void Assembler::print(Label* L) { 572 void Assembler::print(Label* L) {
575 if (L->is_unused()) { 573 if (L->is_unused()) {
576 PrintF("unused label\n"); 574 PrintF("unused label\n");
577 } else if (L->is_bound()) { 575 } else if (L->is_bound()) {
578 PrintF("bound label to %d\n", L->pos()); 576 PrintF("bound label to %d\n", L->pos());
579 } else if (L->is_linked()) { 577 } else if (L->is_linked()) {
580 Label l = *L; 578 Label l = *L;
581 PrintF("unbound label"); 579 PrintF("unbound label");
582 while (l.is_linked()) { 580 while (l.is_linked()) {
583 PrintF("@ %d ", l.pos()); 581 PrintF("@ %d ", l.pos());
584 Instr instr = instr_at(l.pos()); 582 Instr instr = instr_at(l.pos());
585 if ((instr & ~Imm24Mask) == 0) { 583 if ((instr & ~kImm24Mask) == 0) {
586 PrintF("value\n"); 584 PrintF("value\n");
587 } else { 585 } else {
588 ASSERT((instr & 7*B25) == 5*B25); // b, bl, or blx 586 ASSERT((instr & 7*B25) == 5*B25); // b, bl, or blx
589 int cond = instr & CondMask; 587 Condition cond = Instruction::ConditionField(instr);
590 const char* b; 588 const char* b;
591 const char* c; 589 const char* c;
592 if (cond == nv) { 590 if (cond == kSpecialCondition) {
593 b = "blx"; 591 b = "blx";
594 c = ""; 592 c = "";
595 } else { 593 } else {
596 if ((instr & B24) != 0) 594 if ((instr & B24) != 0)
597 b = "bl"; 595 b = "bl";
598 else 596 else
599 b = "b"; 597 b = "b";
600 598
601 switch (cond) { 599 switch (cond) {
602 case eq: c = "eq"; break; 600 case eq: c = "eq"; break;
(...skipping 121 matching lines...) Expand 10 before | Expand all | Expand 10 after
724 } 722 }
725 } 723 }
726 } 724 }
727 } else if ((*instr & kCmpCmnMask) == kCmpCmnPattern) { 725 } else if ((*instr & kCmpCmnMask) == kCmpCmnPattern) {
728 if (fits_shifter(-imm32, rotate_imm, immed_8, NULL)) { 726 if (fits_shifter(-imm32, rotate_imm, immed_8, NULL)) {
729 *instr ^= kCmpCmnFlip; 727 *instr ^= kCmpCmnFlip;
730 return true; 728 return true;
731 } 729 }
732 } else { 730 } else {
733 Instr alu_insn = (*instr & kALUMask); 731 Instr alu_insn = (*instr & kALUMask);
734 if (alu_insn == kAddPattern || 732 if (alu_insn == ADD ||
735 alu_insn == kSubPattern) { 733 alu_insn == SUB) {
736 if (fits_shifter(-imm32, rotate_imm, immed_8, NULL)) { 734 if (fits_shifter(-imm32, rotate_imm, immed_8, NULL)) {
737 *instr ^= kAddSubFlip; 735 *instr ^= kAddSubFlip;
738 return true; 736 return true;
739 } 737 }
740 } else if (alu_insn == kAndPattern || 738 } else if (alu_insn == AND ||
741 alu_insn == kBicPattern) { 739 alu_insn == BIC) {
742 if (fits_shifter(~imm32, rotate_imm, immed_8, NULL)) { 740 if (fits_shifter(~imm32, rotate_imm, immed_8, NULL)) {
743 *instr ^= kAndBicFlip; 741 *instr ^= kAndBicFlip;
744 return true; 742 return true;
745 } 743 }
746 } 744 }
747 } 745 }
748 } 746 }
749 return false; 747 return false;
750 } 748 }
751 749
(...skipping 23 matching lines...) Expand all
775 uint32_t dummy1, dummy2; 773 uint32_t dummy1, dummy2;
776 return fits_shifter(imm32_, &dummy1, &dummy2, NULL); 774 return fits_shifter(imm32_, &dummy1, &dummy2, NULL);
777 } 775 }
778 776
779 777
780 void Assembler::addrmod1(Instr instr, 778 void Assembler::addrmod1(Instr instr,
781 Register rn, 779 Register rn,
782 Register rd, 780 Register rd,
783 const Operand& x) { 781 const Operand& x) {
784 CheckBuffer(); 782 CheckBuffer();
785 ASSERT((instr & ~(CondMask | OpCodeMask | S)) == 0); 783 ASSERT((instr & ~(kCondMask | kOpCodeMask | S)) == 0);
786 if (!x.rm_.is_valid()) { 784 if (!x.rm_.is_valid()) {
787 // Immediate. 785 // Immediate.
788 uint32_t rotate_imm; 786 uint32_t rotate_imm;
789 uint32_t immed_8; 787 uint32_t immed_8;
790 if (x.must_use_constant_pool() || 788 if (x.must_use_constant_pool() ||
791 !fits_shifter(x.imm32_, &rotate_imm, &immed_8, &instr)) { 789 !fits_shifter(x.imm32_, &rotate_imm, &immed_8, &instr)) {
792 // The immediate operand cannot be encoded as a shifter operand, so load 790 // The immediate operand cannot be encoded as a shifter operand, so load
793 // it first to register ip and change the original instruction to use ip. 791 // it first to register ip and change the original instruction to use ip.
794 // However, if the original instruction is a 'mov rd, x' (not setting the 792 // However, if the original instruction is a 'mov rd, x' (not setting the
795 // condition code), then replace it with a 'ldr rd, [pc]'. 793 // condition code), then replace it with a 'ldr rd, [pc]'.
796 CHECK(!rn.is(ip)); // rn should never be ip, or will be trashed 794 CHECK(!rn.is(ip)); // rn should never be ip, or will be trashed
797 Condition cond = static_cast<Condition>(instr & CondMask); 795 Condition cond = Instruction::ConditionField(instr);
798 if ((instr & ~CondMask) == 13*B21) { // mov, S not set 796 if ((instr & ~kCondMask) == 13*B21) { // mov, S not set
799 if (x.must_use_constant_pool() || !CpuFeatures::IsSupported(ARMv7)) { 797 if (x.must_use_constant_pool() || !CpuFeatures::IsSupported(ARMv7)) {
800 RecordRelocInfo(x.rmode_, x.imm32_); 798 RecordRelocInfo(x.rmode_, x.imm32_);
801 ldr(rd, MemOperand(pc, 0), cond); 799 ldr(rd, MemOperand(pc, 0), cond);
802 } else { 800 } else {
803 // Will probably use movw, will certainly not use constant pool. 801 // Will probably use movw, will certainly not use constant pool.
804 mov(rd, Operand(x.imm32_ & 0xffff), LeaveCC, cond); 802 mov(rd, Operand(x.imm32_ & 0xffff), LeaveCC, cond);
805 movt(rd, static_cast<uint32_t>(x.imm32_) >> 16, cond); 803 movt(rd, static_cast<uint32_t>(x.imm32_) >> 16, cond);
806 } 804 }
807 } else { 805 } else {
808 // If this is not a mov or mvn instruction we may still be able to avoid 806 // If this is not a mov or mvn instruction we may still be able to avoid
(...skipping 20 matching lines...) Expand all
829 } 827 }
830 emit(instr | rn.code()*B16 | rd.code()*B12); 828 emit(instr | rn.code()*B16 | rd.code()*B12);
831 if (rn.is(pc) || x.rm_.is(pc)) { 829 if (rn.is(pc) || x.rm_.is(pc)) {
832 // Block constant pool emission for one instruction after reading pc. 830 // Block constant pool emission for one instruction after reading pc.
833 BlockConstPoolBefore(pc_offset() + kInstrSize); 831 BlockConstPoolBefore(pc_offset() + kInstrSize);
834 } 832 }
835 } 833 }
836 834
837 835
838 void Assembler::addrmod2(Instr instr, Register rd, const MemOperand& x) { 836 void Assembler::addrmod2(Instr instr, Register rd, const MemOperand& x) {
839 ASSERT((instr & ~(CondMask | B | L)) == B26); 837 ASSERT((instr & ~(kCondMask | B | L)) == B26);
840 int am = x.am_; 838 int am = x.am_;
841 if (!x.rm_.is_valid()) { 839 if (!x.rm_.is_valid()) {
842 // Immediate offset. 840 // Immediate offset.
843 int offset_12 = x.offset_; 841 int offset_12 = x.offset_;
844 if (offset_12 < 0) { 842 if (offset_12 < 0) {
845 offset_12 = -offset_12; 843 offset_12 = -offset_12;
846 am ^= U; 844 am ^= U;
847 } 845 }
848 if (!is_uint12(offset_12)) { 846 if (!is_uint12(offset_12)) {
849 // Immediate offset cannot be encoded, load it first to register ip 847 // Immediate offset cannot be encoded, load it first to register ip
850 // rn (and rd in a load) should never be ip, or will be trashed. 848 // rn (and rd in a load) should never be ip, or will be trashed.
851 ASSERT(!x.rn_.is(ip) && ((instr & L) == L || !rd.is(ip))); 849 ASSERT(!x.rn_.is(ip) && ((instr & L) == L || !rd.is(ip)));
852 mov(ip, Operand(x.offset_), LeaveCC, 850 mov(ip, Operand(x.offset_), LeaveCC, Instruction::ConditionField(instr));
853 static_cast<Condition>(instr & CondMask));
854 addrmod2(instr, rd, MemOperand(x.rn_, ip, x.am_)); 851 addrmod2(instr, rd, MemOperand(x.rn_, ip, x.am_));
855 return; 852 return;
856 } 853 }
857 ASSERT(offset_12 >= 0); // no masking needed 854 ASSERT(offset_12 >= 0); // no masking needed
858 instr |= offset_12; 855 instr |= offset_12;
859 } else { 856 } else {
860 // Register offset (shift_imm_ and shift_op_ are 0) or scaled 857 // Register offset (shift_imm_ and shift_op_ are 0) or scaled
861 // register offset the constructors make sure than both shift_imm_ 858 // register offset the constructors make sure than both shift_imm_
862 // and shift_op_ are initialized. 859 // and shift_op_ are initialized.
863 ASSERT(!x.rm_.is(pc)); 860 ASSERT(!x.rm_.is(pc));
864 instr |= B25 | x.shift_imm_*B7 | x.shift_op_ | x.rm_.code(); 861 instr |= B25 | x.shift_imm_*B7 | x.shift_op_ | x.rm_.code();
865 } 862 }
866 ASSERT((am & (P|W)) == P || !x.rn_.is(pc)); // no pc base with writeback 863 ASSERT((am & (P|W)) == P || !x.rn_.is(pc)); // no pc base with writeback
867 emit(instr | am | x.rn_.code()*B16 | rd.code()*B12); 864 emit(instr | am | x.rn_.code()*B16 | rd.code()*B12);
868 } 865 }
869 866
870 867
871 void Assembler::addrmod3(Instr instr, Register rd, const MemOperand& x) { 868 void Assembler::addrmod3(Instr instr, Register rd, const MemOperand& x) {
872 ASSERT((instr & ~(CondMask | L | S6 | H)) == (B4 | B7)); 869 ASSERT((instr & ~(kCondMask | L | S6 | H)) == (B4 | B7));
873 ASSERT(x.rn_.is_valid()); 870 ASSERT(x.rn_.is_valid());
874 int am = x.am_; 871 int am = x.am_;
875 if (!x.rm_.is_valid()) { 872 if (!x.rm_.is_valid()) {
876 // Immediate offset. 873 // Immediate offset.
877 int offset_8 = x.offset_; 874 int offset_8 = x.offset_;
878 if (offset_8 < 0) { 875 if (offset_8 < 0) {
879 offset_8 = -offset_8; 876 offset_8 = -offset_8;
880 am ^= U; 877 am ^= U;
881 } 878 }
882 if (!is_uint8(offset_8)) { 879 if (!is_uint8(offset_8)) {
883 // Immediate offset cannot be encoded, load it first to register ip 880 // Immediate offset cannot be encoded, load it first to register ip
884 // rn (and rd in a load) should never be ip, or will be trashed. 881 // rn (and rd in a load) should never be ip, or will be trashed.
885 ASSERT(!x.rn_.is(ip) && ((instr & L) == L || !rd.is(ip))); 882 ASSERT(!x.rn_.is(ip) && ((instr & L) == L || !rd.is(ip)));
886 mov(ip, Operand(x.offset_), LeaveCC, 883 mov(ip, Operand(x.offset_), LeaveCC, Instruction::ConditionField(instr));
887 static_cast<Condition>(instr & CondMask));
888 addrmod3(instr, rd, MemOperand(x.rn_, ip, x.am_)); 884 addrmod3(instr, rd, MemOperand(x.rn_, ip, x.am_));
889 return; 885 return;
890 } 886 }
891 ASSERT(offset_8 >= 0); // no masking needed 887 ASSERT(offset_8 >= 0); // no masking needed
892 instr |= B | (offset_8 >> 4)*B8 | (offset_8 & 0xf); 888 instr |= B | (offset_8 >> 4)*B8 | (offset_8 & 0xf);
893 } else if (x.shift_imm_ != 0) { 889 } else if (x.shift_imm_ != 0) {
894 // Scaled register offset not supported, load index first 890 // Scaled register offset not supported, load index first
895 // rn (and rd in a load) should never be ip, or will be trashed. 891 // rn (and rd in a load) should never be ip, or will be trashed.
896 ASSERT(!x.rn_.is(ip) && ((instr & L) == L || !rd.is(ip))); 892 ASSERT(!x.rn_.is(ip) && ((instr & L) == L || !rd.is(ip)));
897 mov(ip, Operand(x.rm_, x.shift_op_, x.shift_imm_), LeaveCC, 893 mov(ip, Operand(x.rm_, x.shift_op_, x.shift_imm_), LeaveCC,
898 static_cast<Condition>(instr & CondMask)); 894 Instruction::ConditionField(instr));
899 addrmod3(instr, rd, MemOperand(x.rn_, ip, x.am_)); 895 addrmod3(instr, rd, MemOperand(x.rn_, ip, x.am_));
900 return; 896 return;
901 } else { 897 } else {
902 // Register offset. 898 // Register offset.
903 ASSERT((am & (P|W)) == P || !x.rm_.is(pc)); // no pc index with writeback 899 ASSERT((am & (P|W)) == P || !x.rm_.is(pc)); // no pc index with writeback
904 instr |= x.rm_.code(); 900 instr |= x.rm_.code();
905 } 901 }
906 ASSERT((am & (P|W)) == P || !x.rn_.is(pc)); // no pc base with writeback 902 ASSERT((am & (P|W)) == P || !x.rn_.is(pc)); // no pc base with writeback
907 emit(instr | am | x.rn_.code()*B16 | rd.code()*B12); 903 emit(instr | am | x.rn_.code()*B16 | rd.code()*B12);
908 } 904 }
909 905
910 906
911 void Assembler::addrmod4(Instr instr, Register rn, RegList rl) { 907 void Assembler::addrmod4(Instr instr, Register rn, RegList rl) {
912 ASSERT((instr & ~(CondMask | P | U | W | L)) == B27); 908 ASSERT((instr & ~(kCondMask | P | U | W | L)) == B27);
913 ASSERT(rl != 0); 909 ASSERT(rl != 0);
914 ASSERT(!rn.is(pc)); 910 ASSERT(!rn.is(pc));
915 emit(instr | rn.code()*B16 | rl); 911 emit(instr | rn.code()*B16 | rl);
916 } 912 }
917 913
918 914
919 void Assembler::addrmod5(Instr instr, CRegister crd, const MemOperand& x) { 915 void Assembler::addrmod5(Instr instr, CRegister crd, const MemOperand& x) {
920 // Unindexed addressing is not encoded by this function. 916 // Unindexed addressing is not encoded by this function.
921 ASSERT_EQ((B27 | B26), 917 ASSERT_EQ((B27 | B26),
922 (instr & ~(CondMask | CoprocessorMask | P | U | N | W | L))); 918 (instr & ~(kCondMask | kCoprocessorMask | P | U | N | W | L)));
923 ASSERT(x.rn_.is_valid() && !x.rm_.is_valid()); 919 ASSERT(x.rn_.is_valid() && !x.rm_.is_valid());
924 int am = x.am_; 920 int am = x.am_;
925 int offset_8 = x.offset_; 921 int offset_8 = x.offset_;
926 ASSERT((offset_8 & 3) == 0); // offset must be an aligned word offset 922 ASSERT((offset_8 & 3) == 0); // offset must be an aligned word offset
927 offset_8 >>= 2; 923 offset_8 >>= 2;
928 if (offset_8 < 0) { 924 if (offset_8 < 0) {
929 offset_8 = -offset_8; 925 offset_8 = -offset_8;
930 am ^= U; 926 am ^= U;
931 } 927 }
932 ASSERT(is_uint8(offset_8)); // unsigned word offset must fit in a byte 928 ASSERT(is_uint8(offset_8)); // unsigned word offset must fit in a byte
(...skipping 42 matching lines...) Expand 10 before | Expand all | Expand 10 after
975 instr_at_put(at_offset, target_pos + (Code::kHeaderSize - kHeapObjectTag)); 971 instr_at_put(at_offset, target_pos + (Code::kHeaderSize - kHeapObjectTag));
976 } 972 }
977 } 973 }
978 974
979 975
980 // Branch instructions. 976 // Branch instructions.
981 void Assembler::b(int branch_offset, Condition cond) { 977 void Assembler::b(int branch_offset, Condition cond) {
982 ASSERT((branch_offset & 3) == 0); 978 ASSERT((branch_offset & 3) == 0);
983 int imm24 = branch_offset >> 2; 979 int imm24 = branch_offset >> 2;
984 ASSERT(is_int24(imm24)); 980 ASSERT(is_int24(imm24));
985 emit(cond | B27 | B25 | (imm24 & Imm24Mask)); 981 emit(cond | B27 | B25 | (imm24 & kImm24Mask));
986 982
987 if (cond == al) { 983 if (cond == al) {
988 // Dead code is a good location to emit the constant pool. 984 // Dead code is a good location to emit the constant pool.
989 CheckConstPool(false, false); 985 CheckConstPool(false, false);
990 } 986 }
991 } 987 }
992 988
993 989
994 void Assembler::bl(int branch_offset, Condition cond) { 990 void Assembler::bl(int branch_offset, Condition cond) {
995 positions_recorder()->WriteRecordedPositions(); 991 positions_recorder()->WriteRecordedPositions();
996 ASSERT((branch_offset & 3) == 0); 992 ASSERT((branch_offset & 3) == 0);
997 int imm24 = branch_offset >> 2; 993 int imm24 = branch_offset >> 2;
998 ASSERT(is_int24(imm24)); 994 ASSERT(is_int24(imm24));
999 emit(cond | B27 | B25 | B24 | (imm24 & Imm24Mask)); 995 emit(cond | B27 | B25 | B24 | (imm24 & kImm24Mask));
1000 } 996 }
1001 997
1002 998
1003 void Assembler::blx(int branch_offset) { // v5 and above 999 void Assembler::blx(int branch_offset) { // v5 and above
1004 positions_recorder()->WriteRecordedPositions(); 1000 positions_recorder()->WriteRecordedPositions();
1005 ASSERT((branch_offset & 1) == 0); 1001 ASSERT((branch_offset & 1) == 0);
1006 int h = ((branch_offset & 2) >> 1)*B24; 1002 int h = ((branch_offset & 2) >> 1)*B24;
1007 int imm24 = branch_offset >> 2; 1003 int imm24 = branch_offset >> 2;
1008 ASSERT(is_int24(imm24)); 1004 ASSERT(is_int24(imm24));
1009 emit(nv | B27 | B25 | h | (imm24 & Imm24Mask)); 1005 emit(kSpecialCondition | B27 | B25 | h | (imm24 & kImm24Mask));
1010 } 1006 }
1011 1007
1012 1008
1013 void Assembler::blx(Register target, Condition cond) { // v5 and above 1009 void Assembler::blx(Register target, Condition cond) { // v5 and above
1014 positions_recorder()->WriteRecordedPositions(); 1010 positions_recorder()->WriteRecordedPositions();
1015 ASSERT(!target.is(pc)); 1011 ASSERT(!target.is(pc));
1016 emit(cond | B24 | B21 | 15*B16 | 15*B12 | 15*B8 | 3*B4 | target.code()); 1012 emit(cond | B24 | B21 | 15*B16 | 15*B12 | 15*B8 | BLX | target.code());
1017 } 1013 }
1018 1014
1019 1015
1020 void Assembler::bx(Register target, Condition cond) { // v5 and above, plus v4t 1016 void Assembler::bx(Register target, Condition cond) { // v5 and above, plus v4t
1021 positions_recorder()->WriteRecordedPositions(); 1017 positions_recorder()->WriteRecordedPositions();
1022 ASSERT(!target.is(pc)); // use of pc is actually allowed, but discouraged 1018 ASSERT(!target.is(pc)); // use of pc is actually allowed, but discouraged
1023 emit(cond | B24 | B21 | 15*B16 | 15*B12 | 15*B8 | B4 | target.code()); 1019 emit(cond | B24 | B21 | 15*B16 | 15*B12 | 15*B8 | BX | target.code());
1024 } 1020 }
1025 1021
1026 1022
1027 // Data-processing instructions. 1023 // Data-processing instructions.
1028 1024
1029 void Assembler::and_(Register dst, Register src1, const Operand& src2, 1025 void Assembler::and_(Register dst, Register src1, const Operand& src2,
1030 SBit s, Condition cond) { 1026 SBit s, Condition cond) {
1031 addrmod1(cond | 0*B21 | s, src1, dst, src2); 1027 addrmod1(cond | AND | s, src1, dst, src2);
1032 } 1028 }
1033 1029
1034 1030
1035 void Assembler::eor(Register dst, Register src1, const Operand& src2, 1031 void Assembler::eor(Register dst, Register src1, const Operand& src2,
1036 SBit s, Condition cond) { 1032 SBit s, Condition cond) {
1037 addrmod1(cond | 1*B21 | s, src1, dst, src2); 1033 addrmod1(cond | EOR | s, src1, dst, src2);
1038 } 1034 }
1039 1035
1040 1036
1041 void Assembler::sub(Register dst, Register src1, const Operand& src2, 1037 void Assembler::sub(Register dst, Register src1, const Operand& src2,
1042 SBit s, Condition cond) { 1038 SBit s, Condition cond) {
1043 addrmod1(cond | 2*B21 | s, src1, dst, src2); 1039 addrmod1(cond | SUB | s, src1, dst, src2);
1044 } 1040 }
1045 1041
1046 1042
1047 void Assembler::rsb(Register dst, Register src1, const Operand& src2, 1043 void Assembler::rsb(Register dst, Register src1, const Operand& src2,
1048 SBit s, Condition cond) { 1044 SBit s, Condition cond) {
1049 addrmod1(cond | 3*B21 | s, src1, dst, src2); 1045 addrmod1(cond | RSB | s, src1, dst, src2);
1050 } 1046 }
1051 1047
1052 1048
1053 void Assembler::add(Register dst, Register src1, const Operand& src2, 1049 void Assembler::add(Register dst, Register src1, const Operand& src2,
1054 SBit s, Condition cond) { 1050 SBit s, Condition cond) {
1055 addrmod1(cond | 4*B21 | s, src1, dst, src2); 1051 addrmod1(cond | ADD | s, src1, dst, src2);
1056 1052
1057 // Eliminate pattern: push(r), pop() 1053 // Eliminate pattern: push(r), pop()
1058 // str(src, MemOperand(sp, 4, NegPreIndex), al); 1054 // str(src, MemOperand(sp, 4, NegPreIndex), al);
1059 // add(sp, sp, Operand(kPointerSize)); 1055 // add(sp, sp, Operand(kPointerSize));
1060 // Both instructions can be eliminated. 1056 // Both instructions can be eliminated.
1061 if (can_peephole_optimize(2) && 1057 if (can_peephole_optimize(2) &&
1062 // Pattern. 1058 // Pattern.
1063 instr_at(pc_ - 1 * kInstrSize) == kPopInstruction && 1059 instr_at(pc_ - 1 * kInstrSize) == kPopInstruction &&
1064 (instr_at(pc_ - 2 * kInstrSize) & ~RdMask) == kPushRegPattern) { 1060 (instr_at(pc_ - 2 * kInstrSize) & ~kRdMask) == kPushRegPattern) {
1065 pc_ -= 2 * kInstrSize; 1061 pc_ -= 2 * kInstrSize;
1066 if (FLAG_print_peephole_optimization) { 1062 if (FLAG_print_peephole_optimization) {
1067 PrintF("%x push(reg)/pop() eliminated\n", pc_offset()); 1063 PrintF("%x push(reg)/pop() eliminated\n", pc_offset());
1068 } 1064 }
1069 } 1065 }
1070 } 1066 }
1071 1067
1072 1068
1073 void Assembler::adc(Register dst, Register src1, const Operand& src2, 1069 void Assembler::adc(Register dst, Register src1, const Operand& src2,
1074 SBit s, Condition cond) { 1070 SBit s, Condition cond) {
1075 addrmod1(cond | 5*B21 | s, src1, dst, src2); 1071 addrmod1(cond | ADC | s, src1, dst, src2);
1076 } 1072 }
1077 1073
1078 1074
1079 void Assembler::sbc(Register dst, Register src1, const Operand& src2, 1075 void Assembler::sbc(Register dst, Register src1, const Operand& src2,
1080 SBit s, Condition cond) { 1076 SBit s, Condition cond) {
1081 addrmod1(cond | 6*B21 | s, src1, dst, src2); 1077 addrmod1(cond | SBC | s, src1, dst, src2);
1082 } 1078 }
1083 1079
1084 1080
1085 void Assembler::rsc(Register dst, Register src1, const Operand& src2, 1081 void Assembler::rsc(Register dst, Register src1, const Operand& src2,
1086 SBit s, Condition cond) { 1082 SBit s, Condition cond) {
1087 addrmod1(cond | 7*B21 | s, src1, dst, src2); 1083 addrmod1(cond | RSC | s, src1, dst, src2);
1088 } 1084 }
1089 1085
1090 1086
1091 void Assembler::tst(Register src1, const Operand& src2, Condition cond) { 1087 void Assembler::tst(Register src1, const Operand& src2, Condition cond) {
1092 addrmod1(cond | 8*B21 | S, src1, r0, src2); 1088 addrmod1(cond | TST | S, src1, r0, src2);
1093 } 1089 }
1094 1090
1095 1091
1096 void Assembler::teq(Register src1, const Operand& src2, Condition cond) { 1092 void Assembler::teq(Register src1, const Operand& src2, Condition cond) {
1097 addrmod1(cond | 9*B21 | S, src1, r0, src2); 1093 addrmod1(cond | TEQ | S, src1, r0, src2);
1098 } 1094 }
1099 1095
1100 1096
1101 void Assembler::cmp(Register src1, const Operand& src2, Condition cond) { 1097 void Assembler::cmp(Register src1, const Operand& src2, Condition cond) {
1102 addrmod1(cond | 10*B21 | S, src1, r0, src2); 1098 addrmod1(cond | CMP | S, src1, r0, src2);
1099 }
1100
1101
1102 void Assembler::cmp_raw_immediate(
1103 Register src, int raw_immediate, Condition cond) {
1104 ASSERT(is_uint12(raw_immediate));
1105 emit(cond | I | CMP | S | src.code() << 16 | raw_immediate);
1103 } 1106 }
1104 1107
1105 1108
1106 void Assembler::cmn(Register src1, const Operand& src2, Condition cond) { 1109 void Assembler::cmn(Register src1, const Operand& src2, Condition cond) {
1107 addrmod1(cond | 11*B21 | S, src1, r0, src2); 1110 addrmod1(cond | CMN | S, src1, r0, src2);
1108 } 1111 }
1109 1112
1110 1113
1111 void Assembler::orr(Register dst, Register src1, const Operand& src2, 1114 void Assembler::orr(Register dst, Register src1, const Operand& src2,
1112 SBit s, Condition cond) { 1115 SBit s, Condition cond) {
1113 addrmod1(cond | 12*B21 | s, src1, dst, src2); 1116 addrmod1(cond | ORR | s, src1, dst, src2);
1114 } 1117 }
1115 1118
1116 1119
1117 void Assembler::mov(Register dst, const Operand& src, SBit s, Condition cond) { 1120 void Assembler::mov(Register dst, const Operand& src, SBit s, Condition cond) {
1118 if (dst.is(pc)) { 1121 if (dst.is(pc)) {
1119 positions_recorder()->WriteRecordedPositions(); 1122 positions_recorder()->WriteRecordedPositions();
1120 } 1123 }
1121 // Don't allow nop instructions in the form mov rn, rn to be generated using 1124 // Don't allow nop instructions in the form mov rn, rn to be generated using
1122 // the mov instruction. They must be generated using nop(int/NopMarkerTypes) 1125 // the mov instruction. They must be generated using nop(int/NopMarkerTypes)
1123 // or MarkCode(int/NopMarkerTypes) pseudo instructions. 1126 // or MarkCode(int/NopMarkerTypes) pseudo instructions.
1124 ASSERT(!(src.is_reg() && src.rm().is(dst) && s == LeaveCC && cond == al)); 1127 ASSERT(!(src.is_reg() && src.rm().is(dst) && s == LeaveCC && cond == al));
1125 addrmod1(cond | 13*B21 | s, r0, dst, src); 1128 addrmod1(cond | MOV | s, r0, dst, src);
1126 } 1129 }
1127 1130
1128 1131
1129 void Assembler::movw(Register reg, uint32_t immediate, Condition cond) { 1132 void Assembler::movw(Register reg, uint32_t immediate, Condition cond) {
1130 ASSERT(immediate < 0x10000); 1133 ASSERT(immediate < 0x10000);
1131 mov(reg, Operand(immediate), LeaveCC, cond); 1134 mov(reg, Operand(immediate), LeaveCC, cond);
1132 } 1135 }
1133 1136
1134 1137
1135 void Assembler::movt(Register reg, uint32_t immediate, Condition cond) { 1138 void Assembler::movt(Register reg, uint32_t immediate, Condition cond) {
1136 emit(cond | 0x34*B20 | reg.code()*B12 | EncodeMovwImmediate(immediate)); 1139 emit(cond | 0x34*B20 | reg.code()*B12 | EncodeMovwImmediate(immediate));
1137 } 1140 }
1138 1141
1139 1142
1140 void Assembler::bic(Register dst, Register src1, const Operand& src2, 1143 void Assembler::bic(Register dst, Register src1, const Operand& src2,
1141 SBit s, Condition cond) { 1144 SBit s, Condition cond) {
1142 addrmod1(cond | 14*B21 | s, src1, dst, src2); 1145 addrmod1(cond | BIC | s, src1, dst, src2);
1143 } 1146 }
1144 1147
1145 1148
1146 void Assembler::mvn(Register dst, const Operand& src, SBit s, Condition cond) { 1149 void Assembler::mvn(Register dst, const Operand& src, SBit s, Condition cond) {
1147 addrmod1(cond | 15*B21 | s, r0, dst, src); 1150 addrmod1(cond | MVN | s, r0, dst, src);
1148 } 1151 }
1149 1152
1150 1153
1151 // Multiply instructions. 1154 // Multiply instructions.
1152 void Assembler::mla(Register dst, Register src1, Register src2, Register srcA, 1155 void Assembler::mla(Register dst, Register src1, Register src2, Register srcA,
1153 SBit s, Condition cond) { 1156 SBit s, Condition cond) {
1154 ASSERT(!dst.is(pc) && !src1.is(pc) && !src2.is(pc) && !srcA.is(pc)); 1157 ASSERT(!dst.is(pc) && !src1.is(pc) && !src2.is(pc) && !srcA.is(pc));
1155 emit(cond | A | s | dst.code()*B16 | srcA.code()*B12 | 1158 emit(cond | A | s | dst.code()*B16 | srcA.code()*B12 |
1156 src2.code()*B8 | B7 | B4 | src1.code()); 1159 src2.code()*B8 | B7 | B4 | src1.code());
1157 } 1160 }
(...skipping 57 matching lines...) Expand 10 before | Expand all | Expand 10 after
1215 emit(cond | B23 | s | dstH.code()*B16 | dstL.code()*B12 | 1218 emit(cond | B23 | s | dstH.code()*B16 | dstL.code()*B12 |
1216 src2.code()*B8 | B7 | B4 | src1.code()); 1219 src2.code()*B8 | B7 | B4 | src1.code());
1217 } 1220 }
1218 1221
1219 1222
1220 // Miscellaneous arithmetic instructions. 1223 // Miscellaneous arithmetic instructions.
1221 void Assembler::clz(Register dst, Register src, Condition cond) { 1224 void Assembler::clz(Register dst, Register src, Condition cond) {
1222 // v5 and above. 1225 // v5 and above.
1223 ASSERT(!dst.is(pc) && !src.is(pc)); 1226 ASSERT(!dst.is(pc) && !src.is(pc));
1224 emit(cond | B24 | B22 | B21 | 15*B16 | dst.code()*B12 | 1227 emit(cond | B24 | B22 | B21 | 15*B16 | dst.code()*B12 |
1225 15*B8 | B4 | src.code()); 1228 15*B8 | CLZ | src.code());
1226 } 1229 }
1227 1230
1228 1231
1229 // Saturating instructions. 1232 // Saturating instructions.
1230 1233
1231 // Unsigned saturate. 1234 // Unsigned saturate.
1232 void Assembler::usat(Register dst, 1235 void Assembler::usat(Register dst,
1233 int satpos, 1236 int satpos,
1234 const Operand& src, 1237 const Operand& src,
1235 Condition cond) { 1238 Condition cond) {
(...skipping 133 matching lines...) Expand 10 before | Expand all | Expand 10 after
1369 // str(ry, MemOperand(sp, 4, NegPreIndex), al) 1372 // str(ry, MemOperand(sp, 4, NegPreIndex), al)
1370 // ldr(rx, MemOperand(sp, 4, PostIndex), al) 1373 // ldr(rx, MemOperand(sp, 4, PostIndex), al)
1371 // Both instructions can be eliminated if ry = rx. 1374 // Both instructions can be eliminated if ry = rx.
1372 // If ry != rx, a register copy from ry to rx is inserted 1375 // If ry != rx, a register copy from ry to rx is inserted
1373 // after eliminating the push and the pop instructions. 1376 // after eliminating the push and the pop instructions.
1374 if (can_peephole_optimize(2)) { 1377 if (can_peephole_optimize(2)) {
1375 Instr push_instr = instr_at(pc_ - 2 * kInstrSize); 1378 Instr push_instr = instr_at(pc_ - 2 * kInstrSize);
1376 Instr pop_instr = instr_at(pc_ - 1 * kInstrSize); 1379 Instr pop_instr = instr_at(pc_ - 1 * kInstrSize);
1377 1380
1378 if (IsPush(push_instr) && IsPop(pop_instr)) { 1381 if (IsPush(push_instr) && IsPop(pop_instr)) {
1379 if ((pop_instr & kRdMask) != (push_instr & kRdMask)) { 1382 if (Instruction::RdValue(pop_instr) != Instruction::RdValue(push_instr)) {
1380 // For consecutive push and pop on different registers, 1383 // For consecutive push and pop on different registers,
1381 // we delete both the push & pop and insert a register move. 1384 // we delete both the push & pop and insert a register move.
1382 // push ry, pop rx --> mov rx, ry 1385 // push ry, pop rx --> mov rx, ry
1383 Register reg_pushed, reg_popped; 1386 Register reg_pushed, reg_popped;
1384 reg_pushed = GetRd(push_instr); 1387 reg_pushed = GetRd(push_instr);
1385 reg_popped = GetRd(pop_instr); 1388 reg_popped = GetRd(pop_instr);
1386 pc_ -= 2 * kInstrSize; 1389 pc_ -= 2 * kInstrSize;
1387 // Insert a mov instruction, which is better than a pair of push & pop 1390 // Insert a mov instruction, which is better than a pair of push & pop
1388 mov(reg_popped, reg_pushed); 1391 mov(reg_popped, reg_pushed);
1389 if (FLAG_print_peephole_optimization) { 1392 if (FLAG_print_peephole_optimization) {
(...skipping 60 matching lines...) Expand 10 before | Expand all | Expand 10 after
1450 } 1453 }
1451 1454
1452 if (can_peephole_optimize(3)) { 1455 if (can_peephole_optimize(3)) {
1453 Instr mem_write_instr = instr_at(pc_ - 3 * kInstrSize); 1456 Instr mem_write_instr = instr_at(pc_ - 3 * kInstrSize);
1454 Instr ldr_instr = instr_at(pc_ - 2 * kInstrSize); 1457 Instr ldr_instr = instr_at(pc_ - 2 * kInstrSize);
1455 Instr mem_read_instr = instr_at(pc_ - 1 * kInstrSize); 1458 Instr mem_read_instr = instr_at(pc_ - 1 * kInstrSize);
1456 if (IsPush(mem_write_instr) && 1459 if (IsPush(mem_write_instr) &&
1457 IsPop(mem_read_instr)) { 1460 IsPop(mem_read_instr)) {
1458 if ((IsLdrRegFpOffset(ldr_instr) || 1461 if ((IsLdrRegFpOffset(ldr_instr) ||
1459 IsLdrRegFpNegOffset(ldr_instr))) { 1462 IsLdrRegFpNegOffset(ldr_instr))) {
1460 if ((mem_write_instr & kRdMask) == 1463 if (Instruction::RdValue(mem_write_instr) ==
1461 (mem_read_instr & kRdMask)) { 1464 Instruction::RdValue(mem_read_instr)) {
1462 // Pattern: push & pop from/to same register, 1465 // Pattern: push & pop from/to same register,
1463 // with a fp+offset ldr in between 1466 // with a fp+offset ldr in between
1464 // 1467 //
1465 // The following: 1468 // The following:
1466 // str rx, [sp, #-4]! 1469 // str rx, [sp, #-4]!
1467 // ldr rz, [fp, #-24] 1470 // ldr rz, [fp, #-24]
1468 // ldr rx, [sp], #+4 1471 // ldr rx, [sp], #+4
1469 // 1472 //
1470 // Becomes: 1473 // Becomes:
1471 // if(rx == rz) 1474 // if(rx == rz)
1472 // delete all 1475 // delete all
1473 // else 1476 // else
1474 // ldr rz, [fp, #-24] 1477 // ldr rz, [fp, #-24]
1475 1478
1476 if ((mem_write_instr & kRdMask) == (ldr_instr & kRdMask)) { 1479 if (Instruction::RdValue(mem_write_instr) ==
1480 Instruction::RdValue(ldr_instr)) {
1477 pc_ -= 3 * kInstrSize; 1481 pc_ -= 3 * kInstrSize;
1478 } else { 1482 } else {
1479 pc_ -= 3 * kInstrSize; 1483 pc_ -= 3 * kInstrSize;
1480 // Reinsert back the ldr rz. 1484 // Reinsert back the ldr rz.
1481 emit(ldr_instr); 1485 emit(ldr_instr);
1482 } 1486 }
1483 if (FLAG_print_peephole_optimization) { 1487 if (FLAG_print_peephole_optimization) {
1484 PrintF("%x push/pop -dead ldr fp+offset in middle\n", pc_offset()); 1488 PrintF("%x push/pop -dead ldr fp+offset in middle\n", pc_offset());
1485 } 1489 }
1486 } else { 1490 } else {
1487 // Pattern: push & pop from/to different registers 1491 // Pattern: push & pop from/to different registers
1488 // with a fp+offset ldr in between 1492 // with a fp+offset ldr in between
1489 // 1493 //
1490 // The following: 1494 // The following:
1491 // str rx, [sp, #-4]! 1495 // str rx, [sp, #-4]!
1492 // ldr rz, [fp, #-24] 1496 // ldr rz, [fp, #-24]
1493 // ldr ry, [sp], #+4 1497 // ldr ry, [sp], #+4
1494 // 1498 //
1495 // Becomes: 1499 // Becomes:
1496 // if(ry == rz) 1500 // if(ry == rz)
1497 // mov ry, rx; 1501 // mov ry, rx;
1498 // else if(rx != rz) 1502 // else if(rx != rz)
1499 // ldr rz, [fp, #-24] 1503 // ldr rz, [fp, #-24]
1500 // mov ry, rx 1504 // mov ry, rx
1501 // else if((ry != rz) || (rx == rz)) becomes: 1505 // else if((ry != rz) || (rx == rz)) becomes:
1502 // mov ry, rx 1506 // mov ry, rx
1503 // ldr rz, [fp, #-24] 1507 // ldr rz, [fp, #-24]
1504 1508
1505 Register reg_pushed, reg_popped; 1509 Register reg_pushed, reg_popped;
1506 if ((mem_read_instr & kRdMask) == (ldr_instr & kRdMask)) { 1510 if (Instruction::RdValue(mem_read_instr) ==
1511 Instruction::RdValue(ldr_instr)) {
1507 reg_pushed = GetRd(mem_write_instr); 1512 reg_pushed = GetRd(mem_write_instr);
1508 reg_popped = GetRd(mem_read_instr); 1513 reg_popped = GetRd(mem_read_instr);
1509 pc_ -= 3 * kInstrSize; 1514 pc_ -= 3 * kInstrSize;
1510 mov(reg_popped, reg_pushed); 1515 mov(reg_popped, reg_pushed);
1511 } else if ((mem_write_instr & kRdMask) 1516 } else if (Instruction::RdValue(mem_write_instr) !=
1512 != (ldr_instr & kRdMask)) { 1517 Instruction::RdValue(ldr_instr)) {
1513 reg_pushed = GetRd(mem_write_instr); 1518 reg_pushed = GetRd(mem_write_instr);
1514 reg_popped = GetRd(mem_read_instr); 1519 reg_popped = GetRd(mem_read_instr);
1515 pc_ -= 3 * kInstrSize; 1520 pc_ -= 3 * kInstrSize;
1516 emit(ldr_instr); 1521 emit(ldr_instr);
1517 mov(reg_popped, reg_pushed); 1522 mov(reg_popped, reg_pushed);
1518 } else if (((mem_read_instr & kRdMask) 1523 } else if ((Instruction::RdValue(mem_read_instr) !=
1519 != (ldr_instr & kRdMask)) || 1524 Instruction::RdValue(ldr_instr)) ||
1520 ((mem_write_instr & kRdMask) 1525 (Instruction::RdValue(mem_write_instr) ==
1521 == (ldr_instr & kRdMask)) ) { 1526 Instruction::RdValue(ldr_instr))) {
1522 reg_pushed = GetRd(mem_write_instr); 1527 reg_pushed = GetRd(mem_write_instr);
1523 reg_popped = GetRd(mem_read_instr); 1528 reg_popped = GetRd(mem_read_instr);
1524 pc_ -= 3 * kInstrSize; 1529 pc_ -= 3 * kInstrSize;
1525 mov(reg_popped, reg_pushed); 1530 mov(reg_popped, reg_pushed);
1526 emit(ldr_instr); 1531 emit(ldr_instr);
1527 } 1532 }
1528 if (FLAG_print_peephole_optimization) { 1533 if (FLAG_print_peephole_optimization) {
1529 PrintF("%x push/pop (ldr fp+off in middle)\n", pc_offset()); 1534 PrintF("%x push/pop (ldr fp+off in middle)\n", pc_offset());
1530 } 1535 }
1531 } 1536 }
(...skipping 101 matching lines...) Expand 10 before | Expand all | Expand 10 after
1633 Condition cond) { 1638 Condition cond) {
1634 addrmod4(cond | B27 | am, base, src); 1639 addrmod4(cond | B27 | am, base, src);
1635 } 1640 }
1636 1641
1637 1642
1638 // Exception-generating instructions and debugging support. 1643 // Exception-generating instructions and debugging support.
1639 // Stops with a non-negative code less than kNumOfWatchedStops support 1644 // Stops with a non-negative code less than kNumOfWatchedStops support
1640 // enabling/disabling and a counter feature. See simulator-arm.h . 1645 // enabling/disabling and a counter feature. See simulator-arm.h .
1641 void Assembler::stop(const char* msg, Condition cond, int32_t code) { 1646 void Assembler::stop(const char* msg, Condition cond, int32_t code) {
1642 #ifndef __arm__ 1647 #ifndef __arm__
1643 // See constants-arm.h SoftwareInterruptCodes. Unluckily the Assembler and
1644 // Simulator do not share constants declaration.
1645 ASSERT(code >= kDefaultStopCode); 1648 ASSERT(code >= kDefaultStopCode);
1646 static const uint32_t kStopInterruptCode = 1 << 23;
1647 static const uint32_t kMaxStopCode = kStopInterruptCode - 1;
1648 // The Simulator will handle the stop instruction and get the message address. 1649 // The Simulator will handle the stop instruction and get the message address.
1649 // It expects to find the address just after the svc instruction. 1650 // It expects to find the address just after the svc instruction.
1650 BlockConstPoolFor(2); 1651 BlockConstPoolFor(2);
1651 if (code >= 0) { 1652 if (code >= 0) {
1652 svc(kStopInterruptCode + code, cond); 1653 svc(kStopCode + code, cond);
1653 } else { 1654 } else {
1654 svc(kStopInterruptCode + kMaxStopCode, cond); 1655 svc(kStopCode + kMaxStopCode, cond);
1655 } 1656 }
1656 emit(reinterpret_cast<Instr>(msg)); 1657 emit(reinterpret_cast<Instr>(msg));
1657 #else // def __arm__ 1658 #else // def __arm__
1658 #ifdef CAN_USE_ARMV5_INSTRUCTIONS 1659 #ifdef CAN_USE_ARMV5_INSTRUCTIONS
1659 ASSERT(cond == al); 1660 if (cond != al) {
1660 bkpt(0); 1661 Label skip;
1662 b(&skip, NegateCondition(cond));
1663 bkpt(0);
1664 bind(&skip);
1665 } else {
1666 bkpt(0);
1667 }
1661 #else // ndef CAN_USE_ARMV5_INSTRUCTIONS 1668 #else // ndef CAN_USE_ARMV5_INSTRUCTIONS
1662 svc(0x9f0001, cond); 1669 svc(0x9f0001, cond);
1663 #endif // ndef CAN_USE_ARMV5_INSTRUCTIONS 1670 #endif // ndef CAN_USE_ARMV5_INSTRUCTIONS
1664 #endif // def __arm__ 1671 #endif // def __arm__
1665 } 1672 }
1666 1673
1667 1674
1668 void Assembler::bkpt(uint32_t imm16) { // v5 and above 1675 void Assembler::bkpt(uint32_t imm16) { // v5 and above
1669 ASSERT(is_uint16(imm16)); 1676 ASSERT(is_uint16(imm16));
1670 emit(al | B24 | B21 | (imm16 >> 4)*B8 | 7*B4 | (imm16 & 0xf)); 1677 emit(al | B24 | B21 | (imm16 >> 4)*B8 | BKPT | (imm16 & 0xf));
1671 } 1678 }
1672 1679
1673 1680
1674 void Assembler::svc(uint32_t imm24, Condition cond) { 1681 void Assembler::svc(uint32_t imm24, Condition cond) {
1675 ASSERT(is_uint24(imm24)); 1682 ASSERT(is_uint24(imm24));
1676 emit(cond | 15*B24 | imm24); 1683 emit(cond | 15*B24 | imm24);
1677 } 1684 }
1678 1685
1679 1686
1680 // Coprocessor instructions. 1687 // Coprocessor instructions.
1681 void Assembler::cdp(Coprocessor coproc, 1688 void Assembler::cdp(Coprocessor coproc,
1682 int opcode_1, 1689 int opcode_1,
1683 CRegister crd, 1690 CRegister crd,
1684 CRegister crn, 1691 CRegister crn,
1685 CRegister crm, 1692 CRegister crm,
1686 int opcode_2, 1693 int opcode_2,
1687 Condition cond) { 1694 Condition cond) {
1688 ASSERT(is_uint4(opcode_1) && is_uint3(opcode_2)); 1695 ASSERT(is_uint4(opcode_1) && is_uint3(opcode_2));
1689 emit(cond | B27 | B26 | B25 | (opcode_1 & 15)*B20 | crn.code()*B16 | 1696 emit(cond | B27 | B26 | B25 | (opcode_1 & 15)*B20 | crn.code()*B16 |
1690 crd.code()*B12 | coproc*B8 | (opcode_2 & 7)*B5 | crm.code()); 1697 crd.code()*B12 | coproc*B8 | (opcode_2 & 7)*B5 | crm.code());
1691 } 1698 }
1692 1699
1693 1700
1694 void Assembler::cdp2(Coprocessor coproc, 1701 void Assembler::cdp2(Coprocessor coproc,
1695 int opcode_1, 1702 int opcode_1,
1696 CRegister crd, 1703 CRegister crd,
1697 CRegister crn, 1704 CRegister crn,
1698 CRegister crm, 1705 CRegister crm,
1699 int opcode_2) { // v5 and above 1706 int opcode_2) { // v5 and above
1700 cdp(coproc, opcode_1, crd, crn, crm, opcode_2, static_cast<Condition>(nv)); 1707 cdp(coproc, opcode_1, crd, crn, crm, opcode_2, kSpecialCondition);
1701 } 1708 }
1702 1709
1703 1710
1704 void Assembler::mcr(Coprocessor coproc, 1711 void Assembler::mcr(Coprocessor coproc,
1705 int opcode_1, 1712 int opcode_1,
1706 Register rd, 1713 Register rd,
1707 CRegister crn, 1714 CRegister crn,
1708 CRegister crm, 1715 CRegister crm,
1709 int opcode_2, 1716 int opcode_2,
1710 Condition cond) { 1717 Condition cond) {
1711 ASSERT(is_uint3(opcode_1) && is_uint3(opcode_2)); 1718 ASSERT(is_uint3(opcode_1) && is_uint3(opcode_2));
1712 emit(cond | B27 | B26 | B25 | (opcode_1 & 7)*B21 | crn.code()*B16 | 1719 emit(cond | B27 | B26 | B25 | (opcode_1 & 7)*B21 | crn.code()*B16 |
1713 rd.code()*B12 | coproc*B8 | (opcode_2 & 7)*B5 | B4 | crm.code()); 1720 rd.code()*B12 | coproc*B8 | (opcode_2 & 7)*B5 | B4 | crm.code());
1714 } 1721 }
1715 1722
1716 1723
1717 void Assembler::mcr2(Coprocessor coproc, 1724 void Assembler::mcr2(Coprocessor coproc,
1718 int opcode_1, 1725 int opcode_1,
1719 Register rd, 1726 Register rd,
1720 CRegister crn, 1727 CRegister crn,
1721 CRegister crm, 1728 CRegister crm,
1722 int opcode_2) { // v5 and above 1729 int opcode_2) { // v5 and above
1723 mcr(coproc, opcode_1, rd, crn, crm, opcode_2, static_cast<Condition>(nv)); 1730 mcr(coproc, opcode_1, rd, crn, crm, opcode_2, kSpecialCondition);
1724 } 1731 }
1725 1732
1726 1733
1727 void Assembler::mrc(Coprocessor coproc, 1734 void Assembler::mrc(Coprocessor coproc,
1728 int opcode_1, 1735 int opcode_1,
1729 Register rd, 1736 Register rd,
1730 CRegister crn, 1737 CRegister crn,
1731 CRegister crm, 1738 CRegister crm,
1732 int opcode_2, 1739 int opcode_2,
1733 Condition cond) { 1740 Condition cond) {
1734 ASSERT(is_uint3(opcode_1) && is_uint3(opcode_2)); 1741 ASSERT(is_uint3(opcode_1) && is_uint3(opcode_2));
1735 emit(cond | B27 | B26 | B25 | (opcode_1 & 7)*B21 | L | crn.code()*B16 | 1742 emit(cond | B27 | B26 | B25 | (opcode_1 & 7)*B21 | L | crn.code()*B16 |
1736 rd.code()*B12 | coproc*B8 | (opcode_2 & 7)*B5 | B4 | crm.code()); 1743 rd.code()*B12 | coproc*B8 | (opcode_2 & 7)*B5 | B4 | crm.code());
1737 } 1744 }
1738 1745
1739 1746
1740 void Assembler::mrc2(Coprocessor coproc, 1747 void Assembler::mrc2(Coprocessor coproc,
1741 int opcode_1, 1748 int opcode_1,
1742 Register rd, 1749 Register rd,
1743 CRegister crn, 1750 CRegister crn,
1744 CRegister crm, 1751 CRegister crm,
1745 int opcode_2) { // v5 and above 1752 int opcode_2) { // v5 and above
1746 mrc(coproc, opcode_1, rd, crn, crm, opcode_2, static_cast<Condition>(nv)); 1753 mrc(coproc, opcode_1, rd, crn, crm, opcode_2, kSpecialCondition);
1747 } 1754 }
1748 1755
1749 1756
1750 void Assembler::ldc(Coprocessor coproc, 1757 void Assembler::ldc(Coprocessor coproc,
1751 CRegister crd, 1758 CRegister crd,
1752 const MemOperand& src, 1759 const MemOperand& src,
1753 LFlag l, 1760 LFlag l,
1754 Condition cond) { 1761 Condition cond) {
1755 addrmod5(cond | B27 | B26 | l | L | coproc*B8, crd, src); 1762 addrmod5(cond | B27 | B26 | l | L | coproc*B8, crd, src);
1756 } 1763 }
1757 1764
1758 1765
1759 void Assembler::ldc(Coprocessor coproc, 1766 void Assembler::ldc(Coprocessor coproc,
1760 CRegister crd, 1767 CRegister crd,
1761 Register rn, 1768 Register rn,
1762 int option, 1769 int option,
1763 LFlag l, 1770 LFlag l,
1764 Condition cond) { 1771 Condition cond) {
1765 // Unindexed addressing. 1772 // Unindexed addressing.
1766 ASSERT(is_uint8(option)); 1773 ASSERT(is_uint8(option));
1767 emit(cond | B27 | B26 | U | l | L | rn.code()*B16 | crd.code()*B12 | 1774 emit(cond | B27 | B26 | U | l | L | rn.code()*B16 | crd.code()*B12 |
1768 coproc*B8 | (option & 255)); 1775 coproc*B8 | (option & 255));
1769 } 1776 }
1770 1777
1771 1778
1772 void Assembler::ldc2(Coprocessor coproc, 1779 void Assembler::ldc2(Coprocessor coproc,
1773 CRegister crd, 1780 CRegister crd,
1774 const MemOperand& src, 1781 const MemOperand& src,
1775 LFlag l) { // v5 and above 1782 LFlag l) { // v5 and above
1776 ldc(coproc, crd, src, l, static_cast<Condition>(nv)); 1783 ldc(coproc, crd, src, l, kSpecialCondition);
1777 } 1784 }
1778 1785
1779 1786
1780 void Assembler::ldc2(Coprocessor coproc, 1787 void Assembler::ldc2(Coprocessor coproc,
1781 CRegister crd, 1788 CRegister crd,
1782 Register rn, 1789 Register rn,
1783 int option, 1790 int option,
1784 LFlag l) { // v5 and above 1791 LFlag l) { // v5 and above
1785 ldc(coproc, crd, rn, option, l, static_cast<Condition>(nv)); 1792 ldc(coproc, crd, rn, option, l, kSpecialCondition);
1786 } 1793 }
1787 1794
1788 1795
1789 void Assembler::stc(Coprocessor coproc, 1796 void Assembler::stc(Coprocessor coproc,
1790 CRegister crd, 1797 CRegister crd,
1791 const MemOperand& dst, 1798 const MemOperand& dst,
1792 LFlag l, 1799 LFlag l,
1793 Condition cond) { 1800 Condition cond) {
1794 addrmod5(cond | B27 | B26 | l | coproc*B8, crd, dst); 1801 addrmod5(cond | B27 | B26 | l | coproc*B8, crd, dst);
1795 } 1802 }
1796 1803
1797 1804
1798 void Assembler::stc(Coprocessor coproc, 1805 void Assembler::stc(Coprocessor coproc,
1799 CRegister crd, 1806 CRegister crd,
1800 Register rn, 1807 Register rn,
1801 int option, 1808 int option,
1802 LFlag l, 1809 LFlag l,
1803 Condition cond) { 1810 Condition cond) {
1804 // Unindexed addressing. 1811 // Unindexed addressing.
1805 ASSERT(is_uint8(option)); 1812 ASSERT(is_uint8(option));
1806 emit(cond | B27 | B26 | U | l | rn.code()*B16 | crd.code()*B12 | 1813 emit(cond | B27 | B26 | U | l | rn.code()*B16 | crd.code()*B12 |
1807 coproc*B8 | (option & 255)); 1814 coproc*B8 | (option & 255));
1808 } 1815 }
1809 1816
1810 1817
1811 void Assembler::stc2(Coprocessor 1818 void Assembler::stc2(Coprocessor
1812 coproc, CRegister crd, 1819 coproc, CRegister crd,
1813 const MemOperand& dst, 1820 const MemOperand& dst,
1814 LFlag l) { // v5 and above 1821 LFlag l) { // v5 and above
1815 stc(coproc, crd, dst, l, static_cast<Condition>(nv)); 1822 stc(coproc, crd, dst, l, kSpecialCondition);
1816 } 1823 }
1817 1824
1818 1825
1819 void Assembler::stc2(Coprocessor coproc, 1826 void Assembler::stc2(Coprocessor coproc,
1820 CRegister crd, 1827 CRegister crd,
1821 Register rn, 1828 Register rn,
1822 int option, 1829 int option,
1823 LFlag l) { // v5 and above 1830 LFlag l) { // v5 and above
1824 stc(coproc, crd, rn, option, l, static_cast<Condition>(nv)); 1831 stc(coproc, crd, rn, option, l, kSpecialCondition);
1825 } 1832 }
1826 1833
1827 1834
1828 // Support for VFP. 1835 // Support for VFP.
1829 1836
1830 void Assembler::vldr(const DwVfpRegister dst, 1837 void Assembler::vldr(const DwVfpRegister dst,
1831 const Register base, 1838 const Register base,
1832 int offset, 1839 int offset,
1833 const Condition cond) { 1840 const Condition cond) {
1834 // Ddst = MEM(Rbase + offset). 1841 // Ddst = MEM(Rbase + offset).
(...skipping 329 matching lines...) Expand 10 before | Expand all | Expand 10 after
2164 *vm = reg_code & 0x0F; 2171 *vm = reg_code & 0x0F;
2165 } 2172 }
2166 } 2173 }
2167 2174
2168 2175
2169 // Encode vcvt.src_type.dst_type instruction. 2176 // Encode vcvt.src_type.dst_type instruction.
2170 static Instr EncodeVCVT(const VFPType dst_type, 2177 static Instr EncodeVCVT(const VFPType dst_type,
2171 const int dst_code, 2178 const int dst_code,
2172 const VFPType src_type, 2179 const VFPType src_type,
2173 const int src_code, 2180 const int src_code,
2174 Assembler::ConversionMode mode, 2181 VFPConversionMode mode,
2175 const Condition cond) { 2182 const Condition cond) {
2176 ASSERT(src_type != dst_type); 2183 ASSERT(src_type != dst_type);
2177 int D, Vd, M, Vm; 2184 int D, Vd, M, Vm;
2178 SplitRegCode(src_type, src_code, &Vm, &M); 2185 SplitRegCode(src_type, src_code, &Vm, &M);
2179 SplitRegCode(dst_type, dst_code, &Vd, &D); 2186 SplitRegCode(dst_type, dst_code, &Vd, &D);
2180 2187
2181 if (IsIntegerVFPType(dst_type) || IsIntegerVFPType(src_type)) { 2188 if (IsIntegerVFPType(dst_type) || IsIntegerVFPType(src_type)) {
2182 // Conversion between IEEE floating point and 32-bit integer. 2189 // Conversion between IEEE floating point and 32-bit integer.
2183 // Instruction details available in ARM DDI 0406B, A8.6.295. 2190 // Instruction details available in ARM DDI 0406B, A8.6.295.
2184 // cond(31-28) | 11101(27-23)| D(22) | 11(21-20) | 1(19) | opc2(18-16) | 2191 // cond(31-28) | 11101(27-23)| D(22) | 11(21-20) | 1(19) | opc2(18-16) |
(...skipping 22 matching lines...) Expand all
2207 // Vd(15-12) | 101(11-9) | sz(8) | 1(7) | 1(6) | M(5) | 0(4) | Vm(3-0) 2214 // Vd(15-12) | 101(11-9) | sz(8) | 1(7) | 1(6) | M(5) | 0(4) | Vm(3-0)
2208 int sz = IsDoubleVFPType(src_type) ? 0x1 : 0x0; 2215 int sz = IsDoubleVFPType(src_type) ? 0x1 : 0x0;
2209 return (cond | 0xE*B24 | B23 | D*B22 | 0x3*B20 | 0x7*B16 | 2216 return (cond | 0xE*B24 | B23 | D*B22 | 0x3*B20 | 0x7*B16 |
2210 Vd*B12 | 0x5*B9 | sz*B8 | B7 | B6 | M*B5 | Vm); 2217 Vd*B12 | 0x5*B9 | sz*B8 | B7 | B6 | M*B5 | Vm);
2211 } 2218 }
2212 } 2219 }
2213 2220
2214 2221
2215 void Assembler::vcvt_f64_s32(const DwVfpRegister dst, 2222 void Assembler::vcvt_f64_s32(const DwVfpRegister dst,
2216 const SwVfpRegister src, 2223 const SwVfpRegister src,
2217 ConversionMode mode, 2224 VFPConversionMode mode,
2218 const Condition cond) { 2225 const Condition cond) {
2219 ASSERT(CpuFeatures::IsEnabled(VFP3)); 2226 ASSERT(CpuFeatures::IsEnabled(VFP3));
2220 emit(EncodeVCVT(F64, dst.code(), S32, src.code(), mode, cond)); 2227 emit(EncodeVCVT(F64, dst.code(), S32, src.code(), mode, cond));
2221 } 2228 }
2222 2229
2223 2230
2224 void Assembler::vcvt_f32_s32(const SwVfpRegister dst, 2231 void Assembler::vcvt_f32_s32(const SwVfpRegister dst,
2225 const SwVfpRegister src, 2232 const SwVfpRegister src,
2226 ConversionMode mode, 2233 VFPConversionMode mode,
2227 const Condition cond) { 2234 const Condition cond) {
2228 ASSERT(CpuFeatures::IsEnabled(VFP3)); 2235 ASSERT(CpuFeatures::IsEnabled(VFP3));
2229 emit(EncodeVCVT(F32, dst.code(), S32, src.code(), mode, cond)); 2236 emit(EncodeVCVT(F32, dst.code(), S32, src.code(), mode, cond));
2230 } 2237 }
2231 2238
2232 2239
2233 void Assembler::vcvt_f64_u32(const DwVfpRegister dst, 2240 void Assembler::vcvt_f64_u32(const DwVfpRegister dst,
2234 const SwVfpRegister src, 2241 const SwVfpRegister src,
2235 ConversionMode mode, 2242 VFPConversionMode mode,
2236 const Condition cond) { 2243 const Condition cond) {
2237 ASSERT(CpuFeatures::IsEnabled(VFP3)); 2244 ASSERT(CpuFeatures::IsEnabled(VFP3));
2238 emit(EncodeVCVT(F64, dst.code(), U32, src.code(), mode, cond)); 2245 emit(EncodeVCVT(F64, dst.code(), U32, src.code(), mode, cond));
2239 } 2246 }
2240 2247
2241 2248
2242 void Assembler::vcvt_s32_f64(const SwVfpRegister dst, 2249 void Assembler::vcvt_s32_f64(const SwVfpRegister dst,
2243 const DwVfpRegister src, 2250 const DwVfpRegister src,
2244 ConversionMode mode, 2251 VFPConversionMode mode,
2245 const Condition cond) { 2252 const Condition cond) {
2246 ASSERT(CpuFeatures::IsEnabled(VFP3)); 2253 ASSERT(CpuFeatures::IsEnabled(VFP3));
2247 emit(EncodeVCVT(S32, dst.code(), F64, src.code(), mode, cond)); 2254 emit(EncodeVCVT(S32, dst.code(), F64, src.code(), mode, cond));
2248 } 2255 }
2249 2256
2250 2257
2251 void Assembler::vcvt_u32_f64(const SwVfpRegister dst, 2258 void Assembler::vcvt_u32_f64(const SwVfpRegister dst,
2252 const DwVfpRegister src, 2259 const DwVfpRegister src,
2253 ConversionMode mode, 2260 VFPConversionMode mode,
2254 const Condition cond) { 2261 const Condition cond) {
2255 ASSERT(CpuFeatures::IsEnabled(VFP3)); 2262 ASSERT(CpuFeatures::IsEnabled(VFP3));
2256 emit(EncodeVCVT(U32, dst.code(), F64, src.code(), mode, cond)); 2263 emit(EncodeVCVT(U32, dst.code(), F64, src.code(), mode, cond));
2257 } 2264 }
2258 2265
2259 2266
2260 void Assembler::vcvt_f64_f32(const DwVfpRegister dst, 2267 void Assembler::vcvt_f64_f32(const DwVfpRegister dst,
2261 const SwVfpRegister src, 2268 const SwVfpRegister src,
2262 ConversionMode mode, 2269 VFPConversionMode mode,
2263 const Condition cond) { 2270 const Condition cond) {
2264 ASSERT(CpuFeatures::IsEnabled(VFP3)); 2271 ASSERT(CpuFeatures::IsEnabled(VFP3));
2265 emit(EncodeVCVT(F64, dst.code(), F32, src.code(), mode, cond)); 2272 emit(EncodeVCVT(F64, dst.code(), F32, src.code(), mode, cond));
2266 } 2273 }
2267 2274
2268 2275
2269 void Assembler::vcvt_f32_f64(const SwVfpRegister dst, 2276 void Assembler::vcvt_f32_f64(const SwVfpRegister dst,
2270 const DwVfpRegister src, 2277 const DwVfpRegister src,
2271 ConversionMode mode, 2278 VFPConversionMode mode,
2272 const Condition cond) { 2279 const Condition cond) {
2273 ASSERT(CpuFeatures::IsEnabled(VFP3)); 2280 ASSERT(CpuFeatures::IsEnabled(VFP3));
2274 emit(EncodeVCVT(F32, dst.code(), F64, src.code(), mode, cond)); 2281 emit(EncodeVCVT(F32, dst.code(), F64, src.code(), mode, cond));
2275 } 2282 }
2276 2283
2277 2284
2285 void Assembler::vabs(const DwVfpRegister dst,
2286 const DwVfpRegister src,
2287 const Condition cond) {
2288 emit(cond | 0xE*B24 | 0xB*B20 | dst.code()*B12 |
2289 0x5*B9 | B8 | 0x3*B6 | src.code());
2290 }
2291
2292
2278 void Assembler::vadd(const DwVfpRegister dst, 2293 void Assembler::vadd(const DwVfpRegister dst,
2279 const DwVfpRegister src1, 2294 const DwVfpRegister src1,
2280 const DwVfpRegister src2, 2295 const DwVfpRegister src2,
2281 const Condition cond) { 2296 const Condition cond) {
2282 // Dd = vadd(Dn, Dm) double precision floating point addition. 2297 // Dd = vadd(Dn, Dm) double precision floating point addition.
2283 // Dd = D:Vd; Dm=M:Vm; Dn=N:Vm. 2298 // Dd = D:Vd; Dm=M:Vm; Dn=N:Vm.
2284 // Instruction details available in ARM DDI 0406A, A8-536. 2299 // Instruction details available in ARM DDI 0406A, A8-536.
2285 // cond(31-28) | 11100(27-23)| D=?(22) | 11(21-20) | Vn(19-16) | 2300 // cond(31-28) | 11100(27-23)| D=?(22) | 11(21-20) | Vn(19-16) |
2286 // Vd(15-12) | 101(11-9) | sz(8)=1 | N(7)=0 | 0(6) | M=?(5) | 0(4) | Vm(3-0) 2301 // Vd(15-12) | 101(11-9) | sz(8)=1 | N(7)=0 | 0(6) | M=?(5) | 0(4) | Vm(3-0)
2287 ASSERT(CpuFeatures::IsEnabled(VFP3)); 2302 ASSERT(CpuFeatures::IsEnabled(VFP3));
(...skipping 42 matching lines...) Expand 10 before | Expand all | Expand 10 after
2330 // cond(31-28) | 11101(27-23)| D=?(22) | 00(21-20) | Vn(19-16) | 2345 // cond(31-28) | 11101(27-23)| D=?(22) | 00(21-20) | Vn(19-16) |
2331 // Vd(15-12) | 101(11-9) | sz(8)=1 | N(7)=? | 0(6) | M=?(5) | 0(4) | Vm(3-0) 2346 // Vd(15-12) | 101(11-9) | sz(8)=1 | N(7)=? | 0(6) | M=?(5) | 0(4) | Vm(3-0)
2332 ASSERT(CpuFeatures::IsEnabled(VFP3)); 2347 ASSERT(CpuFeatures::IsEnabled(VFP3));
2333 emit(cond | 0xE*B24 | B23 | src1.code()*B16 | 2348 emit(cond | 0xE*B24 | B23 | src1.code()*B16 |
2334 dst.code()*B12 | 0x5*B9 | B8 | src2.code()); 2349 dst.code()*B12 | 0x5*B9 | B8 | src2.code());
2335 } 2350 }
2336 2351
2337 2352
2338 void Assembler::vcmp(const DwVfpRegister src1, 2353 void Assembler::vcmp(const DwVfpRegister src1,
2339 const DwVfpRegister src2, 2354 const DwVfpRegister src2,
2340 const SBit s,
2341 const Condition cond) { 2355 const Condition cond) {
2342 // vcmp(Dd, Dm) double precision floating point comparison. 2356 // vcmp(Dd, Dm) double precision floating point comparison.
2343 // Instruction details available in ARM DDI 0406A, A8-570. 2357 // Instruction details available in ARM DDI 0406A, A8-570.
2344 // cond(31-28) | 11101 (27-23)| D=?(22) | 11 (21-20) | 0100 (19-16) | 2358 // cond(31-28) | 11101 (27-23)| D=?(22) | 11 (21-20) | 0100 (19-16) |
2345 // Vd(15-12) | 101(11-9) | sz(8)=1 | E(7)=? | 1(6) | M(5)=? | 0(4) | Vm(3-0) 2359 // Vd(15-12) | 101(11-9) | sz(8)=1 | E(7)=0 | 1(6) | M(5)=? | 0(4) | Vm(3-0)
2346 ASSERT(CpuFeatures::IsEnabled(VFP3)); 2360 ASSERT(CpuFeatures::IsEnabled(VFP3));
2347 emit(cond | 0xE*B24 |B23 | 0x3*B20 | B18 | 2361 emit(cond | 0xE*B24 |B23 | 0x3*B20 | B18 |
2348 src1.code()*B12 | 0x5*B9 | B8 | B6 | src2.code()); 2362 src1.code()*B12 | 0x5*B9 | B8 | B6 | src2.code());
2349 } 2363 }
2350 2364
2351 2365
2352 void Assembler::vcmp(const DwVfpRegister src1, 2366 void Assembler::vcmp(const DwVfpRegister src1,
2353 const double src2, 2367 const double src2,
2354 const SBit s,
2355 const Condition cond) { 2368 const Condition cond) {
2356 // vcmp(Dd, Dm) double precision floating point comparison. 2369 // vcmp(Dd, Dm) double precision floating point comparison.
2357 // Instruction details available in ARM DDI 0406A, A8-570. 2370 // Instruction details available in ARM DDI 0406A, A8-570.
2358 // cond(31-28) | 11101 (27-23)| D=?(22) | 11 (21-20) | 0101 (19-16) | 2371 // cond(31-28) | 11101 (27-23)| D=?(22) | 11 (21-20) | 0101 (19-16) |
2359 // Vd(15-12) | 101(11-9) | sz(8)=1 | E(7)=? | 1(6) | M(5)=? | 0(4) | 0000(3-0) 2372 // Vd(15-12) | 101(11-9) | sz(8)=1 | E(7)=0 | 1(6) | M(5)=? | 0(4) | 0000(3-0)
2360 ASSERT(CpuFeatures::IsEnabled(VFP3)); 2373 ASSERT(CpuFeatures::IsEnabled(VFP3));
2361 ASSERT(src2 == 0.0); 2374 ASSERT(src2 == 0.0);
2362 emit(cond | 0xE*B24 |B23 | 0x3*B20 | B18 | B16 | 2375 emit(cond | 0xE*B24 |B23 | 0x3*B20 | B18 | B16 |
2363 src1.code()*B12 | 0x5*B9 | B8 | B6); 2376 src1.code()*B12 | 0x5*B9 | B8 | B6);
2364 } 2377 }
2365 2378
2366 2379
2367 void Assembler::vmsr(Register dst, Condition cond) { 2380 void Assembler::vmsr(Register dst, Condition cond) {
2368 // Instruction details available in ARM DDI 0406A, A8-652. 2381 // Instruction details available in ARM DDI 0406A, A8-652.
2369 // cond(31-28) | 1110 (27-24) | 1110(23-20)| 0001 (19-16) | 2382 // cond(31-28) | 1110 (27-24) | 1110(23-20)| 0001 (19-16) |
(...skipping 27 matching lines...) Expand all
2397 2410
2398 // Pseudo instructions. 2411 // Pseudo instructions.
2399 void Assembler::nop(int type) { 2412 void Assembler::nop(int type) {
2400 // This is mov rx, rx. 2413 // This is mov rx, rx.
2401 ASSERT(0 <= type && type <= 14); // mov pc, pc is not a nop. 2414 ASSERT(0 <= type && type <= 14); // mov pc, pc is not a nop.
2402 emit(al | 13*B21 | type*B12 | type); 2415 emit(al | 13*B21 | type*B12 | type);
2403 } 2416 }
2404 2417
2405 2418
2406 bool Assembler::IsNop(Instr instr, int type) { 2419 bool Assembler::IsNop(Instr instr, int type) {
2407 // Check for mov rx, rx. 2420 // Check for mov rx, rx where x = type.
2408 ASSERT(0 <= type && type <= 14); // mov pc, pc is not a nop. 2421 ASSERT(0 <= type && type <= 14); // mov pc, pc is not a nop.
2409 return instr == (al | 13*B21 | type*B12 | type); 2422 return instr == (al | 13*B21 | type*B12 | type);
2410 } 2423 }
2411 2424
2412 2425
2413 bool Assembler::ImmediateFitsAddrMode1Instruction(int32_t imm32) { 2426 bool Assembler::ImmediateFitsAddrMode1Instruction(int32_t imm32) {
2414 uint32_t dummy1; 2427 uint32_t dummy1;
2415 uint32_t dummy2; 2428 uint32_t dummy2;
2416 return fits_shifter(imm32, &dummy1, &dummy2, NULL); 2429 return fits_shifter(imm32, &dummy1, &dummy2, NULL);
2417 } 2430 }
(...skipping 72 matching lines...) Expand 10 before | Expand all | Expand 10 after
2490 ASSERT(rinfo.rmode() != RelocInfo::COMMENT && 2503 ASSERT(rinfo.rmode() != RelocInfo::COMMENT &&
2491 rinfo.rmode() != RelocInfo::POSITION); 2504 rinfo.rmode() != RelocInfo::POSITION);
2492 if (rinfo.rmode() != RelocInfo::JS_RETURN) { 2505 if (rinfo.rmode() != RelocInfo::JS_RETURN) {
2493 rinfo.set_pc(rinfo.pc() + pc_delta); 2506 rinfo.set_pc(rinfo.pc() + pc_delta);
2494 } 2507 }
2495 } 2508 }
2496 } 2509 }
2497 2510
2498 2511
2499 void Assembler::db(uint8_t data) { 2512 void Assembler::db(uint8_t data) {
2513 // No relocation info should be pending while using db. db is used
2514 // to write pure data with no pointers and the constant pool should
2515 // be emitted before using db.
2516 ASSERT(num_prinfo_ == 0);
2500 CheckBuffer(); 2517 CheckBuffer();
2501 *reinterpret_cast<uint8_t*>(pc_) = data; 2518 *reinterpret_cast<uint8_t*>(pc_) = data;
2502 pc_ += sizeof(uint8_t); 2519 pc_ += sizeof(uint8_t);
2503 } 2520 }
2504 2521
2505 2522
2506 void Assembler::dd(uint32_t data) { 2523 void Assembler::dd(uint32_t data) {
2524 // No relocation info should be pending while using dd. dd is used
2525 // to write pure data with no pointers and the constant pool should
2526 // be emitted before using dd.
2527 ASSERT(num_prinfo_ == 0);
2507 CheckBuffer(); 2528 CheckBuffer();
2508 *reinterpret_cast<uint32_t*>(pc_) = data; 2529 *reinterpret_cast<uint32_t*>(pc_) = data;
2509 pc_ += sizeof(uint32_t); 2530 pc_ += sizeof(uint32_t);
2510 } 2531 }
2511 2532
2512 2533
2513 void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) { 2534 void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
2514 RelocInfo rinfo(pc_, rmode, data); // we do not try to reuse pool constants 2535 RelocInfo rinfo(pc_, rmode, data); // we do not try to reuse pool constants
2515 if (rmode >= RelocInfo::JS_RETURN && rmode <= RelocInfo::DEBUG_BREAK_SLOT) { 2536 if (rmode >= RelocInfo::JS_RETURN && rmode <= RelocInfo::DEBUG_BREAK_SLOT) {
2516 // Adjust code for new modes. 2537 // Adjust code for new modes.
(...skipping 101 matching lines...) Expand 10 before | Expand all | Expand 10 after
2618 // Emit constant pool entries. 2639 // Emit constant pool entries.
2619 for (int i = 0; i < num_prinfo_; i++) { 2640 for (int i = 0; i < num_prinfo_; i++) {
2620 RelocInfo& rinfo = prinfo_[i]; 2641 RelocInfo& rinfo = prinfo_[i];
2621 ASSERT(rinfo.rmode() != RelocInfo::COMMENT && 2642 ASSERT(rinfo.rmode() != RelocInfo::COMMENT &&
2622 rinfo.rmode() != RelocInfo::POSITION && 2643 rinfo.rmode() != RelocInfo::POSITION &&
2623 rinfo.rmode() != RelocInfo::STATEMENT_POSITION); 2644 rinfo.rmode() != RelocInfo::STATEMENT_POSITION);
2624 Instr instr = instr_at(rinfo.pc()); 2645 Instr instr = instr_at(rinfo.pc());
2625 2646
2626 // Instruction to patch must be a ldr/str [pc, #offset]. 2647 // Instruction to patch must be a ldr/str [pc, #offset].
2627 // P and U set, B and W clear, Rn == pc, offset12 still 0. 2648 // P and U set, B and W clear, Rn == pc, offset12 still 0.
2628 ASSERT((instr & (7*B25 | P | U | B | W | 15*B16 | Off12Mask)) == 2649 ASSERT((instr & (7*B25 | P | U | B | W | 15*B16 | kOff12Mask)) ==
2629 (2*B25 | P | U | pc.code()*B16)); 2650 (2*B25 | P | U | pc.code()*B16));
2630 int delta = pc_ - rinfo.pc() - 8; 2651 int delta = pc_ - rinfo.pc() - 8;
2631 ASSERT(delta >= -4); // instr could be ldr pc, [pc, #-4] followed by targ32 2652 ASSERT(delta >= -4); // instr could be ldr pc, [pc, #-4] followed by targ32
2632 if (delta < 0) { 2653 if (delta < 0) {
2633 instr &= ~U; 2654 instr &= ~U;
2634 delta = -delta; 2655 delta = -delta;
2635 } 2656 }
2636 ASSERT(is_uint12(delta)); 2657 ASSERT(is_uint12(delta));
2637 instr_at_put(rinfo.pc(), instr + delta); 2658 instr_at_put(rinfo.pc(), instr + delta);
2638 emit(rinfo.data()); 2659 emit(rinfo.data());
2639 } 2660 }
2640 num_prinfo_ = 0; 2661 num_prinfo_ = 0;
2641 last_const_pool_end_ = pc_offset(); 2662 last_const_pool_end_ = pc_offset();
2642 2663
2643 RecordComment("]"); 2664 RecordComment("]");
2644 2665
2645 if (after_pool.is_linked()) { 2666 if (after_pool.is_linked()) {
2646 bind(&after_pool); 2667 bind(&after_pool);
2647 } 2668 }
2648 2669
2649 // Since a constant pool was just emitted, move the check offset forward by 2670 // Since a constant pool was just emitted, move the check offset forward by
2650 // the standard interval. 2671 // the standard interval.
2651 next_buffer_check_ = pc_offset() + kCheckConstInterval; 2672 next_buffer_check_ = pc_offset() + kCheckConstInterval;
2652 } 2673 }
2653 2674
2654 2675
2655 } } // namespace v8::internal 2676 } } // namespace v8::internal
2656 2677
2657 #endif // V8_TARGET_ARCH_ARM 2678 #endif // V8_TARGET_ARCH_ARM
OLDNEW
« no previous file with comments | « src/arm/assembler-arm.h ('k') | src/arm/assembler-arm-inl.h » ('j') | no next file with comments »

Powered by Google App Engine
This is Rietveld 408576698