Chromium Code Reviews| OLD | NEW |
|---|---|
| 1 // Copyright (c) 1994-2006 Sun Microsystems Inc. | 1 // Copyright (c) 1994-2006 Sun Microsystems Inc. |
| 2 // All Rights Reserved. | 2 // All Rights Reserved. |
| 3 // | 3 // |
| 4 // Redistribution and use in source and binary forms, with or without | 4 // Redistribution and use in source and binary forms, with or without |
| 5 // modification, are permitted provided that the following conditions | 5 // modification, are permitted provided that the following conditions |
| 6 // are met: | 6 // are met: |
| 7 // | 7 // |
| 8 // - Redistributions of source code must retain the above copyright notice, | 8 // - Redistributions of source code must retain the above copyright notice, |
| 9 // this list of conditions and the following disclaimer. | 9 // this list of conditions and the following disclaimer. |
| 10 // | 10 // |
| (...skipping 195 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 206 ASSERT(is_uint5(shift_imm)); | 206 ASSERT(is_uint5(shift_imm)); |
| 207 rn_ = rn; | 207 rn_ = rn; |
| 208 rm_ = rm; | 208 rm_ = rm; |
| 209 shift_op_ = shift_op; | 209 shift_op_ = shift_op; |
| 210 shift_imm_ = shift_imm & 31; | 210 shift_imm_ = shift_imm & 31; |
| 211 am_ = am; | 211 am_ = am; |
| 212 } | 212 } |
| 213 | 213 |
| 214 | 214 |
| 215 // ----------------------------------------------------------------------------- | 215 // ----------------------------------------------------------------------------- |
| 216 // Implementation of Assembler. | 216 // Specific instructions, constants, and masks. |
| 217 | |
| 218 // Instruction encoding bits. | |
| 219 enum { | |
| 220 H = 1 << 5, // halfword (or byte) | |
| 221 S6 = 1 << 6, // signed (or unsigned) | |
| 222 L = 1 << 20, // load (or store) | |
| 223 S = 1 << 20, // set condition code (or leave unchanged) | |
| 224 W = 1 << 21, // writeback base register (or leave unchanged) | |
| 225 A = 1 << 21, // accumulate in multiply instruction (or not) | |
| 226 B = 1 << 22, // unsigned byte (or word) | |
| 227 N = 1 << 22, // long (or short) | |
| 228 U = 1 << 23, // positive (or negative) offset/index | |
| 229 P = 1 << 24, // offset/pre-indexed addressing (or post-indexed addressing) | |
| 230 I = 1 << 25, // immediate shifter operand (or not) | |
| 231 | |
| 232 B4 = 1 << 4, | |
| 233 B5 = 1 << 5, | |
| 234 B6 = 1 << 6, | |
| 235 B7 = 1 << 7, | |
| 236 B8 = 1 << 8, | |
| 237 B9 = 1 << 9, | |
| 238 B12 = 1 << 12, | |
| 239 B16 = 1 << 16, | |
| 240 B18 = 1 << 18, | |
| 241 B19 = 1 << 19, | |
| 242 B20 = 1 << 20, | |
| 243 B21 = 1 << 21, | |
| 244 B22 = 1 << 22, | |
| 245 B23 = 1 << 23, | |
| 246 B24 = 1 << 24, | |
| 247 B25 = 1 << 25, | |
| 248 B26 = 1 << 26, | |
| 249 B27 = 1 << 27, | |
| 250 | |
| 251 // Instruction bit masks. | |
| 252 RdMask = 15 << 12, // in str instruction | |
| 253 CondMask = 15 << 28, | |
| 254 CoprocessorMask = 15 << 8, | |
| 255 OpCodeMask = 15 << 21, // in data-processing instructions | |
| 256 Imm24Mask = (1 << 24) - 1, | |
| 257 Off12Mask = (1 << 12) - 1, | |
| 258 // Reserved condition. | |
| 259 nv = 15 << 28 | |
| 260 }; | |
| 261 | |
| 262 | 217 |
| 263 // add(sp, sp, 4) instruction (aka Pop()) | 218 // add(sp, sp, 4) instruction (aka Pop()) |
| 264 static const Instr kPopInstruction = | 219 const Instr kPopInstruction = |
| 265 al | 4 * B21 | 4 | LeaveCC | I | sp.code() * B16 | sp.code() * B12; | 220 al | PostIndex | 4 | LeaveCC | I | sp.code() * B16 | sp.code() * B12; |
| 266 // str(r, MemOperand(sp, 4, NegPreIndex), al) instruction (aka push(r)) | 221 // str(r, MemOperand(sp, 4, NegPreIndex), al) instruction (aka push(r)) |
| 267 // register r is not encoded. | 222 // register r is not encoded. |
| 268 static const Instr kPushRegPattern = | 223 const Instr kPushRegPattern = |
| 269 al | B26 | 4 | NegPreIndex | sp.code() * B16; | 224 al | B26 | 4 | NegPreIndex | sp.code() * B16; |
| 270 // ldr(r, MemOperand(sp, 4, PostIndex), al) instruction (aka pop(r)) | 225 // ldr(r, MemOperand(sp, 4, PostIndex), al) instruction (aka pop(r)) |
| 271 // register r is not encoded. | 226 // register r is not encoded. |
| 272 static const Instr kPopRegPattern = | 227 const Instr kPopRegPattern = |
| 273 al | B26 | L | 4 | PostIndex | sp.code() * B16; | 228 al | B26 | L | 4 | PostIndex | sp.code() * B16; |
| 274 // mov lr, pc | 229 // mov lr, pc |
| 275 const Instr kMovLrPc = al | 13*B21 | pc.code() | lr.code() * B12; | 230 const Instr kMovLrPc = al | MOV | pc.code() | lr.code() * B12; |
| 276 // ldr rd, [pc, #offset] | 231 // ldr rd, [pc, #offset] |
| 277 const Instr kLdrPCMask = CondMask | 15 * B24 | 7 * B20 | 15 * B16; | 232 const Instr kLdrPCMask = kCondMask | 15 * B24 | 7 * B20 | 15 * B16; |
| 278 const Instr kLdrPCPattern = al | 5 * B24 | L | pc.code() * B16; | 233 const Instr kLdrPCPattern = al | 5 * B24 | L | pc.code() * B16; |
| 279 // blxcc rm | 234 // blxcc rm |
| 280 const Instr kBlxRegMask = | 235 const Instr kBlxRegMask = |
| 281 15 * B24 | 15 * B20 | 15 * B16 | 15 * B12 | 15 * B8 | 15 * B4; | 236 15 * B24 | 15 * B20 | 15 * B16 | 15 * B12 | 15 * B8 | 15 * B4; |
| 282 const Instr kBlxRegPattern = | 237 const Instr kBlxRegPattern = |
| 283 B24 | B21 | 15 * B16 | 15 * B12 | 15 * B8 | 3 * B4; | 238 B24 | B21 | 15 * B16 | 15 * B12 | 15 * B8 | BLX; |
| 284 const Instr kMovMvnMask = 0x6d * B21 | 0xf * B16; | 239 const Instr kMovMvnMask = 0x6d * B21 | 0xf * B16; |
| 285 const Instr kMovMvnPattern = 0xd * B21; | 240 const Instr kMovMvnPattern = 0xd * B21; |
| 286 const Instr kMovMvnFlip = B22; | 241 const Instr kMovMvnFlip = B22; |
| 287 const Instr kMovLeaveCCMask = 0xdff * B16; | 242 const Instr kMovLeaveCCMask = 0xdff * B16; |
| 288 const Instr kMovLeaveCCPattern = 0x1a0 * B16; | 243 const Instr kMovLeaveCCPattern = 0x1a0 * B16; |
| 289 const Instr kMovwMask = 0xff * B20; | 244 const Instr kMovwMask = 0xff * B20; |
| 290 const Instr kMovwPattern = 0x30 * B20; | 245 const Instr kMovwPattern = 0x30 * B20; |
| 291 const Instr kMovwLeaveCCFlip = 0x5 * B21; | 246 const Instr kMovwLeaveCCFlip = 0x5 * B21; |
| 292 const Instr kCmpCmnMask = 0xdd * B20 | 0xf * B12; | 247 const Instr kCmpCmnMask = 0xdd * B20 | 0xf * B12; |
| 293 const Instr kCmpCmnPattern = 0x15 * B20; | 248 const Instr kCmpCmnPattern = 0x15 * B20; |
| 294 const Instr kCmpCmnFlip = B21; | 249 const Instr kCmpCmnFlip = B21; |
| 295 const Instr kALUMask = 0x6f * B21; | |
| 296 const Instr kAddPattern = 0x4 * B21; | |
| 297 const Instr kSubPattern = 0x2 * B21; | |
| 298 const Instr kBicPattern = 0xe * B21; | |
| 299 const Instr kAndPattern = 0x0 * B21; | |
| 300 const Instr kAddSubFlip = 0x6 * B21; | 250 const Instr kAddSubFlip = 0x6 * B21; |
| 301 const Instr kAndBicFlip = 0xe * B21; | 251 const Instr kAndBicFlip = 0xe * B21; |
| 302 | 252 |
| 303 // A mask for the Rd register for push, pop, ldr, str instructions. | 253 // A mask for the Rd register for push, pop, ldr, str instructions. |
| 304 const Instr kRdMask = 0x0000f000; | 254 const Instr kLdrRegFpOffsetPattern = |
| 305 static const int kRdShift = 12; | |
| 306 static const Instr kLdrRegFpOffsetPattern = | |
| 307 al | B26 | L | Offset | fp.code() * B16; | 255 al | B26 | L | Offset | fp.code() * B16; |
| 308 static const Instr kStrRegFpOffsetPattern = | 256 const Instr kStrRegFpOffsetPattern = |
| 309 al | B26 | Offset | fp.code() * B16; | 257 al | B26 | Offset | fp.code() * B16; |
| 310 static const Instr kLdrRegFpNegOffsetPattern = | 258 const Instr kLdrRegFpNegOffsetPattern = |
| 311 al | B26 | L | NegOffset | fp.code() * B16; | 259 al | B26 | L | NegOffset | fp.code() * B16; |
| 312 static const Instr kStrRegFpNegOffsetPattern = | 260 const Instr kStrRegFpNegOffsetPattern = |
| 313 al | B26 | NegOffset | fp.code() * B16; | 261 al | B26 | NegOffset | fp.code() * B16; |
| 314 static const Instr kLdrStrInstrTypeMask = 0xffff0000; | 262 const Instr kLdrStrInstrTypeMask = 0xffff0000; |
| 315 static const Instr kLdrStrInstrArgumentMask = 0x0000ffff; | 263 const Instr kLdrStrInstrArgumentMask = 0x0000ffff; |
| 316 static const Instr kLdrStrOffsetMask = 0x00000fff; | 264 const Instr kLdrStrOffsetMask = 0x00000fff; |
| 265 | |
| 317 | 266 |
| 318 // Spare buffer. | 267 // Spare buffer. |
| 319 static const int kMinimalBufferSize = 4*KB; | 268 static const int kMinimalBufferSize = 4*KB; |
| 320 static byte* spare_buffer_ = NULL; | 269 static byte* spare_buffer_ = NULL; |
| 321 | 270 |
| 271 | |
| 322 Assembler::Assembler(void* buffer, int buffer_size) | 272 Assembler::Assembler(void* buffer, int buffer_size) |
| 323 : positions_recorder_(this), | 273 : positions_recorder_(this), |
| 324 allow_peephole_optimization_(false) { | 274 allow_peephole_optimization_(false) { |
| 325 // BUG(3245989): disable peephole optimization if crankshaft is enabled. | 275 // BUG(3245989): disable peephole optimization if crankshaft is enabled. |
| 326 allow_peephole_optimization_ = FLAG_peephole_optimization; | 276 allow_peephole_optimization_ = FLAG_peephole_optimization; |
| 327 if (buffer == NULL) { | 277 if (buffer == NULL) { |
| 328 // Do our own buffer management. | 278 // Do our own buffer management. |
| 329 if (buffer_size <= kMinimalBufferSize) { | 279 if (buffer_size <= kMinimalBufferSize) { |
| 330 buffer_size = kMinimalBufferSize; | 280 buffer_size = kMinimalBufferSize; |
| 331 | 281 |
| (...skipping 72 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 404 | 354 |
| 405 bool Assembler::IsBranch(Instr instr) { | 355 bool Assembler::IsBranch(Instr instr) { |
| 406 return (instr & (B27 | B25)) == (B27 | B25); | 356 return (instr & (B27 | B25)) == (B27 | B25); |
| 407 } | 357 } |
| 408 | 358 |
| 409 | 359 |
| 410 int Assembler::GetBranchOffset(Instr instr) { | 360 int Assembler::GetBranchOffset(Instr instr) { |
| 411 ASSERT(IsBranch(instr)); | 361 ASSERT(IsBranch(instr)); |
| 412 // Take the jump offset in the lower 24 bits, sign extend it and multiply it | 362 // Take the jump offset in the lower 24 bits, sign extend it and multiply it |
| 413 // by 4 to get the offset in bytes. | 363 // by 4 to get the offset in bytes. |
| 414 return ((instr & Imm24Mask) << 8) >> 6; | 364 return ((instr & kImm24Mask) << 8) >> 6; |
| 415 } | 365 } |
| 416 | 366 |
| 417 | 367 |
| 418 bool Assembler::IsLdrRegisterImmediate(Instr instr) { | 368 bool Assembler::IsLdrRegisterImmediate(Instr instr) { |
| 419 return (instr & (B27 | B26 | B25 | B22 | B20)) == (B26 | B20); | 369 return (instr & (B27 | B26 | B25 | B22 | B20)) == (B26 | B20); |
| 420 } | 370 } |
| 421 | 371 |
| 422 | 372 |
| 423 int Assembler::GetLdrRegisterImmediateOffset(Instr instr) { | 373 int Assembler::GetLdrRegisterImmediateOffset(Instr instr) { |
| 424 ASSERT(IsLdrRegisterImmediate(instr)); | 374 ASSERT(IsLdrRegisterImmediate(instr)); |
| 425 bool positive = (instr & B23) == B23; | 375 bool positive = (instr & B23) == B23; |
| 426 int offset = instr & Off12Mask; // Zero extended offset. | 376 int offset = instr & kOff12Mask; // Zero extended offset. |
| 427 return positive ? offset : -offset; | 377 return positive ? offset : -offset; |
| 428 } | 378 } |
| 429 | 379 |
| 430 | 380 |
| 431 Instr Assembler::SetLdrRegisterImmediateOffset(Instr instr, int offset) { | 381 Instr Assembler::SetLdrRegisterImmediateOffset(Instr instr, int offset) { |
| 432 ASSERT(IsLdrRegisterImmediate(instr)); | 382 ASSERT(IsLdrRegisterImmediate(instr)); |
| 433 bool positive = offset >= 0; | 383 bool positive = offset >= 0; |
| 434 if (!positive) offset = -offset; | 384 if (!positive) offset = -offset; |
| 435 ASSERT(is_uint12(offset)); | 385 ASSERT(is_uint12(offset)); |
| 436 // Set bit indicating whether the offset should be added. | 386 // Set bit indicating whether the offset should be added. |
| 437 instr = (instr & ~B23) | (positive ? B23 : 0); | 387 instr = (instr & ~B23) | (positive ? B23 : 0); |
| 438 // Set the actual offset. | 388 // Set the actual offset. |
| 439 return (instr & ~Off12Mask) | offset; | 389 return (instr & ~kOff12Mask) | offset; |
| 440 } | 390 } |
| 441 | 391 |
| 442 | 392 |
| 443 bool Assembler::IsStrRegisterImmediate(Instr instr) { | 393 bool Assembler::IsStrRegisterImmediate(Instr instr) { |
| 444 return (instr & (B27 | B26 | B25 | B22 | B20)) == B26; | 394 return (instr & (B27 | B26 | B25 | B22 | B20)) == B26; |
| 445 } | 395 } |
| 446 | 396 |
| 447 | 397 |
| 448 Instr Assembler::SetStrRegisterImmediateOffset(Instr instr, int offset) { | 398 Instr Assembler::SetStrRegisterImmediateOffset(Instr instr, int offset) { |
| 449 ASSERT(IsStrRegisterImmediate(instr)); | 399 ASSERT(IsStrRegisterImmediate(instr)); |
| 450 bool positive = offset >= 0; | 400 bool positive = offset >= 0; |
| 451 if (!positive) offset = -offset; | 401 if (!positive) offset = -offset; |
| 452 ASSERT(is_uint12(offset)); | 402 ASSERT(is_uint12(offset)); |
| 453 // Set bit indicating whether the offset should be added. | 403 // Set bit indicating whether the offset should be added. |
| 454 instr = (instr & ~B23) | (positive ? B23 : 0); | 404 instr = (instr & ~B23) | (positive ? B23 : 0); |
| 455 // Set the actual offset. | 405 // Set the actual offset. |
| 456 return (instr & ~Off12Mask) | offset; | 406 return (instr & ~kOff12Mask) | offset; |
| 457 } | 407 } |
| 458 | 408 |
| 459 | 409 |
| 460 bool Assembler::IsAddRegisterImmediate(Instr instr) { | 410 bool Assembler::IsAddRegisterImmediate(Instr instr) { |
| 461 return (instr & (B27 | B26 | B25 | B24 | B23 | B22 | B21)) == (B25 | B23); | 411 return (instr & (B27 | B26 | B25 | B24 | B23 | B22 | B21)) == (B25 | B23); |
| 462 } | 412 } |
| 463 | 413 |
| 464 | 414 |
| 465 Instr Assembler::SetAddRegisterImmediateOffset(Instr instr, int offset) { | 415 Instr Assembler::SetAddRegisterImmediateOffset(Instr instr, int offset) { |
| 466 ASSERT(IsAddRegisterImmediate(instr)); | 416 ASSERT(IsAddRegisterImmediate(instr)); |
| 467 ASSERT(offset >= 0); | 417 ASSERT(offset >= 0); |
| 468 ASSERT(is_uint12(offset)); | 418 ASSERT(is_uint12(offset)); |
| 469 // Set the offset. | 419 // Set the offset. |
| 470 return (instr & ~Off12Mask) | offset; | 420 return (instr & ~kOff12Mask) | offset; |
| 471 } | 421 } |
| 472 | 422 |
| 473 | 423 |
| 474 Register Assembler::GetRd(Instr instr) { | 424 Register Assembler::GetRd(Instr instr) { |
| 475 Register reg; | 425 Register reg; |
| 476 reg.code_ = ((instr & kRdMask) >> kRdShift); | 426 reg.code_ = Instruction::RdValue(instr); |
| 477 return reg; | 427 return reg; |
| 478 } | 428 } |
| 479 | 429 |
| 480 | 430 |
| 481 bool Assembler::IsPush(Instr instr) { | 431 bool Assembler::IsPush(Instr instr) { |
| 482 return ((instr & ~kRdMask) == kPushRegPattern); | 432 return ((instr & ~kRdMask) == kPushRegPattern); |
| 483 } | 433 } |
| 484 | 434 |
| 485 | 435 |
| 486 bool Assembler::IsPop(Instr instr) { | 436 bool Assembler::IsPop(Instr instr) { |
| (...skipping 17 matching lines...) Expand all Loading... | |
| 504 | 454 |
| 505 | 455 |
| 506 bool Assembler::IsLdrRegFpNegOffset(Instr instr) { | 456 bool Assembler::IsLdrRegFpNegOffset(Instr instr) { |
| 507 return ((instr & kLdrStrInstrTypeMask) == kLdrRegFpNegOffsetPattern); | 457 return ((instr & kLdrStrInstrTypeMask) == kLdrRegFpNegOffsetPattern); |
| 508 } | 458 } |
| 509 | 459 |
| 510 | 460 |
| 511 bool Assembler::IsLdrPcImmediateOffset(Instr instr) { | 461 bool Assembler::IsLdrPcImmediateOffset(Instr instr) { |
| 512 // Check the instruction is indeed a | 462 // Check the instruction is indeed a |
| 513 // ldr<cond> <Rd>, [pc +/- offset_12]. | 463 // ldr<cond> <Rd>, [pc +/- offset_12]. |
| 514 return (instr & 0x0f7f0000) == 0x051f0000; | 464 return (instr & (kLdrPCMask & ~kCondMask)) == 0x051f0000; |
| 515 } | 465 } |
| 516 | 466 |
| 517 | 467 |
| 518 // Labels refer to positions in the (to be) generated code. | 468 // Labels refer to positions in the (to be) generated code. |
| 519 // There are bound, linked, and unused labels. | 469 // There are bound, linked, and unused labels. |
| 520 // | 470 // |
| 521 // Bound labels refer to known positions in the already | 471 // Bound labels refer to known positions in the already |
| 522 // generated code. pos() is the position the label refers to. | 472 // generated code. pos() is the position the label refers to. |
| 523 // | 473 // |
| 524 // Linked labels refer to unknown positions in the code | 474 // Linked labels refer to unknown positions in the code |
| 525 // to be generated; pos() is the position of the last | 475 // to be generated; pos() is the position of the last |
| 526 // instruction using the label. | 476 // instruction using the label. |
| 527 | 477 |
| 528 | 478 |
| 529 // The link chain is terminated by a negative code position (must be aligned) | 479 // The link chain is terminated by a negative code position (must be aligned) |
| 530 const int kEndOfChain = -4; | 480 const int kEndOfChain = -4; |
| 531 | 481 |
| 532 | 482 |
| 533 int Assembler::target_at(int pos) { | 483 int Assembler::target_at(int pos) { |
| 534 Instr instr = instr_at(pos); | 484 Instr instr = instr_at(pos); |
| 535 if ((instr & ~Imm24Mask) == 0) { | 485 if ((instr & ~kImm24Mask) == 0) { |
| 536 // Emitted label constant, not part of a branch. | 486 // Emitted label constant, not part of a branch. |
| 537 return instr - (Code::kHeaderSize - kHeapObjectTag); | 487 return instr - (Code::kHeaderSize - kHeapObjectTag); |
| 538 } | 488 } |
| 539 ASSERT((instr & 7*B25) == 5*B25); // b, bl, or blx imm24 | 489 ASSERT((instr & 7*B25) == 5*B25); // b, bl, or blx imm24 |
| 540 int imm26 = ((instr & Imm24Mask) << 8) >> 6; | 490 int imm26 = ((instr & kImm24Mask) << 8) >> 6; |
| 541 if ((instr & CondMask) == nv && (instr & B24) != 0) { | 491 if ((Instruction::ConditionField(instr) == kSpecialCondition) && |
| 492 ((instr & B24) != 0)) { | |
| 542 // blx uses bit 24 to encode bit 2 of imm26 | 493 // blx uses bit 24 to encode bit 2 of imm26 |
| 543 imm26 += 2; | 494 imm26 += 2; |
| 544 } | 495 } |
| 545 return pos + kPcLoadDelta + imm26; | 496 return pos + kPcLoadDelta + imm26; |
| 546 } | 497 } |
| 547 | 498 |
| 548 | 499 |
| 549 void Assembler::target_at_put(int pos, int target_pos) { | 500 void Assembler::target_at_put(int pos, int target_pos) { |
| 550 Instr instr = instr_at(pos); | 501 Instr instr = instr_at(pos); |
| 551 if ((instr & ~Imm24Mask) == 0) { | 502 if ((instr & ~kImm24Mask) == 0) { |
| 552 ASSERT(target_pos == kEndOfChain || target_pos >= 0); | 503 ASSERT(target_pos == kEndOfChain || target_pos >= 0); |
| 553 // Emitted label constant, not part of a branch. | 504 // Emitted label constant, not part of a branch. |
| 554 // Make label relative to Code* of generated Code object. | 505 // Make label relative to Code* of generated Code object. |
| 555 instr_at_put(pos, target_pos + (Code::kHeaderSize - kHeapObjectTag)); | 506 instr_at_put(pos, target_pos + (Code::kHeaderSize - kHeapObjectTag)); |
| 556 return; | 507 return; |
| 557 } | 508 } |
| 558 int imm26 = target_pos - (pos + kPcLoadDelta); | 509 int imm26 = target_pos - (pos + kPcLoadDelta); |
| 559 ASSERT((instr & 7*B25) == 5*B25); // b, bl, or blx imm24 | 510 ASSERT((instr & 7*B25) == 5*B25); // b, bl, or blx imm24 |
| 560 if ((instr & CondMask) == nv) { | 511 if (Instruction::ConditionField(instr) == kSpecialCondition) { |
| 561 // blx uses bit 24 to encode bit 2 of imm26 | 512 // blx uses bit 24 to encode bit 2 of imm26 |
| 562 ASSERT((imm26 & 1) == 0); | 513 ASSERT((imm26 & 1) == 0); |
| 563 instr = (instr & ~(B24 | Imm24Mask)) | ((imm26 & 2) >> 1)*B24; | 514 instr = (instr & ~(B24 | kImm24Mask)) | ((imm26 & 2) >> 1)*B24; |
| 564 } else { | 515 } else { |
| 565 ASSERT((imm26 & 3) == 0); | 516 ASSERT((imm26 & 3) == 0); |
| 566 instr &= ~Imm24Mask; | 517 instr &= ~kImm24Mask; |
| 567 } | 518 } |
| 568 int imm24 = imm26 >> 2; | 519 int imm24 = imm26 >> 2; |
| 569 ASSERT(is_int24(imm24)); | 520 ASSERT(is_int24(imm24)); |
| 570 instr_at_put(pos, instr | (imm24 & Imm24Mask)); | 521 instr_at_put(pos, instr | (imm24 & kImm24Mask)); |
| 571 } | 522 } |
| 572 | 523 |
| 573 | 524 |
| 574 void Assembler::print(Label* L) { | 525 void Assembler::print(Label* L) { |
| 575 if (L->is_unused()) { | 526 if (L->is_unused()) { |
| 576 PrintF("unused label\n"); | 527 PrintF("unused label\n"); |
| 577 } else if (L->is_bound()) { | 528 } else if (L->is_bound()) { |
| 578 PrintF("bound label to %d\n", L->pos()); | 529 PrintF("bound label to %d\n", L->pos()); |
| 579 } else if (L->is_linked()) { | 530 } else if (L->is_linked()) { |
| 580 Label l = *L; | 531 Label l = *L; |
| 581 PrintF("unbound label"); | 532 PrintF("unbound label"); |
| 582 while (l.is_linked()) { | 533 while (l.is_linked()) { |
| 583 PrintF("@ %d ", l.pos()); | 534 PrintF("@ %d ", l.pos()); |
| 584 Instr instr = instr_at(l.pos()); | 535 Instr instr = instr_at(l.pos()); |
| 585 if ((instr & ~Imm24Mask) == 0) { | 536 if ((instr & ~kImm24Mask) == 0) { |
| 586 PrintF("value\n"); | 537 PrintF("value\n"); |
| 587 } else { | 538 } else { |
| 588 ASSERT((instr & 7*B25) == 5*B25); // b, bl, or blx | 539 ASSERT((instr & 7*B25) == 5*B25); // b, bl, or blx |
| 589 int cond = instr & CondMask; | 540 Condition cond = Instruction::ConditionField(instr); |
| 590 const char* b; | 541 const char* b; |
| 591 const char* c; | 542 const char* c; |
| 592 if (cond == nv) { | 543 if (cond == kSpecialCondition) { |
| 593 b = "blx"; | 544 b = "blx"; |
| 594 c = ""; | 545 c = ""; |
| 595 } else { | 546 } else { |
| 596 if ((instr & B24) != 0) | 547 if ((instr & B24) != 0) |
| 597 b = "bl"; | 548 b = "bl"; |
| 598 else | 549 else |
| 599 b = "b"; | 550 b = "b"; |
| 600 | 551 |
| 601 switch (cond) { | 552 switch (cond) { |
| 602 case eq: c = "eq"; break; | 553 case eq: c = "eq"; break; |
| (...skipping 121 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 724 } | 675 } |
| 725 } | 676 } |
| 726 } | 677 } |
| 727 } else if ((*instr & kCmpCmnMask) == kCmpCmnPattern) { | 678 } else if ((*instr & kCmpCmnMask) == kCmpCmnPattern) { |
| 728 if (fits_shifter(-imm32, rotate_imm, immed_8, NULL)) { | 679 if (fits_shifter(-imm32, rotate_imm, immed_8, NULL)) { |
| 729 *instr ^= kCmpCmnFlip; | 680 *instr ^= kCmpCmnFlip; |
| 730 return true; | 681 return true; |
| 731 } | 682 } |
| 732 } else { | 683 } else { |
| 733 Instr alu_insn = (*instr & kALUMask); | 684 Instr alu_insn = (*instr & kALUMask); |
| 734 if (alu_insn == kAddPattern || | 685 if (alu_insn == ADD || |
| 735 alu_insn == kSubPattern) { | 686 alu_insn == SUB) { |
| 736 if (fits_shifter(-imm32, rotate_imm, immed_8, NULL)) { | 687 if (fits_shifter(-imm32, rotate_imm, immed_8, NULL)) { |
| 737 *instr ^= kAddSubFlip; | 688 *instr ^= kAddSubFlip; |
| 738 return true; | 689 return true; |
| 739 } | 690 } |
| 740 } else if (alu_insn == kAndPattern || | 691 } else if (alu_insn == AND || |
| 741 alu_insn == kBicPattern) { | 692 alu_insn == BIC) { |
| 742 if (fits_shifter(~imm32, rotate_imm, immed_8, NULL)) { | 693 if (fits_shifter(~imm32, rotate_imm, immed_8, NULL)) { |
| 743 *instr ^= kAndBicFlip; | 694 *instr ^= kAndBicFlip; |
| 744 return true; | 695 return true; |
| 745 } | 696 } |
| 746 } | 697 } |
| 747 } | 698 } |
| 748 } | 699 } |
| 749 return false; | 700 return false; |
| 750 } | 701 } |
| 751 | 702 |
| (...skipping 23 matching lines...) Expand all Loading... | |
| 775 uint32_t dummy1, dummy2; | 726 uint32_t dummy1, dummy2; |
| 776 return fits_shifter(imm32_, &dummy1, &dummy2, NULL); | 727 return fits_shifter(imm32_, &dummy1, &dummy2, NULL); |
| 777 } | 728 } |
| 778 | 729 |
| 779 | 730 |
| 780 void Assembler::addrmod1(Instr instr, | 731 void Assembler::addrmod1(Instr instr, |
| 781 Register rn, | 732 Register rn, |
| 782 Register rd, | 733 Register rd, |
| 783 const Operand& x) { | 734 const Operand& x) { |
| 784 CheckBuffer(); | 735 CheckBuffer(); |
| 785 ASSERT((instr & ~(CondMask | OpCodeMask | S)) == 0); | 736 ASSERT((instr & ~(kCondMask | kOpCodeMask | S)) == 0); |
| 786 if (!x.rm_.is_valid()) { | 737 if (!x.rm_.is_valid()) { |
| 787 // Immediate. | 738 // Immediate. |
| 788 uint32_t rotate_imm; | 739 uint32_t rotate_imm; |
| 789 uint32_t immed_8; | 740 uint32_t immed_8; |
| 790 if (x.must_use_constant_pool() || | 741 if (x.must_use_constant_pool() || |
| 791 !fits_shifter(x.imm32_, &rotate_imm, &immed_8, &instr)) { | 742 !fits_shifter(x.imm32_, &rotate_imm, &immed_8, &instr)) { |
| 792 // The immediate operand cannot be encoded as a shifter operand, so load | 743 // The immediate operand cannot be encoded as a shifter operand, so load |
| 793 // it first to register ip and change the original instruction to use ip. | 744 // it first to register ip and change the original instruction to use ip. |
| 794 // However, if the original instruction is a 'mov rd, x' (not setting the | 745 // However, if the original instruction is a 'mov rd, x' (not setting the |
| 795 // condition code), then replace it with a 'ldr rd, [pc]'. | 746 // condition code), then replace it with a 'ldr rd, [pc]'. |
| 796 CHECK(!rn.is(ip)); // rn should never be ip, or will be trashed | 747 CHECK(!rn.is(ip)); // rn should never be ip, or will be trashed |
| 797 Condition cond = static_cast<Condition>(instr & CondMask); | 748 Condition cond = Instruction::ConditionField(instr); |
| 798 if ((instr & ~CondMask) == 13*B21) { // mov, S not set | 749 if ((instr & ~kCondMask) == 13*B21) { // mov, S not set |
| 799 if (x.must_use_constant_pool() || !CpuFeatures::IsSupported(ARMv7)) { | 750 if (x.must_use_constant_pool() || !CpuFeatures::IsSupported(ARMv7)) { |
| 800 RecordRelocInfo(x.rmode_, x.imm32_); | 751 RecordRelocInfo(x.rmode_, x.imm32_); |
| 801 ldr(rd, MemOperand(pc, 0), cond); | 752 ldr(rd, MemOperand(pc, 0), cond); |
| 802 } else { | 753 } else { |
| 803 // Will probably use movw, will certainly not use constant pool. | 754 // Will probably use movw, will certainly not use constant pool. |
| 804 mov(rd, Operand(x.imm32_ & 0xffff), LeaveCC, cond); | 755 mov(rd, Operand(x.imm32_ & 0xffff), LeaveCC, cond); |
| 805 movt(rd, static_cast<uint32_t>(x.imm32_) >> 16, cond); | 756 movt(rd, static_cast<uint32_t>(x.imm32_) >> 16, cond); |
| 806 } | 757 } |
| 807 } else { | 758 } else { |
| 808 // If this is not a mov or mvn instruction we may still be able to avoid | 759 // If this is not a mov or mvn instruction we may still be able to avoid |
| (...skipping 20 matching lines...) Expand all Loading... | |
| 829 } | 780 } |
| 830 emit(instr | rn.code()*B16 | rd.code()*B12); | 781 emit(instr | rn.code()*B16 | rd.code()*B12); |
| 831 if (rn.is(pc) || x.rm_.is(pc)) { | 782 if (rn.is(pc) || x.rm_.is(pc)) { |
| 832 // Block constant pool emission for one instruction after reading pc. | 783 // Block constant pool emission for one instruction after reading pc. |
| 833 BlockConstPoolBefore(pc_offset() + kInstrSize); | 784 BlockConstPoolBefore(pc_offset() + kInstrSize); |
| 834 } | 785 } |
| 835 } | 786 } |
| 836 | 787 |
| 837 | 788 |
| 838 void Assembler::addrmod2(Instr instr, Register rd, const MemOperand& x) { | 789 void Assembler::addrmod2(Instr instr, Register rd, const MemOperand& x) { |
| 839 ASSERT((instr & ~(CondMask | B | L)) == B26); | 790 ASSERT((instr & ~(kCondMask | B | L)) == B26); |
| 840 int am = x.am_; | 791 int am = x.am_; |
| 841 if (!x.rm_.is_valid()) { | 792 if (!x.rm_.is_valid()) { |
| 842 // Immediate offset. | 793 // Immediate offset. |
| 843 int offset_12 = x.offset_; | 794 int offset_12 = x.offset_; |
| 844 if (offset_12 < 0) { | 795 if (offset_12 < 0) { |
| 845 offset_12 = -offset_12; | 796 offset_12 = -offset_12; |
| 846 am ^= U; | 797 am ^= U; |
| 847 } | 798 } |
| 848 if (!is_uint12(offset_12)) { | 799 if (!is_uint12(offset_12)) { |
| 849 // Immediate offset cannot be encoded, load it first to register ip | 800 // Immediate offset cannot be encoded, load it first to register ip |
| 850 // rn (and rd in a load) should never be ip, or will be trashed. | 801 // rn (and rd in a load) should never be ip, or will be trashed. |
| 851 ASSERT(!x.rn_.is(ip) && ((instr & L) == L || !rd.is(ip))); | 802 ASSERT(!x.rn_.is(ip) && ((instr & L) == L || !rd.is(ip))); |
| 852 mov(ip, Operand(x.offset_), LeaveCC, | 803 mov(ip, Operand(x.offset_), LeaveCC, Instruction::ConditionField(instr)); |
| 853 static_cast<Condition>(instr & CondMask)); | |
| 854 addrmod2(instr, rd, MemOperand(x.rn_, ip, x.am_)); | 804 addrmod2(instr, rd, MemOperand(x.rn_, ip, x.am_)); |
| 855 return; | 805 return; |
| 856 } | 806 } |
| 857 ASSERT(offset_12 >= 0); // no masking needed | 807 ASSERT(offset_12 >= 0); // no masking needed |
| 858 instr |= offset_12; | 808 instr |= offset_12; |
| 859 } else { | 809 } else { |
| 860 // Register offset (shift_imm_ and shift_op_ are 0) or scaled | 810 // Register offset (shift_imm_ and shift_op_ are 0) or scaled |
| 861 // register offset the constructors make sure that both shift_imm_ | 811 // register offset the constructors make sure that both shift_imm_ |
| 862 // and shift_op_ are initialized. | 812 // and shift_op_ are initialized. |
| 863 ASSERT(!x.rm_.is(pc)); | 813 ASSERT(!x.rm_.is(pc)); |
| 864 instr |= B25 | x.shift_imm_*B7 | x.shift_op_ | x.rm_.code(); | 814 instr |= B25 | x.shift_imm_*B7 | x.shift_op_ | x.rm_.code(); |
| 865 } | 815 } |
| 866 ASSERT((am & (P|W)) == P || !x.rn_.is(pc)); // no pc base with writeback | 816 ASSERT((am & (P|W)) == P || !x.rn_.is(pc)); // no pc base with writeback |
| 867 emit(instr | am | x.rn_.code()*B16 | rd.code()*B12); | 817 emit(instr | am | x.rn_.code()*B16 | rd.code()*B12); |
| 868 } | 818 } |
| 869 | 819 |
| 870 | 820 |
| 871 void Assembler::addrmod3(Instr instr, Register rd, const MemOperand& x) { | 821 void Assembler::addrmod3(Instr instr, Register rd, const MemOperand& x) { |
| 872 ASSERT((instr & ~(CondMask | L | S6 | H)) == (B4 | B7)); | 822 ASSERT((instr & ~(kCondMask | L | S6 | H)) == (B4 | B7)); |
| 873 ASSERT(x.rn_.is_valid()); | 823 ASSERT(x.rn_.is_valid()); |
| 874 int am = x.am_; | 824 int am = x.am_; |
| 875 if (!x.rm_.is_valid()) { | 825 if (!x.rm_.is_valid()) { |
| 876 // Immediate offset. | 826 // Immediate offset. |
| 877 int offset_8 = x.offset_; | 827 int offset_8 = x.offset_; |
| 878 if (offset_8 < 0) { | 828 if (offset_8 < 0) { |
| 879 offset_8 = -offset_8; | 829 offset_8 = -offset_8; |
| 880 am ^= U; | 830 am ^= U; |
| 881 } | 831 } |
| 882 if (!is_uint8(offset_8)) { | 832 if (!is_uint8(offset_8)) { |
| 883 // Immediate offset cannot be encoded, load it first to register ip | 833 // Immediate offset cannot be encoded, load it first to register ip |
| 884 // rn (and rd in a load) should never be ip, or will be trashed. | 834 // rn (and rd in a load) should never be ip, or will be trashed. |
| 885 ASSERT(!x.rn_.is(ip) && ((instr & L) == L || !rd.is(ip))); | 835 ASSERT(!x.rn_.is(ip) && ((instr & L) == L || !rd.is(ip))); |
| 886 mov(ip, Operand(x.offset_), LeaveCC, | 836 mov(ip, Operand(x.offset_), LeaveCC, Instruction::ConditionField(instr)); |
| 887 static_cast<Condition>(instr & CondMask)); | |
| 888 addrmod3(instr, rd, MemOperand(x.rn_, ip, x.am_)); | 837 addrmod3(instr, rd, MemOperand(x.rn_, ip, x.am_)); |
| 889 return; | 838 return; |
| 890 } | 839 } |
| 891 ASSERT(offset_8 >= 0); // no masking needed | 840 ASSERT(offset_8 >= 0); // no masking needed |
| 892 instr |= B | (offset_8 >> 4)*B8 | (offset_8 & 0xf); | 841 instr |= B | (offset_8 >> 4)*B8 | (offset_8 & 0xf); |
| 893 } else if (x.shift_imm_ != 0) { | 842 } else if (x.shift_imm_ != 0) { |
| 894 // Scaled register offset not supported, load index first | 843 // Scaled register offset not supported, load index first |
| 895 // rn (and rd in a load) should never be ip, or will be trashed. | 844 // rn (and rd in a load) should never be ip, or will be trashed. |
| 896 ASSERT(!x.rn_.is(ip) && ((instr & L) == L || !rd.is(ip))); | 845 ASSERT(!x.rn_.is(ip) && ((instr & L) == L || !rd.is(ip))); |
| 897 mov(ip, Operand(x.rm_, x.shift_op_, x.shift_imm_), LeaveCC, | 846 mov(ip, Operand(x.rm_, x.shift_op_, x.shift_imm_), LeaveCC, |
| 898 static_cast<Condition>(instr & CondMask)); | 847 Instruction::ConditionField(instr)); |
| 899 addrmod3(instr, rd, MemOperand(x.rn_, ip, x.am_)); | 848 addrmod3(instr, rd, MemOperand(x.rn_, ip, x.am_)); |
| 900 return; | 849 return; |
| 901 } else { | 850 } else { |
| 902 // Register offset. | 851 // Register offset. |
| 903 ASSERT((am & (P|W)) == P || !x.rm_.is(pc)); // no pc index with writeback | 852 ASSERT((am & (P|W)) == P || !x.rm_.is(pc)); // no pc index with writeback |
| 904 instr |= x.rm_.code(); | 853 instr |= x.rm_.code(); |
| 905 } | 854 } |
| 906 ASSERT((am & (P|W)) == P || !x.rn_.is(pc)); // no pc base with writeback | 855 ASSERT((am & (P|W)) == P || !x.rn_.is(pc)); // no pc base with writeback |
| 907 emit(instr | am | x.rn_.code()*B16 | rd.code()*B12); | 856 emit(instr | am | x.rn_.code()*B16 | rd.code()*B12); |
| 908 } | 857 } |
| 909 | 858 |
| 910 | 859 |
| 911 void Assembler::addrmod4(Instr instr, Register rn, RegList rl) { | 860 void Assembler::addrmod4(Instr instr, Register rn, RegList rl) { |
| 912 ASSERT((instr & ~(CondMask | P | U | W | L)) == B27); | 861 ASSERT((instr & ~(kCondMask | P | U | W | L)) == B27); |
| 913 ASSERT(rl != 0); | 862 ASSERT(rl != 0); |
| 914 ASSERT(!rn.is(pc)); | 863 ASSERT(!rn.is(pc)); |
| 915 emit(instr | rn.code()*B16 | rl); | 864 emit(instr | rn.code()*B16 | rl); |
| 916 } | 865 } |
| 917 | 866 |
| 918 | 867 |
| 919 void Assembler::addrmod5(Instr instr, CRegister crd, const MemOperand& x) { | 868 void Assembler::addrmod5(Instr instr, CRegister crd, const MemOperand& x) { |
| 920 // Unindexed addressing is not encoded by this function. | 869 // Unindexed addressing is not encoded by this function. |
| 921 ASSERT_EQ((B27 | B26), | 870 ASSERT_EQ((B27 | B26), |
| 922 (instr & ~(CondMask | CoprocessorMask | P | U | N | W | L))); | 871 (instr & ~(kCondMask | kCoprocessorMask | P | U | N | W | L))); |
| 923 ASSERT(x.rn_.is_valid() && !x.rm_.is_valid()); | 872 ASSERT(x.rn_.is_valid() && !x.rm_.is_valid()); |
| 924 int am = x.am_; | 873 int am = x.am_; |
| 925 int offset_8 = x.offset_; | 874 int offset_8 = x.offset_; |
| 926 ASSERT((offset_8 & 3) == 0); // offset must be an aligned word offset | 875 ASSERT((offset_8 & 3) == 0); // offset must be an aligned word offset |
| 927 offset_8 >>= 2; | 876 offset_8 >>= 2; |
| 928 if (offset_8 < 0) { | 877 if (offset_8 < 0) { |
| 929 offset_8 = -offset_8; | 878 offset_8 = -offset_8; |
| 930 am ^= U; | 879 am ^= U; |
| 931 } | 880 } |
| 932 ASSERT(is_uint8(offset_8)); // unsigned word offset must fit in a byte | 881 ASSERT(is_uint8(offset_8)); // unsigned word offset must fit in a byte |
| (...skipping 42 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 975 instr_at_put(at_offset, target_pos + (Code::kHeaderSize - kHeapObjectTag)); | 924 instr_at_put(at_offset, target_pos + (Code::kHeaderSize - kHeapObjectTag)); |
| 976 } | 925 } |
| 977 } | 926 } |
| 978 | 927 |
| 979 | 928 |
| 980 // Branch instructions. | 929 // Branch instructions. |
| 981 void Assembler::b(int branch_offset, Condition cond) { | 930 void Assembler::b(int branch_offset, Condition cond) { |
| 982 ASSERT((branch_offset & 3) == 0); | 931 ASSERT((branch_offset & 3) == 0); |
| 983 int imm24 = branch_offset >> 2; | 932 int imm24 = branch_offset >> 2; |
| 984 ASSERT(is_int24(imm24)); | 933 ASSERT(is_int24(imm24)); |
| 985 emit(cond | B27 | B25 | (imm24 & Imm24Mask)); | 934 emit(cond | B27 | B25 | (imm24 & kImm24Mask)); |
| 986 | 935 |
| 987 if (cond == al) { | 936 if (cond == al) { |
| 988 // Dead code is a good location to emit the constant pool. | 937 // Dead code is a good location to emit the constant pool. |
| 989 CheckConstPool(false, false); | 938 CheckConstPool(false, false); |
| 990 } | 939 } |
| 991 } | 940 } |
| 992 | 941 |
| 993 | 942 |
| 994 void Assembler::bl(int branch_offset, Condition cond) { | 943 void Assembler::bl(int branch_offset, Condition cond) { |
| 995 positions_recorder()->WriteRecordedPositions(); | 944 positions_recorder()->WriteRecordedPositions(); |
| 996 ASSERT((branch_offset & 3) == 0); | 945 ASSERT((branch_offset & 3) == 0); |
| 997 int imm24 = branch_offset >> 2; | 946 int imm24 = branch_offset >> 2; |
| 998 ASSERT(is_int24(imm24)); | 947 ASSERT(is_int24(imm24)); |
| 999 emit(cond | B27 | B25 | B24 | (imm24 & Imm24Mask)); | 948 emit(cond | B27 | B25 | B24 | (imm24 & kImm24Mask)); |
| 1000 } | 949 } |
| 1001 | 950 |
| 1002 | 951 |
| 1003 void Assembler::blx(int branch_offset) { // v5 and above | 952 void Assembler::blx(int branch_offset) { // v5 and above |
| 1004 positions_recorder()->WriteRecordedPositions(); | 953 positions_recorder()->WriteRecordedPositions(); |
| 1005 ASSERT((branch_offset & 1) == 0); | 954 ASSERT((branch_offset & 1) == 0); |
| 1006 int h = ((branch_offset & 2) >> 1)*B24; | 955 int h = ((branch_offset & 2) >> 1)*B24; |
| 1007 int imm24 = branch_offset >> 2; | 956 int imm24 = branch_offset >> 2; |
| 1008 ASSERT(is_int24(imm24)); | 957 ASSERT(is_int24(imm24)); |
| 1009 emit(nv | B27 | B25 | h | (imm24 & Imm24Mask)); | 958 emit(kSpecialCondition | B27 | B25 | h | (imm24 & kImm24Mask)); |
| 1010 } | 959 } |
| 1011 | 960 |
| 1012 | 961 |
| 1013 void Assembler::blx(Register target, Condition cond) { // v5 and above | 962 void Assembler::blx(Register target, Condition cond) { // v5 and above |
| 1014 positions_recorder()->WriteRecordedPositions(); | 963 positions_recorder()->WriteRecordedPositions(); |
| 1015 ASSERT(!target.is(pc)); | 964 ASSERT(!target.is(pc)); |
| 1016 emit(cond | B24 | B21 | 15*B16 | 15*B12 | 15*B8 | 3*B4 | target.code()); | 965 emit(cond | B24 | B21 | 15*B16 | 15*B12 | 15*B8 | BLX | target.code()); |
| 1017 } | 966 } |
| 1018 | 967 |
| 1019 | 968 |
| 1020 void Assembler::bx(Register target, Condition cond) { // v5 and above, plus v4t | 969 void Assembler::bx(Register target, Condition cond) { // v5 and above, plus v4t |
| 1021 positions_recorder()->WriteRecordedPositions(); | 970 positions_recorder()->WriteRecordedPositions(); |
| 1022 ASSERT(!target.is(pc)); // use of pc is actually allowed, but discouraged | 971 ASSERT(!target.is(pc)); // use of pc is actually allowed, but discouraged |
| 1023 emit(cond | B24 | B21 | 15*B16 | 15*B12 | 15*B8 | B4 | target.code()); | 972 emit(cond | B24 | B21 | 15*B16 | 15*B12 | 15*B8 | BX | target.code()); |
| 1024 } | 973 } |
| 1025 | 974 |
| 1026 | 975 |
| 1027 // Data-processing instructions. | 976 // Data-processing instructions. |
| 1028 | 977 |
| 1029 void Assembler::and_(Register dst, Register src1, const Operand& src2, | 978 void Assembler::and_(Register dst, Register src1, const Operand& src2, |
| 1030 SBit s, Condition cond) { | 979 SBit s, Condition cond) { |
| 1031 addrmod1(cond | 0*B21 | s, src1, dst, src2); | 980 addrmod1(cond | AND | s, src1, dst, src2); |
| 1032 } | 981 } |
| 1033 | 982 |
| 1034 | 983 |
| 1035 void Assembler::eor(Register dst, Register src1, const Operand& src2, | 984 void Assembler::eor(Register dst, Register src1, const Operand& src2, |
| 1036 SBit s, Condition cond) { | 985 SBit s, Condition cond) { |
| 1037 addrmod1(cond | 1*B21 | s, src1, dst, src2); | 986 addrmod1(cond | EOR | s, src1, dst, src2); |
| 1038 } | 987 } |
| 1039 | 988 |
| 1040 | 989 |
| 1041 void Assembler::sub(Register dst, Register src1, const Operand& src2, | 990 void Assembler::sub(Register dst, Register src1, const Operand& src2, |
| 1042 SBit s, Condition cond) { | 991 SBit s, Condition cond) { |
| 1043 addrmod1(cond | 2*B21 | s, src1, dst, src2); | 992 addrmod1(cond | SUB | s, src1, dst, src2); |
| 1044 } | 993 } |
| 1045 | 994 |
| 1046 | 995 |
| 1047 void Assembler::rsb(Register dst, Register src1, const Operand& src2, | 996 void Assembler::rsb(Register dst, Register src1, const Operand& src2, |
| 1048 SBit s, Condition cond) { | 997 SBit s, Condition cond) { |
| 1049 addrmod1(cond | 3*B21 | s, src1, dst, src2); | 998 addrmod1(cond | RSB | s, src1, dst, src2); |
| 1050 } | 999 } |
| 1051 | 1000 |
| 1052 | 1001 |
| 1053 void Assembler::add(Register dst, Register src1, const Operand& src2, | 1002 void Assembler::add(Register dst, Register src1, const Operand& src2, |
| 1054 SBit s, Condition cond) { | 1003 SBit s, Condition cond) { |
| 1055 addrmod1(cond | 4*B21 | s, src1, dst, src2); | 1004 addrmod1(cond | ADD | s, src1, dst, src2); |
| 1056 | 1005 |
| 1057 // Eliminate pattern: push(r), pop() | 1006 // Eliminate pattern: push(r), pop() |
| 1058 // str(src, MemOperand(sp, 4, NegPreIndex), al); | 1007 // str(src, MemOperand(sp, 4, NegPreIndex), al); |
| 1059 // add(sp, sp, Operand(kPointerSize)); | 1008 // add(sp, sp, Operand(kPointerSize)); |
| 1060 // Both instructions can be eliminated. | 1009 // Both instructions can be eliminated. |
| 1061 if (can_peephole_optimize(2) && | 1010 if (can_peephole_optimize(2) && |
| 1062 // Pattern. | 1011 // Pattern. |
| 1063 instr_at(pc_ - 1 * kInstrSize) == kPopInstruction && | 1012 instr_at(pc_ - 1 * kInstrSize) == kPopInstruction && |
| 1064 (instr_at(pc_ - 2 * kInstrSize) & ~RdMask) == kPushRegPattern) { | 1013 (instr_at(pc_ - 2 * kInstrSize) & ~kRdMask) == kPushRegPattern) { |
| 1065 pc_ -= 2 * kInstrSize; | 1014 pc_ -= 2 * kInstrSize; |
| 1066 if (FLAG_print_peephole_optimization) { | 1015 if (FLAG_print_peephole_optimization) { |
| 1067 PrintF("%x push(reg)/pop() eliminated\n", pc_offset()); | 1016 PrintF("%x push(reg)/pop() eliminated\n", pc_offset()); |
| 1068 } | 1017 } |
| 1069 } | 1018 } |
| 1070 } | 1019 } |
| 1071 | 1020 |
| 1072 | 1021 |
| 1073 void Assembler::adc(Register dst, Register src1, const Operand& src2, | 1022 void Assembler::adc(Register dst, Register src1, const Operand& src2, |
| 1074 SBit s, Condition cond) { | 1023 SBit s, Condition cond) { |
| 1075 addrmod1(cond | 5*B21 | s, src1, dst, src2); | 1024 addrmod1(cond | ADC | s, src1, dst, src2); |
| 1076 } | 1025 } |
| 1077 | 1026 |
| 1078 | 1027 |
| 1079 void Assembler::sbc(Register dst, Register src1, const Operand& src2, | 1028 void Assembler::sbc(Register dst, Register src1, const Operand& src2, |
| 1080 SBit s, Condition cond) { | 1029 SBit s, Condition cond) { |
| 1081 addrmod1(cond | 6*B21 | s, src1, dst, src2); | 1030 addrmod1(cond | SBC | s, src1, dst, src2); |
| 1082 } | 1031 } |
| 1083 | 1032 |
| 1084 | 1033 |
| 1085 void Assembler::rsc(Register dst, Register src1, const Operand& src2, | 1034 void Assembler::rsc(Register dst, Register src1, const Operand& src2, |
| 1086 SBit s, Condition cond) { | 1035 SBit s, Condition cond) { |
| 1087 addrmod1(cond | 7*B21 | s, src1, dst, src2); | 1036 addrmod1(cond | RSC | s, src1, dst, src2); |
| 1088 } | 1037 } |
| 1089 | 1038 |
| 1090 | 1039 |
| 1091 void Assembler::tst(Register src1, const Operand& src2, Condition cond) { | 1040 void Assembler::tst(Register src1, const Operand& src2, Condition cond) { |
| 1092 addrmod1(cond | 8*B21 | S, src1, r0, src2); | 1041 addrmod1(cond | TST | S, src1, r0, src2); |
| 1093 } | 1042 } |
| 1094 | 1043 |
| 1095 | 1044 |
| 1096 void Assembler::teq(Register src1, const Operand& src2, Condition cond) { | 1045 void Assembler::teq(Register src1, const Operand& src2, Condition cond) { |
| 1097 addrmod1(cond | 9*B21 | S, src1, r0, src2); | 1046 addrmod1(cond | TEQ | S, src1, r0, src2); |
| 1098 } | 1047 } |
| 1099 | 1048 |
| 1100 | 1049 |
| 1101 void Assembler::cmp(Register src1, const Operand& src2, Condition cond) { | 1050 void Assembler::cmp(Register src1, const Operand& src2, Condition cond) { |
| 1102 addrmod1(cond | 10*B21 | S, src1, r0, src2); | 1051 addrmod1(cond | CMP | S, src1, r0, src2); |
| 1103 } | 1052 } |
| 1104 | 1053 |
| 1105 | 1054 |
| 1106 void Assembler::cmn(Register src1, const Operand& src2, Condition cond) { | 1055 void Assembler::cmn(Register src1, const Operand& src2, Condition cond) { |
| 1107 addrmod1(cond | 11*B21 | S, src1, r0, src2); | 1056 addrmod1(cond | CMN | S, src1, r0, src2); |
| 1108 } | 1057 } |
| 1109 | 1058 |
| 1110 | 1059 |
| 1111 void Assembler::orr(Register dst, Register src1, const Operand& src2, | 1060 void Assembler::orr(Register dst, Register src1, const Operand& src2, |
| 1112 SBit s, Condition cond) { | 1061 SBit s, Condition cond) { |
| 1113 addrmod1(cond | 12*B21 | s, src1, dst, src2); | 1062 addrmod1(cond | ORR | s, src1, dst, src2); |
| 1114 } | 1063 } |
| 1115 | 1064 |
| 1116 | 1065 |
| 1117 void Assembler::mov(Register dst, const Operand& src, SBit s, Condition cond) { | 1066 void Assembler::mov(Register dst, const Operand& src, SBit s, Condition cond) { |
| 1118 if (dst.is(pc)) { | 1067 if (dst.is(pc)) { |
| 1119 positions_recorder()->WriteRecordedPositions(); | 1068 positions_recorder()->WriteRecordedPositions(); |
| 1120 } | 1069 } |
| 1121 // Don't allow nop instructions in the form mov rn, rn to be generated using | 1070 // Don't allow nop instructions in the form mov rn, rn to be generated using |
| 1122 // the mov instruction. They must be generated using nop(int/NopMarkerTypes) | 1071 // the mov instruction. They must be generated using nop(int/NopMarkerTypes) |
| 1123 // or MarkCode(int/NopMarkerTypes) pseudo instructions. | 1072 // or MarkCode(int/NopMarkerTypes) pseudo instructions. |
| 1124 ASSERT(!(src.is_reg() && src.rm().is(dst) && s == LeaveCC && cond == al)); | 1073 ASSERT(!(src.is_reg() && src.rm().is(dst) && s == LeaveCC && cond == al)); |
| 1125 addrmod1(cond | 13*B21 | s, r0, dst, src); | 1074 addrmod1(cond | MOV | s, r0, dst, src); |
| 1126 } | 1075 } |
| 1127 | 1076 |
| 1128 | 1077 |
| 1129 void Assembler::movw(Register reg, uint32_t immediate, Condition cond) { | 1078 void Assembler::movw(Register reg, uint32_t immediate, Condition cond) { |
| 1130 ASSERT(immediate < 0x10000); | 1079 ASSERT(immediate < 0x10000); |
| 1131 mov(reg, Operand(immediate), LeaveCC, cond); | 1080 mov(reg, Operand(immediate), LeaveCC, cond); |
| 1132 } | 1081 } |
| 1133 | 1082 |
| 1134 | 1083 |
| 1135 void Assembler::movt(Register reg, uint32_t immediate, Condition cond) { | 1084 void Assembler::movt(Register reg, uint32_t immediate, Condition cond) { |
| 1136 emit(cond | 0x34*B20 | reg.code()*B12 | EncodeMovwImmediate(immediate)); | 1085 emit(cond | 0x34*B20 | reg.code()*B12 | EncodeMovwImmediate(immediate)); |
| 1137 } | 1086 } |
| 1138 | 1087 |
| 1139 | 1088 |
| 1140 void Assembler::bic(Register dst, Register src1, const Operand& src2, | 1089 void Assembler::bic(Register dst, Register src1, const Operand& src2, |
| 1141 SBit s, Condition cond) { | 1090 SBit s, Condition cond) { |
| 1142 addrmod1(cond | 14*B21 | s, src1, dst, src2); | 1091 addrmod1(cond | BIC | s, src1, dst, src2); |
| 1143 } | 1092 } |
| 1144 | 1093 |
| 1145 | 1094 |
| 1146 void Assembler::mvn(Register dst, const Operand& src, SBit s, Condition cond) { | 1095 void Assembler::mvn(Register dst, const Operand& src, SBit s, Condition cond) { |
| 1147 addrmod1(cond | 15*B21 | s, r0, dst, src); | 1096 addrmod1(cond | MVN | s, r0, dst, src); |
| 1148 } | 1097 } |
| 1149 | 1098 |
| 1150 | 1099 |
| 1151 // Multiply instructions. | 1100 // Multiply instructions. |
| 1152 void Assembler::mla(Register dst, Register src1, Register src2, Register srcA, | 1101 void Assembler::mla(Register dst, Register src1, Register src2, Register srcA, |
| 1153 SBit s, Condition cond) { | 1102 SBit s, Condition cond) { |
| 1154 ASSERT(!dst.is(pc) && !src1.is(pc) && !src2.is(pc) && !srcA.is(pc)); | 1103 ASSERT(!dst.is(pc) && !src1.is(pc) && !src2.is(pc) && !srcA.is(pc)); |
| 1155 emit(cond | A | s | dst.code()*B16 | srcA.code()*B12 | | 1104 emit(cond | A | s | dst.code()*B16 | srcA.code()*B12 | |
| 1156 src2.code()*B8 | B7 | B4 | src1.code()); | 1105 src2.code()*B8 | B7 | B4 | src1.code()); |
| 1157 } | 1106 } |
| (...skipping 57 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 1215 emit(cond | B23 | s | dstH.code()*B16 | dstL.code()*B12 | | 1164 emit(cond | B23 | s | dstH.code()*B16 | dstL.code()*B12 | |
| 1216 src2.code()*B8 | B7 | B4 | src1.code()); | 1165 src2.code()*B8 | B7 | B4 | src1.code()); |
| 1217 } | 1166 } |
| 1218 | 1167 |
| 1219 | 1168 |
| 1220 // Miscellaneous arithmetic instructions. | 1169 // Miscellaneous arithmetic instructions. |
| 1221 void Assembler::clz(Register dst, Register src, Condition cond) { | 1170 void Assembler::clz(Register dst, Register src, Condition cond) { |
| 1222 // v5 and above. | 1171 // v5 and above. |
| 1223 ASSERT(!dst.is(pc) && !src.is(pc)); | 1172 ASSERT(!dst.is(pc) && !src.is(pc)); |
| 1224 emit(cond | B24 | B22 | B21 | 15*B16 | dst.code()*B12 | | 1173 emit(cond | B24 | B22 | B21 | 15*B16 | dst.code()*B12 | |
| 1225 15*B8 | B4 | src.code()); | 1174 15*B8 | CLZ | src.code()); |
| 1226 } | 1175 } |
| 1227 | 1176 |
| 1228 | 1177 |
| 1229 // Saturating instructions. | 1178 // Saturating instructions. |
| 1230 | 1179 |
| 1231 // Unsigned saturate. | 1180 // Unsigned saturate. |
| 1232 void Assembler::usat(Register dst, | 1181 void Assembler::usat(Register dst, |
| 1233 int satpos, | 1182 int satpos, |
| 1234 const Operand& src, | 1183 const Operand& src, |
| 1235 Condition cond) { | 1184 Condition cond) { |
| (...skipping 133 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 1369 // str(ry, MemOperand(sp, 4, NegPreIndex), al) | 1318 // str(ry, MemOperand(sp, 4, NegPreIndex), al) |
| 1370 // ldr(rx, MemOperand(sp, 4, PostIndex), al) | 1319 // ldr(rx, MemOperand(sp, 4, PostIndex), al) |
| 1371 // Both instructions can be eliminated if ry = rx. | 1320 // Both instructions can be eliminated if ry = rx. |
| 1372 // If ry != rx, a register copy from ry to rx is inserted | 1321 // If ry != rx, a register copy from ry to rx is inserted |
| 1373 // after eliminating the push and the pop instructions. | 1322 // after eliminating the push and the pop instructions. |
| 1374 if (can_peephole_optimize(2)) { | 1323 if (can_peephole_optimize(2)) { |
| 1375 Instr push_instr = instr_at(pc_ - 2 * kInstrSize); | 1324 Instr push_instr = instr_at(pc_ - 2 * kInstrSize); |
| 1376 Instr pop_instr = instr_at(pc_ - 1 * kInstrSize); | 1325 Instr pop_instr = instr_at(pc_ - 1 * kInstrSize); |
| 1377 | 1326 |
| 1378 if (IsPush(push_instr) && IsPop(pop_instr)) { | 1327 if (IsPush(push_instr) && IsPop(pop_instr)) { |
| 1379 if ((pop_instr & kRdMask) != (push_instr & kRdMask)) { | 1328 if (Instruction::RdValue(pop_instr) != Instruction::RdValue(push_instr)) { |
| 1380 // For consecutive push and pop on different registers, | 1329 // For consecutive push and pop on different registers, |
| 1381 // we delete both the push & pop and insert a register move. | 1330 // we delete both the push & pop and insert a register move. |
| 1382 // push ry, pop rx --> mov rx, ry | 1331 // push ry, pop rx --> mov rx, ry |
| 1383 Register reg_pushed, reg_popped; | 1332 Register reg_pushed, reg_popped; |
| 1384 reg_pushed = GetRd(push_instr); | 1333 reg_pushed = GetRd(push_instr); |
| 1385 reg_popped = GetRd(pop_instr); | 1334 reg_popped = GetRd(pop_instr); |
| 1386 pc_ -= 2 * kInstrSize; | 1335 pc_ -= 2 * kInstrSize; |
| 1387 // Insert a mov instruction, which is better than a pair of push & pop | 1336 // Insert a mov instruction, which is better than a pair of push & pop |
| 1388 mov(reg_popped, reg_pushed); | 1337 mov(reg_popped, reg_pushed); |
| 1389 if (FLAG_print_peephole_optimization) { | 1338 if (FLAG_print_peephole_optimization) { |
| (...skipping 60 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 1450 } | 1399 } |
| 1451 | 1400 |
| 1452 if (can_peephole_optimize(3)) { | 1401 if (can_peephole_optimize(3)) { |
| 1453 Instr mem_write_instr = instr_at(pc_ - 3 * kInstrSize); | 1402 Instr mem_write_instr = instr_at(pc_ - 3 * kInstrSize); |
| 1454 Instr ldr_instr = instr_at(pc_ - 2 * kInstrSize); | 1403 Instr ldr_instr = instr_at(pc_ - 2 * kInstrSize); |
| 1455 Instr mem_read_instr = instr_at(pc_ - 1 * kInstrSize); | 1404 Instr mem_read_instr = instr_at(pc_ - 1 * kInstrSize); |
| 1456 if (IsPush(mem_write_instr) && | 1405 if (IsPush(mem_write_instr) && |
| 1457 IsPop(mem_read_instr)) { | 1406 IsPop(mem_read_instr)) { |
| 1458 if ((IsLdrRegFpOffset(ldr_instr) || | 1407 if ((IsLdrRegFpOffset(ldr_instr) || |
| 1459 IsLdrRegFpNegOffset(ldr_instr))) { | 1408 IsLdrRegFpNegOffset(ldr_instr))) { |
| 1460 if ((mem_write_instr & kRdMask) == | 1409 if (Instruction::RdValue(mem_write_instr) == |
| 1461 (mem_read_instr & kRdMask)) { | 1410 Instruction::RdValue(mem_read_instr)) { |
| 1462 // Pattern: push & pop from/to same register, | 1411 // Pattern: push & pop from/to same register, |
| 1463 // with a fp+offset ldr in between | 1412 // with a fp+offset ldr in between |
| 1464 // | 1413 // |
| 1465 // The following: | 1414 // The following: |
| 1466 // str rx, [sp, #-4]! | 1415 // str rx, [sp, #-4]! |
| 1467 // ldr rz, [fp, #-24] | 1416 // ldr rz, [fp, #-24] |
| 1468 // ldr rx, [sp], #+4 | 1417 // ldr rx, [sp], #+4 |
| 1469 // | 1418 // |
| 1470 // Becomes: | 1419 // Becomes: |
| 1471 // if(rx == rz) | 1420 // if(rx == rz) |
| 1472 // delete all | 1421 // delete all |
| 1473 // else | 1422 // else |
| 1474 // ldr rz, [fp, #-24] | 1423 // ldr rz, [fp, #-24] |
| 1475 | 1424 |
| 1476 if ((mem_write_instr & kRdMask) == (ldr_instr & kRdMask)) { | 1425 if (Instruction::RdValue(mem_write_instr) == |
| 1426 Instruction::RdValue(ldr_instr)) { | |
| 1477 pc_ -= 3 * kInstrSize; | 1427 pc_ -= 3 * kInstrSize; |
| 1478 } else { | 1428 } else { |
| 1479 pc_ -= 3 * kInstrSize; | 1429 pc_ -= 3 * kInstrSize; |
| 1480 // Reinsert back the ldr rz. | 1430 // Reinsert back the ldr rz. |
| 1481 emit(ldr_instr); | 1431 emit(ldr_instr); |
| 1482 } | 1432 } |
| 1483 if (FLAG_print_peephole_optimization) { | 1433 if (FLAG_print_peephole_optimization) { |
| 1484 PrintF("%x push/pop -dead ldr fp+offset in middle\n", pc_offset()); | 1434 PrintF("%x push/pop -dead ldr fp+offset in middle\n", pc_offset()); |
| 1485 } | 1435 } |
| 1486 } else { | 1436 } else { |
| 1487 // Pattern: push & pop from/to different registers | 1437 // Pattern: push & pop from/to different registers |
| 1488 // with a fp+offset ldr in between | 1438 // with a fp+offset ldr in between |
| 1489 // | 1439 // |
| 1490 // The following: | 1440 // The following: |
| 1491 // str rx, [sp, #-4]! | 1441 // str rx, [sp, #-4]! |
| 1492 // ldr rz, [fp, #-24] | 1442 // ldr rz, [fp, #-24] |
| 1493 // ldr ry, [sp], #+4 | 1443 // ldr ry, [sp], #+4 |
| 1494 // | 1444 // |
| 1495 // Becomes: | 1445 // Becomes: |
| 1496 // if(ry == rz) | 1446 // if(ry == rz) |
| 1497 // mov ry, rx; | 1447 // mov ry, rx; |
| 1498 // else if(rx != rz) | 1448 // else if(rx != rz) |
| 1499 // ldr rz, [fp, #-24] | 1449 // ldr rz, [fp, #-24] |
| 1500 // mov ry, rx | 1450 // mov ry, rx |
| 1501 // else if((ry != rz) || (rx == rz)) becomes: | 1451 // else if((ry != rz) || (rx == rz)) becomes: |
| 1502 // mov ry, rx | 1452 // mov ry, rx |
| 1503 // ldr rz, [fp, #-24] | 1453 // ldr rz, [fp, #-24] |
| 1504 | 1454 |
| 1505 Register reg_pushed, reg_popped; | 1455 Register reg_pushed, reg_popped; |
| 1506 if ((mem_read_instr & kRdMask) == (ldr_instr & kRdMask)) { | 1456 if (Instruction::RdValue(mem_read_instr) == |
| 1457 Instruction::RdValue(ldr_instr)) { | |
| 1507 reg_pushed = GetRd(mem_write_instr); | 1458 reg_pushed = GetRd(mem_write_instr); |
| 1508 reg_popped = GetRd(mem_read_instr); | 1459 reg_popped = GetRd(mem_read_instr); |
| 1509 pc_ -= 3 * kInstrSize; | 1460 pc_ -= 3 * kInstrSize; |
| 1510 mov(reg_popped, reg_pushed); | 1461 mov(reg_popped, reg_pushed); |
| 1511 } else if ((mem_write_instr & kRdMask) | 1462 } else if (Instruction::RdValue(mem_write_instr) != |
| 1512 != (ldr_instr & kRdMask)) { | 1463 Instruction::RdValue(ldr_instr)) { |
| 1513 reg_pushed = GetRd(mem_write_instr); | 1464 reg_pushed = GetRd(mem_write_instr); |
| 1514 reg_popped = GetRd(mem_read_instr); | 1465 reg_popped = GetRd(mem_read_instr); |
| 1515 pc_ -= 3 * kInstrSize; | 1466 pc_ -= 3 * kInstrSize; |
| 1516 emit(ldr_instr); | 1467 emit(ldr_instr); |
| 1517 mov(reg_popped, reg_pushed); | 1468 mov(reg_popped, reg_pushed); |
| 1518 } else if (((mem_read_instr & kRdMask) | 1469 } else if ((Instruction::RdValue(mem_read_instr) != |
| 1519 != (ldr_instr & kRdMask)) || | 1470 Instruction::RdValue(ldr_instr)) || |
|
Mads Ager (chromium)
2011/01/26 08:23:48
Indentation is off here.
| |
| 1520 ((mem_write_instr & kRdMask) | 1471 (Instruction::RdValue(mem_write_instr) == |
| 1521 == (ldr_instr & kRdMask)) ) { | 1472 Instruction::RdValue(ldr_instr)) ) { |
| 1522 reg_pushed = GetRd(mem_write_instr); | 1473 reg_pushed = GetRd(mem_write_instr); |
| 1523 reg_popped = GetRd(mem_read_instr); | 1474 reg_popped = GetRd(mem_read_instr); |
| 1524 pc_ -= 3 * kInstrSize; | 1475 pc_ -= 3 * kInstrSize; |
| 1525 mov(reg_popped, reg_pushed); | 1476 mov(reg_popped, reg_pushed); |
| 1526 emit(ldr_instr); | 1477 emit(ldr_instr); |
| 1527 } | 1478 } |
| 1528 if (FLAG_print_peephole_optimization) { | 1479 if (FLAG_print_peephole_optimization) { |
| 1529 PrintF("%x push/pop (ldr fp+off in middle)\n", pc_offset()); | 1480 PrintF("%x push/pop (ldr fp+off in middle)\n", pc_offset()); |
| 1530 } | 1481 } |
| 1531 } | 1482 } |
| (...skipping 101 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 1633 Condition cond) { | 1584 Condition cond) { |
| 1634 addrmod4(cond | B27 | am, base, src); | 1585 addrmod4(cond | B27 | am, base, src); |
| 1635 } | 1586 } |
| 1636 | 1587 |
| 1637 | 1588 |
| 1638 // Exception-generating instructions and debugging support. | 1589 // Exception-generating instructions and debugging support. |
| 1639 // Stops with a non-negative code less than kNumOfWatchedStops support | 1590 // Stops with a non-negative code less than kNumOfWatchedStops support |
| 1640 // enabling/disabling and a counter feature. See simulator-arm.h . | 1591 // enabling/disabling and a counter feature. See simulator-arm.h . |
| 1641 void Assembler::stop(const char* msg, Condition cond, int32_t code) { | 1592 void Assembler::stop(const char* msg, Condition cond, int32_t code) { |
| 1642 #ifndef __arm__ | 1593 #ifndef __arm__ |
| 1643 // See constants-arm.h SoftwareInterruptCodes. Unluckily the Assembler and | |
| 1644 // Simulator do not share constants declaration. | |
| 1645 ASSERT(code >= kDefaultStopCode); | 1594 ASSERT(code >= kDefaultStopCode); |
| 1646 static const uint32_t kStopInterruptCode = 1 << 23; | |
| 1647 static const uint32_t kMaxStopCode = kStopInterruptCode - 1; | |
| 1648 // The Simulator will handle the stop instruction and get the message address. | 1595 // The Simulator will handle the stop instruction and get the message address. |
| 1649 // It expects to find the address just after the svc instruction. | 1596 // It expects to find the address just after the svc instruction. |
| 1650 BlockConstPoolFor(2); | 1597 BlockConstPoolFor(2); |
| 1651 if (code >= 0) { | 1598 if (code >= 0) { |
| 1652 svc(kStopInterruptCode + code, cond); | 1599 svc(kStopCode + code, cond); |
| 1653 } else { | 1600 } else { |
| 1654 svc(kStopInterruptCode + kMaxStopCode, cond); | 1601 svc(kStopCode + kMaxStopCode, cond); |
| 1655 } | 1602 } |
| 1656 emit(reinterpret_cast<Instr>(msg)); | 1603 emit(reinterpret_cast<Instr>(msg)); |
| 1657 #else // def __arm__ | 1604 #else // def __arm__ |
| 1658 #ifdef CAN_USE_ARMV5_INSTRUCTIONS | 1605 #ifdef CAN_USE_ARMV5_INSTRUCTIONS |
| 1659 if (cond != al) { | 1606 if (cond != al) { |
| 1660 Label skip; | 1607 Label skip; |
| 1661 b(&skip, NegateCondition(cond)); | 1608 b(&skip, NegateCondition(cond)); |
| 1662 bkpt(0); | 1609 bkpt(0); |
| 1663 bind(&skip); | 1610 bind(&skip); |
| 1664 } else { | 1611 } else { |
| 1665 bkpt(0); | 1612 bkpt(0); |
| 1666 } | 1613 } |
| 1667 #else // ndef CAN_USE_ARMV5_INSTRUCTIONS | 1614 #else // ndef CAN_USE_ARMV5_INSTRUCTIONS |
| 1668 svc(0x9f0001, cond); | 1615 svc(0x9f0001, cond); |
| 1669 #endif // ndef CAN_USE_ARMV5_INSTRUCTIONS | 1616 #endif // ndef CAN_USE_ARMV5_INSTRUCTIONS |
| 1670 #endif // def __arm__ | 1617 #endif // def __arm__ |
| 1671 } | 1618 } |
| 1672 | 1619 |
| 1673 | 1620 |
| 1674 void Assembler::bkpt(uint32_t imm16) { // v5 and above | 1621 void Assembler::bkpt(uint32_t imm16) { // v5 and above |
| 1675 ASSERT(is_uint16(imm16)); | 1622 ASSERT(is_uint16(imm16)); |
| 1676 emit(al | B24 | B21 | (imm16 >> 4)*B8 | 7*B4 | (imm16 & 0xf)); | 1623 emit(al | B24 | B21 | (imm16 >> 4)*B8 | BKPT | (imm16 & 0xf)); |
| 1677 } | 1624 } |
| 1678 | 1625 |
| 1679 | 1626 |
| 1680 void Assembler::svc(uint32_t imm24, Condition cond) { | 1627 void Assembler::svc(uint32_t imm24, Condition cond) { |
| 1681 ASSERT(is_uint24(imm24)); | 1628 ASSERT(is_uint24(imm24)); |
| 1682 emit(cond | 15*B24 | imm24); | 1629 emit(cond | 15*B24 | imm24); |
| 1683 } | 1630 } |
| 1684 | 1631 |
| 1685 | 1632 |
| 1686 // Coprocessor instructions. | 1633 // Coprocessor instructions. |
| 1687 void Assembler::cdp(Coprocessor coproc, | 1634 void Assembler::cdp(Coprocessor coproc, |
| 1688 int opcode_1, | 1635 int opcode_1, |
| 1689 CRegister crd, | 1636 CRegister crd, |
| 1690 CRegister crn, | 1637 CRegister crn, |
| 1691 CRegister crm, | 1638 CRegister crm, |
| 1692 int opcode_2, | 1639 int opcode_2, |
| 1693 Condition cond) { | 1640 Condition cond) { |
| 1694 ASSERT(is_uint4(opcode_1) && is_uint3(opcode_2)); | 1641 ASSERT(is_uint4(opcode_1) && is_uint3(opcode_2)); |
| 1695 emit(cond | B27 | B26 | B25 | (opcode_1 & 15)*B20 | crn.code()*B16 | | 1642 emit(cond | B27 | B26 | B25 | (opcode_1 & 15)*B20 | crn.code()*B16 | |
| 1696 crd.code()*B12 | coproc*B8 | (opcode_2 & 7)*B5 | crm.code()); | 1643 crd.code()*B12 | coproc*B8 | (opcode_2 & 7)*B5 | crm.code()); |
| 1697 } | 1644 } |
| 1698 | 1645 |
| 1699 | 1646 |
| 1700 void Assembler::cdp2(Coprocessor coproc, | 1647 void Assembler::cdp2(Coprocessor coproc, |
| 1701 int opcode_1, | 1648 int opcode_1, |
| 1702 CRegister crd, | 1649 CRegister crd, |
| 1703 CRegister crn, | 1650 CRegister crn, |
| 1704 CRegister crm, | 1651 CRegister crm, |
| 1705 int opcode_2) { // v5 and above | 1652 int opcode_2) { // v5 and above |
| 1706 cdp(coproc, opcode_1, crd, crn, crm, opcode_2, static_cast<Condition>(nv)); | 1653 cdp(coproc, opcode_1, crd, crn, crm, opcode_2, kSpecialCondition); |
| 1707 } | 1654 } |
| 1708 | 1655 |
| 1709 | 1656 |
| 1710 void Assembler::mcr(Coprocessor coproc, | 1657 void Assembler::mcr(Coprocessor coproc, |
| 1711 int opcode_1, | 1658 int opcode_1, |
| 1712 Register rd, | 1659 Register rd, |
| 1713 CRegister crn, | 1660 CRegister crn, |
| 1714 CRegister crm, | 1661 CRegister crm, |
| 1715 int opcode_2, | 1662 int opcode_2, |
| 1716 Condition cond) { | 1663 Condition cond) { |
| 1717 ASSERT(is_uint3(opcode_1) && is_uint3(opcode_2)); | 1664 ASSERT(is_uint3(opcode_1) && is_uint3(opcode_2)); |
| 1718 emit(cond | B27 | B26 | B25 | (opcode_1 & 7)*B21 | crn.code()*B16 | | 1665 emit(cond | B27 | B26 | B25 | (opcode_1 & 7)*B21 | crn.code()*B16 | |
| 1719 rd.code()*B12 | coproc*B8 | (opcode_2 & 7)*B5 | B4 | crm.code()); | 1666 rd.code()*B12 | coproc*B8 | (opcode_2 & 7)*B5 | B4 | crm.code()); |
| 1720 } | 1667 } |
| 1721 | 1668 |
| 1722 | 1669 |
| 1723 void Assembler::mcr2(Coprocessor coproc, | 1670 void Assembler::mcr2(Coprocessor coproc, |
| 1724 int opcode_1, | 1671 int opcode_1, |
| 1725 Register rd, | 1672 Register rd, |
| 1726 CRegister crn, | 1673 CRegister crn, |
| 1727 CRegister crm, | 1674 CRegister crm, |
| 1728 int opcode_2) { // v5 and above | 1675 int opcode_2) { // v5 and above |
| 1729 mcr(coproc, opcode_1, rd, crn, crm, opcode_2, static_cast<Condition>(nv)); | 1676 mcr(coproc, opcode_1, rd, crn, crm, opcode_2, kSpecialCondition); |
| 1730 } | 1677 } |
| 1731 | 1678 |
| 1732 | 1679 |
| 1733 void Assembler::mrc(Coprocessor coproc, | 1680 void Assembler::mrc(Coprocessor coproc, |
| 1734 int opcode_1, | 1681 int opcode_1, |
| 1735 Register rd, | 1682 Register rd, |
| 1736 CRegister crn, | 1683 CRegister crn, |
| 1737 CRegister crm, | 1684 CRegister crm, |
| 1738 int opcode_2, | 1685 int opcode_2, |
| 1739 Condition cond) { | 1686 Condition cond) { |
| 1740 ASSERT(is_uint3(opcode_1) && is_uint3(opcode_2)); | 1687 ASSERT(is_uint3(opcode_1) && is_uint3(opcode_2)); |
| 1741 emit(cond | B27 | B26 | B25 | (opcode_1 & 7)*B21 | L | crn.code()*B16 | | 1688 emit(cond | B27 | B26 | B25 | (opcode_1 & 7)*B21 | L | crn.code()*B16 | |
| 1742 rd.code()*B12 | coproc*B8 | (opcode_2 & 7)*B5 | B4 | crm.code()); | 1689 rd.code()*B12 | coproc*B8 | (opcode_2 & 7)*B5 | B4 | crm.code()); |
| 1743 } | 1690 } |
| 1744 | 1691 |
| 1745 | 1692 |
| 1746 void Assembler::mrc2(Coprocessor coproc, | 1693 void Assembler::mrc2(Coprocessor coproc, |
| 1747 int opcode_1, | 1694 int opcode_1, |
| 1748 Register rd, | 1695 Register rd, |
| 1749 CRegister crn, | 1696 CRegister crn, |
| 1750 CRegister crm, | 1697 CRegister crm, |
| 1751 int opcode_2) { // v5 and above | 1698 int opcode_2) { // v5 and above |
| 1752 mrc(coproc, opcode_1, rd, crn, crm, opcode_2, static_cast<Condition>(nv)); | 1699 mrc(coproc, opcode_1, rd, crn, crm, opcode_2, kSpecialCondition); |
| 1753 } | 1700 } |
| 1754 | 1701 |
| 1755 | 1702 |
| 1756 void Assembler::ldc(Coprocessor coproc, | 1703 void Assembler::ldc(Coprocessor coproc, |
| 1757 CRegister crd, | 1704 CRegister crd, |
| 1758 const MemOperand& src, | 1705 const MemOperand& src, |
| 1759 LFlag l, | 1706 LFlag l, |
| 1760 Condition cond) { | 1707 Condition cond) { |
| 1761 addrmod5(cond | B27 | B26 | l | L | coproc*B8, crd, src); | 1708 addrmod5(cond | B27 | B26 | l | L | coproc*B8, crd, src); |
| 1762 } | 1709 } |
| 1763 | 1710 |
| 1764 | 1711 |
| 1765 void Assembler::ldc(Coprocessor coproc, | 1712 void Assembler::ldc(Coprocessor coproc, |
| 1766 CRegister crd, | 1713 CRegister crd, |
| 1767 Register rn, | 1714 Register rn, |
| 1768 int option, | 1715 int option, |
| 1769 LFlag l, | 1716 LFlag l, |
| 1770 Condition cond) { | 1717 Condition cond) { |
| 1771 // Unindexed addressing. | 1718 // Unindexed addressing. |
| 1772 ASSERT(is_uint8(option)); | 1719 ASSERT(is_uint8(option)); |
| 1773 emit(cond | B27 | B26 | U | l | L | rn.code()*B16 | crd.code()*B12 | | 1720 emit(cond | B27 | B26 | U | l | L | rn.code()*B16 | crd.code()*B12 | |
| 1774 coproc*B8 | (option & 255)); | 1721 coproc*B8 | (option & 255)); |
| 1775 } | 1722 } |
| 1776 | 1723 |
| 1777 | 1724 |
| 1778 void Assembler::ldc2(Coprocessor coproc, | 1725 void Assembler::ldc2(Coprocessor coproc, |
| 1779 CRegister crd, | 1726 CRegister crd, |
| 1780 const MemOperand& src, | 1727 const MemOperand& src, |
| 1781 LFlag l) { // v5 and above | 1728 LFlag l) { // v5 and above |
| 1782 ldc(coproc, crd, src, l, static_cast<Condition>(nv)); | 1729 ldc(coproc, crd, src, l, kSpecialCondition); |
| 1783 } | 1730 } |
| 1784 | 1731 |
| 1785 | 1732 |
| 1786 void Assembler::ldc2(Coprocessor coproc, | 1733 void Assembler::ldc2(Coprocessor coproc, |
| 1787 CRegister crd, | 1734 CRegister crd, |
| 1788 Register rn, | 1735 Register rn, |
| 1789 int option, | 1736 int option, |
| 1790 LFlag l) { // v5 and above | 1737 LFlag l) { // v5 and above |
| 1791 ldc(coproc, crd, rn, option, l, static_cast<Condition>(nv)); | 1738 ldc(coproc, crd, rn, option, l, kSpecialCondition); |
| 1792 } | 1739 } |
| 1793 | 1740 |
| 1794 | 1741 |
| 1795 void Assembler::stc(Coprocessor coproc, | 1742 void Assembler::stc(Coprocessor coproc, |
| 1796 CRegister crd, | 1743 CRegister crd, |
| 1797 const MemOperand& dst, | 1744 const MemOperand& dst, |
| 1798 LFlag l, | 1745 LFlag l, |
| 1799 Condition cond) { | 1746 Condition cond) { |
| 1800 addrmod5(cond | B27 | B26 | l | coproc*B8, crd, dst); | 1747 addrmod5(cond | B27 | B26 | l | coproc*B8, crd, dst); |
| 1801 } | 1748 } |
| 1802 | 1749 |
| 1803 | 1750 |
| 1804 void Assembler::stc(Coprocessor coproc, | 1751 void Assembler::stc(Coprocessor coproc, |
| 1805 CRegister crd, | 1752 CRegister crd, |
| 1806 Register rn, | 1753 Register rn, |
| 1807 int option, | 1754 int option, |
| 1808 LFlag l, | 1755 LFlag l, |
| 1809 Condition cond) { | 1756 Condition cond) { |
| 1810 // Unindexed addressing. | 1757 // Unindexed addressing. |
| 1811 ASSERT(is_uint8(option)); | 1758 ASSERT(is_uint8(option)); |
| 1812 emit(cond | B27 | B26 | U | l | rn.code()*B16 | crd.code()*B12 | | 1759 emit(cond | B27 | B26 | U | l | rn.code()*B16 | crd.code()*B12 | |
| 1813 coproc*B8 | (option & 255)); | 1760 coproc*B8 | (option & 255)); |
| 1814 } | 1761 } |
| 1815 | 1762 |
| 1816 | 1763 |
| 1817 void Assembler::stc2(Coprocessor | 1764 void Assembler::stc2(Coprocessor |
| 1818 coproc, CRegister crd, | 1765 coproc, CRegister crd, |
| 1819 const MemOperand& dst, | 1766 const MemOperand& dst, |
| 1820 LFlag l) { // v5 and above | 1767 LFlag l) { // v5 and above |
| 1821 stc(coproc, crd, dst, l, static_cast<Condition>(nv)); | 1768 stc(coproc, crd, dst, l, kSpecialCondition); |
| 1822 } | 1769 } |
| 1823 | 1770 |
| 1824 | 1771 |
| 1825 void Assembler::stc2(Coprocessor coproc, | 1772 void Assembler::stc2(Coprocessor coproc, |
| 1826 CRegister crd, | 1773 CRegister crd, |
| 1827 Register rn, | 1774 Register rn, |
| 1828 int option, | 1775 int option, |
| 1829 LFlag l) { // v5 and above | 1776 LFlag l) { // v5 and above |
| 1830 stc(coproc, crd, rn, option, l, static_cast<Condition>(nv)); | 1777 stc(coproc, crd, rn, option, l, kSpecialCondition); |
| 1831 } | 1778 } |
| 1832 | 1779 |
| 1833 | 1780 |
| 1834 // Support for VFP. | 1781 // Support for VFP. |
| 1835 | 1782 |
| 1836 void Assembler::vldr(const DwVfpRegister dst, | 1783 void Assembler::vldr(const DwVfpRegister dst, |
| 1837 const Register base, | 1784 const Register base, |
| 1838 int offset, | 1785 int offset, |
| 1839 const Condition cond) { | 1786 const Condition cond) { |
| 1840 // Ddst = MEM(Rbase + offset). | 1787 // Ddst = MEM(Rbase + offset). |
| (...skipping 789 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 2630 // Emit constant pool entries. | 2577 // Emit constant pool entries. |
| 2631 for (int i = 0; i < num_prinfo_; i++) { | 2578 for (int i = 0; i < num_prinfo_; i++) { |
| 2632 RelocInfo& rinfo = prinfo_[i]; | 2579 RelocInfo& rinfo = prinfo_[i]; |
| 2633 ASSERT(rinfo.rmode() != RelocInfo::COMMENT && | 2580 ASSERT(rinfo.rmode() != RelocInfo::COMMENT && |
| 2634 rinfo.rmode() != RelocInfo::POSITION && | 2581 rinfo.rmode() != RelocInfo::POSITION && |
| 2635 rinfo.rmode() != RelocInfo::STATEMENT_POSITION); | 2582 rinfo.rmode() != RelocInfo::STATEMENT_POSITION); |
| 2636 Instr instr = instr_at(rinfo.pc()); | 2583 Instr instr = instr_at(rinfo.pc()); |
| 2637 | 2584 |
| 2638 // Instruction to patch must be a ldr/str [pc, #offset]. | 2585 // Instruction to patch must be a ldr/str [pc, #offset]. |
| 2639 // P and U set, B and W clear, Rn == pc, offset12 still 0. | 2586 // P and U set, B and W clear, Rn == pc, offset12 still 0. |
| 2640 ASSERT((instr & (7*B25 | P | U | B | W | 15*B16 | Off12Mask)) == | 2587 ASSERT((instr & (7*B25 | P | U | B | W | 15*B16 | kOff12Mask)) == |
| 2641 (2*B25 | P | U | pc.code()*B16)); | 2588 (2*B25 | P | U | pc.code()*B16)); |
| 2642 int delta = pc_ - rinfo.pc() - 8; | 2589 int delta = pc_ - rinfo.pc() - 8; |
| 2643 ASSERT(delta >= -4); // instr could be ldr pc, [pc, #-4] followed by targ32 | 2590 ASSERT(delta >= -4); // instr could be ldr pc, [pc, #-4] followed by targ32 |
| 2644 if (delta < 0) { | 2591 if (delta < 0) { |
| 2645 instr &= ~U; | 2592 instr &= ~U; |
| 2646 delta = -delta; | 2593 delta = -delta; |
| 2647 } | 2594 } |
| 2648 ASSERT(is_uint12(delta)); | 2595 ASSERT(is_uint12(delta)); |
| 2649 instr_at_put(rinfo.pc(), instr + delta); | 2596 instr_at_put(rinfo.pc(), instr + delta); |
| 2650 emit(rinfo.data()); | 2597 emit(rinfo.data()); |
| 2651 } | 2598 } |
| 2652 num_prinfo_ = 0; | 2599 num_prinfo_ = 0; |
| 2653 last_const_pool_end_ = pc_offset(); | 2600 last_const_pool_end_ = pc_offset(); |
| 2654 | 2601 |
| 2655 RecordComment("]"); | 2602 RecordComment("]"); |
| 2656 | 2603 |
| 2657 if (after_pool.is_linked()) { | 2604 if (after_pool.is_linked()) { |
| 2658 bind(&after_pool); | 2605 bind(&after_pool); |
| 2659 } | 2606 } |
| 2660 | 2607 |
| 2661 // Since a constant pool was just emitted, move the check offset forward by | 2608 // Since a constant pool was just emitted, move the check offset forward by |
| 2662 // the standard interval. | 2609 // the standard interval. |
| 2663 next_buffer_check_ = pc_offset() + kCheckConstInterval; | 2610 next_buffer_check_ = pc_offset() + kCheckConstInterval; |
| 2664 } | 2611 } |
| 2665 | 2612 |
| 2666 | 2613 |
| 2667 } } // namespace v8::internal | 2614 } } // namespace v8::internal |
| 2668 | 2615 |
| 2669 #endif // V8_TARGET_ARCH_ARM | 2616 #endif // V8_TARGET_ARCH_ARM |
| OLD | NEW |