| OLD | NEW |
| 1 // Copyright 2013 the V8 project authors. All rights reserved. | |
| 2 // Redistribution and use in source and binary forms, with or without | |
| 3 // modification, are permitted provided that the following conditions are | |
| 4 // met: | |
| 5 // | |
| 6 // * Redistributions of source code must retain the above copyright | |
| 7 // notice, this list of conditions and the following disclaimer. | |
| 8 // * Redistributions in binary form must reproduce the above | |
| 9 // copyright notice, this list of conditions and the following | |
| 10 // disclaimer in the documentation and/or other materials provided | |
| 11 // with the distribution. | |
| 12 // * Neither the name of Google Inc. nor the names of its | |
| 13 // contributors may be used to endorse or promote products derived | |
| 14 // from this software without specific prior written permission. | |
| 15 // | |
| 16 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS | |
| 17 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT | |
| 18 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR | |
| 19 // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT | |
| 20 // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, | |
| 21 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT | |
| 22 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, | |
| 23 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY | |
| 24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | |
| 25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE | |
| 26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | |
| 27 | |
| 28 #ifndef V8_A64_INSTRUCTIONS_A64_H_ | |
| 29 #define V8_A64_INSTRUCTIONS_A64_H_ | |
| 30 | |
| 31 #include "globals.h" | |
| 32 #include "utils.h" | |
| 33 #include "a64/constants-a64.h" | |
| 34 #include "a64/utils-a64.h" | |
| 35 | |
| 36 namespace v8 { | |
| 37 namespace internal { | |
| 38 | |
| 39 | |
| 40 // ISA constants. -------------------------------------------------------------- | |
| 41 | |
| 42 typedef uint32_t Instr; | |
| 43 | |
| 44 // The following macros initialize a float/double variable with a bit pattern | |
| 45 // without using static initializers: If A64_DEFINE_FP_STATICS is defined, the | |
| 46 // symbol is defined as uint32_t/uint64_t initialized with the desired bit | |
| 47 // pattern. Otherwise, the same symbol is declared as an external float/double. | |
| 48 #if defined(A64_DEFINE_FP_STATICS) | |
| 49 #define DEFINE_FLOAT(name, value) extern const uint32_t name = value | |
| 50 #define DEFINE_DOUBLE(name, value) extern const uint64_t name = value | |
| 51 #else | |
| 52 #define DEFINE_FLOAT(name, value) extern const float name | |
| 53 #define DEFINE_DOUBLE(name, value) extern const double name | |
| 54 #endif // defined(A64_DEFINE_FP_STATICS) | |
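| // For example, a single .cc file (typically the one that owns these constants) | |
| // can define A64_DEFINE_FP_STATICS before including this header: | |
| //   #define A64_DEFINE_FP_STATICS | |
| //   #include "a64/instructions-a64.h" | |
| // Every other includer then sees plain extern float/double declarations, so | |
| // no static initializers are needed anywhere. | |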
| 55 | |
| 56 DEFINE_FLOAT(kFP32PositiveInfinity, 0x7f800000); | |
| 57 DEFINE_FLOAT(kFP32NegativeInfinity, 0xff800000); | |
| 58 DEFINE_DOUBLE(kFP64PositiveInfinity, 0x7ff0000000000000UL); | |
| 59 DEFINE_DOUBLE(kFP64NegativeInfinity, 0xfff0000000000000UL); | |
| 60 | |
| 61 // This value is a signalling NaN as both a double and as a float (taking the | |
| 62 // least-significant word). | |
| 63 DEFINE_DOUBLE(kFP64SignallingNaN, 0x7ff000007f800001UL); | |
| 64 DEFINE_FLOAT(kFP32SignallingNaN, 0x7f800001); | |
| 65 | |
| 66 // A similar value, but as a quiet NaN. | |
| 67 DEFINE_DOUBLE(kFP64QuietNaN, 0x7ff800007fc00001UL); | |
| 68 DEFINE_FLOAT(kFP32QuietNaN, 0x7fc00001); | |
| 69 | |
| 70 #undef DEFINE_FLOAT | |
| 71 #undef DEFINE_DOUBLE | |
| 72 | |
| 73 | |
| 74 enum LSDataSize { | |
| 75 LSByte = 0, | |
| 76 LSHalfword = 1, | |
| 77 LSWord = 2, | |
| 78 LSDoubleWord = 3 | |
| 79 }; | |
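| // These values are the log2 of the access size in bytes, matching the 'size' | |
| // field of load/store encodings (e.g. LSWord == 2 describes a 4-byte access). | |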
| 80 | |
| 81 LSDataSize CalcLSPairDataSize(LoadStorePairOp op); | |
| 82 | |
| 83 enum ImmBranchType { | |
| 84 UnknownBranchType = 0, | |
| 85 CondBranchType = 1, | |
| 86 UncondBranchType = 2, | |
| 87 CompareBranchType = 3, | |
| 88 TestBranchType = 4 | |
| 89 }; | |
| 90 | |
| 91 enum AddrMode { | |
| 92 Offset, | |
| 93 PreIndex, | |
| 94 PostIndex | |
| 95 }; | |
| 96 | |
| 97 enum FPRounding { | |
| 98 // The first four values are encodable directly by FPCR<RMode>. | |
| 99 FPTieEven = 0x0, | |
| 100 FPPositiveInfinity = 0x1, | |
| 101 FPNegativeInfinity = 0x2, | |
| 102 FPZero = 0x3, | |
| 103 | |
| 104 // The final rounding mode is only available when explicitly specified by the | |
| 105 // instruction (such as with fcvta). It cannot be set in FPCR. | |
| 106 FPTieAway | |
| 107 }; | |
| 108 | |
| 109 enum Reg31Mode { | |
| 110 Reg31IsStackPointer, | |
| 111 Reg31IsZeroRegister | |
| 112 }; | |
| 113 | |
| 114 // Instructions. --------------------------------------------------------------- | |
| 115 | |
| 116 class Instruction { | |
| 117 public: | |
| 118 Instr InstructionBits() const { | |
| 119 Instr bits; | |
| 120 memcpy(&bits, this, sizeof(bits)); | |
| 121 return bits; | |
| 122 } | |
| 123 | |
| 124 void SetInstructionBits(Instr new_instr) { | |
| 125 memcpy(this, &new_instr, sizeof(new_instr)); | |
| 126 } | |
| 127 | |
| 128 int Bit(int pos) const { | |
| 129 return (InstructionBits() >> pos) & 1; | |
| 130 } | |
| 131 | |
| 132 uint32_t Bits(int msb, int lsb) const { | |
| 133 return unsigned_bitextract_32(msb, lsb, InstructionBits()); | |
| 134 } | |
| 135 | |
| 136 int32_t SignedBits(int msb, int lsb) const { | |
| 137 int32_t bits = *(reinterpret_cast<const int32_t*>(this)); | |
| 138 return signed_bitextract_32(msb, lsb, bits); | |
| 139 } | |
| 140 | |
| 141 Instr Mask(uint32_t mask) const { | |
| 142 return InstructionBits() & mask; | |
| 143 } | |
| 144 | |
| 145 Instruction* following(int count = 1) { | |
| 146 return this + count * kInstructionSize; | |
| 147 } | |
| 148 | |
| 149 Instruction* preceding(int count = 1) { | |
| 150 return this - count * kInstructionSize; | |
| 151 } | |
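| // Note that Instruction has no data members, so pointer arithmetic on 'this' | |
| // (here and in NextInstruction and InstructionAtOffset below) relies on | |
| // sizeof(Instruction) being 1 and therefore advances byte by byte; the | |
| // offsets are scaled by kInstructionSize explicitly where needed. | |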
| 152 | |
| 153 #define DEFINE_GETTER(Name, HighBit, LowBit, Func) \ | |
| 154 int64_t Name() const { return Func(HighBit, LowBit); } | |
| 155 INSTRUCTION_FIELDS_LIST(DEFINE_GETTER) | |
| 156 #undef DEFINE_GETTER | |
| 157 | |
| 158 // ImmPCRel is a compound field (not present in INSTRUCTION_FIELDS_LIST), | |
| 159 // formed from ImmPCRelLo and ImmPCRelHi. | |
| 160 int ImmPCRel() const { | |
| 161 int const offset = ((ImmPCRelHi() << ImmPCRelLo_width) | ImmPCRelLo()); | |
| 162 int const width = ImmPCRelLo_width + ImmPCRelHi_width; | |
| 163 return signed_bitextract_32(width-1, 0, offset); | |
| 164 } | |
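| // For example, for adr the low two bits of the offset come from ImmPCRelLo | |
| // (bits 30:29 of the instruction) and the remaining 19 bits from ImmPCRelHi | |
| // (bits 23:5), giving a 21-bit signed byte offset, i.e. roughly +-1MB. | |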
| 165 | |
| 166 uint64_t ImmLogical(); | |
| 167 float ImmFP32(); | |
| 168 double ImmFP64(); | |
| 169 | |
| 170 LSDataSize SizeLSPair() const { | |
| 171 return CalcLSPairDataSize( | |
| 172 static_cast<LoadStorePairOp>(Mask(LoadStorePairMask))); | |
| 173 } | |
| 174 | |
| 175 // Helpers. | |
| 176 bool IsCondBranchImm() const { | |
| 177 return Mask(ConditionalBranchFMask) == ConditionalBranchFixed; | |
| 178 } | |
| 179 | |
| 180 bool IsUncondBranchImm() const { | |
| 181 return Mask(UnconditionalBranchFMask) == UnconditionalBranchFixed; | |
| 182 } | |
| 183 | |
| 184 bool IsCompareBranch() const { | |
| 185 return Mask(CompareBranchFMask) == CompareBranchFixed; | |
| 186 } | |
| 187 | |
| 188 bool IsTestBranch() const { | |
| 189 return Mask(TestBranchFMask) == TestBranchFixed; | |
| 190 } | |
| 191 | |
| 192 bool IsLdrLiteral() const { | |
| 193 return Mask(LoadLiteralFMask) == LoadLiteralFixed; | |
| 194 } | |
| 195 | |
| 196 bool IsLdrLiteralX() const { | |
| 197 return Mask(LoadLiteralMask) == LDR_x_lit; | |
| 198 } | |
| 199 | |
| 200 bool IsPCRelAddressing() const { | |
| 201 return Mask(PCRelAddressingFMask) == PCRelAddressingFixed; | |
| 202 } | |
| 203 | |
| 204 bool IsLogicalImmediate() const { | |
| 205 return Mask(LogicalImmediateFMask) == LogicalImmediateFixed; | |
| 206 } | |
| 207 | |
| 208 bool IsAddSubImmediate() const { | |
| 209 return Mask(AddSubImmediateFMask) == AddSubImmediateFixed; | |
| 210 } | |
| 211 | |
| 212 bool IsAddSubExtended() const { | |
| 213 return Mask(AddSubExtendedFMask) == AddSubExtendedFixed; | |
| 214 } | |
| 215 | |
| 216 // Match any loads or stores, including pairs. | |
| 217 bool IsLoadOrStore() const { | |
| 218 return Mask(LoadStoreAnyFMask) == LoadStoreAnyFixed; | |
| 219 } | |
| 220 | |
| 221 // Match any loads, including pairs. | |
| 222 bool IsLoad() const; | |
| 223 // Match any stores, including pairs. | |
| 224 bool IsStore() const; | |
| 225 | |
| 226 // Indicate whether Rd can be the stack pointer or the zero register. This | |
| 227 // does not check that the instruction actually has an Rd field. | |
| 228 Reg31Mode RdMode() const { | |
| 229 // The following instructions use csp or wsp as Rd: | |
| 230 // Add/sub (immediate) when not setting the flags. | |
| 231 // Add/sub (extended) when not setting the flags. | |
| 232 // Logical (immediate) when not setting the flags. | |
| 233 // Otherwise, r31 is the zero register. | |
| 234 if (IsAddSubImmediate() || IsAddSubExtended()) { | |
| 235 if (Mask(AddSubSetFlagsBit)) { | |
| 236 return Reg31IsZeroRegister; | |
| 237 } else { | |
| 238 return Reg31IsStackPointer; | |
| 239 } | |
| 240 } | |
| 241 if (IsLogicalImmediate()) { | |
| 242 // Of the logical (immediate) instructions, only ANDS (and its aliases) | |
| 243 // can set the flags. The others can all write into csp. | |
| 244 // Note that some logical operations are not available to | |
| 245 // immediate-operand instructions, so we have to combine two masks here. | |
| 246 if (Mask(LogicalImmediateMask & LogicalOpMask) == ANDS) { | |
| 247 return Reg31IsZeroRegister; | |
| 248 } else { | |
| 249 return Reg31IsStackPointer; | |
| 250 } | |
| 251 } | |
| 252 return Reg31IsZeroRegister; | |
| 253 } | |
| 254 | |
| 255 // Indicate whether Rn can be the stack pointer or the zero register. This | |
| 256 // does not check that the instruction actually has an Rn field. | |
| 257 Reg31Mode RnMode() const { | |
| 258 // The following instructions use csp or wsp as Rn: | |
| 259 // All loads and stores. | |
| 260 // Add/sub (immediate). | |
| 261 // Add/sub (extended). | |
| 262 // Otherwise, r31 is the zero register. | |
| 263 if (IsLoadOrStore() || IsAddSubImmediate() || IsAddSubExtended()) { | |
| 264 return Reg31IsStackPointer; | |
| 265 } | |
| 266 return Reg31IsZeroRegister; | |
| 267 } | |
| 268 | |
| 269 ImmBranchType BranchType() const { | |
| 270 if (IsCondBranchImm()) { | |
| 271 return CondBranchType; | |
| 272 } else if (IsUncondBranchImm()) { | |
| 273 return UncondBranchType; | |
| 274 } else if (IsCompareBranch()) { | |
| 275 return CompareBranchType; | |
| 276 } else if (IsTestBranch()) { | |
| 277 return TestBranchType; | |
| 278 } else { | |
| 279 return UnknownBranchType; | |
| 280 } | |
| 281 } | |
| 282 | |
| 283 static int ImmBranchRangeBitwidth(ImmBranchType branch_type) { | |
| 284 switch (branch_type) { | |
| 285 case UncondBranchType: | |
| 286 return ImmUncondBranch_width; | |
| 287 case CondBranchType: | |
| 288 return ImmCondBranch_width; | |
| 289 case CompareBranchType: | |
| 290 return ImmCmpBranch_width; | |
| 291 case TestBranchType: | |
| 292 return ImmTestBranch_width; | |
| 293 default: | |
| 294 UNREACHABLE(); | |
| 295 return 0; | |
| 296 } | |
| 297 } | |
| 298 | |
| 299 // The range of the branch instruction, expressed as 'instr +- range'. | |
| 300 static int32_t ImmBranchRange(ImmBranchType branch_type) { | |
| 301 return | |
| 302 (1 << (ImmBranchRangeBitwidth(branch_type) + kInstructionSizeLog2)) / 2 - | |
| 303 kInstructionSize; | |
| 304 } | |
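| // For example, a conditional branch has a 19-bit instruction offset | |
| // (ImmCondBranch_width == 19), so its range works out as | |
| //   (1 << (19 + 2)) / 2 - 4 = 1MB - 4 bytes, i.e. approximately +-1MB. | |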
| 305 | |
| 306 int ImmBranch() const { | |
| 307 switch (BranchType()) { | |
| 308 case CondBranchType: return ImmCondBranch(); | |
| 309 case UncondBranchType: return ImmUncondBranch(); | |
| 310 case CompareBranchType: return ImmCmpBranch(); | |
| 311 case TestBranchType: return ImmTestBranch(); | |
| 312 default: UNREACHABLE(); | |
| 313 } | |
| 314 return 0; | |
| 315 } | |
| 316 | |
| 317 bool IsBranchAndLinkToRegister() const { | |
| 318 return Mask(UnconditionalBranchToRegisterMask) == BLR; | |
| 319 } | |
| 320 | |
| 321 bool IsMovz() const { | |
| 322 return (Mask(MoveWideImmediateMask) == MOVZ_x) || | |
| 323 (Mask(MoveWideImmediateMask) == MOVZ_w); | |
| 324 } | |
| 325 | |
| 326 bool IsMovk() const { | |
| 327 return (Mask(MoveWideImmediateMask) == MOVK_x) || | |
| 328 (Mask(MoveWideImmediateMask) == MOVK_w); | |
| 329 } | |
| 330 | |
| 331 bool IsMovn() const { | |
| 332 return (Mask(MoveWideImmediateMask) == MOVN_x) || | |
| 333 (Mask(MoveWideImmediateMask) == MOVN_w); | |
| 334 } | |
| 335 | |
| 336 bool IsNop(int n) const { | |
| 337 // A marking nop is an instruction | |
| 338 // mov r<n>, r<n> | |
| 339 // which is encoded as | |
| 340 // orr r<n>, xzr, r<n> | |
| 341 return (Mask(LogicalShiftedMask) == ORR_x) && | |
| 342 (Rd() == Rm()) && | |
| 343 (Rd() == n); | |
| 344 } | |
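| // For example, IsNop(3) matches "orr x3, xzr, x3", which is the encoding of | |
| // "mov x3, x3". | |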
| 345 | |
| 346 // Find the PC offset encoded in this instruction. 'this' may be a branch or | |
| 347 // a PC-relative addressing instruction. | |
| 348 // The offset returned is unscaled. | |
| 349 ptrdiff_t ImmPCOffset(); | |
| 350 | |
| 351 // Find the target of this instruction. 'this' may be a branch or a | |
| 352 // PC-relative addressing instruction. | |
| 353 Instruction* ImmPCOffsetTarget(); | |
| 354 | |
| 355 static bool IsValidImmPCOffset(ImmBranchType branch_type, int32_t offset); | |
| 356 bool IsTargetInImmPCOffsetRange(Instruction* target); | |
| 357 // Patch a PC-relative offset to refer to 'target'. 'this' may be a branch or | |
| 358 // a PC-relative addressing instruction. | |
| 359 void SetImmPCOffsetTarget(Instruction* target); | |
| 360 // Patch a literal load instruction to load from 'source'. | |
| 361 void SetImmLLiteral(Instruction* source); | |
| 362 | |
| 363 uint8_t* LiteralAddress() { | |
| 364 int offset = ImmLLiteral() << kLiteralEntrySizeLog2; | |
| 365 return reinterpret_cast<uint8_t*>(this) + offset; | |
| 366 } | |
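| // ImmLLiteral is the signed, word-scaled, pc-relative offset used by load | |
| // literal instructions, so shifting it left by kLiteralEntrySizeLog2 converts | |
| // it back to a byte offset from this instruction. | |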
| 367 | |
| 368 uint32_t Literal32() { | |
| 369 uint32_t literal; | |
| 370 memcpy(&literal, LiteralAddress(), sizeof(literal)); | |
| 371 | |
| 372 return literal; | |
| 373 } | |
| 374 | |
| 375 uint64_t Literal64() { | |
| 376 uint64_t literal; | |
| 377 memcpy(&literal, LiteralAddress(), sizeof(literal)); | |
| 378 | |
| 379 return literal; | |
| 380 } | |
| 381 | |
| 382 float LiteralFP32() { | |
| 383 return rawbits_to_float(Literal32()); | |
| 384 } | |
| 385 | |
| 386 double LiteralFP64() { | |
| 387 return rawbits_to_double(Literal64()); | |
| 388 } | |
| 389 | |
| 390 Instruction* NextInstruction() { | |
| 391 return this + kInstructionSize; | |
| 392 } | |
| 393 | |
| 394 Instruction* InstructionAtOffset(int64_t offset) { | |
| 395 ASSERT(IsAligned(reinterpret_cast<uintptr_t>(this) + offset, | |
| 396 kInstructionSize)); | |
| 397 return this + offset; | |
| 398 } | |
| 399 | |
| 400 template<typename T> static Instruction* Cast(T src) { | |
| 401 return reinterpret_cast<Instruction*>(src); | |
| 402 } | |
| 403 | |
| 404 | |
| 405 void SetPCRelImmTarget(Instruction* target); | |
| 406 void SetBranchImmTarget(Instruction* target); | |
| 407 }; | |
| 408 | |
| 409 | |
| 410 // Where Instruction looks at instructions generated by the Assembler, | |
| 411 // InstructionSequence looks at instruction sequences generated by the | |
| 412 // MacroAssembler. | |
| 413 class InstructionSequence : public Instruction { | |
| 414 public: | |
| 415 static InstructionSequence* At(Address address) { | |
| 416 return reinterpret_cast<InstructionSequence*>(address); | |
| 417 } | |
| 418 | |
| 419 // Sequences generated by MacroAssembler::InlineData(). | |
| 420 bool IsInlineData() const; | |
| 421 uint64_t InlineData() const; | |
| 422 }; | |
| 423 | |
| 424 | |
| 425 // Simulator/Debugger debug instructions --------------------------------------- | |
| 426 // Each debug marker is represented by a HLT instruction. The immediate comment | |
| 427 // field in the instruction is used to identify the type of debug marker. Each | |
| 428 // marker encodes arguments in a different way, as described below. | |
| 429 | |
| 430 // Indicate to the Debugger that the instruction is a redirected call. | |
| 431 const Instr kImmExceptionIsRedirectedCall = 0xca11; | |
| 432 | |
| 433 // Represent unreachable code. This is used as a guard in parts of the code that | |
| 434 // should not be reachable, such as in data encoded inline in the instructions. | |
| 435 const Instr kImmExceptionIsUnreachable = 0xdebf; | |
| 436 | |
| 437 // A pseudo 'printf' instruction. The arguments will be passed to the platform | |
| 438 // printf method. | |
| 439 const Instr kImmExceptionIsPrintf = 0xdeb1; | |
| 440 // Parameters are stored in A64 registers as if the printf pseudo-instruction | |
| 441 // was a call to the real printf method: | |
| 442 // | |
| 443 // x0: The format string, then either of: | |
| 444 // x1-x7: Optional arguments. | |
| 445 // d0-d7: Optional arguments. | |
| 446 // | |
| 447 // Floating-point and integer arguments are passed in separate sets of | |
| 448 // registers in AAPCS64 (even for varargs functions), so it is not possible to | |
| 449 // determine the type or location of each argument without some information | |
| 450 // about the values that were passed in. This information could be retrieved | |
| 451 // from the printf format string, but the format string is not trivial to | |
| 452 // parse so we encode the relevant information with the HLT instruction. | |
| 453 // - Type | |
| 454 // Either kRegister or kFPRegister, but stored as a uint32_t because there's | |
| 455 // no way to guarantee the size of the CPURegister::RegisterType enum. | |
| 456 const unsigned kPrintfTypeOffset = 1 * kInstructionSize; | |
| 457 const unsigned kPrintfLength = 2 * kInstructionSize; | |
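| // A printf marker therefore occupies kPrintfLength bytes in the instruction | |
| // stream: the HLT instruction itself at offset 0, followed by one 32-bit data | |
| // word holding the type at kPrintfTypeOffset. | |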
| 458 | |
| 459 // A pseudo 'debug' instruction. | |
| 460 const Instr kImmExceptionIsDebug = 0xdeb0; | |
| 461 // Parameters are inlined in the code after a debug pseudo-instruction: | |
| 462 // - Debug code. | |
| 463 // - Debug parameters. | |
| 464 // - Debug message string. This is a NULL-terminated ASCII string, padded to | |
| 465 // kInstructionSize so that subsequent instructions are correctly aligned. | |
| 466 // - A kImmExceptionIsUnreachable marker, to catch accidental execution of the | |
| 467 // string data. | |
| 468 const unsigned kDebugCodeOffset = 1 * kInstructionSize; | |
| 469 const unsigned kDebugParamsOffset = 2 * kInstructionSize; | |
| 470 const unsigned kDebugMessageOffset = 3 * kInstructionSize; | |
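| // A debug marker therefore looks roughly like this in the instruction stream: | |
| //   hlt kImmExceptionIsDebug      ; offset 0 | |
| //   data: debug code              ; kDebugCodeOffset | |
| //   data: debug parameters        ; kDebugParamsOffset | |
| //   data: "message\0" (padded)    ; kDebugMessageOffset onwards | |
| //   followed by a kImmExceptionIsUnreachable marker. | |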
| 471 | |
| 472 // Debug parameters. | |
| 473 // When used without a TRACE_ option, the Debugger prints the arguments only | |
| 474 // once. Otherwise, TRACE_ENABLE and TRACE_DISABLE enable or disable tracing of | |
| 475 // the specified LOG_ parameters before every instruction. | |
| 476 // | |
| 477 // TRACE_OVERRIDE enables the specified LOG_ parameters, and disables any | |
| 478 // others that were not specified. | |
| 479 // | |
| 480 // For example: | |
| 481 // | |
| 482 // __ debug("print registers and fp registers", 0, LOG_REGS | LOG_FP_REGS); | |
| 483 // will print the registers and fp registers only once. | |
| 484 // | |
| 485 // __ debug("trace disasm", 1, TRACE_ENABLE | LOG_DISASM); | |
| 486 // starts disassembling the code. | |
| 487 // | |
| 488 // __ debug("trace rets", 2, TRACE_ENABLE | LOG_REGS); | |
| 489 // adds the general purpose registers to the trace. | |
| 490 // | |
| 491 // __ debug("stop regs", 3, TRACE_DISABLE | LOG_REGS); | |
| 492 // stops tracing the registers. | |
| 493 const unsigned kDebuggerTracingDirectivesMask = 3 << 6; | |
| 494 enum DebugParameters { | |
| 495 NO_PARAM = 0, | |
| 496 BREAK = 1 << 0, | |
| 497 LOG_DISASM = 1 << 1, // Use only with TRACE. Disassemble the code. | |
| 498 LOG_REGS = 1 << 2, // Log general purpose registers. | |
| 499 LOG_FP_REGS = 1 << 3, // Log floating-point registers. | |
| 500 LOG_SYS_REGS = 1 << 4, // Log the status flags. | |
| 501 LOG_WRITE = 1 << 5, // Log any memory write. | |
| 502 | |
| 503 LOG_STATE = LOG_REGS | LOG_FP_REGS | LOG_SYS_REGS, | |
| 504 LOG_ALL = LOG_DISASM | LOG_STATE | LOG_WRITE, | |
| 505 | |
| 506 // Trace control. | |
| 507 TRACE_ENABLE = 1 << 6, | |
| 508 TRACE_DISABLE = 2 << 6, | |
| 509 TRACE_OVERRIDE = 3 << 6 | |
| 510 }; | |
| 511 | |
| 512 | |
| 513 } } // namespace v8::internal | |
| 514 | |
| 515 | |
| 516 #endif // V8_A64_INSTRUCTIONS_A64_H_ | |