| 1 // Copyright (c) 1994-2006 Sun Microsystems Inc. |
| 2 // All Rights Reserved. |
| 3 // |
| 4 // Redistribution and use in source and binary forms, with or without |
| 5 // modification, are permitted provided that the following conditions |
| 6 // are met: |
| 7 // |
| 8 // - Redistributions of source code must retain the above copyright notice, |
| 9 // this list of conditions and the following disclaimer. |
| 10 // |
| 11 // - Redistribution in binary form must reproduce the above copyright |
| 12 // notice, this list of conditions and the following disclaimer in the |
| 13 // documentation and/or other materials provided with the |
| 14 // distribution. |
| 15 // |
| 16 // - Neither the name of Sun Microsystems or the names of contributors may |
| 17 // be used to endorse or promote products derived from this software without |
| 18 // specific prior written permission. |
| 19 // |
| 20 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS |
| 21 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT |
| 22 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS |
| 23 // FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE |
| 24 // COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, |
| 25 // INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES |
| 26 // (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR |
| 27 // SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) |
| 28 // HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, |
| 29 // STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) |
| 30 // ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED |
| 31 // OF THE POSSIBILITY OF SUCH DAMAGE. |
| 32 |
| 33 // The original source code covered by the above license has been |
| 34 // modified significantly by Google Inc. |
| 35 // Copyright 2012 the V8 project authors. All rights reserved. |
| 36 |
| 37 // A light-weight PPC Assembler |
| 38 // Generates user mode instructions for the PPC architecture. |
| 39 |
| 40 #ifndef V8_PPC_ASSEMBLER_PPC_H_ |
| 41 #define V8_PPC_ASSEMBLER_PPC_H_ |
| 42 |
| 43 #include <stdio.h> |
| 44 #include <vector> |
| 45 |
| 46 #include "src/assembler.h" |
| 47 #include "src/ppc/constants-ppc.h" |
| 48 #include "src/serialize.h" |
| 49 |
| 50 #define ABI_USES_FUNCTION_DESCRIPTORS \ |
| 51 (V8_HOST_ARCH_PPC && (V8_OS_AIX || \ |
| 52 (V8_TARGET_ARCH_PPC64 && V8_TARGET_BIG_ENDIAN))) |
| 53 |
| 54 #define ABI_PASSES_HANDLES_IN_REGS \ |
| 55 (!V8_HOST_ARCH_PPC || V8_OS_AIX || V8_TARGET_ARCH_PPC64) |
| 56 |
| 57 #define ABI_RETURNS_HANDLES_IN_REGS \ |
| 58 (!V8_HOST_ARCH_PPC || V8_TARGET_LITTLE_ENDIAN) |
| 59 |
| 60 #define ABI_RETURNS_OBJECT_PAIRS_IN_REGS \ |
| 61 (!V8_HOST_ARCH_PPC || V8_TARGET_LITTLE_ENDIAN) |
| 62 |
| 63 #define ABI_TOC_ADDRESSABILITY_VIA_IP \ |
| 64 (V8_HOST_ARCH_PPC && V8_TARGET_ARCH_PPC64 && V8_TARGET_LITTLE_ENDIAN) |
| 65 |
| 66 #if !V8_HOST_ARCH_PPC || V8_OS_AIX || V8_TARGET_ARCH_PPC64 |
| 67 #define ABI_TOC_REGISTER kRegister_r2_Code |
| 68 #else |
| 69 #define ABI_TOC_REGISTER kRegister_r13_Code |
| 70 #endif |
| 71 |
| 72 #define INSTR_AND_DATA_CACHE_COHERENCY LWSYNC |
| 73 |
| 74 namespace v8 { |
| 75 namespace internal { |
| 76 |
| 77 // CPU Registers. |
| 78 // |
| 79 // 1) We would prefer to use an enum, but enum values are assignment- |
| 80 // compatible with int, which has caused code-generation bugs. |
| 81 // |
| 82 // 2) We would prefer to use a class instead of a struct but we don't like |
| 83 // the register initialization to depend on the particular initialization |
| 84 // order (which appears to be different on OS X, Linux, and Windows for the |
| 85 // installed versions of C++ we tried). Using a struct permits C-style |
| 86 // "initialization". Also, the Register objects cannot be const as this |
| 87 // forces initialization stubs in MSVC, making us dependent on initialization |
| 88 // order. |
| 89 // |
| 90 // 3) By not using an enum, we are possibly preventing the compiler from |
| 91 // doing certain constant folds, which may significantly reduce the |
| 92 // code generated for some assembly instructions (because they boil down |
| 93 // to a few constants). If this is a problem, we could change the code |
| 94 // such that we use an enum in optimized mode, and the struct in debug |
| 95 // mode. This way we get the compile-time error checking in debug mode |
| 96 // and best performance in optimized code. |
| 97 |
| 98 // Core register |
| 99 struct Register { |
| 100 static const int kNumRegisters = 32; |
| 101 static const int kSizeInBytes = kPointerSize; |
| 102 |
| 103 #if V8_TARGET_LITTLE_ENDIAN |
| 104 static const int kMantissaOffset = 0; |
| 105 static const int kExponentOffset = 4; |
| 106 #else |
| 107 static const int kMantissaOffset = 4; |
| 108 static const int kExponentOffset = 0; |
| 109 #endif |
| 110 |
| 111 static const int kAllocatableLowRangeBegin = 3; |
| 112 static const int kAllocatableLowRangeEnd = 10; |
| 113 static const int kAllocatableHighRangeBegin = 14; |
| 114 #if V8_OOL_CONSTANT_POOL |
| 115 static const int kAllocatableHighRangeEnd = 27; |
| 116 #else |
| 117 static const int kAllocatableHighRangeEnd = 28; |
| 118 #endif |
| 119 static const int kAllocatableContext = 30; |
| 120 |
| 121 static const int kNumAllocatableLow = |
| 122 kAllocatableLowRangeEnd - kAllocatableLowRangeBegin + 1; |
| 123 static const int kNumAllocatableHigh = |
| 124 kAllocatableHighRangeEnd - kAllocatableHighRangeBegin + 1; |
| 125 static const int kMaxNumAllocatableRegisters = |
| 126 kNumAllocatableLow + kNumAllocatableHigh + 1; // cp |
| 127 |
| 128 static int NumAllocatableRegisters() { return kMaxNumAllocatableRegisters; } |
| 129 |
| 130 static int ToAllocationIndex(Register reg) { |
| 131 int index; |
| 132 int code = reg.code(); |
| 133 if (code == kAllocatableContext) { |
| 134 // Context is the last index |
| 135 index = NumAllocatableRegisters() - 1; |
| 136 } else if (code <= kAllocatableLowRangeEnd) { |
| 137 // low range |
| 138 index = code - kAllocatableLowRangeBegin; |
| 139 } else { |
| 140 // high range |
| 141 index = code - kAllocatableHighRangeBegin + kNumAllocatableLow; |
| 142 } |
| 143 DCHECK(index >= 0 && index < kMaxNumAllocatableRegisters); |
| 144 return index; |
| 145 } |
| 146 |
| 147 static Register FromAllocationIndex(int index) { |
| 148 DCHECK(index >= 0 && index < kMaxNumAllocatableRegisters); |
| 149 // Last index is always the 'cp' register. |
| 150 if (index == kMaxNumAllocatableRegisters - 1) { |
| 151 return from_code(kAllocatableContext); |
| 152 } |
| 153 return (index < kNumAllocatableLow) |
| 154 ? from_code(index + kAllocatableLowRangeBegin) |
| 155 : from_code(index - kNumAllocatableLow + |
| 156 kAllocatableHighRangeBegin); |
| 157 } |
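| |
| // Illustrative mapping (a sketch, assuming the layout above without |
| // V8_OOL_CONSTANT_POOL): ToAllocationIndex(r3) == 0, ToAllocationIndex(r10) == 7, |
| // ToAllocationIndex(r14) == 8, ToAllocationIndex(r28) == 22 and |
| // ToAllocationIndex(cp) == 23; FromAllocationIndex() inverts this mapping. |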
| 158 |
| 159 static const char* AllocationIndexToString(int index) { |
| 160 DCHECK(index >= 0 && index < kMaxNumAllocatableRegisters); |
| 161 const char* const names[] = { |
| 162 "r3", |
| 163 "r4", |
| 164 "r5", |
| 165 "r6", |
| 166 "r7", |
| 167 "r8", |
| 168 "r9", |
| 169 "r10", |
| 170 "r14", |
| 171 "r15", |
| 172 "r16", |
| 173 "r17", |
| 174 "r18", |
| 175 "r19", |
| 176 "r20", |
| 177 "r21", |
| 178 "r22", |
| 179 "r23", |
| 180 "r24", |
| 181 "r25", |
| 182 "r26", |
| 183 "r27", |
| 184 #if !V8_OOL_CONSTANT_POOL |
| 185 "r28", |
| 186 #endif |
| 187 "cp", |
| 188 }; |
| 189 return names[index]; |
| 190 } |
| 191 |
| 192 static Register from_code(int code) { |
| 193 Register r = {code}; |
| 194 return r; |
| 195 } |
| 196 |
| 197 bool is_valid() const { return 0 <= code_ && code_ < kNumRegisters; } |
| 198 bool is(Register reg) const { return code_ == reg.code_; } |
| 199 int code() const { |
| 200 DCHECK(is_valid()); |
| 201 return code_; |
| 202 } |
| 203 int bit() const { |
| 204 DCHECK(is_valid()); |
| 205 return 1 << code_; |
| 206 } |
| 207 |
| 208 void set_code(int code) { |
| 209 code_ = code; |
| 210 DCHECK(is_valid()); |
| 211 } |
| 212 |
| 213 // Unfortunately we can't make this private in a struct. |
| 214 int code_; |
| 215 }; |
| 216 |
| 217 // These constants are used in several locations, including static initializers |
| 218 const int kRegister_no_reg_Code = -1; |
| 219 const int kRegister_r0_Code = 0; // general scratch |
| 220 const int kRegister_sp_Code = 1; // stack pointer |
| 221 const int kRegister_r2_Code = 2; // special on PowerPC |
| 222 const int kRegister_r3_Code = 3; |
| 223 const int kRegister_r4_Code = 4; |
| 224 const int kRegister_r5_Code = 5; |
| 225 const int kRegister_r6_Code = 6; |
| 226 const int kRegister_r7_Code = 7; |
| 227 const int kRegister_r8_Code = 8; |
| 228 const int kRegister_r9_Code = 9; |
| 229 const int kRegister_r10_Code = 10; |
| 230 const int kRegister_r11_Code = 11; // lithium scratch |
| 231 const int kRegister_ip_Code = 12; // ip (general scratch) |
| 232 const int kRegister_r13_Code = 13; // special on PowerPC |
| 233 const int kRegister_r14_Code = 14; |
| 234 const int kRegister_r15_Code = 15; |
| 235 |
| 236 const int kRegister_r16_Code = 16; |
| 237 const int kRegister_r17_Code = 17; |
| 238 const int kRegister_r18_Code = 18; |
| 239 const int kRegister_r19_Code = 19; |
| 240 const int kRegister_r20_Code = 20; |
| 241 const int kRegister_r21_Code = 21; |
| 242 const int kRegister_r22_Code = 22; |
| 243 const int kRegister_r23_Code = 23; |
| 244 const int kRegister_r24_Code = 24; |
| 245 const int kRegister_r25_Code = 25; |
| 246 const int kRegister_r26_Code = 26; |
| 247 const int kRegister_r27_Code = 27; |
| 248 const int kRegister_r28_Code = 28; // constant pool pointer |
| 249 const int kRegister_r29_Code = 29; // roots array pointer |
| 250 const int kRegister_r30_Code = 30; // context pointer |
| 251 const int kRegister_fp_Code = 31; // frame pointer |
| 252 |
| 253 const Register no_reg = {kRegister_no_reg_Code}; |
| 254 |
| 255 const Register r0 = {kRegister_r0_Code}; |
| 256 const Register sp = {kRegister_sp_Code}; |
| 257 const Register r2 = {kRegister_r2_Code}; |
| 258 const Register r3 = {kRegister_r3_Code}; |
| 259 const Register r4 = {kRegister_r4_Code}; |
| 260 const Register r5 = {kRegister_r5_Code}; |
| 261 const Register r6 = {kRegister_r6_Code}; |
| 262 const Register r7 = {kRegister_r7_Code}; |
| 263 const Register r8 = {kRegister_r8_Code}; |
| 264 const Register r9 = {kRegister_r9_Code}; |
| 265 const Register r10 = {kRegister_r10_Code}; |
| 266 const Register r11 = {kRegister_r11_Code}; |
| 267 const Register ip = {kRegister_ip_Code}; |
| 268 const Register r13 = {kRegister_r13_Code}; |
| 269 const Register r14 = {kRegister_r14_Code}; |
| 270 const Register r15 = {kRegister_r15_Code}; |
| 271 |
| 272 const Register r16 = {kRegister_r16_Code}; |
| 273 const Register r17 = {kRegister_r17_Code}; |
| 274 const Register r18 = {kRegister_r18_Code}; |
| 275 const Register r19 = {kRegister_r19_Code}; |
| 276 const Register r20 = {kRegister_r20_Code}; |
| 277 const Register r21 = {kRegister_r21_Code}; |
| 278 const Register r22 = {kRegister_r22_Code}; |
| 279 const Register r23 = {kRegister_r23_Code}; |
| 280 const Register r24 = {kRegister_r24_Code}; |
| 281 const Register r25 = {kRegister_r25_Code}; |
| 282 const Register r26 = {kRegister_r26_Code}; |
| 283 const Register r27 = {kRegister_r27_Code}; |
| 284 const Register r28 = {kRegister_r28_Code}; |
| 285 const Register r29 = {kRegister_r29_Code}; |
| 286 const Register r30 = {kRegister_r30_Code}; |
| 287 const Register fp = {kRegister_fp_Code}; |
| 288 |
| 289 // Give alias names to registers |
| 290 const Register cp = {kRegister_r30_Code}; // JavaScript context pointer |
| 291 const Register kRootRegister = {kRegister_r29_Code}; // Roots array pointer. |
| 292 #if V8_OOL_CONSTANT_POOL |
| 293 const Register kConstantPoolRegister = {kRegister_r28_Code}; // Constant pool |
| 294 #endif |
| 295 |
| 296 // Double word FP register. |
| 297 struct DoubleRegister { |
| 298 static const int kNumRegisters = 32; |
| 299 static const int kMaxNumRegisters = kNumRegisters; |
| 300 static const int kNumVolatileRegisters = 14; // d0-d13 |
| 301 static const int kSizeInBytes = 8; |
| 302 |
| 303 static const int kAllocatableLowRangeBegin = 1; |
| 304 static const int kAllocatableLowRangeEnd = 12; |
| 305 static const int kAllocatableHighRangeBegin = 15; |
| 306 static const int kAllocatableHighRangeEnd = 31; |
| 307 |
| 308 static const int kNumAllocatableLow = |
| 309 kAllocatableLowRangeEnd - kAllocatableLowRangeBegin + 1; |
| 310 static const int kNumAllocatableHigh = |
| 311 kAllocatableHighRangeEnd - kAllocatableHighRangeBegin + 1; |
| 312 static const int kMaxNumAllocatableRegisters = |
| 313 kNumAllocatableLow + kNumAllocatableHigh; |
| 314 static int NumAllocatableRegisters() { return kMaxNumAllocatableRegisters; } |
| 315 |
| 316 // TODO(turbofan) |
| 317 inline static int NumAllocatableAliasedRegisters() { |
| 318 return NumAllocatableRegisters(); |
| 319 } |
| 320 |
| 321 static int ToAllocationIndex(DoubleRegister reg) { |
| 322 int code = reg.code(); |
| 323 int index = (code <= kAllocatableLowRangeEnd) |
| 324 ? code - kAllocatableLowRangeBegin |
| 325 : code - kAllocatableHighRangeBegin + kNumAllocatableLow; |
| 326 DCHECK(index < kMaxNumAllocatableRegisters); |
| 327 return index; |
| 328 } |
| 329 |
| 330 static DoubleRegister FromAllocationIndex(int index) { |
| 331 DCHECK(index >= 0 && index < kMaxNumAllocatableRegisters); |
| 332 return (index < kNumAllocatableLow) |
| 333 ? from_code(index + kAllocatableLowRangeBegin) |
| 334 : from_code(index - kNumAllocatableLow + |
| 335 kAllocatableHighRangeBegin); |
| 336 } |
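| |
| // Illustrative mapping (a sketch of the ranges above): d1 maps to index 0, |
| // d12 to 11, d15 to 12 and d31 to 28; d0, d13 and d14 are not allocatable. |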
| 337 |
| 338 static const char* AllocationIndexToString(int index); |
| 339 |
| 340 static DoubleRegister from_code(int code) { |
| 341 DoubleRegister r = {code}; |
| 342 return r; |
| 343 } |
| 344 |
| 345 bool is_valid() const { return 0 <= code_ && code_ < kMaxNumRegisters; } |
| 346 bool is(DoubleRegister reg) const { return code_ == reg.code_; } |
| 347 |
| 348 int code() const { |
| 349 DCHECK(is_valid()); |
| 350 return code_; |
| 351 } |
| 352 int bit() const { |
| 353 DCHECK(is_valid()); |
| 354 return 1 << code_; |
| 355 } |
| 356 void split_code(int* vm, int* m) const { |
| 357 DCHECK(is_valid()); |
| 358 *m = (code_ & 0x10) >> 4; |
| 359 *vm = code_ & 0x0F; |
| 360 } |
| 361 |
| 362 int code_; |
| 363 }; |
| 364 |
| 365 |
| 366 const DoubleRegister no_dreg = {-1}; |
| 367 const DoubleRegister d0 = {0}; |
| 368 const DoubleRegister d1 = {1}; |
| 369 const DoubleRegister d2 = {2}; |
| 370 const DoubleRegister d3 = {3}; |
| 371 const DoubleRegister d4 = {4}; |
| 372 const DoubleRegister d5 = {5}; |
| 373 const DoubleRegister d6 = {6}; |
| 374 const DoubleRegister d7 = {7}; |
| 375 const DoubleRegister d8 = {8}; |
| 376 const DoubleRegister d9 = {9}; |
| 377 const DoubleRegister d10 = {10}; |
| 378 const DoubleRegister d11 = {11}; |
| 379 const DoubleRegister d12 = {12}; |
| 380 const DoubleRegister d13 = {13}; |
| 381 const DoubleRegister d14 = {14}; |
| 382 const DoubleRegister d15 = {15}; |
| 383 const DoubleRegister d16 = {16}; |
| 384 const DoubleRegister d17 = {17}; |
| 385 const DoubleRegister d18 = {18}; |
| 386 const DoubleRegister d19 = {19}; |
| 387 const DoubleRegister d20 = {20}; |
| 388 const DoubleRegister d21 = {21}; |
| 389 const DoubleRegister d22 = {22}; |
| 390 const DoubleRegister d23 = {23}; |
| 391 const DoubleRegister d24 = {24}; |
| 392 const DoubleRegister d25 = {25}; |
| 393 const DoubleRegister d26 = {26}; |
| 394 const DoubleRegister d27 = {27}; |
| 395 const DoubleRegister d28 = {28}; |
| 396 const DoubleRegister d29 = {29}; |
| 397 const DoubleRegister d30 = {30}; |
| 398 const DoubleRegister d31 = {31}; |
| 399 |
| 400 // Aliases for double registers. Defined using #define instead of |
| 401 // "static const DoubleRegister&" because Clang complains otherwise when a |
| 402 // compilation unit that includes this header doesn't use the variables. |
| 403 #define kFirstCalleeSavedDoubleReg d14 |
| 404 #define kLastCalleeSavedDoubleReg d31 |
| 405 #define kDoubleRegZero d14 |
| 406 #define kScratchDoubleReg d13 |
| 407 |
| 408 Register ToRegister(int num); |
| 409 |
| 410 // Condition register |
| 411 struct CRegister { |
| 412 bool is_valid() const { return 0 <= code_ && code_ < 16; } |
| 413 bool is(CRegister creg) const { return code_ == creg.code_; } |
| 414 int code() const { |
| 415 DCHECK(is_valid()); |
| 416 return code_; |
| 417 } |
| 418 int bit() const { |
| 419 DCHECK(is_valid()); |
| 420 return 1 << code_; |
| 421 } |
| 422 |
| 423 // Unfortunately we can't make this private in a struct. |
| 424 int code_; |
| 425 }; |
| 426 |
| 427 |
| 428 const CRegister no_creg = {-1}; |
| 429 |
| 430 const CRegister cr0 = {0}; |
| 431 const CRegister cr1 = {1}; |
| 432 const CRegister cr2 = {2}; |
| 433 const CRegister cr3 = {3}; |
| 434 const CRegister cr4 = {4}; |
| 435 const CRegister cr5 = {5}; |
| 436 const CRegister cr6 = {6}; |
| 437 const CRegister cr7 = {7}; |
| 438 const CRegister cr8 = {8}; |
| 439 const CRegister cr9 = {9}; |
| 440 const CRegister cr10 = {10}; |
| 441 const CRegister cr11 = {11}; |
| 442 const CRegister cr12 = {12}; |
| 443 const CRegister cr13 = {13}; |
| 444 const CRegister cr14 = {14}; |
| 445 const CRegister cr15 = {15}; |
| 446 |
| 447 // ----------------------------------------------------------------------------- |
| 448 // Machine instruction Operands |
| 449 |
| 450 #if V8_TARGET_ARCH_PPC64 |
| 451 const RelocInfo::Mode kRelocInfo_NONEPTR = RelocInfo::NONE64; |
| 452 #else |
| 453 const RelocInfo::Mode kRelocInfo_NONEPTR = RelocInfo::NONE32; |
| 454 #endif |
| 455 |
| 456 // Class Operand represents a shifter operand in data processing instructions |
| 457 class Operand BASE_EMBEDDED { |
| 458 public: |
| 459 // immediate |
| 460 INLINE(explicit Operand(intptr_t immediate, |
| 461 RelocInfo::Mode rmode = kRelocInfo_NONEPTR)); |
| 462 INLINE(static Operand Zero()) { return Operand(static_cast<intptr_t>(0)); } |
| 463 INLINE(explicit Operand(const ExternalReference& f)); |
| 464 explicit Operand(Handle<Object> handle); |
| 465 INLINE(explicit Operand(Smi* value)); |
| 466 |
| 467 // rm |
| 468 INLINE(explicit Operand(Register rm)); |
| 469 |
| 470 // Return true if this is a register operand. |
| 471 INLINE(bool is_reg() const); |
| 472 |
| 473 // For mov. Return the number of actual instructions required to |
| 474 // load the operand into a register. This can be anywhere from |
| 475 // one (constant pool small section) to five instructions (full |
| 476 // 64-bit sequence). |
| 477 // |
| 478 // The value returned is only valid as long as no entries are added to the |
| 479 // constant pool between this call and the actual instruction being emitted. |
| 480 bool must_output_reloc_info(const Assembler* assembler) const; |
| 481 |
| 482 inline intptr_t immediate() const { |
| 483 DCHECK(!rm_.is_valid()); |
| 484 return imm_; |
| 485 } |
| 486 |
| 487 Register rm() const { return rm_; } |
| 488 |
| 489 private: |
| 490 Register rm_; |
| 491 intptr_t imm_; // valid if rm_ == no_reg |
| 492 RelocInfo::Mode rmode_; |
| 493 |
| 494 friend class Assembler; |
| 495 friend class MacroAssembler; |
| 496 }; |
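| |
| // Illustrative uses from within Assembler/MacroAssembler code (a sketch; the |
| // call sites are hypothetical): |
| //   li(r3, Operand(42));                 // small immediate |
| //   mov(r4, Operand(Smi::FromInt(1)));   // tagged Smi immediate |
| //   cmpi(r5, Operand::Zero()); |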
| 497 |
| 498 |
| 499 // Class MemOperand represents a memory operand in load and store instructions |
| 500 // On PowerPC a memory operand is a base register plus a 16-bit signed offset, |
| 501 // or alternatively a base register plus an index register |
| 502 class MemOperand BASE_EMBEDDED { |
| 503 public: |
| 504 explicit MemOperand(Register rn, int32_t offset = 0); |
| 505 |
| 506 explicit MemOperand(Register ra, Register rb); |
| 507 |
| 508 int32_t offset() const { |
| 509 DCHECK(rb_.is(no_reg)); |
| 510 return offset_; |
| 511 } |
| 512 |
| 513 // PowerPC - base register |
| 514 Register ra() const { |
| 515 DCHECK(!ra_.is(no_reg)); |
| 516 return ra_; |
| 517 } |
| 518 |
| 519 Register rb() const { |
| 520 DCHECK(offset_ == 0 && !rb_.is(no_reg)); |
| 521 return rb_; |
| 522 } |
| 523 |
| 524 private: |
| 525 Register ra_; // base |
| 526 int32_t offset_; // offset |
| 527 Register rb_; // index |
| 528 |
| 529 friend class Assembler; |
| 530 }; |
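| |
| // Illustrative uses (a sketch) of the two addressing forms: |
| //   lwz(r3, MemOperand(sp, 8));    // base register + signed 16-bit offset |
| //   lwzx(r3, MemOperand(r4, r5));  // base register + index register |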
| 531 |
| 532 |
| 533 #if V8_OOL_CONSTANT_POOL |
| 534 // Class used to build a constant pool. |
| 535 class ConstantPoolBuilder BASE_EMBEDDED { |
| 536 public: |
| 537 ConstantPoolBuilder(); |
| 538 ConstantPoolArray::LayoutSection AddEntry(Assembler* assm, |
| 539 const RelocInfo& rinfo); |
| 540 void Relocate(intptr_t pc_delta); |
| 541 bool IsEmpty(); |
| 542 Handle<ConstantPoolArray> New(Isolate* isolate); |
| 543 void Populate(Assembler* assm, ConstantPoolArray* constant_pool); |
| 544 |
| 545 inline ConstantPoolArray::LayoutSection current_section() const { |
| 546 return current_section_; |
| 547 } |
| 548 |
| 549 // Rather than increasing the capacity of the ConstantPoolArray's |
| 550 // small section to match the longer (16-bit) reach of PPC's load |
| 551 // instruction (at the expense of a larger header to describe the |
| 552 // layout), the PPC implementation utilizes the extended section to |
| 553 // satisfy that reach. I.e. all entries (regardless of their |
| 554 // section) are reachable with a single load instruction. |
| 555 // |
| 556 // This implementation does not support an unlimited constant pool |
| 557 // size (which would require a multi-instruction sequence). [See |
| 558 // ARM commit e27ab337 for a reference on the changes required to |
| 559 // support the longer instruction sequence.] Note, however, that |
| 560 // going down that path will necessarily generate that longer |
| 561 // sequence for all extended section accesses since the placement of |
| 562 // a given entry within the section is not known at the time of |
| 563 // code generation. |
| 564 // |
| 565 // TODO(mbrandy): Determine whether there is a benefit to supporting |
| 566 // the longer sequence given that nops could be used for those |
| 567 // entries which are reachable with a single instruction. |
| 568 inline bool is_full() const { return !is_int16(size_); } |
| 569 |
| 570 inline ConstantPoolArray::NumberOfEntries* number_of_entries( |
| 571 ConstantPoolArray::LayoutSection section) { |
| 572 return &number_of_entries_[section]; |
| 573 } |
| 574 |
| 575 inline ConstantPoolArray::NumberOfEntries* small_entries() { |
| 576 return number_of_entries(ConstantPoolArray::SMALL_SECTION); |
| 577 } |
| 578 |
| 579 inline ConstantPoolArray::NumberOfEntries* extended_entries() { |
| 580 return number_of_entries(ConstantPoolArray::EXTENDED_SECTION); |
| 581 } |
| 582 |
| 583 private: |
| 584 struct ConstantPoolEntry { |
| 585 ConstantPoolEntry(RelocInfo rinfo, ConstantPoolArray::LayoutSection section, |
| 586 int merged_index) |
| 587 : rinfo_(rinfo), section_(section), merged_index_(merged_index) {} |
| 588 |
| 589 RelocInfo rinfo_; |
| 590 ConstantPoolArray::LayoutSection section_; |
| 591 int merged_index_; |
| 592 }; |
| 593 |
| 594 ConstantPoolArray::Type GetConstantPoolType(RelocInfo::Mode rmode); |
| 595 |
| 596 uint32_t size_; |
| 597 std::vector<ConstantPoolEntry> entries_; |
| 598 ConstantPoolArray::LayoutSection current_section_; |
| 599 ConstantPoolArray::NumberOfEntries number_of_entries_[2]; |
| 600 }; |
| 601 #endif |
| 602 |
| 603 |
| 604 class Assembler : public AssemblerBase { |
| 605 public: |
| 606 // Create an assembler. Instructions and relocation information are emitted |
| 607 // into a buffer, with the instructions starting from the beginning and the |
| 608 // relocation information starting from the end of the buffer. See CodeDesc |
| 609 // for a detailed comment on the layout (globals.h). |
| 610 // |
| 611 // If the provided buffer is NULL, the assembler allocates and grows its own |
| 612 // buffer, and buffer_size determines the initial buffer size. The buffer is |
| 613 // owned by the assembler and deallocated upon destruction of the assembler. |
| 614 // |
| 615 // If the provided buffer is not NULL, the assembler uses the provided buffer |
| 616 // for code generation and assumes its size to be buffer_size. If the buffer |
| 617 // is too small, a fatal error occurs. No deallocation of the buffer is done |
| 618 // upon destruction of the assembler. |
| 619 Assembler(Isolate* isolate, void* buffer, int buffer_size); |
| 620 virtual ~Assembler() {} |
| 621 |
| 622 // GetCode emits any pending (non-emitted) code and fills the descriptor |
| 623 // desc. GetCode() is idempotent; it returns the same result if no other |
| 624 // Assembler functions are invoked in between GetCode() calls. |
| 625 void GetCode(CodeDesc* desc); |
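| |
| // Typical lifecycle (a sketch, given an Isolate* isolate): |
| //   Assembler assm(isolate, NULL, 256);  // assembler-owned, growable buffer |
| //   ... emit instructions ... |
| //   CodeDesc desc; |
| //   assm.GetCode(&desc); |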
| 626 |
| 627 // Label operations & relative jumps (PPUM Appendix D) |
| 628 // |
| 629 // Takes a branch opcode (cc) and a label (L) and generates |
| 630 // either a backward branch or a forward branch and links it |
| 631 // to the label fixup chain. Usage: |
| 632 // |
| 633 // Label L; // unbound label |
| 634 // j(cc, &L); // forward branch to unbound label |
| 635 // bind(&L); // bind label to the current pc |
| 636 // j(cc, &L); // backward branch to bound label |
| 637 // bind(&L); // illegal: a label may be bound only once |
| 638 // |
| 639 // Note: The same Label can be used for forward and backward branches |
| 640 // but it may be bound only once. |
| 641 |
| 642 void bind(Label* L); // binds an unbound label L to the current code position |
| 643 // Determines if Label is bound and near enough so that a single |
| 644 // branch instruction can be used to reach it. |
| 645 bool is_near(Label* L, Condition cond); |
| 646 |
| 647 // Returns the branch offset to the given label from the current code position |
| 648 // Links the label to the current position if it is still unbound |
| 649 // Manages the jump elimination optimization if the second parameter is true. |
| 650 int branch_offset(Label* L, bool jump_elimination_allowed); |
| 651 |
| 652 // Puts a label's target address at the given position. |
| 653 // The high 8 bits are set to zero. |
| 654 void label_at_put(Label* L, int at_offset); |
| 655 |
| 656 #if V8_OOL_CONSTANT_POOL |
| 657 INLINE(static bool IsConstantPoolLoadStart(Address pc)); |
| 658 INLINE(static bool IsConstantPoolLoadEnd(Address pc)); |
| 659 INLINE(static int GetConstantPoolOffset(Address pc)); |
| 660 INLINE(static void SetConstantPoolOffset(Address pc, int offset)); |
| 661 |
| 662 // Return the address in the constant pool of the code target address used by |
| 663 // the branch/call instruction at pc, or the object in a mov. |
| 664 INLINE(static Address target_constant_pool_address_at( |
| 665 Address pc, ConstantPoolArray* constant_pool)); |
| 666 #endif |
| 667 |
| 668 // Read/Modify the code target address in the branch/call instruction at pc. |
| 669 INLINE(static Address target_address_at(Address pc, |
| 670 ConstantPoolArray* constant_pool)); |
| 671 INLINE(static void set_target_address_at( |
| 672 Address pc, ConstantPoolArray* constant_pool, Address target, |
| 673 ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED)); |
| 674 INLINE(static Address target_address_at(Address pc, Code* code)) { |
| 675 ConstantPoolArray* constant_pool = code ? code->constant_pool() : NULL; |
| 676 return target_address_at(pc, constant_pool); |
| 677 } |
| 678 INLINE(static void set_target_address_at( |
| 679 Address pc, Code* code, Address target, |
| 680 ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED)) { |
| 681 ConstantPoolArray* constant_pool = code ? code->constant_pool() : NULL; |
| 682 set_target_address_at(pc, constant_pool, target, icache_flush_mode); |
| 683 } |
| 684 |
| 685 // Return the code target address at a call site from the return address |
| 686 // of that call in the instruction stream. |
| 687 inline static Address target_address_from_return_address(Address pc); |
| 688 |
| 689 // Given the address of the beginning of a call, return the address |
| 690 // in the instruction stream that the call will return to. |
| 691 INLINE(static Address return_address_from_call_start(Address pc)); |
| 692 |
| 693 // Return the code target address of the patch debug break slot |
| 694 INLINE(static Address break_address_from_return_address(Address pc)); |
| 695 |
| 696 // This sets the branch destination. |
| 697 // This is for calls and branches within generated code. |
| 698 inline static void deserialization_set_special_target_at( |
| 699 Address instruction_payload, Code* code, Address target); |
| 700 |
| 701 // Size of an instruction. |
| 702 static const int kInstrSize = sizeof(Instr); |
| 703 |
| 704 // Here we are patching the address in the lis/ori (mov) instruction sequence. |
| 705 // These values are used in the serialization process and must be zero for |
| 706 // the PPC platform, as Code, Embedded Object or External-reference pointers |
| 707 // are split across consecutive instructions and don't exist separately |
| 708 // in the code, so the serializer should not step forwards in memory after |
| 709 // a target is resolved and written. |
| 710 static const int kSpecialTargetSize = 0; |
| 711 |
| 712 // Number of instructions to load an address via a mov sequence. |
| 713 #if V8_TARGET_ARCH_PPC64 |
| 714 static const int kMovInstructionsConstantPool = 2; |
| 715 static const int kMovInstructionsNoConstantPool = 5; |
| 716 #else |
| 717 static const int kMovInstructionsConstantPool = 1; |
| 718 static const int kMovInstructionsNoConstantPool = 2; |
| 719 #endif |
| 720 #if V8_OOL_CONSTANT_POOL |
| 721 static const int kMovInstructions = kMovInstructionsConstantPool; |
| 722 #else |
| 723 static const int kMovInstructions = kMovInstructionsNoConstantPool; |
| 724 #endif |
| 725 |
| 726 // Distance between the instruction referring to the address of the call |
| 727 // target and the return address. |
| 728 |
| 729 // Call sequence is a FIXED_SEQUENCE: |
| 730 // mov r8, @ call address |
| 731 // mtlr r8 |
| 732 // blrl |
| 733 // @ return address |
| 734 static const int kCallTargetAddressOffset = |
| 735 (kMovInstructions + 2) * kInstrSize; |
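| |
| // For example (assuming 64-bit PPC without an out-of-line constant pool), |
| // kMovInstructions == 5, so kCallTargetAddressOffset == (5 + 2) * 4 == 28. |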
| 736 |
| 737 // Distance between start of patched return sequence and the emitted address |
| 738 // to jump to. |
| 739 // Patched return sequence is a FIXED_SEQUENCE: |
| 740 // mov r0, <address> |
| 741 // mtlr r0 |
| 742 // blrl |
| 743 static const int kPatchReturnSequenceAddressOffset = 0 * kInstrSize; |
| 744 |
| 745 // Distance between start of patched debug break slot and the emitted address |
| 746 // to jump to. |
| 747 // Patched debug break slot code is a FIXED_SEQUENCE: |
| 748 // mov r0, <address> |
| 749 // mtlr r0 |
| 750 // blrl |
| 751 static const int kPatchDebugBreakSlotAddressOffset = 0 * kInstrSize; |
| 752 |
| 753 // This is the length of the BreakLocationIterator::SetDebugBreakAtReturn() |
| 754 // code patch FIXED_SEQUENCE |
| 755 static const int kJSReturnSequenceInstructions = |
| 756 kMovInstructionsNoConstantPool + 3; |
| 757 |
| 758 // This is the length of the code sequence from SetDebugBreakAtSlot() |
| 759 // FIXED_SEQUENCE |
| 760 static const int kDebugBreakSlotInstructions = |
| 761 kMovInstructionsNoConstantPool + 2; |
| 762 static const int kDebugBreakSlotLength = |
| 763 kDebugBreakSlotInstructions * kInstrSize; |
| 764 |
| 765 static inline int encode_crbit(const CRegister& cr, enum CRBit crbit) { |
| 766 return ((cr.code() * CRWIDTH) + crbit); |
| 767 } |
| 768 |
| 769 // --------------------------------------------------------------------------- |
| 770 // Code generation |
| 771 |
| 772 // Insert the smallest number of nop instructions |
| 773 // possible to align the pc offset to a multiple |
| 774 // of m. m must be a power of 2 (>= 4). |
| 775 void Align(int m); |
| 776 // Aligns code to something that's optimal for a jump target for the platform. |
| 777 void CodeTargetAlign(); |
| 778 |
| 779 // Branch instructions |
| 780 void bclr(BOfield bo, LKBit lk); |
| 781 void blr(); |
| 782 void bc(int branch_offset, BOfield bo, int condition_bit, LKBit lk = LeaveLK); |
| 783 void b(int branch_offset, LKBit lk); |
| 784 |
| 785 void bcctr(BOfield bo, LKBit lk); |
| 786 void bctr(); |
| 787 void bctrl(); |
| 788 |
| 789 // Convenience branch instructions using labels |
| 790 void b(Label* L, LKBit lk = LeaveLK) { b(branch_offset(L, false), lk); } |
| 791 |
| 792 void bc_short(Condition cond, Label* L, CRegister cr = cr7, |
| 793 LKBit lk = LeaveLK) { |
| 794 DCHECK(cond != al); |
| 795 DCHECK(cr.code() >= 0 && cr.code() <= 7); |
| 796 |
| 797 int b_offset = branch_offset(L, false); |
| 798 |
| 799 switch (cond) { |
| 800 case eq: |
| 801 bc(b_offset, BT, encode_crbit(cr, CR_EQ), lk); |
| 802 break; |
| 803 case ne: |
| 804 bc(b_offset, BF, encode_crbit(cr, CR_EQ), lk); |
| 805 break; |
| 806 case gt: |
| 807 bc(b_offset, BT, encode_crbit(cr, CR_GT), lk); |
| 808 break; |
| 809 case le: |
| 810 bc(b_offset, BF, encode_crbit(cr, CR_GT), lk); |
| 811 break; |
| 812 case lt: |
| 813 bc(b_offset, BT, encode_crbit(cr, CR_LT), lk); |
| 814 break; |
| 815 case ge: |
| 816 bc(b_offset, BF, encode_crbit(cr, CR_LT), lk); |
| 817 break; |
| 818 case unordered: |
| 819 bc(b_offset, BT, encode_crbit(cr, CR_FU), lk); |
| 820 break; |
| 821 case ordered: |
| 822 bc(b_offset, BF, encode_crbit(cr, CR_FU), lk); |
| 823 break; |
| 824 case overflow: |
| 825 bc(b_offset, BT, encode_crbit(cr, CR_SO), lk); |
| 826 break; |
| 827 case nooverflow: |
| 828 bc(b_offset, BF, encode_crbit(cr, CR_SO), lk); |
| 829 break; |
| 830 default: |
| 831 UNIMPLEMENTED(); |
| 832 } |
| 833 } |
| 834 |
| 835 void b(Condition cond, Label* L, CRegister cr = cr7, LKBit lk = LeaveLK) { |
| 836 if (cond == al) { |
| 837 b(L, lk); |
| 838 return; |
| 839 } |
| 840 |
| 841 if ((L->is_bound() && is_near(L, cond)) || !is_trampoline_emitted()) { |
| 842 bc_short(cond, L, cr, lk); |
| 843 return; |
| 844 } |
| 845 |
| 846 Label skip; |
| 847 Condition neg_cond = NegateCondition(cond); |
| 848 bc_short(neg_cond, &skip, cr); |
| 849 b(L, lk); |
| 850 bind(&skip); |
| 851 } |
| 852 |
| 853 void bne(Label* L, CRegister cr = cr7, LKBit lk = LeaveLK) { |
| 854 b(ne, L, cr, lk); |
| 855 } |
| 856 void beq(Label* L, CRegister cr = cr7, LKBit lk = LeaveLK) { |
| 857 b(eq, L, cr, lk); |
| 858 } |
| 859 void blt(Label* L, CRegister cr = cr7, LKBit lk = LeaveLK) { |
| 860 b(lt, L, cr, lk); |
| 861 } |
| 862 void bge(Label* L, CRegister cr = cr7, LKBit lk = LeaveLK) { |
| 863 b(ge, L, cr, lk); |
| 864 } |
| 865 void ble(Label* L, CRegister cr = cr7, LKBit lk = LeaveLK) { |
| 866 b(le, L, cr, lk); |
| 867 } |
| 868 void bgt(Label* L, CRegister cr = cr7, LKBit lk = LeaveLK) { |
| 869 b(gt, L, cr, lk); |
| 870 } |
| 871 void bunordered(Label* L, CRegister cr = cr7, LKBit lk = LeaveLK) { |
| 872 b(unordered, L, cr, lk); |
| 873 } |
| 874 void bordered(Label* L, CRegister cr = cr7, LKBit lk = LeaveLK) { |
| 875 b(ordered, L, cr, lk); |
| 876 } |
| 877 void boverflow(Label* L, CRegister cr = cr0, LKBit lk = LeaveLK) { |
| 878 b(overflow, L, cr, lk); |
| 879 } |
| 880 void bnooverflow(Label* L, CRegister cr = cr0, LKBit lk = LeaveLK) { |
| 881 b(nooverflow, L, cr, lk); |
| 882 } |
| 883 |
| 884 // Decrement CTR; branch if CTR != 0 |
| 885 void bdnz(Label* L, LKBit lk = LeaveLK) { |
| 886 bc(branch_offset(L, false), DCBNZ, 0, lk); |
| 887 } |
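| |
| // Illustrative counted loop using the CTR (a sketch; register choice is |
| // arbitrary): |
| //   mtctr(r6);        // iteration count, previously loaded into r6 |
| //   Label loop; |
| //   bind(&loop); |
| //   ...               // loop body |
| //   bdnz(&loop);      // decrement CTR and branch while CTR != 0 |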
| 888 |
| 889 // Data-processing instructions |
| 890 |
| 891 void sub(Register dst, Register src1, Register src2, OEBit s = LeaveOE, |
| 892 RCBit r = LeaveRC); |
| 893 |
| 894 void subfic(Register dst, Register src, const Operand& imm); |
| 895 |
| 896 void subfc(Register dst, Register src1, Register src2, OEBit s = LeaveOE, |
| 897 RCBit r = LeaveRC); |
| 898 |
| 899 void add(Register dst, Register src1, Register src2, OEBit s = LeaveOE, |
| 900 RCBit r = LeaveRC); |
| 901 |
| 902 void addc(Register dst, Register src1, Register src2, OEBit o = LeaveOE, |
| 903 RCBit r = LeaveRC); |
| 904 |
| 905 void addze(Register dst, Register src1, OEBit o, RCBit r); |
| 906 |
| 907 void mullw(Register dst, Register src1, Register src2, OEBit o = LeaveOE, |
| 908 RCBit r = LeaveRC); |
| 909 |
| 910 void mulhw(Register dst, Register src1, Register src2, OEBit o = LeaveOE, |
| 911 RCBit r = LeaveRC); |
| 912 |
| 913 void divw(Register dst, Register src1, Register src2, OEBit o = LeaveOE, |
| 914 RCBit r = LeaveRC); |
| 915 |
| 916 void addi(Register dst, Register src, const Operand& imm); |
| 917 void addis(Register dst, Register src, const Operand& imm); |
| 918 void addic(Register dst, Register src, const Operand& imm); |
| 919 |
| 920 void and_(Register dst, Register src1, Register src2, RCBit rc = LeaveRC); |
| 921 void andc(Register dst, Register src1, Register src2, RCBit rc = LeaveRC); |
| 922 void andi(Register ra, Register rs, const Operand& imm); |
| 923 void andis(Register ra, Register rs, const Operand& imm); |
| 924 void nor(Register dst, Register src1, Register src2, RCBit r = LeaveRC); |
| 925 void notx(Register dst, Register src, RCBit r = LeaveRC); |
| 926 void ori(Register dst, Register src, const Operand& imm); |
| 927 void oris(Register dst, Register src, const Operand& imm); |
| 928 void orx(Register dst, Register src1, Register src2, RCBit rc = LeaveRC); |
| 929 void xori(Register dst, Register src, const Operand& imm); |
| 930 void xoris(Register ra, Register rs, const Operand& imm); |
| 931 void xor_(Register dst, Register src1, Register src2, RCBit rc = LeaveRC); |
| 932 void cmpi(Register src1, const Operand& src2, CRegister cr = cr7); |
| 933 void cmpli(Register src1, const Operand& src2, CRegister cr = cr7); |
| 934 void cmpwi(Register src1, const Operand& src2, CRegister cr = cr7); |
| 935 void cmplwi(Register src1, const Operand& src2, CRegister cr = cr7); |
| 936 void li(Register dst, const Operand& src); |
| 937 void lis(Register dst, const Operand& imm); |
| 938 void mr(Register dst, Register src); |
| 939 |
| 940 void lbz(Register dst, const MemOperand& src); |
| 941 void lbzx(Register dst, const MemOperand& src); |
| 942 void lbzux(Register dst, const MemOperand& src); |
| 943 void lhz(Register dst, const MemOperand& src); |
| 944 void lhzx(Register dst, const MemOperand& src); |
| 945 void lhzux(Register dst, const MemOperand& src); |
| 946 void lwz(Register dst, const MemOperand& src); |
| 947 void lwzu(Register dst, const MemOperand& src); |
| 948 void lwzx(Register dst, const MemOperand& src); |
| 949 void lwzux(Register dst, const MemOperand& src); |
| 950 void lwa(Register dst, const MemOperand& src); |
| 951 void stb(Register dst, const MemOperand& src); |
| 952 void stbx(Register dst, const MemOperand& src); |
| 953 void stbux(Register dst, const MemOperand& src); |
| 954 void sth(Register dst, const MemOperand& src); |
| 955 void sthx(Register dst, const MemOperand& src); |
| 956 void sthux(Register dst, const MemOperand& src); |
| 957 void stw(Register dst, const MemOperand& src); |
| 958 void stwu(Register dst, const MemOperand& src); |
| 959 void stwx(Register rs, const MemOperand& src); |
| 960 void stwux(Register rs, const MemOperand& src); |
| 961 |
| 962 void extsb(Register rs, Register ra, RCBit r = LeaveRC); |
| 963 void extsh(Register rs, Register ra, RCBit r = LeaveRC); |
| 964 |
| 965 void neg(Register rt, Register ra, OEBit o = LeaveOE, RCBit c = LeaveRC); |
| 966 |
| 967 #if V8_TARGET_ARCH_PPC64 |
| 968 void ld(Register rd, const MemOperand& src); |
| 969 void ldx(Register rd, const MemOperand& src); |
| 970 void ldu(Register rd, const MemOperand& src); |
| 971 void ldux(Register rd, const MemOperand& src); |
| 972 void std(Register rs, const MemOperand& src); |
| 973 void stdx(Register rs, const MemOperand& src); |
| 974 void stdu(Register rs, const MemOperand& src); |
| 975 void stdux(Register rs, const MemOperand& src); |
| 976 void rldic(Register dst, Register src, int sh, int mb, RCBit r = LeaveRC); |
| 977 void rldicl(Register dst, Register src, int sh, int mb, RCBit r = LeaveRC); |
| 978 void rldcl(Register ra, Register rs, Register rb, int mb, RCBit r = LeaveRC); |
| 979 void rldicr(Register dst, Register src, int sh, int me, RCBit r = LeaveRC); |
| 980 void rldimi(Register dst, Register src, int sh, int mb, RCBit r = LeaveRC); |
| 981 void sldi(Register dst, Register src, const Operand& val, RCBit rc = LeaveRC); |
| 982 void srdi(Register dst, Register src, const Operand& val, RCBit rc = LeaveRC); |
| 983 void clrrdi(Register dst, Register src, const Operand& val, |
| 984 RCBit rc = LeaveRC); |
| 985 void clrldi(Register dst, Register src, const Operand& val, |
| 986 RCBit rc = LeaveRC); |
| 987 void sradi(Register ra, Register rs, int sh, RCBit r = LeaveRC); |
| 988 void srd(Register dst, Register src1, Register src2, RCBit r = LeaveRC); |
| 989 void sld(Register dst, Register src1, Register src2, RCBit r = LeaveRC); |
| 990 void srad(Register dst, Register src1, Register src2, RCBit r = LeaveRC); |
| 991 void rotld(Register ra, Register rs, Register rb, RCBit r = LeaveRC); |
| 992 void rotldi(Register ra, Register rs, int sh, RCBit r = LeaveRC); |
| 993 void rotrdi(Register ra, Register rs, int sh, RCBit r = LeaveRC); |
| 994 void cntlzd_(Register dst, Register src, RCBit rc = LeaveRC); |
| 995 void extsw(Register rs, Register ra, RCBit r = LeaveRC); |
| 996 void mulld(Register dst, Register src1, Register src2, OEBit o = LeaveOE, |
| 997 RCBit r = LeaveRC); |
| 998 void divd(Register dst, Register src1, Register src2, OEBit o = LeaveOE, |
| 999 RCBit r = LeaveRC); |
| 1000 #endif |
| 1001 |
| 1002 void rlwinm(Register ra, Register rs, int sh, int mb, int me, |
| 1003 RCBit rc = LeaveRC); |
| 1004 void rlwimi(Register ra, Register rs, int sh, int mb, int me, |
| 1005 RCBit rc = LeaveRC); |
| 1006 void rlwnm(Register ra, Register rs, Register rb, int mb, int me, |
| 1007 RCBit rc = LeaveRC); |
| 1008 void slwi(Register dst, Register src, const Operand& val, RCBit rc = LeaveRC); |
| 1009 void srwi(Register dst, Register src, const Operand& val, RCBit rc = LeaveRC); |
| 1010 void clrrwi(Register dst, Register src, const Operand& val, |
| 1011 RCBit rc = LeaveRC); |
| 1012 void clrlwi(Register dst, Register src, const Operand& val, |
| 1013 RCBit rc = LeaveRC); |
| 1014 void srawi(Register ra, Register rs, int sh, RCBit r = LeaveRC); |
| 1015 void srw(Register dst, Register src1, Register src2, RCBit r = LeaveRC); |
| 1016 void slw(Register dst, Register src1, Register src2, RCBit r = LeaveRC); |
| 1017 void sraw(Register dst, Register src1, Register src2, RCBit r = LeaveRC); |
| 1018 void rotlw(Register ra, Register rs, Register rb, RCBit r = LeaveRC); |
| 1019 void rotlwi(Register ra, Register rs, int sh, RCBit r = LeaveRC); |
| 1020 void rotrwi(Register ra, Register rs, int sh, RCBit r = LeaveRC); |
| 1021 |
| 1022 void cntlzw_(Register dst, Register src, RCBit rc = LeaveRC); |
| 1023 |
| 1024 void subi(Register dst, Register src1, const Operand& src2); |
| 1025 |
| 1026 void cmp(Register src1, Register src2, CRegister cr = cr7); |
| 1027 void cmpl(Register src1, Register src2, CRegister cr = cr7); |
| 1028 void cmpw(Register src1, Register src2, CRegister cr = cr7); |
| 1029 void cmplw(Register src1, Register src2, CRegister cr = cr7); |
| 1030 |
| 1031 void mov(Register dst, const Operand& src); |
| 1032 |
| 1033 // Load the position of the label relative to the generated code object |
| 1034 // pointer in a register. |
| 1035 void mov_label_offset(Register dst, Label* label); |
| 1036 |
| 1037 // Multiply instructions |
| 1038 void mul(Register dst, Register src1, Register src2, OEBit s = LeaveOE, |
| 1039 RCBit r = LeaveRC); |
| 1040 |
| 1041 // Miscellaneous arithmetic instructions |
| 1042 |
| 1043 // Special register access |
| 1044 void crxor(int bt, int ba, int bb); |
| 1045 void crclr(int bt) { crxor(bt, bt, bt); } |
| 1046 void creqv(int bt, int ba, int bb); |
| 1047 void crset(int bt) { creqv(bt, bt, bt); } |
| 1048 void mflr(Register dst); |
| 1049 void mtlr(Register src); |
| 1050 void mtctr(Register src); |
| 1051 void mtxer(Register src); |
| 1052 void mcrfs(int bf, int bfa); |
| 1053 void mfcr(Register dst); |
| 1054 #if V8_TARGET_ARCH_PPC64 |
| 1055 void mffprd(Register dst, DoubleRegister src); |
| 1056 void mffprwz(Register dst, DoubleRegister src); |
| 1057 void mtfprd(DoubleRegister dst, Register src); |
| 1058 void mtfprwz(DoubleRegister dst, Register src); |
| 1059 void mtfprwa(DoubleRegister dst, Register src); |
| 1060 #endif |
| 1061 |
| 1062 void fake_asm(enum FAKE_OPCODE_T fopcode); |
| 1063 void marker_asm(int mcode); |
| 1064 void function_descriptor(); |
| 1065 |
| 1066 // Exception-generating instructions and debugging support |
| 1067 void stop(const char* msg, Condition cond = al, |
| 1068 int32_t code = kDefaultStopCode, CRegister cr = cr7); |
| 1069 |
| 1070 void bkpt(uint32_t imm16); // v5 and above |
| 1071 |
| 1072 // Informational messages when simulating |
| 1073 void info(const char* msg, Condition cond = al, |
| 1074 int32_t code = kDefaultStopCode, CRegister cr = cr7); |
| 1075 |
| 1076 void dcbf(Register ra, Register rb); |
| 1077 void sync(); |
| 1078 void lwsync(); |
| 1079 void icbi(Register ra, Register rb); |
| 1080 void isync(); |
| 1081 |
| 1082 // Support for floating point |
| 1083 void lfd(const DoubleRegister frt, const MemOperand& src); |
| 1084 void lfdu(const DoubleRegister frt, const MemOperand& src); |
| 1085 void lfdx(const DoubleRegister frt, const MemOperand& src); |
| 1086 void lfdux(const DoubleRegister frt, const MemOperand& src); |
| 1087 void lfs(const DoubleRegister frt, const MemOperand& src); |
| 1088 void lfsu(const DoubleRegister frt, const MemOperand& src); |
| 1089 void lfsx(const DoubleRegister frt, const MemOperand& src); |
| 1090 void lfsux(const DoubleRegister frt, const MemOperand& src); |
| 1091 void stfd(const DoubleRegister frs, const MemOperand& src); |
| 1092 void stfdu(const DoubleRegister frs, const MemOperand& src); |
| 1093 void stfdx(const DoubleRegister frs, const MemOperand& src); |
| 1094 void stfdux(const DoubleRegister frs, const MemOperand& src); |
| 1095 void stfs(const DoubleRegister frs, const MemOperand& src); |
| 1096 void stfsu(const DoubleRegister frs, const MemOperand& src); |
| 1097 void stfsx(const DoubleRegister frs, const MemOperand& src); |
| 1098 void stfsux(const DoubleRegister frs, const MemOperand& src); |
| 1099 |
| 1100 void fadd(const DoubleRegister frt, const DoubleRegister fra, |
| 1101 const DoubleRegister frb, RCBit rc = LeaveRC); |
| 1102 void fsub(const DoubleRegister frt, const DoubleRegister fra, |
| 1103 const DoubleRegister frb, RCBit rc = LeaveRC); |
| 1104 void fdiv(const DoubleRegister frt, const DoubleRegister fra, |
| 1105 const DoubleRegister frb, RCBit rc = LeaveRC); |
| 1106 void fmul(const DoubleRegister frt, const DoubleRegister fra, |
| 1107 const DoubleRegister frc, RCBit rc = LeaveRC); |
| 1108 void fcmpu(const DoubleRegister fra, const DoubleRegister frb, |
| 1109 CRegister cr = cr7); |
| 1110 void fmr(const DoubleRegister frt, const DoubleRegister frb, |
| 1111 RCBit rc = LeaveRC); |
| 1112 void fctiwz(const DoubleRegister frt, const DoubleRegister frb); |
| 1113 void fctiw(const DoubleRegister frt, const DoubleRegister frb); |
| 1114 void frim(const DoubleRegister frt, const DoubleRegister frb); |
| 1115 void frsp(const DoubleRegister frt, const DoubleRegister frb, |
| 1116 RCBit rc = LeaveRC); |
| 1117 void fcfid(const DoubleRegister frt, const DoubleRegister frb, |
| 1118 RCBit rc = LeaveRC); |
| 1119 void fctid(const DoubleRegister frt, const DoubleRegister frb, |
| 1120 RCBit rc = LeaveRC); |
| 1121 void fctidz(const DoubleRegister frt, const DoubleRegister frb, |
| 1122 RCBit rc = LeaveRC); |
| 1123 void fsel(const DoubleRegister frt, const DoubleRegister fra, |
| 1124 const DoubleRegister frc, const DoubleRegister frb, |
| 1125 RCBit rc = LeaveRC); |
| 1126 void fneg(const DoubleRegister frt, const DoubleRegister frb, |
| 1127 RCBit rc = LeaveRC); |
| 1128 void mtfsfi(int bf, int immediate, RCBit rc = LeaveRC); |
| 1129 void mffs(const DoubleRegister frt, RCBit rc = LeaveRC); |
| 1130 void mtfsf(const DoubleRegister frb, bool L = 1, int FLM = 0, bool W = 0, |
| 1131 RCBit rc = LeaveRC); |
| 1132 void fsqrt(const DoubleRegister frt, const DoubleRegister frb, |
| 1133 RCBit rc = LeaveRC); |
| 1134 void fabs(const DoubleRegister frt, const DoubleRegister frb, |
| 1135 RCBit rc = LeaveRC); |
| 1136 void fmadd(const DoubleRegister frt, const DoubleRegister fra, |
| 1137 const DoubleRegister frc, const DoubleRegister frb, |
| 1138 RCBit rc = LeaveRC); |
| 1139 void fmsub(const DoubleRegister frt, const DoubleRegister fra, |
| 1140 const DoubleRegister frc, const DoubleRegister frb, |
| 1141 RCBit rc = LeaveRC); |
| 1142 |
| 1143 // Pseudo instructions |
| 1144 |
| 1145 // Different nop operations are used by the code generator to detect certain |
| 1146 // states of the generated code. |
| 1147 enum NopMarkerTypes { |
| 1148 NON_MARKING_NOP = 0, |
| 1149 GROUP_ENDING_NOP, |
| 1150 DEBUG_BREAK_NOP, |
| 1151 // IC markers. |
| 1152 PROPERTY_ACCESS_INLINED, |
| 1153 PROPERTY_ACCESS_INLINED_CONTEXT, |
| 1154 PROPERTY_ACCESS_INLINED_CONTEXT_DONT_DELETE, |
| 1155 // Helper values. |
| 1156 LAST_CODE_MARKER, |
| 1157 FIRST_IC_MARKER = PROPERTY_ACCESS_INLINED |
| 1158 }; |
| 1159 |
| 1160 void nop(int type = 0); // 0 is the default non-marking type. |
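| |
| // E.g. (a sketch): nop(DEBUG_BREAK_NOP) emits a marking nop that can later be |
| // recognized via IsNop(instr_at(pos), DEBUG_BREAK_NOP). |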
| 1161 |
| 1162 void push(Register src) { |
| 1163 #if V8_TARGET_ARCH_PPC64 |
| 1164 stdu(src, MemOperand(sp, -kPointerSize)); |
| 1165 #else |
| 1166 stwu(src, MemOperand(sp, -kPointerSize)); |
| 1167 #endif |
| 1168 } |
| 1169 |
| 1170 void pop(Register dst) { |
| 1171 #if V8_TARGET_ARCH_PPC64 |
| 1172 ld(dst, MemOperand(sp)); |
| 1173 #else |
| 1174 lwz(dst, MemOperand(sp)); |
| 1175 #endif |
| 1176 addi(sp, sp, Operand(kPointerSize)); |
| 1177 } |
| 1178 |
| 1179 void pop() { addi(sp, sp, Operand(kPointerSize)); } |
| 1180 |
| 1181 // Jump unconditionally to given label. |
| 1182 void jmp(Label* L) { b(L); } |
| 1183 |
| 1184 // Check the code size generated from label to here. |
| 1185 int SizeOfCodeGeneratedSince(Label* label) { |
| 1186 return pc_offset() - label->pos(); |
| 1187 } |
| 1188 |
| 1189 // Check the number of instructions generated from label to here. |
| 1190 int InstructionsGeneratedSince(Label* label) { |
| 1191 return SizeOfCodeGeneratedSince(label) / kInstrSize; |
| 1192 } |
| 1193 |
| 1194 // Class for scoping postponing the trampoline pool generation. |
| 1195 class BlockTrampolinePoolScope { |
| 1196 public: |
| 1197 explicit BlockTrampolinePoolScope(Assembler* assem) : assem_(assem) { |
| 1198 assem_->StartBlockTrampolinePool(); |
| 1199 } |
| 1200 ~BlockTrampolinePoolScope() { assem_->EndBlockTrampolinePool(); } |
| 1201 |
| 1202 private: |
| 1203 Assembler* assem_; |
| 1204 |
| 1205 DISALLOW_IMPLICIT_CONSTRUCTORS(BlockTrampolinePoolScope); |
| 1206 }; |
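| |
| // Typical use from within an Assembler member (a sketch), e.g. around a |
| // FIXED_SEQUENCE that must not be split by trampoline pool emission: |
| //   { |
| //     BlockTrampolinePoolScope block_trampoline_pool(this); |
| //     ... emit the fixed-length sequence ... |
| //   } |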
| 1207 |
| 1208 // Debugging |
| 1209 |
| 1210 // Mark address of the ExitJSFrame code. |
| 1211 void RecordJSReturn(); |
| 1212 |
| 1213 // Mark address of a debug break slot. |
| 1214 void RecordDebugBreakSlot(); |
| 1215 |
| 1216 // Record the AST id of the CallIC being compiled, so that it can be placed |
| 1217 // in the relocation information. |
| 1218 void SetRecordedAstId(TypeFeedbackId ast_id) { |
| 1219 // TODO(ppc): this check currently fails on PPC and is disabled; re-enable |
| 1220 // once resolved: DCHECK(recorded_ast_id_.IsNone()); |
| 1221 recorded_ast_id_ = ast_id; |
| 1222 } |
| 1223 |
| 1224 TypeFeedbackId RecordedAstId() { |
| 1225 // TODO(ppc): also disabled pending investigation: DCHECK(!recorded_ast_id_.IsNone()); |
| 1226 return recorded_ast_id_; |
| 1227 } |
| 1228 |
| 1229 void ClearRecordedAstId() { recorded_ast_id_ = TypeFeedbackId::None(); } |
| 1230 |
| 1231 // Record a comment relocation entry that can be used by a disassembler. |
| 1232 // Use --code-comments to enable. |
| 1233 void RecordComment(const char* msg); |
| 1234 |
| 1235 // Writes a single byte or word of data in the code stream. Used |
| 1236 // for inline tables, e.g., jump-tables. |
| 1237 void db(uint8_t data); |
| 1238 void dd(uint32_t data); |
| 1239 void emit_ptr(uintptr_t data); |
| 1240 |
| 1241 PositionsRecorder* positions_recorder() { return &positions_recorder_; } |
| 1242 |
| 1243 // Read/patch instructions |
| 1244 Instr instr_at(int pos) { return *reinterpret_cast<Instr*>(buffer_ + pos); } |
| 1245 void instr_at_put(int pos, Instr instr) { |
| 1246 *reinterpret_cast<Instr*>(buffer_ + pos) = instr; |
| 1247 } |
| 1248 static Instr instr_at(byte* pc) { return *reinterpret_cast<Instr*>(pc); } |
| 1249 static void instr_at_put(byte* pc, Instr instr) { |
| 1250 *reinterpret_cast<Instr*>(pc) = instr; |
| 1251 } |
| 1252 static Condition GetCondition(Instr instr); |
| 1253 |
| 1254 static bool IsLis(Instr instr); |
| 1255 static bool IsLi(Instr instr); |
| 1256 static bool IsAddic(Instr instr); |
| 1257 static bool IsOri(Instr instr); |
| 1258 |
| 1259 static bool IsBranch(Instr instr); |
| 1260 static Register GetRA(Instr instr); |
| 1261 static Register GetRB(Instr instr); |
| 1262 #if V8_TARGET_ARCH_PPC64 |
| 1263 static bool Is64BitLoadIntoR12(Instr instr1, Instr instr2, Instr instr3, |
| 1264 Instr instr4, Instr instr5); |
| 1265 #else |
| 1266 static bool Is32BitLoadIntoR12(Instr instr1, Instr instr2); |
| 1267 #endif |
| 1268 |
| 1269 static bool IsCmpRegister(Instr instr); |
| 1270 static bool IsCmpImmediate(Instr instr); |
| 1271 static bool IsRlwinm(Instr instr); |
| 1272 #if V8_TARGET_ARCH_PPC64 |
| 1273 static bool IsRldicl(Instr instr); |
| 1274 #endif |
| 1275 static bool IsCrSet(Instr instr); |
| 1276 static Register GetCmpImmediateRegister(Instr instr); |
| 1277 static int GetCmpImmediateRawImmediate(Instr instr); |
| 1278 static bool IsNop(Instr instr, int type = NON_MARKING_NOP); |
| 1279 |
| 1280 // Postpone the generation of the trampoline pool for the specified number of |
| 1281 // instructions. |
| 1282 void BlockTrampolinePoolFor(int instructions); |
| 1283 void CheckTrampolinePool(); |
| 1284 |
| 1285 int instructions_required_for_mov(const Operand& x) const; |
| 1286 |
| 1287 #if V8_OOL_CONSTANT_POOL |
| 1288 // Decide between using the constant pool vs. a mov immediate sequence. |
| 1289 bool use_constant_pool_for_mov(const Operand& x, bool canOptimize) const; |
| 1290 |
| 1291 // The code currently calls CheckBuffer() too often. This has the side |
| 1292 // effect of randomly growing the buffer in the middle of multi-instruction |
| 1293 // sequences. |
| 1294 // MacroAssembler::LoadConstantPoolPointerRegister() includes a relocation |
| 1295 // and multiple instructions. We cannot grow the buffer until the |
| 1296 // relocation and all of the instructions are written. |
| 1297 // |
| 1298 // This function allows outside callers to check and grow the buffer |
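| // (e.g. a caller about to emit a full mov sequence and its relocation might |
| // reserve roughly kMovInstructionsNoConstantPool * kInstrSize bytes up front). |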
| 1299 void EnsureSpaceFor(int space_needed); |
| 1300 #endif |
| 1301 |
| 1302 // Allocate a constant pool of the correct size for the generated code. |
| 1303 Handle<ConstantPoolArray> NewConstantPool(Isolate* isolate); |
| 1304 |
| 1305 // Generate the constant pool for the generated code. |
| 1306 void PopulateConstantPool(ConstantPoolArray* constant_pool); |
| 1307 |
| 1308 #if V8_OOL_CONSTANT_POOL |
| 1309 bool is_constant_pool_available() const { return constant_pool_available_; } |
| 1310 |
| 1311 bool is_constant_pool_full() const { |
| 1312 return constant_pool_builder_.is_full(); |
| 1313 } |
| 1314 |
| 1315 bool use_extended_constant_pool() const { |
| 1316 return constant_pool_builder_.current_section() == |
| 1317 ConstantPoolArray::EXTENDED_SECTION; |
| 1318 } |
| 1319 #endif |
| 1320 |
| 1321 #if ABI_USES_FUNCTION_DESCRIPTORS || V8_OOL_CONSTANT_POOL |
| 1322 static void RelocateInternalReference( |
| 1323 Address pc, intptr_t delta, Address code_start, |
| 1324 ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED); |
| 1325 static int DecodeInternalReference(Vector<char> buffer, Address pc); |
| 1326 #endif |
| 1327 |
| 1328 protected: |
| 1329 // Relocation for a type-recording IC has the AST id added to it. This |
| 1330 // member variable is a way to pass the information from the call site to |
| 1331 // the relocation info. |
| 1332 TypeFeedbackId recorded_ast_id_; |
| 1333 |
| 1334 int buffer_space() const { return reloc_info_writer.pos() - pc_; } |
| 1335 |
| 1336 // Decode branch instruction at pos and return branch target pos |
| 1337 int target_at(int pos); |
| 1338 |
| 1339 // Patch branch instruction at pos to branch to given branch target pos |
| 1340 void target_at_put(int pos, int target_pos); |
| 1341 |
| 1342 // Record reloc info for current pc_ |
| 1343 void RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data = 0); |
| 1344 void RecordRelocInfo(const RelocInfo& rinfo); |
| 1345 #if V8_OOL_CONSTANT_POOL |
| 1346 ConstantPoolArray::LayoutSection ConstantPoolAddEntry( |
| 1347 const RelocInfo& rinfo) { |
| 1348 return constant_pool_builder_.AddEntry(this, rinfo); |
| 1349 } |
| 1350 #endif |
| 1351 |
| 1352 // Block the emission of the trampoline pool before pc_offset. |
| 1353 void BlockTrampolinePoolBefore(int pc_offset) { |
| 1354 if (no_trampoline_pool_before_ < pc_offset) |
| 1355 no_trampoline_pool_before_ = pc_offset; |
| 1356 } |
| 1357 |
| 1358 void StartBlockTrampolinePool() { trampoline_pool_blocked_nesting_++; } |
| 1359 |
| 1360 void EndBlockTrampolinePool() { trampoline_pool_blocked_nesting_--; } |
| 1361 |
| 1362 bool is_trampoline_pool_blocked() const { |
| 1363 return trampoline_pool_blocked_nesting_ > 0; |
| 1364 } |
| 1365 |
| 1366 bool has_exception() const { return internal_trampoline_exception_; } |
| 1367 |
| 1368 bool is_trampoline_emitted() const { return trampoline_emitted_; } |
| 1369 |
| 1370 #if V8_OOL_CONSTANT_POOL |
| 1371 void set_constant_pool_available(bool available) { |
| 1372 constant_pool_available_ = available; |
| 1373 } |
| 1374 #endif |
| 1375 |
| 1376 private: |
| 1377 // Code generation |
| 1378 // The relocation writer's position is at least kGap bytes below the end of |
| 1379 // the generated instructions. This is so that multi-instruction sequences do |
| 1380 // not have to check for overflow. The same is true for writes of large |
| 1381 // relocation info entries. |
| 1382 static const int kGap = 32; |
| 1383 |
| 1384 // Repeated checking whether the trampoline pool should be emitted is rather |
| 1385 // expensive. By default we only check again once a number of instructions |
| 1386 // has been generated. |
| 1387 int next_buffer_check_; // pc offset of next buffer check. |
| 1388 |
| 1389 // Emission of the trampoline pool may be blocked in some code sequences. |
| 1390 int trampoline_pool_blocked_nesting_; // Block emission if this is not zero. |
| 1391 int no_trampoline_pool_before_; // Block emission before this pc offset. |
| 1392 |
| 1393 // Relocation info generation |
| 1394 // Each relocation is encoded as a variable size value |
| 1395 static const int kMaxRelocSize = RelocInfoWriter::kMaxSize; |
| 1396 RelocInfoWriter reloc_info_writer; |
| 1397 |
| 1398 // The bound position, before this we cannot do instruction elimination. |
| 1399 int last_bound_pos_; |
| 1400 |
| 1401 #if V8_OOL_CONSTANT_POOL |
| 1402 ConstantPoolBuilder constant_pool_builder_; |
| 1403 |
| 1404 // Indicates whether the constant pool can be accessed, which is only possible |
| 1405 // if kConstantPoolRegister points to the current code object's constant pool. |
| 1406 bool constant_pool_available_; |
| 1407 #endif |
| 1408 |
| 1409 // Code emission |
| 1410 inline void CheckBuffer(); |
| 1411 void GrowBuffer(); |
| 1412 inline void emit(Instr x); |
| 1413 inline void CheckTrampolinePoolQuick(); |
| 1414 |
| 1415 // Instruction generation |
| 1416 void a_form(Instr instr, DoubleRegister frt, DoubleRegister fra, |
| 1417 DoubleRegister frb, RCBit r); |
| 1418 void d_form(Instr instr, Register rt, Register ra, const intptr_t val, |
| 1419 bool signed_disp); |
| 1420 void x_form(Instr instr, Register ra, Register rs, Register rb, RCBit r); |
| 1421 void xo_form(Instr instr, Register rt, Register ra, Register rb, OEBit o, |
| 1422 RCBit r); |
| 1423 void md_form(Instr instr, Register ra, Register rs, int shift, int maskbit, |
| 1424 RCBit r); |
| 1425 void mds_form(Instr instr, Register ra, Register rs, Register rb, int maskbit, |
| 1426 RCBit r); |
| 1427 |
| 1428 // Labels |
| 1429 void print(Label* L); |
| 1430 int max_reach_from(int pos); |
| 1431 void bind_to(Label* L, int pos); |
| 1432 void next(Label* L); |
| 1433 |
| 1434 class Trampoline { |
| 1435 public: |
| 1436 Trampoline() { |
| 1437 next_slot_ = 0; |
| 1438 free_slot_count_ = 0; |
| 1439 } |
| 1440 Trampoline(int start, int slot_count) { |
| 1441 next_slot_ = start; |
| 1442 free_slot_count_ = slot_count; |
| 1443 } |
| 1444 int take_slot() { |
| 1445 int trampoline_slot = kInvalidSlotPos; |
| 1446 if (free_slot_count_ <= 0) { |
| 1447 // We have run out of space on trampolines. |
| 1448 // Make sure we fail in debug mode, so we become aware of each case |
| 1449 // when this happens. |
| 1450 DCHECK(0); |
| 1451 // Internal exception will be caught. |
| 1452 } else { |
| 1453 trampoline_slot = next_slot_; |
| 1454 free_slot_count_--; |
| 1455 next_slot_ += kTrampolineSlotsSize; |
| 1456 } |
| 1457 return trampoline_slot; |
| 1458 } |
| 1459 |
| 1460 private: |
| 1461 int next_slot_; |
| 1462 int free_slot_count_; |
| 1463 }; |
| 1464 |
| 1465 int32_t get_trampoline_entry(); |
| 1466 int unbound_labels_count_; |
| 1467 // If the trampoline is emitted, the generated code has become large. Since |
| 1468 // this is already a slow case which can possibly break code generation in |
| 1469 // extreme cases, we use this information to trigger a different mode of |
| 1470 // branch instruction generation, where we no longer use a single branch |
| 1471 // instruction. |
| 1472 bool trampoline_emitted_; |
| 1473 static const int kTrampolineSlotsSize = kInstrSize; |
| 1474 static const int kMaxCondBranchReach = (1 << (16 - 1)) - 1; |
| 1475 static const int kMaxBlockTrampolineSectionSize = 64 * kInstrSize; |
| 1476 static const int kInvalidSlotPos = -1; |
| 1477 |
| 1478 Trampoline trampoline_; |
| 1479 bool internal_trampoline_exception_; |
| 1480 |
| 1481 friend class RegExpMacroAssemblerPPC; |
| 1482 friend class RelocInfo; |
| 1483 friend class CodePatcher; |
| 1484 friend class BlockTrampolinePoolScope; |
| 1485 #if V8_OOL_CONSTANT_POOL |
| 1486 friend class FrameAndConstantPoolScope; |
| 1487 friend class ConstantPoolUnavailableScope; |
| 1488 #endif |
| 1489 |
| 1490 PositionsRecorder positions_recorder_; |
| 1491 friend class PositionsRecorder; |
| 1492 friend class EnsureSpace; |
| 1493 }; |
| 1494 |
| 1495 |
| 1496 class EnsureSpace BASE_EMBEDDED { |
| 1497 public: |
| 1498 explicit EnsureSpace(Assembler* assembler) { assembler->CheckBuffer(); } |
| 1499 }; |
| 1500 } |
| 1501 } // namespace v8::internal |
| 1502 |
| 1503 #endif // V8_PPC_ASSEMBLER_PPC_H_ |