OLD | NEW |
(Empty) | |
| 1 // Copyright (c) 1994-2006 Sun Microsystems Inc. |
| 2 // All Rights Reserved. |
| 3 // |
| 4 // Redistribution and use in source and binary forms, with or without |
| 5 // modification, are permitted provided that the following conditions |
| 6 // are met: |
| 7 // |
| 8 // - Redistributions of source code must retain the above copyright notice, |
| 9 // this list of conditions and the following disclaimer. |
| 10 // |
| 11 // - Redistribution in binary form must reproduce the above copyright |
| 12 // notice, this list of conditions and the following disclaimer in the |
| 13 // documentation and/or other materials provided with the |
| 14 // distribution. |
| 15 // |
| 16 // - Neither the name of Sun Microsystems or the names of contributors may |
| 17 // be used to endorse or promote products derived from this software without |
| 18 // specific prior written permission. |
| 19 // |
| 20 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS |
| 21 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT |
| 22 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS |
| 23 // FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE |
| 24 // COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, |
| 25 // INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES |
| 26 // (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR |
| 27 // SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) |
| 28 // HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, |
| 29 // STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) |
| 30 // ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED |
| 31 // OF THE POSSIBILITY OF SUCH DAMAGE. |
| 32 |
| 33 // The original source code covered by the above license has been
| 34 // modified significantly by Google Inc. |
| 35 // Copyright 2012 the V8 project authors. All rights reserved. |
| 36 |
| 37 // |
| 38 // Copyright IBM Corp. 2012, 2013. All rights reserved. |
| 39 // |
| 40 |
| 41 // A light-weight PPC Assembler |
| 42 // Generates user mode instructions for the PPC architecture.
| 43 |
| 44 #ifndef V8_PPC_ASSEMBLER_PPC_H_ |
| 45 #define V8_PPC_ASSEMBLER_PPC_H_ |
| 46 |
| 47 #include <stdio.h> |
| 48 #include <vector> |
| 49 |
| 50 #include "src/assembler.h" |
| 51 #include "src/ppc/constants-ppc.h" |
| 52 #include "src/serialize.h" |
| 53 |
| 54 #define ABI_USES_FUNCTION_DESCRIPTORS \ |
| 55 (V8_HOST_ARCH_PPC && \ |
| 56 (V8_OS_AIX || \ |
| 57 (V8_TARGET_ARCH_PPC64 && V8_TARGET_BIG_ENDIAN))) |
| 58 |
| 59 #define ABI_PASSES_HANDLES_IN_REGS \ |
| 60 (!V8_HOST_ARCH_PPC || V8_OS_AIX || V8_TARGET_ARCH_PPC64) |
| 61 |
| 62 #define ABI_RETURNS_HANDLES_IN_REGS \ |
| 63 (!V8_HOST_ARCH_PPC || V8_TARGET_LITTLE_ENDIAN) |
| 64 |
| 65 #define ABI_RETURNS_OBJECT_PAIRS_IN_REGS \ |
| 66 (!V8_HOST_ARCH_PPC || V8_TARGET_LITTLE_ENDIAN) |
| 67 |
| 68 #define ABI_TOC_ADDRESSABILITY_VIA_IP \ |
| 69 (V8_HOST_ARCH_PPC && V8_TARGET_ARCH_PPC64 && \ |
| 70 V8_TARGET_LITTLE_ENDIAN) |
| 71 |
| 72 #if !V8_HOST_ARCH_PPC || V8_OS_AIX || V8_TARGET_ARCH_PPC64 |
| 73 #define ABI_TOC_REGISTER kRegister_r2_Code |
| 74 #else |
| 75 #define ABI_TOC_REGISTER kRegister_r13_Code |
| 76 #endif |
| 77 |
| 78 namespace v8 { |
| 79 namespace internal { |
| 80 |
| 81 // CPU Registers. |
| 82 // |
| 83 // 1) We would prefer to use an enum, but enum values are assignment- |
| 84 // compatible with int, which has caused code-generation bugs. |
| 85 // |
| 86 // 2) We would prefer to use a class instead of a struct but we don't like |
| 87 // the register initialization to depend on the particular initialization |
| 88 // order (which appears to be different on OS X, Linux, and Windows for the |
| 89 // installed versions of C++ we tried). Using a struct permits C-style |
| 90 // "initialization". Also, the Register objects cannot be const as this |
| 91 // forces initialization stubs in MSVC, making us dependent on initialization |
| 92 // order. |
| 93 // |
| 94 // 3) By not using an enum, we are possibly preventing the compiler from |
| 95 // doing certain constant folds, which may significantly reduce the |
| 96 // code generated for some assembly instructions (because they boil down |
| 97 // to a few constants). If this is a problem, we could change the code |
| 98 // such that we use an enum in optimized mode, and the struct in debug |
| 99 // mode. This way we get the compile-time error checking in debug mode |
| 100 // and best performance in optimized code. |
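//
// As an illustrative sketch of point 2 above, the struct is set up with
// C-style aggregate initialization, so no constructors need to run during
// static initialization (the real definitions appear further below in this
// header):
//
//   const Register r5 = { kRegister_r5_Code };  // brace-init, no constructor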
| 101 |
| 102 // Core register |
| 103 struct Register { |
| 104 static const int kNumRegisters = 32; |
| 105 static const int kSizeInBytes = kPointerSize; |
| 106 |
| 107 #if V8_TARGET_LITTLE_ENDIAN |
| 108 static const int kMantissaOffset = 0; |
| 109 static const int kExponentOffset = 4; |
| 110 #else |
| 111 static const int kMantissaOffset = 4; |
| 112 static const int kExponentOffset = 0; |
| 113 #endif |
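  // Illustrative sketch of how the offsets above are meant to be used: for a
  // 64-bit double stored at some base address, the high (exponent) word is at
  // base + kExponentOffset and the low (mantissa) word at base +
  // kMantissaOffset, independent of endianness. The register names here are
  // placeholders for illustration only:
  //
  //   lwz(hi_word, MemOperand(base, Register::kExponentOffset));
  //   lwz(lo_word, MemOperand(base, Register::kMantissaOffset));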
| 114 |
| 115 static const int kAllocatableLowRangeBegin = 3; |
| 116 static const int kAllocatableLowRangeEnd = 10; |
| 117 static const int kAllocatableHighRangeBegin = 14; |
| 118 #if V8_OOL_CONSTANT_POOL |
| 119 static const int kAllocatableHighRangeEnd = 27; |
| 120 #else |
| 121 static const int kAllocatableHighRangeEnd = 28; |
| 122 #endif |
| 123 static const int kAllocatableContext = 30; |
| 124 |
| 125 static const int kNumAllocatableLow = |
| 126 kAllocatableLowRangeEnd - kAllocatableLowRangeBegin + 1; |
| 127 static const int kNumAllocatableHigh = |
| 128 kAllocatableHighRangeEnd - kAllocatableHighRangeBegin + 1; |
| 129 static const int kMaxNumAllocatableRegisters = |
| 130 kNumAllocatableLow + kNumAllocatableHigh + 1; // cp |
| 131 static int NumAllocatableRegisters() { return kMaxNumAllocatableRegisters; } |
| 132 |
| 133 static int ToAllocationIndex(Register reg) { |
| 134 int index; |
| 135 int code = reg.code(); |
| 136 if (code == kAllocatableContext) { |
| 137 // Context is the last index |
| 138 index = NumAllocatableRegisters() - 1; |
| 139 } else if (code <= kAllocatableLowRangeEnd) { |
| 140 // low range |
| 141 index = code - kAllocatableLowRangeBegin; |
| 142 } else { |
| 143 // high range |
| 144 index = code - kAllocatableHighRangeBegin + kNumAllocatableLow; |
| 145 } |
| 146 DCHECK(index >= 0 && index < kMaxNumAllocatableRegisters); |
| 147 return index; |
| 148 } |
| 149 |
| 150 static Register FromAllocationIndex(int index) { |
| 151 DCHECK(index >= 0 && index < kMaxNumAllocatableRegisters); |
| 152 // Last index is always the 'cp' register. |
| 153 if (index == kMaxNumAllocatableRegisters - 1) { |
| 154 return from_code(kAllocatableContext); |
| 155 } |
| 156 return (index < kNumAllocatableLow) |
| 157 ? from_code(index + kAllocatableLowRangeBegin) |
| 158 : from_code(index - kNumAllocatableLow + kAllocatableHighRangeBegin); |
| 159 } |
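  // For example, with the register ranges defined above (the layout differs
  // only slightly when V8_OOL_CONSTANT_POOL is set):
  //   ToAllocationIndex(r3)  == 0   // start of the low range
  //   ToAllocationIndex(r10) == 7   // end of the low range
  //   ToAllocationIndex(r14) == 8   // start of the high range
  //   ToAllocationIndex(cp)  == kMaxNumAllocatableRegisters - 1
  // FromAllocationIndex() is the inverse mapping.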
| 160 |
| 161 static const char* AllocationIndexToString(int index) { |
| 162 DCHECK(index >= 0 && index < kMaxNumAllocatableRegisters); |
| 163 const char* const names[] = { |
| 164 "r3", "r4", "r5", "r6", "r7", "r8", "r9", "r10", |
| 165 "r14", "r15", "r16", "r17", "r18", |
| 166 "r19", "r20", "r21", "r22", "r23", |
| 167 "r24", "r25", "r26", "r27", |
| 168 #if !V8_OOL_CONSTANT_POOL |
| 169 "r28", |
| 170 #endif |
| 171 "cp", |
| 172 }; |
| 173 return names[index]; |
| 174 } |
| 175 |
| 176 static Register from_code(int code) { |
| 177 Register r = { code }; |
| 178 return r; |
| 179 } |
| 180 |
| 181 bool is_valid() const { return 0 <= code_ && code_ < kNumRegisters; } |
| 182 bool is(Register reg) const { return code_ == reg.code_; } |
| 183 int code() const { |
| 184 DCHECK(is_valid()); |
| 185 return code_; |
| 186 } |
| 187 int bit() const { |
| 188 DCHECK(is_valid()); |
| 189 return 1 << code_; |
| 190 } |
| 191 |
| 192 void set_code(int code) { |
| 193 code_ = code; |
| 194 DCHECK(is_valid()); |
| 195 } |
| 196 |
| 197 // Unfortunately we can't make this private in a struct. |
| 198 int code_; |
| 199 }; |
| 200 |
| 201 // These constants are used in several locations, including static initializers |
| 202 const int kRegister_no_reg_Code = -1; |
| 203 const int kRegister_r0_Code = 0; // general scratch |
| 204 const int kRegister_sp_Code = 1; // stack pointer |
| 205 const int kRegister_r2_Code = 2; // special on PowerPC |
| 206 const int kRegister_r3_Code = 3; |
| 207 const int kRegister_r4_Code = 4; |
| 208 const int kRegister_r5_Code = 5; |
| 209 const int kRegister_r6_Code = 6; |
| 210 const int kRegister_r7_Code = 7; |
| 211 const int kRegister_r8_Code = 8; |
| 212 const int kRegister_r9_Code = 9; |
| 213 const int kRegister_r10_Code = 10; |
| 214 const int kRegister_r11_Code = 11; // lithium scratch |
| 215 const int kRegister_ip_Code = 12; // ip (general scratch) |
| 216 const int kRegister_r13_Code = 13; // special on PowerPC |
| 217 const int kRegister_r14_Code = 14; |
| 218 const int kRegister_r15_Code = 15; |
| 219 |
| 220 const int kRegister_r16_Code = 16; |
| 221 const int kRegister_r17_Code = 17; |
| 222 const int kRegister_r18_Code = 18; |
| 223 const int kRegister_r19_Code = 19; |
| 224 const int kRegister_r20_Code = 20; |
| 225 const int kRegister_r21_Code = 21; |
| 226 const int kRegister_r22_Code = 22; |
| 227 const int kRegister_r23_Code = 23; |
| 228 const int kRegister_r24_Code = 24; |
| 229 const int kRegister_r25_Code = 25; |
| 230 const int kRegister_r26_Code = 26; |
| 231 const int kRegister_r27_Code = 27; |
| 232 const int kRegister_r28_Code = 28; // constant pool pointer |
| 233 const int kRegister_r29_Code = 29; // roots array pointer |
| 234 const int kRegister_r30_Code = 30; // context pointer |
| 235 const int kRegister_fp_Code = 31; // frame pointer |
| 236 |
| 237 const Register no_reg = { kRegister_no_reg_Code }; |
| 238 |
| 239 const Register r0 = { kRegister_r0_Code }; |
| 240 const Register sp = { kRegister_sp_Code }; |
| 241 const Register r2 = { kRegister_r2_Code }; |
| 242 const Register r3 = { kRegister_r3_Code }; |
| 243 const Register r4 = { kRegister_r4_Code }; |
| 244 const Register r5 = { kRegister_r5_Code }; |
| 245 const Register r6 = { kRegister_r6_Code }; |
| 246 const Register r7 = { kRegister_r7_Code }; |
| 247 const Register r8 = { kRegister_r8_Code }; |
| 248 const Register r9 = { kRegister_r9_Code }; |
| 249 const Register r10 = { kRegister_r10_Code }; |
| 250 const Register r11 = { kRegister_r11_Code }; |
| 251 const Register ip = { kRegister_ip_Code }; |
| 252 const Register r13 = { kRegister_r13_Code }; |
| 253 const Register r14 = { kRegister_r14_Code }; |
| 254 const Register r15 = { kRegister_r15_Code }; |
| 255 |
| 256 const Register r16 = { kRegister_r16_Code }; |
| 257 const Register r17 = { kRegister_r17_Code }; |
| 258 const Register r18 = { kRegister_r18_Code }; |
| 259 const Register r19 = { kRegister_r19_Code }; |
| 260 const Register r20 = { kRegister_r20_Code }; |
| 261 const Register r21 = { kRegister_r21_Code }; |
| 262 const Register r22 = { kRegister_r22_Code }; |
| 263 const Register r23 = { kRegister_r23_Code }; |
| 264 const Register r24 = { kRegister_r24_Code }; |
| 265 const Register r25 = { kRegister_r25_Code }; |
| 266 const Register r26 = { kRegister_r26_Code }; |
| 267 const Register r27 = { kRegister_r27_Code }; |
| 268 const Register r28 = { kRegister_r28_Code }; |
| 269 const Register r29 = { kRegister_r29_Code }; |
| 270 const Register r30 = { kRegister_r30_Code }; |
| 271 const Register fp = { kRegister_fp_Code }; |
| 272 |
| 273 // Give alias names to registers |
| 274 const Register cp = { kRegister_r30_Code }; // JavaScript context pointer |
| 275 const Register kRootRegister = { kRegister_r29_Code }; // Roots array pointer. |
| 276 #if V8_OOL_CONSTANT_POOL |
| 277 const Register kConstantPoolRegister = { kRegister_r28_Code }; // Constant pool |
| 278 #endif |
| 279 |
| 280 // Double word FP register. |
| 281 struct DoubleRegister { |
| 282 static const int kNumRegisters = 32; |
| 283 static const int kMaxNumRegisters = kNumRegisters; |
| 284 static const int kNumVolatileRegisters = 14; // d0-d13 |
| 285 static const int kSizeInBytes = 8; |
| 286 |
| 287 static const int kAllocatableLowRangeBegin = 1; |
| 288 static const int kAllocatableLowRangeEnd = 12; |
| 289 static const int kAllocatableHighRangeBegin = 15; |
| 290 static const int kAllocatableHighRangeEnd = 31; |
| 291 |
| 292 static const int kNumAllocatableLow = |
| 293 kAllocatableLowRangeEnd - kAllocatableLowRangeBegin + 1; |
| 294 static const int kNumAllocatableHigh = |
| 295 kAllocatableHighRangeEnd - kAllocatableHighRangeBegin + 1; |
| 296 static const int kMaxNumAllocatableRegisters = |
| 297 kNumAllocatableLow + kNumAllocatableHigh; |
| 298 static int NumAllocatableRegisters() { return kMaxNumAllocatableRegisters; } |
| 299 |
| 300 static int ToAllocationIndex(DoubleRegister reg) { |
| 301 int code = reg.code(); |
| 302 int index = (code <= kAllocatableLowRangeEnd) |
| 303 ? code - kAllocatableLowRangeBegin |
| 304 : code - kAllocatableHighRangeBegin + kNumAllocatableLow; |
| 305 DCHECK(index < kMaxNumAllocatableRegisters); |
| 306 return index; |
| 307 } |
| 308 |
| 309 static DoubleRegister FromAllocationIndex(int index) { |
| 310 DCHECK(index >= 0 && index < kMaxNumAllocatableRegisters); |
| 311 return (index < kNumAllocatableLow) |
| 312 ? from_code(index + kAllocatableLowRangeBegin) |
| 313 : from_code(index - kNumAllocatableLow + kAllocatableHighRangeBegin); |
| 314 } |
| 315 |
| 316 static const char* AllocationIndexToString(int index); |
| 317 |
| 318 static DoubleRegister from_code(int code) { |
| 319 DoubleRegister r = { code }; |
| 320 return r; |
| 321 } |
| 322 |
| 323 bool is_valid() const { return 0 <= code_ && code_ < kMaxNumRegisters; } |
| 324 bool is(DoubleRegister reg) const { return code_ == reg.code_; } |
| 325 |
| 326 int code() const { |
| 327 DCHECK(is_valid()); |
| 328 return code_; |
| 329 } |
| 330 int bit() const { |
| 331 DCHECK(is_valid()); |
| 332 return 1 << code_; |
| 333 } |
| 334 void split_code(int* vm, int* m) const { |
| 335 DCHECK(is_valid()); |
| 336 *m = (code_ & 0x10) >> 4; |
| 337 *vm = code_ & 0x0F; |
| 338 } |
| 339 |
| 340 int code_; |
| 341 }; |
| 342 |
| 343 |
| 344 const DoubleRegister no_dreg = { -1 }; |
| 345 const DoubleRegister d0 = { 0 }; |
| 346 const DoubleRegister d1 = { 1 }; |
| 347 const DoubleRegister d2 = { 2 }; |
| 348 const DoubleRegister d3 = { 3 }; |
| 349 const DoubleRegister d4 = { 4 }; |
| 350 const DoubleRegister d5 = { 5 }; |
| 351 const DoubleRegister d6 = { 6 }; |
| 352 const DoubleRegister d7 = { 7 }; |
| 353 const DoubleRegister d8 = { 8 }; |
| 354 const DoubleRegister d9 = { 9 }; |
| 355 const DoubleRegister d10 = { 10 }; |
| 356 const DoubleRegister d11 = { 11 }; |
| 357 const DoubleRegister d12 = { 12 }; |
| 358 const DoubleRegister d13 = { 13 }; |
| 359 const DoubleRegister d14 = { 14 }; |
| 360 const DoubleRegister d15 = { 15 }; |
| 361 const DoubleRegister d16 = { 16 }; |
| 362 const DoubleRegister d17 = { 17 }; |
| 363 const DoubleRegister d18 = { 18 }; |
| 364 const DoubleRegister d19 = { 19 }; |
| 365 const DoubleRegister d20 = { 20 }; |
| 366 const DoubleRegister d21 = { 21 }; |
| 367 const DoubleRegister d22 = { 22 }; |
| 368 const DoubleRegister d23 = { 23 }; |
| 369 const DoubleRegister d24 = { 24 }; |
| 370 const DoubleRegister d25 = { 25 }; |
| 371 const DoubleRegister d26 = { 26 }; |
| 372 const DoubleRegister d27 = { 27 }; |
| 373 const DoubleRegister d28 = { 28 }; |
| 374 const DoubleRegister d29 = { 29 }; |
| 375 const DoubleRegister d30 = { 30 }; |
| 376 const DoubleRegister d31 = { 31 }; |
| 377 |
| 378 // Aliases for double registers. Defined using #define instead of |
| 379 // "static const DoubleRegister&" because Clang complains otherwise when a |
| 380 // compilation unit that includes this header doesn't use the variables. |
| 381 #define kFirstCalleeSavedDoubleReg d14 |
| 382 #define kLastCalleeSavedDoubleReg d31 |
| 383 #define kDoubleRegZero d14 |
| 384 #define kScratchDoubleReg d13 |
| 385 |
| 386 Register ToRegister(int num); |
| 387 |
| 388 // Condition register (CR) field
| 389 struct CRegister { |
| 390 bool is_valid() const { return 0 <= code_ && code_ < 16; } |
| 391 bool is(CRegister creg) const { return code_ == creg.code_; } |
| 392 int code() const { |
| 393 DCHECK(is_valid()); |
| 394 return code_; |
| 395 } |
| 396 int bit() const { |
| 397 DCHECK(is_valid()); |
| 398 return 1 << code_; |
| 399 } |
| 400 |
| 401 // Unfortunately we can't make this private in a struct. |
| 402 int code_; |
| 403 }; |
| 404 |
| 405 |
| 406 const CRegister no_creg = { -1 }; |
| 407 |
| 408 const CRegister cr0 = { 0 }; |
| 409 const CRegister cr1 = { 1 }; |
| 410 const CRegister cr2 = { 2 }; |
| 411 const CRegister cr3 = { 3 }; |
| 412 const CRegister cr4 = { 4 }; |
| 413 const CRegister cr5 = { 5 }; |
| 414 const CRegister cr6 = { 6 }; |
| 415 const CRegister cr7 = { 7 }; |
| 416 const CRegister cr8 = { 8 }; |
| 417 const CRegister cr9 = { 9 }; |
| 418 const CRegister cr10 = { 10 }; |
| 419 const CRegister cr11 = { 11 }; |
| 420 const CRegister cr12 = { 12 }; |
| 421 const CRegister cr13 = { 13 }; |
| 422 const CRegister cr14 = { 14 }; |
| 423 const CRegister cr15 = { 15 }; |
| 424 |
| 425 // ----------------------------------------------------------------------------- |
| 426 // Machine instruction Operands |
| 427 |
| 428 #if V8_TARGET_ARCH_PPC64 |
| 429 const RelocInfo::Mode kRelocInfo_NONEPTR = RelocInfo::NONE64; |
| 430 #else |
| 431 const RelocInfo::Mode kRelocInfo_NONEPTR = RelocInfo::NONE32; |
| 432 #endif |
| 433 |
| 434 // Class Operand represents an immediate or register operand in data processing instructions
| 435 class Operand BASE_EMBEDDED { |
| 436 public: |
| 437 // immediate |
| 438 INLINE(explicit Operand(intptr_t immediate, |
| 439 RelocInfo::Mode rmode = kRelocInfo_NONEPTR)); |
| 440 INLINE(static Operand Zero()) { |
| 441 return Operand(static_cast<intptr_t>(0)); |
| 442 } |
| 443 INLINE(explicit Operand(const ExternalReference& f)); |
| 444 explicit Operand(Handle<Object> handle); |
| 445 INLINE(explicit Operand(Smi* value)); |
| 446 |
| 447 // rm |
| 448 INLINE(explicit Operand(Register rm)); |
| 449 |
| 450 // Return true if this is a register operand. |
| 451 INLINE(bool is_reg() const); |
| 452 |
| 453 // For mov: returns whether loading this operand into a register requires
| 454 // relocation information to be recorded (e.g. for embedded objects or
| 455 // external references). The number of instructions needed for the load
| 456 // itself is computed by Assembler::instructions_required_for_mov() below
| 457 // and can range from one (constant pool small section) to five (full
| 458 // 64-bit sequence). The result is only valid as long as no entries are
| 459 // added to the constant pool between this call and the instruction being emitted.
| 460 bool must_output_reloc_info(const Assembler* assembler) const; |
| 461 |
| 462 inline intptr_t immediate() const { |
| 463 DCHECK(!rm_.is_valid()); |
| 464 return imm_; |
| 465 } |
| 466 |
| 467 Register rm() const { return rm_; } |
| 468 |
| 469 private: |
| 470 Register rm_; |
| 471 intptr_t imm_; // valid if rm_ == no_reg |
| 472 RelocInfo::Mode rmode_; |
| 473 |
| 474 friend class Assembler; |
| 475 friend class MacroAssembler; |
| 476 }; |
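// Illustrative sketch of typical Operand uses (the constructors are declared
// above; the values are arbitrary):
//
//   Operand(8)                  // plain immediate
//   Operand(Smi::FromInt(42))   // Smi immediate
//   Operand(r4)                 // register operand, is_reg() returns true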
| 477 |
| 478 |
| 479 // Class MemOperand represents a memory operand in load and store instructions |
| 480 // On PowerPC we have base register + 16-bit signed displacement (D-form),
| 481 // or alternatively base register + index register (X-form)
| 482 class MemOperand BASE_EMBEDDED { |
| 483 public: |
| 484 explicit MemOperand(Register rn, int32_t offset = 0); |
| 485 |
| 486 explicit MemOperand(Register ra, Register rb); |
| 487 |
| 488 int32_t offset() const { |
| 489 DCHECK(rb_.is(no_reg)); |
| 490 return offset_; |
| 491 } |
| 492 |
| 493 // PowerPC - base register |
| 494 Register ra() const { |
| 495 DCHECK(!ra_.is(no_reg)); |
| 496 return ra_; |
| 497 } |
| 498 |
| 499 Register rb() const { |
| 500 DCHECK(offset_ == 0 && !rb_.is(no_reg)); |
| 501 return rb_; |
| 502 } |
| 503 |
| 504 private: |
| 505 Register ra_; // base |
| 506 int32_t offset_; // offset |
| 507 Register rb_; // index |
| 508 |
| 509 friend class Assembler; |
| 510 }; |
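// Illustrative sketch of the two MemOperand forms (constructors declared
// above; the registers and offset are arbitrary):
//
//   MemOperand(sp, 8)    // base register + signed displacement (D-form)
//   MemOperand(r3, r4)   // base register + index register (X-form)
//
// The x-suffixed loads and stores declared below (lwzx, stwx, ...) expect the
// register + register form.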
| 511 |
| 512 |
| 513 #if V8_OOL_CONSTANT_POOL |
| 514 // Class used to build a constant pool. |
| 515 class ConstantPoolBuilder BASE_EMBEDDED { |
| 516 public: |
| 517 ConstantPoolBuilder(); |
| 518 ConstantPoolArray::LayoutSection AddEntry(Assembler* assm, |
| 519 const RelocInfo& rinfo); |
| 520 void Relocate(intptr_t pc_delta); |
| 521 bool IsEmpty(); |
| 522 Handle<ConstantPoolArray> New(Isolate* isolate); |
| 523 void Populate(Assembler* assm, ConstantPoolArray* constant_pool); |
| 524 |
| 525 inline ConstantPoolArray::LayoutSection current_section() const { |
| 526 return current_section_; |
| 527 } |
| 528 |
| 529 // Rather than increasing the capacity of the ConstantPoolArray's |
| 530 // small section to match the longer (16-bit) reach of PPC's load |
| 531 // instruction (at the expense of a larger header to describe the |
| 532 // layout), the PPC implementation utilizes the extended section to |
| 533 // satisfy that reach. I.e. all entries (regardless of their |
| 534 // section) are reachable with a single load instruction. |
| 535 // |
| 536 // This implementation does not support an unlimited constant pool |
| 537 // size (which would require a multi-instruction sequence). [See |
| 538 // ARM commit e27ab337 for a reference on the changes required to |
| 539 // support the longer instruction sequence.] Note, however, that |
| 540 // going down that path will necessarily generate that longer |
| 541 // sequence for all extended section accesses since the placement of |
| 542 // a given entry within the section is not known at the time of |
| 543 // code generation. |
| 544 // |
| 545 // TODO(mbrandy): Determine whether there is a benefit to supporting |
| 546 // the longer sequence given that nops could be used for those |
| 547 // entries which are reachable with a single instruction. |
| 548 inline bool is_full() const { |
| 549 return !is_int16(size_); |
| 550 } |
| 551 |
| 552 inline ConstantPoolArray::NumberOfEntries* number_of_entries( |
| 553 ConstantPoolArray::LayoutSection section) { |
| 554 return &number_of_entries_[section]; |
| 555 } |
| 556 |
| 557 inline ConstantPoolArray::NumberOfEntries* small_entries() { |
| 558 return number_of_entries(ConstantPoolArray::SMALL_SECTION); |
| 559 } |
| 560 |
| 561 inline ConstantPoolArray::NumberOfEntries* extended_entries() { |
| 562 return number_of_entries(ConstantPoolArray::EXTENDED_SECTION); |
| 563 } |
| 564 |
| 565 private: |
| 566 struct ConstantPoolEntry { |
| 567 ConstantPoolEntry(RelocInfo rinfo, ConstantPoolArray::LayoutSection section, |
| 568 int merged_index) |
| 569 : rinfo_(rinfo), section_(section), merged_index_(merged_index) {} |
| 570 |
| 571 RelocInfo rinfo_; |
| 572 ConstantPoolArray::LayoutSection section_; |
| 573 int merged_index_; |
| 574 }; |
| 575 |
| 576 ConstantPoolArray::Type GetConstantPoolType(RelocInfo::Mode rmode); |
| 577 |
| 578 uint32_t size_; |
| 579 std::vector<ConstantPoolEntry> entries_; |
| 580 ConstantPoolArray::LayoutSection current_section_; |
| 581 ConstantPoolArray::NumberOfEntries number_of_entries_[2]; |
| 582 }; |
| 583 #endif |
| 584 |
| 585 |
| 586 class Assembler : public AssemblerBase { |
| 587 public: |
| 588 // Create an assembler. Instructions and relocation information are emitted |
| 589 // into a buffer, with the instructions starting from the beginning and the |
| 590 // relocation information starting from the end of the buffer. See CodeDesc |
| 591 // for a detailed comment on the layout (globals.h). |
| 592 // |
| 593 // If the provided buffer is NULL, the assembler allocates and grows its own |
| 594 // buffer, and buffer_size determines the initial buffer size. The buffer is |
| 595 // owned by the assembler and deallocated upon destruction of the assembler. |
| 596 // |
| 597 // If the provided buffer is not NULL, the assembler uses the provided buffer |
| 598 // for code generation and assumes its size to be buffer_size. If the buffer |
| 599 // is too small, a fatal error occurs. No deallocation of the buffer is done |
| 600 // upon destruction of the assembler. |
| 601 Assembler(Isolate* isolate, void* buffer, int buffer_size); |
| 602 virtual ~Assembler() { } |
| 603 |
| 604 // GetCode emits any pending (non-emitted) code and fills the descriptor |
| 605 // desc. GetCode() is idempotent; it returns the same result if no other |
| 606 // Assembler functions are invoked in between GetCode() calls. |
| 607 void GetCode(CodeDesc* desc); |
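  // Minimal usage sketch, assuming an Isolate* named isolate is in scope
  // (the methods and registers used below are all declared in this header):
  //
  //   Assembler masm(isolate, NULL, 0);  // assembler owns a growable buffer
  //   masm.addi(r3, r3, Operand(1));
  //   masm.blr();
  //   CodeDesc desc;
  //   masm.GetCode(&desc);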
| 608 |
| 609 // Label operations & relative jumps (PPUM Appendix D) |
| 610 // |
| 611 // Takes a branch opcode (cc) and a label (L) and generates |
| 612 // either a backward branch or a forward branch and links it |
| 613 // to the label fixup chain. Usage: |
| 614 // |
| 615 // Label L; // unbound label |
| 616 // j(cc, &L); // forward branch to unbound label |
| 617 // bind(&L); // bind label to the current pc |
| 618 // j(cc, &L); // backward branch to bound label |
| 619 // bind(&L); // illegal: a label may be bound only once |
| 620 // |
| 621 // Note: The same Label can be used for forward and backward branches |
| 622 // but it may be bound only once. |
| 623 |
| 624 void bind(Label* L); // binds an unbound label L to the current code position |
| 625 // Determines if Label is bound and near enough so that a single |
| 626 // branch instruction can be used to reach it. |
| 627 bool is_near(Label* L, Condition cond); |
| 628 |
| 629 // Returns the branch offset to the given label from the current code position |
| 630 // Links the label to the current position if it is still unbound |
| 631 // Manages the jump elimination optimization if the second parameter is true. |
| 632 int branch_offset(Label* L, bool jump_elimination_allowed); |
| 633 |
| 634 // Puts a label's target address at the given position.
| 635 // The high 8 bits are set to zero. |
| 636 void label_at_put(Label* L, int at_offset); |
| 637 |
| 638 #if V8_OOL_CONSTANT_POOL |
| 639 INLINE(static bool IsConstantPoolLoadStart(Address pc)); |
| 640 INLINE(static bool IsConstantPoolLoadEnd(Address pc)); |
| 641 INLINE(static int GetConstantPoolOffset(Address pc)); |
| 642 INLINE(static void SetConstantPoolOffset(Address pc, int offset)); |
| 643 |
| 644 // Return the address in the constant pool of the code target address used by |
| 645 // the branch/call instruction at pc, or the object in a mov. |
| 646 INLINE(static Address target_constant_pool_address_at( |
| 647 Address pc, ConstantPoolArray* constant_pool)); |
| 648 #endif |
| 649 |
| 650 // Read/Modify the code target address in the branch/call instruction at pc. |
| 651 INLINE(static Address target_address_at(Address pc, |
| 652 ConstantPoolArray* constant_pool)); |
| 653 INLINE(static void set_target_address_at(Address pc, |
| 654 ConstantPoolArray* constant_pool, |
| 655 Address target, |
| 656 ICacheFlushMode icache_flush_mode = |
| 657 FLUSH_ICACHE_IF_NEEDED)); |
| 658 INLINE(static Address target_address_at(Address pc, Code* code)) { |
| 659 ConstantPoolArray* constant_pool = code ? code->constant_pool() : NULL; |
| 660 return target_address_at(pc, constant_pool); |
| 661 } |
| 662 INLINE(static void set_target_address_at(Address pc, |
| 663 Code* code, |
| 664 Address target, |
| 665 ICacheFlushMode icache_flush_mode = |
| 666 FLUSH_ICACHE_IF_NEEDED)) { |
| 667 ConstantPoolArray* constant_pool = code ? code->constant_pool() : NULL; |
| 668 set_target_address_at(pc, constant_pool, target, icache_flush_mode); |
| 669 } |
| 670 |
| 671 // Return the code target address at a call site from the return address |
| 672 // of that call in the instruction stream. |
| 673 inline static Address target_address_from_return_address(Address pc); |
| 674 |
| 675 // Given the address of the beginning of a call, return the address |
| 676 // in the instruction stream that the call will return to. |
| 677 INLINE(static Address return_address_from_call_start(Address pc)); |
| 678 |
| 679 // Return the code target address of the patch debug break slot |
| 680 INLINE(static Address break_address_from_return_address(Address pc)); |
| 681 |
| 682 // This sets the branch destination. |
| 683 // This is for calls and branches within generated code. |
| 684 inline static void deserialization_set_special_target_at( |
| 685 Address instruction_payload, Code* code, Address target); |
| 686 |
| 687 // Size of an instruction. |
| 688 static const int kInstrSize = sizeof(Instr); |
| 689 |
| 690 // Here we are patching the address in the lis/ori mov sequence.
| 691 // These values are used in the serialization process and must be zero for |
| 692 // PPC platform, as Code, Embedded Object or External-reference pointers |
| 693 // are split across two consecutive instructions and don't exist separately |
| 694 // in the code, so the serializer should not step forwards in memory after |
| 695 // a target is resolved and written. |
| 696 static const int kSpecialTargetSize = 0; |
| 697 |
| 698 // Number of instructions to load an address via a mov sequence. |
| 699 #if V8_TARGET_ARCH_PPC64 |
| 700 static const int kMovInstructionsConstantPool = 2; |
| 701 static const int kMovInstructionsNoConstantPool = 5; |
| 702 #else |
| 703 static const int kMovInstructionsConstantPool = 1; |
| 704 static const int kMovInstructionsNoConstantPool = 2; |
| 705 #endif |
| 706 #if V8_OOL_CONSTANT_POOL |
| 707 static const int kMovInstructions = kMovInstructionsConstantPool; |
| 708 #else |
| 709 static const int kMovInstructions = kMovInstructionsNoConstantPool; |
| 710 #endif |
| 711 |
| 712 // Distance between the instruction referring to the address of the call |
| 713 // target and the return address. |
| 714 |
| 715 // Call sequence is a FIXED_SEQUENCE: |
| 716 // mov r8, @ call address |
| 717 // mtlr r8 |
| 718 // blrl |
| 719 // @ return address |
| 720 static const int kCallTargetAddressOffset = |
| 721 (kMovInstructions + 2) * kInstrSize; |
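  // For example, on PPC64 without an out-of-line constant pool the mov is
  // kMovInstructionsNoConstantPool == 5 instructions, so (assuming the usual
  // 4-byte Instr) kCallTargetAddressOffset == (5 + 2) * kInstrSize == 28.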
| 722 |
| 723 // Distance between start of patched return sequence and the emitted address |
| 724 // to jump to. |
| 725 // Patched return sequence is a FIXED_SEQUENCE: |
| 726 // mov r0, <address> |
| 727 // mtlr r0 |
| 728 // blrl |
| 729 static const int kPatchReturnSequenceAddressOffset = 0 * kInstrSize; |
| 730 |
| 731 // Distance between start of patched debug break slot and the emitted address |
| 732 // to jump to. |
| 733 // Patched debug break slot code is a FIXED_SEQUENCE: |
| 734 // mov r0, <address> |
| 735 // mtlr r0 |
| 736 // blrl |
| 737 static const int kPatchDebugBreakSlotAddressOffset = 0 * kInstrSize; |
| 738 |
| 739 // This is the length of the BreakLocationIterator::SetDebugBreakAtReturn() |
| 740 // code patch FIXED_SEQUENCE |
| 741 static const int kJSReturnSequenceInstructions = |
| 742 kMovInstructionsNoConstantPool + 3; |
| 743 |
| 744 // This is the length of the code sequence from SetDebugBreakAtSlot() |
| 745 // FIXED_SEQUENCE |
| 746 static const int kDebugBreakSlotInstructions = |
| 747 kMovInstructionsNoConstantPool + 2; |
| 748 static const int kDebugBreakSlotLength = |
| 749 kDebugBreakSlotInstructions * kInstrSize; |
| 750 |
| 751 static inline int encode_crbit(const CRegister& cr, enum CRBit crbit) { |
| 752 return ((cr.code() * CRWIDTH) + crbit); |
| 753 } |
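  // For example, assuming CRWIDTH == 4 and CR_EQ == 2 (see constants-ppc.h),
  // encode_crbit(cr7, CR_EQ) == 7 * 4 + 2 == 30, i.e. the condition bit that
  // an "equal" branch on cr7 tests.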
| 754 |
| 755 // --------------------------------------------------------------------------- |
| 756 // Code generation |
| 757 |
| 758 // Insert the smallest number of nop instructions |
| 759 // possible to align the pc offset to a multiple |
| 760 // of m. m must be a power of 2 (>= 4). |
| 761 void Align(int m); |
| 762 // Aligns code to something that's optimal for a jump target on this platform.
| 763 void CodeTargetAlign(); |
| 764 |
| 765 // Branch instructions |
| 766 void bclr(BOfield bo, LKBit lk); |
| 767 void blr(); |
| 768 void bc(int branch_offset, BOfield bo, int condition_bit, LKBit lk = LeaveLK); |
| 769 void b(int branch_offset, LKBit lk); |
| 770 |
| 771 void bcctr(BOfield bo, LKBit lk); |
| 772 void bctr(); |
| 773 |
| 774 // Convenience branch instructions using labels |
| 775 void b(Label* L, LKBit lk = LeaveLK) { |
| 776 b(branch_offset(L, false), lk); |
| 777 } |
| 778 |
| 779 void bc_short(Condition cond, Label* L, CRegister cr = cr7, |
| 780 LKBit lk = LeaveLK) { |
| 781 DCHECK(cond != al); |
| 782 DCHECK(cr.code() >= 0 && cr.code() <= 7); |
| 783 |
| 784 int b_offset = branch_offset(L, false); |
| 785 |
| 786 switch (cond) { |
| 787 case eq: |
| 788 bc(b_offset, BT, encode_crbit(cr, CR_EQ), lk); |
| 789 break; |
| 790 case ne: |
| 791 bc(b_offset, BF, encode_crbit(cr, CR_EQ), lk); |
| 792 break; |
| 793 case gt: |
| 794 bc(b_offset, BT, encode_crbit(cr, CR_GT), lk); |
| 795 break; |
| 796 case le: |
| 797 bc(b_offset, BF, encode_crbit(cr, CR_GT), lk); |
| 798 break; |
| 799 case lt: |
| 800 bc(b_offset, BT, encode_crbit(cr, CR_LT), lk); |
| 801 break; |
| 802 case ge: |
| 803 bc(b_offset, BF, encode_crbit(cr, CR_LT), lk); |
| 804 break; |
| 805 case unordered: |
| 806 bc(b_offset, BT, encode_crbit(cr, CR_FU), lk); |
| 807 break; |
| 808 case ordered: |
| 809 bc(b_offset, BF, encode_crbit(cr, CR_FU), lk); |
| 810 break; |
| 811 case overflow: |
| 812 bc(b_offset, BT, encode_crbit(cr, CR_SO), lk); |
| 813 break; |
| 814 case nooverflow: |
| 815 bc(b_offset, BF, encode_crbit(cr, CR_SO), lk); |
| 816 break; |
| 817 default: |
| 818 UNIMPLEMENTED(); |
| 819 } |
| 820 } |
| 821 |
| 822 void b(Condition cond, Label* L, CRegister cr = cr7, LKBit lk = LeaveLK) { |
| 823 if (cond == al) { |
| 824 b(L, lk); |
| 825 return; |
| 826 } |
| 827 |
| 828 if ((L->is_bound() && is_near(L, cond)) || |
| 829 !is_trampoline_emitted()) { |
| 830 bc_short(cond, L, cr, lk); |
| 831 return; |
| 832 } |
| 833 |
| 834 Label skip; |
| 835 Condition neg_cond = NegateCondition(cond); |
| 836 bc_short(neg_cond, &skip, cr); |
| 837 b(L, lk); |
| 838 bind(&skip); |
| 839 } |
| 840 |
| 841 void bne(Label* L, CRegister cr = cr7, LKBit lk = LeaveLK) { |
| 842 b(ne, L, cr, lk); } |
| 843 void beq(Label* L, CRegister cr = cr7, LKBit lk = LeaveLK) { |
| 844 b(eq, L, cr, lk); } |
| 845 void blt(Label* L, CRegister cr = cr7, LKBit lk = LeaveLK) { |
| 846 b(lt, L, cr, lk); } |
| 847 void bge(Label* L, CRegister cr = cr7, LKBit lk = LeaveLK) { |
| 848 b(ge, L, cr, lk); } |
| 849 void ble(Label* L, CRegister cr = cr7, LKBit lk = LeaveLK) { |
| 850 b(le, L, cr, lk); } |
| 851 void bgt(Label* L, CRegister cr = cr7, LKBit lk = LeaveLK) { |
| 852 b(gt, L, cr, lk); } |
| 853 void bunordered(Label* L, CRegister cr = cr7, LKBit lk = LeaveLK) { |
| 854 b(unordered, L, cr, lk); } |
| 855 void bordered(Label* L, CRegister cr = cr7, LKBit lk = LeaveLK) { |
| 856 b(ordered, L, cr, lk); } |
| 857 void boverflow(Label* L, CRegister cr = cr0, LKBit lk = LeaveLK) { |
| 858 b(overflow, L, cr, lk); } |
| 859 void bnooverflow(Label* L, CRegister cr = cr0, LKBit lk = LeaveLK) { |
| 860 b(nooverflow, L, cr, lk); } |
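  // A short compare-and-branch sketch built from the helpers above (cmpi is
  // declared further below in this class; values are arbitrary):
  //
  //   Label done;
  //   cmpi(r3, Operand::Zero());   // compares into cr7 by default
  //   beq(&done);                  // branches on cr7's EQ bit
  //   addi(r3, r3, Operand(-1));
  //   bind(&done);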
| 861 |
| 862 // Decrement CTR; branch if CTR != 0 |
| 863 void bdnz(Label* L, LKBit lk = LeaveLK) { |
| 864 bc(branch_offset(L, false), DCBNZ, 0, lk); |
| 865 } |
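  // Counted-loop sketch using bdnz (mtctr is declared further below in this
  // class; the register holding the count is arbitrary):
  //
  //   Label loop;
  //   mtctr(r4);       // load iteration count into CTR
  //   bind(&loop);
  //   // ... loop body ...
  //   bdnz(&loop);     // decrement CTR, branch while CTR != 0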
| 866 |
| 867 // Data-processing instructions |
| 868 |
| 869 void sub(Register dst, Register src1, Register src2, |
| 870 OEBit s = LeaveOE, RCBit r = LeaveRC); |
| 871 |
| 872 void subfic(Register dst, Register src, const Operand& imm); |
| 873 |
| 874 void subfc(Register dst, Register src1, Register src2, |
| 875 OEBit s = LeaveOE, RCBit r = LeaveRC); |
| 876 |
| 877 void add(Register dst, Register src1, Register src2, |
| 878 OEBit s = LeaveOE, RCBit r = LeaveRC); |
| 879 |
| 880 void addc(Register dst, Register src1, Register src2, |
| 881 OEBit o = LeaveOE, RCBit r = LeaveRC); |
| 882 |
| 883 void addze(Register dst, Register src1, OEBit o, RCBit r); |
| 884 |
| 885 void mullw(Register dst, Register src1, Register src2, |
| 886 OEBit o = LeaveOE, RCBit r = LeaveRC); |
| 887 |
| 888 void mulhw(Register dst, Register src1, Register src2, |
| 889 OEBit o = LeaveOE, RCBit r = LeaveRC); |
| 890 |
| 891 void divw(Register dst, Register src1, Register src2, |
| 892 OEBit o = LeaveOE, RCBit r = LeaveRC); |
| 893 |
| 894 void addi(Register dst, Register src, const Operand& imm); |
| 895 void addis(Register dst, Register src, const Operand& imm); |
| 896 void addic(Register dst, Register src, const Operand& imm); |
| 897 |
| 898 void and_(Register dst, Register src1, Register src2, RCBit rc = LeaveRC); |
| 899 void andc(Register dst, Register src1, Register src2, RCBit rc = LeaveRC); |
| 900 void andi(Register ra, Register rs, const Operand& imm); |
| 901 void andis(Register ra, Register rs, const Operand& imm); |
| 902 void nor(Register dst, Register src1, Register src2, RCBit r = LeaveRC); |
| 903 void notx(Register dst, Register src, RCBit r = LeaveRC); |
| 904 void ori(Register dst, Register src, const Operand& imm); |
| 905 void oris(Register dst, Register src, const Operand& imm); |
| 906 void orx(Register dst, Register src1, Register src2, RCBit rc = LeaveRC); |
| 907 void xori(Register dst, Register src, const Operand& imm); |
| 908 void xoris(Register ra, Register rs, const Operand& imm); |
| 909 void xor_(Register dst, Register src1, Register src2, RCBit rc = LeaveRC); |
| 910 void cmpi(Register src1, const Operand& src2, CRegister cr = cr7); |
| 911 void cmpli(Register src1, const Operand& src2, CRegister cr = cr7); |
| 912 void cmpwi(Register src1, const Operand& src2, CRegister cr = cr7); |
| 913 void cmplwi(Register src1, const Operand& src2, CRegister cr = cr7); |
| 914 void li(Register dst, const Operand& src); |
| 915 void lis(Register dst, const Operand& imm); |
| 916 void mr(Register dst, Register src); |
| 917 |
| 918 void lbz(Register dst, const MemOperand& src); |
| 919 void lbzx(Register dst, const MemOperand& src); |
| 920 void lbzux(Register dst, const MemOperand& src); |
| 921 void lhz(Register dst, const MemOperand& src); |
| 922 void lhzx(Register dst, const MemOperand& src); |
| 923 void lhzux(Register dst, const MemOperand& src); |
| 924 void lwz(Register dst, const MemOperand& src); |
| 925 void lwzu(Register dst, const MemOperand& src); |
| 926 void lwzx(Register dst, const MemOperand& src); |
| 927 void lwzux(Register dst, const MemOperand& src); |
| 928 void lwa(Register dst, const MemOperand& src); |
| 929 void stb(Register dst, const MemOperand& src); |
| 930 void stbx(Register dst, const MemOperand& src); |
| 931 void stbux(Register dst, const MemOperand& src); |
| 932 void sth(Register dst, const MemOperand& src); |
| 933 void sthx(Register dst, const MemOperand& src); |
| 934 void sthux(Register dst, const MemOperand& src); |
| 935 void stw(Register dst, const MemOperand& src); |
| 936 void stwu(Register dst, const MemOperand& src); |
| 937 void stwx(Register rs, const MemOperand& src); |
| 938 void stwux(Register rs, const MemOperand& src); |
| 939 |
| 940 void extsb(Register rs, Register ra, RCBit r = LeaveRC); |
| 941 void extsh(Register rs, Register ra, RCBit r = LeaveRC); |
| 942 |
| 943 void neg(Register rt, Register ra, OEBit o = LeaveOE, RCBit c = LeaveRC); |
| 944 |
| 945 #if V8_TARGET_ARCH_PPC64 |
| 946 void ld(Register rd, const MemOperand &src); |
| 947 void ldx(Register rd, const MemOperand &src); |
| 948 void ldu(Register rd, const MemOperand &src); |
| 949 void ldux(Register rd, const MemOperand &src); |
| 950 void std(Register rs, const MemOperand &src); |
| 951 void stdx(Register rs, const MemOperand &src); |
| 952 void stdu(Register rs, const MemOperand &src); |
| 953 void stdux(Register rs, const MemOperand &src); |
| 954 void rldic(Register dst, Register src, int sh, int mb, RCBit r = LeaveRC); |
| 955 void rldicl(Register dst, Register src, int sh, int mb, RCBit r = LeaveRC); |
| 956 void rldcl(Register ra, Register rs, Register rb, int mb, RCBit r = LeaveRC); |
| 957 void rldicr(Register dst, Register src, int sh, int me, RCBit r = LeaveRC); |
| 958 void rldimi(Register dst, Register src, int sh, int mb, RCBit r = LeaveRC); |
| 959 void sldi(Register dst, Register src, const Operand& val, RCBit rc = LeaveRC); |
| 960 void srdi(Register dst, Register src, const Operand& val, RCBit rc = LeaveRC); |
| 961 void clrrdi(Register dst, Register src, const Operand& val, |
| 962 RCBit rc = LeaveRC); |
| 963 void clrldi(Register dst, Register src, const Operand& val, |
| 964 RCBit rc = LeaveRC); |
| 965 void sradi(Register ra, Register rs, int sh, RCBit r = LeaveRC); |
| 966 void srd(Register dst, Register src1, Register src2, RCBit r = LeaveRC); |
| 967 void sld(Register dst, Register src1, Register src2, RCBit r = LeaveRC); |
| 968 void srad(Register dst, Register src1, Register src2, RCBit r = LeaveRC); |
| 969 void rotld(Register ra, Register rs, Register rb, RCBit r = LeaveRC); |
| 970 void rotldi(Register ra, Register rs, int sh, RCBit r = LeaveRC); |
| 971 void rotrdi(Register ra, Register rs, int sh, RCBit r = LeaveRC); |
| 972 void cntlzd_(Register dst, Register src, RCBit rc = LeaveRC); |
| 973 void extsw(Register rs, Register ra, RCBit r = LeaveRC); |
| 974 void mulld(Register dst, Register src1, Register src2, |
| 975 OEBit o = LeaveOE, RCBit r = LeaveRC); |
| 976 void divd(Register dst, Register src1, Register src2, |
| 977 OEBit o = LeaveOE, RCBit r = LeaveRC); |
| 978 #endif |
| 979 |
| 980 void rlwinm(Register ra, Register rs, int sh, int mb, int me, |
| 981 RCBit rc = LeaveRC); |
| 982 void rlwimi(Register ra, Register rs, int sh, int mb, int me, |
| 983 RCBit rc = LeaveRC); |
| 984 void rlwnm(Register ra, Register rs, Register rb, int mb, int me, |
| 985 RCBit rc = LeaveRC); |
| 986 void slwi(Register dst, Register src, const Operand& val, RCBit rc = LeaveRC); |
| 987 void srwi(Register dst, Register src, const Operand& val, RCBit rc = LeaveRC); |
| 988 void clrrwi(Register dst, Register src, const Operand& val, |
| 989 RCBit rc = LeaveRC); |
| 990 void clrlwi(Register dst, Register src, const Operand& val, |
| 991 RCBit rc = LeaveRC); |
| 992 void srawi(Register ra, Register rs, int sh, RCBit r = LeaveRC); |
| 993 void srw(Register dst, Register src1, Register src2, RCBit r = LeaveRC); |
| 994 void slw(Register dst, Register src1, Register src2, RCBit r = LeaveRC); |
| 995 void sraw(Register dst, Register src1, Register src2, RCBit r = LeaveRC); |
| 996 void rotlw(Register ra, Register rs, Register rb, RCBit r = LeaveRC); |
| 997 void rotlwi(Register ra, Register rs, int sh, RCBit r = LeaveRC); |
| 998 void rotrwi(Register ra, Register rs, int sh, RCBit r = LeaveRC); |
| 999 |
| 1000 void cntlzw_(Register dst, Register src, RCBit rc = LeaveRC); |
| 1001 |
| 1002 void subi(Register dst, Register src1, const Operand& src2); |
| 1003 |
| 1004 void cmp(Register src1, Register src2, CRegister cr = cr7); |
| 1005 void cmpl(Register src1, Register src2, CRegister cr = cr7); |
| 1006 void cmpw(Register src1, Register src2, CRegister cr = cr7); |
| 1007 void cmplw(Register src1, Register src2, CRegister cr = cr7); |
| 1008 |
| 1009 void mov(Register dst, const Operand& src); |
| 1010 |
| 1011 // Load the position of the label relative to the generated code object |
| 1012 // pointer in a register. |
| 1013 void mov_label_offset(Register dst, Label* label); |
| 1014 |
| 1015 // Multiply instructions |
| 1016 void mul(Register dst, Register src1, Register src2, |
| 1017 OEBit s = LeaveOE, RCBit r = LeaveRC); |
| 1018 |
| 1019 // Miscellaneous arithmetic instructions |
| 1020 |
| 1021 // Special register access |
| 1022 void crxor(int bt, int ba, int bb); |
| 1023 void crclr(int bt) { crxor(bt, bt, bt); } |
| 1024 void creqv(int bt, int ba, int bb); |
| 1025 void crset(int bt) { creqv(bt, bt, bt); } |
| 1026 void mflr(Register dst); |
| 1027 void mtlr(Register src); |
| 1028 void mtctr(Register src); |
| 1029 void mtxer(Register src); |
| 1030 void mcrfs(int bf, int bfa); |
| 1031 void mfcr(Register dst); |
| 1032 #if V8_TARGET_ARCH_PPC64 |
| 1033 void mffprd(Register dst, DoubleRegister src); |
| 1034 void mffprwz(Register dst, DoubleRegister src); |
| 1035 void mtfprd(DoubleRegister dst, Register src); |
| 1036 void mtfprwz(DoubleRegister dst, Register src); |
| 1037 void mtfprwa(DoubleRegister dst, Register src); |
| 1038 #endif |
| 1039 |
| 1040 void fake_asm(enum FAKE_OPCODE_T fopcode); |
| 1041 void marker_asm(int mcode); |
| 1042 void function_descriptor(); |
| 1043 |
| 1044 // Exception-generating instructions and debugging support |
| 1045 void stop(const char* msg, |
| 1046 Condition cond = al, |
| 1047 int32_t code = kDefaultStopCode, |
| 1048 CRegister cr = cr7); |
| 1049 |
| 1050 void bkpt(uint32_t imm16); // v5 and above |
| 1051 |
| 1052 // Informational messages when simulating |
| 1053 void info(const char* msg, |
| 1054 Condition cond = al, |
| 1055 int32_t code = kDefaultStopCode, |
| 1056 CRegister cr = cr7); |
| 1057 |
| 1058 void dcbf(Register ra, Register rb); |
| 1059 void sync(); |
| 1060 void icbi(Register ra, Register rb); |
| 1061 void isync(); |
| 1062 |
| 1063 // Support for floating point |
| 1064 void lfd(const DoubleRegister frt, const MemOperand& src); |
| 1065 void lfdu(const DoubleRegister frt, const MemOperand& src); |
| 1066 void lfdx(const DoubleRegister frt, const MemOperand& src); |
| 1067 void lfdux(const DoubleRegister frt, const MemOperand& src); |
| 1068 void lfs(const DoubleRegister frt, const MemOperand& src); |
| 1069 void lfsu(const DoubleRegister frt, const MemOperand& src); |
| 1070 void lfsx(const DoubleRegister frt, const MemOperand& src); |
| 1071 void lfsux(const DoubleRegister frt, const MemOperand& src); |
| 1072 void stfd(const DoubleRegister frs, const MemOperand& src); |
| 1073 void stfdu(const DoubleRegister frs, const MemOperand& src); |
| 1074 void stfdx(const DoubleRegister frs, const MemOperand& src); |
| 1075 void stfdux(const DoubleRegister frs, const MemOperand& src); |
| 1076 void stfs(const DoubleRegister frs, const MemOperand& src); |
| 1077 void stfsu(const DoubleRegister frs, const MemOperand& src); |
| 1078 void stfsx(const DoubleRegister frs, const MemOperand& src); |
| 1079 void stfsux(const DoubleRegister frs, const MemOperand& src); |
| 1080 |
| 1081 void fadd(const DoubleRegister frt, const DoubleRegister fra, |
| 1082 const DoubleRegister frb, RCBit rc = LeaveRC); |
| 1083 void fsub(const DoubleRegister frt, const DoubleRegister fra, |
| 1084 const DoubleRegister frb, RCBit rc = LeaveRC); |
| 1085 void fdiv(const DoubleRegister frt, const DoubleRegister fra, |
| 1086 const DoubleRegister frb, RCBit rc = LeaveRC); |
| 1087 void fmul(const DoubleRegister frt, const DoubleRegister fra, |
| 1088 const DoubleRegister frc, RCBit rc = LeaveRC); |
| 1089 void fcmpu(const DoubleRegister fra, const DoubleRegister frb, |
| 1090 CRegister cr = cr7); |
| 1091 void fmr(const DoubleRegister frt, const DoubleRegister frb, |
| 1092 RCBit rc = LeaveRC); |
| 1093 void fctiwz(const DoubleRegister frt, const DoubleRegister frb); |
| 1094 void fctiw(const DoubleRegister frt, const DoubleRegister frb); |
| 1095 void frim(const DoubleRegister frt, const DoubleRegister frb); |
| 1096 void frsp(const DoubleRegister frt, const DoubleRegister frb, |
| 1097 RCBit rc = LeaveRC); |
| 1098 void fcfid(const DoubleRegister frt, const DoubleRegister frb, |
| 1099 RCBit rc = LeaveRC); |
| 1100 void fctid(const DoubleRegister frt, const DoubleRegister frb, |
| 1101 RCBit rc = LeaveRC); |
| 1102 void fctidz(const DoubleRegister frt, const DoubleRegister frb, |
| 1103 RCBit rc = LeaveRC); |
| 1104 void fsel(const DoubleRegister frt, const DoubleRegister fra, |
| 1105 const DoubleRegister frc, const DoubleRegister frb, |
| 1106 RCBit rc = LeaveRC); |
| 1107 void fneg(const DoubleRegister frt, const DoubleRegister frb, |
| 1108 RCBit rc = LeaveRC); |
| 1109 void mtfsfi(int bf, int immediate, RCBit rc = LeaveRC); |
| 1110 void mffs(const DoubleRegister frt, RCBit rc = LeaveRC); |
| 1111 void mtfsf(const DoubleRegister frb, bool L = 1, int FLM = 0, bool W = 0, |
| 1112 RCBit rc = LeaveRC); |
| 1113 void fsqrt(const DoubleRegister frt, const DoubleRegister frb, |
| 1114 RCBit rc = LeaveRC); |
| 1115 void fabs(const DoubleRegister frt, const DoubleRegister frb, |
| 1116 RCBit rc = LeaveRC); |
| 1117 void fmadd(const DoubleRegister frt, const DoubleRegister fra, |
| 1118 const DoubleRegister frc, const DoubleRegister frb, |
| 1119 RCBit rc = LeaveRC); |
| 1120 void fmsub(const DoubleRegister frt, const DoubleRegister fra, |
| 1121 const DoubleRegister frc, const DoubleRegister frb, |
| 1122 RCBit rc = LeaveRC); |
| 1123 |
| 1124 // Pseudo instructions |
| 1125 |
| 1126 // Different nop operations are used by the code generator to detect certain |
| 1127 // states of the generated code. |
| 1128 enum NopMarkerTypes { |
| 1129 NON_MARKING_NOP = 0, |
| 1130 DEBUG_BREAK_NOP, |
| 1131 // IC markers. |
| 1132 PROPERTY_ACCESS_INLINED, |
| 1133 PROPERTY_ACCESS_INLINED_CONTEXT, |
| 1134 PROPERTY_ACCESS_INLINED_CONTEXT_DONT_DELETE, |
| 1135 // Helper values. |
| 1136 LAST_CODE_MARKER, |
| 1137 FIRST_IC_MARKER = PROPERTY_ACCESS_INLINED |
| 1138 }; |
| 1139 |
| 1140 void nop(int type = 0); // 0 is the default non-marking type. |
| 1141 |
| 1142 void push(Register src) { |
| 1143 #if V8_TARGET_ARCH_PPC64 |
| 1144 stdu(src, MemOperand(sp, -8)); |
| 1145 #else |
| 1146 stwu(src, MemOperand(sp, -4)); |
| 1147 #endif |
| 1148 } |
| 1149 |
| 1150 void pop(Register dst) { |
| 1151 #if V8_TARGET_ARCH_PPC64 |
| 1152 ld(dst, MemOperand(sp)); |
| 1153 addi(sp, sp, Operand(8)); |
| 1154 #else |
| 1155 lwz(dst, MemOperand(sp)); |
| 1156 addi(sp, sp, Operand(4)); |
| 1157 #endif |
| 1158 } |
| 1159 |
| 1160 void pop() { |
| 1161 addi(sp, sp, Operand(kPointerSize)); |
| 1162 } |
| 1163 |
| 1164 // Jump unconditionally to given label. |
| 1165 void jmp(Label* L) { b(L); } |
| 1166 |
| 1167 // Check the code size generated from label to here. |
| 1168 int SizeOfCodeGeneratedSince(Label* label) { |
| 1169 return pc_offset() - label->pos(); |
| 1170 } |
| 1171 |
| 1172 // Check the number of instructions generated from label to here. |
| 1173 int InstructionsGeneratedSince(Label* label) { |
| 1174 return SizeOfCodeGeneratedSince(label) / kInstrSize; |
| 1175 } |
| 1176 |
| 1177 // Class for scoping postponing the trampoline pool generation. |
| 1178 class BlockTrampolinePoolScope { |
| 1179 public: |
| 1180 explicit BlockTrampolinePoolScope(Assembler* assem) : assem_(assem) { |
| 1181 assem_->StartBlockTrampolinePool(); |
| 1182 } |
| 1183 ~BlockTrampolinePoolScope() { |
| 1184 assem_->EndBlockTrampolinePool(); |
| 1185 } |
| 1186 |
| 1187 private: |
| 1188 Assembler* assem_; |
| 1189 |
| 1190 DISALLOW_IMPLICIT_CONSTRUCTORS(BlockTrampolinePoolScope); |
| 1191 }; |
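  // Usage sketch: keep the trampoline pool from being emitted in the middle
  // of a fixed-length sequence (the constant loaded here is a placeholder):
  //
  //   { BlockTrampolinePoolScope block_trampoline_pool(this);
  //     mov(ip, Operand(some_imm));   // FIXED_SEQUENCE mov
  //     mtlr(ip);
  //   }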
| 1192 |
| 1193 // Debugging |
| 1194 |
| 1195 // Mark address of the ExitJSFrame code. |
| 1196 void RecordJSReturn(); |
| 1197 |
| 1198 // Mark address of a debug break slot. |
| 1199 void RecordDebugBreakSlot(); |
| 1200 |
| 1201 // Record the AST id of the CallIC being compiled, so that it can be placed |
| 1202 // in the relocation information. |
| 1203 void SetRecordedAstId(TypeFeedbackId ast_id) { |
| 1204 // FIXME(ppc): re-enable once the failure is understood: DCHECK(recorded_ast_id_.IsNone());
| 1205 recorded_ast_id_ = ast_id; |
| 1206 } |
| 1207 |
| 1208 TypeFeedbackId RecordedAstId() { |
| 1209 // FIXME(ppc): re-enable once the failure is understood: DCHECK(!recorded_ast_id_.IsNone());
| 1210 return recorded_ast_id_; |
| 1211 } |
| 1212 |
| 1213 void ClearRecordedAstId() { recorded_ast_id_ = TypeFeedbackId::None(); } |
| 1214 |
| 1215 // Record a comment relocation entry that can be used by a disassembler. |
| 1216 // Use --code-comments to enable. |
| 1217 void RecordComment(const char* msg); |
| 1218 |
| 1219 // Writes a single byte or word of data in the code stream. Used |
| 1220 // for inline tables, e.g., jump-tables. |
| 1221 void db(uint8_t data); |
| 1222 void dd(uint32_t data); |
| 1223 void emit_ptr(uintptr_t data); |
| 1224 |
| 1225 PositionsRecorder* positions_recorder() { return &positions_recorder_; } |
| 1226 |
| 1227 // Read/patch instructions |
| 1228 Instr instr_at(int pos) { return *reinterpret_cast<Instr*>(buffer_ + pos); } |
| 1229 void instr_at_put(int pos, Instr instr) { |
| 1230 *reinterpret_cast<Instr*>(buffer_ + pos) = instr; |
| 1231 } |
| 1232 static Instr instr_at(byte* pc) { return *reinterpret_cast<Instr*>(pc); } |
| 1233 static void instr_at_put(byte* pc, Instr instr) { |
| 1234 *reinterpret_cast<Instr*>(pc) = instr; |
| 1235 } |
| 1236 static Condition GetCondition(Instr instr); |
| 1237 |
| 1238 static bool IsLis(Instr instr); |
| 1239 static bool IsLi(Instr instr); |
| 1240 static bool IsAddic(Instr instr); |
| 1241 static bool IsOri(Instr instr); |
| 1242 |
| 1243 static bool IsBranch(Instr instr); |
| 1244 static Register GetRA(Instr instr); |
| 1245 static Register GetRB(Instr instr); |
| 1246 #if V8_TARGET_ARCH_PPC64 |
| 1247 static bool Is64BitLoadIntoR12(Instr instr1, Instr instr2, |
| 1248 Instr instr3, Instr instr4, Instr instr5); |
| 1249 #else |
| 1250 static bool Is32BitLoadIntoR12(Instr instr1, Instr instr2); |
| 1251 #endif |
| 1252 |
| 1253 static bool IsCmpRegister(Instr instr); |
| 1254 static bool IsCmpImmediate(Instr instr); |
| 1255 static bool IsRlwinm(Instr instr); |
| 1256 #if V8_TARGET_ARCH_PPC64 |
| 1257 static bool IsRldicl(Instr instr); |
| 1258 #endif |
| 1259 static bool IsCrSet(Instr instr); |
| 1260 static Register GetCmpImmediateRegister(Instr instr); |
| 1261 static int GetCmpImmediateRawImmediate(Instr instr); |
| 1262 static bool IsNop(Instr instr, int type = NON_MARKING_NOP); |
| 1263 |
| 1264 // Postpone the generation of the trampoline pool for the specified number of |
| 1265 // instructions. |
| 1266 void BlockTrampolinePoolFor(int instructions); |
| 1267 void CheckTrampolinePool(); |
| 1268 |
| 1269 int instructions_required_for_mov(const Operand& x) const; |
| 1270 |
| 1271 #if V8_OOL_CONSTANT_POOL |
| 1272 // Decide between using the constant pool vs. a mov immediate sequence. |
| 1273 bool use_constant_pool_for_mov(const Operand& x, bool canOptimize) const; |
| 1274 |
| 1275 // The code currently calls CheckBuffer() too often. This has the side |
| 1276 // effect of randomly growing the buffer in the middle of multi-instruction |
| 1277 // sequences. |
| 1278 // MacroAssembler::LoadConstantPoolPointerRegister() includes a relocation |
| 1279 // and multiple instructions. We cannot grow the buffer until the |
| 1280 // relocation and all of the instructions are written. |
| 1281 // |
| 1282 // This function allows outside callers to check and grow the buffer |
| 1283 void EnsureSpaceFor(int space_needed); |
| 1284 #endif |
| 1285 |
| 1286 // Allocate a constant pool of the correct size for the generated code. |
| 1287 Handle<ConstantPoolArray> NewConstantPool(Isolate* isolate); |
| 1288 |
| 1289 // Generate the constant pool for the generated code. |
| 1290 void PopulateConstantPool(ConstantPoolArray* constant_pool); |
| 1291 |
| 1292 #if V8_OOL_CONSTANT_POOL |
| 1293 bool is_constant_pool_available() const { return constant_pool_available_; } |
| 1294 |
| 1295 bool is_constant_pool_full() const { |
| 1296 return constant_pool_builder_.is_full(); |
| 1297 } |
| 1298 |
| 1299 bool use_extended_constant_pool() const { |
| 1300 return constant_pool_builder_.current_section() == |
| 1301 ConstantPoolArray::EXTENDED_SECTION; |
| 1302 } |
| 1303 #endif |
| 1304 |
| 1305 #if ABI_USES_FUNCTION_DESCRIPTORS || V8_OOL_CONSTANT_POOL |
| 1306 static void RelocateInternalReference(Address pc, intptr_t delta, |
| 1307 Address code_start, |
| 1308 ICacheFlushMode icache_flush_mode = |
| 1309 FLUSH_ICACHE_IF_NEEDED); |
| 1310 static int DecodeInternalReference(Vector<char> buffer, Address pc); |
| 1311 #endif |
| 1312 |
| 1313 protected: |
| 1314 // Relocation for a type-recording IC has the AST id added to it. This |
| 1315 // member variable is a way to pass the information from the call site to |
| 1316 // the relocation info. |
| 1317 TypeFeedbackId recorded_ast_id_; |
| 1318 |
| 1319 int buffer_space() const { return reloc_info_writer.pos() - pc_; } |
| 1320 |
| 1321 // Decode branch instruction at pos and return branch target pos |
| 1322 int target_at(int pos); |
| 1323 |
| 1324 // Patch branch instruction at pos to branch to given branch target pos |
| 1325 void target_at_put(int pos, int target_pos); |
| 1326 |
| 1327 // Record reloc info for current pc_ |
| 1328 void RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data = 0); |
| 1329 void RecordRelocInfo(const RelocInfo& rinfo); |
| 1330 #if V8_OOL_CONSTANT_POOL |
| 1331 ConstantPoolArray::LayoutSection ConstantPoolAddEntry( |
| 1332 const RelocInfo& rinfo) { |
| 1333 return constant_pool_builder_.AddEntry(this, rinfo); |
| 1334 } |
| 1335 #endif |
| 1336 |
| 1337 // Block the emission of the trampoline pool before pc_offset. |
| 1338 void BlockTrampolinePoolBefore(int pc_offset) { |
| 1339 if (no_trampoline_pool_before_ < pc_offset) |
| 1340 no_trampoline_pool_before_ = pc_offset; |
| 1341 } |
| 1342 |
| 1343 void StartBlockTrampolinePool() { |
| 1344 trampoline_pool_blocked_nesting_++; |
| 1345 } |
| 1346 |
| 1347 void EndBlockTrampolinePool() { |
| 1348 trampoline_pool_blocked_nesting_--; |
| 1349 } |
| 1350 |
| 1351 bool is_trampoline_pool_blocked() const { |
| 1352 return trampoline_pool_blocked_nesting_ > 0; |
| 1353 } |
| 1354 |
| 1355 bool has_exception() const { |
| 1356 return internal_trampoline_exception_; |
| 1357 } |
| 1358 |
| 1359 bool is_trampoline_emitted() const { |
| 1360 return trampoline_emitted_; |
| 1361 } |
| 1362 |
| 1363 #if V8_OOL_CONSTANT_POOL |
| 1364 void set_constant_pool_available(bool available) { |
| 1365 constant_pool_available_ = available; |
| 1366 } |
| 1367 #endif |
| 1368 |
| 1369 private: |
| 1370 // Code generation |
| 1371 // The relocation writer's position is at least kGap bytes below the end of |
| 1372 // the generated instructions. This is so that multi-instruction sequences do |
| 1373 // not have to check for overflow. The same is true for writes of large |
| 1374 // relocation info entries. |
| 1375 static const int kGap = 32; |
| 1376 |
| 1377 // Repeated checking whether the trampoline pool should be emitted is rather |
| 1378 // expensive. By default we only check again once a number of instructions |
| 1379 // has been generated. |
| 1380 int next_buffer_check_; // pc offset of next buffer check. |
| 1381 |
| 1382 // Emission of the trampoline pool may be blocked in some code sequences. |
| 1383 int trampoline_pool_blocked_nesting_; // Block emission if this is not zero. |
| 1384 int no_trampoline_pool_before_; // Block emission before this pc offset. |
| 1385 |
| 1386 // Relocation info generation |
| 1387 // Each relocation is encoded as a variable size value |
| 1388 static const int kMaxRelocSize = RelocInfoWriter::kMaxSize; |
| 1389 RelocInfoWriter reloc_info_writer; |
| 1390 |
| 1391 // The bound position, before this we cannot do instruction elimination. |
| 1392 int last_bound_pos_; |
| 1393 |
| 1394 #if V8_OOL_CONSTANT_POOL |
| 1395 ConstantPoolBuilder constant_pool_builder_; |
| 1396 |
| 1397 // Indicates whether the constant pool can be accessed, which is only possible |
| 1398 // if kConstantPoolRegister points to the current code object's constant pool. |
| 1399 bool constant_pool_available_; |
| 1400 #endif |
| 1401 |
| 1402 // Code emission |
| 1403 inline void CheckBuffer(); |
| 1404 void GrowBuffer(); |
| 1405 inline void emit(Instr x); |
| 1406 inline void CheckTrampolinePoolQuick(); |
| 1407 |
| 1408 // Instruction generation |
| 1409 void a_form(Instr instr, DoubleRegister frt, DoubleRegister fra, |
| 1410 DoubleRegister frb, RCBit r); |
| 1411 void d_form(Instr instr, Register rt, Register ra, const intptr_t val, |
| 1412 bool signed_disp); |
| 1413 void x_form(Instr instr, Register ra, Register rs, Register rb, RCBit r); |
| 1414 void xo_form(Instr instr, Register rt, Register ra, Register rb, |
| 1415 OEBit o, RCBit r); |
| 1416 void md_form(Instr instr, Register ra, Register rs, int shift, int maskbit, |
| 1417 RCBit r); |
| 1418 void mds_form(Instr instr, Register ra, Register rs, Register rb, int maskbit, |
| 1419 RCBit r); |
| 1420 |
| 1421 // Labels |
| 1422 void print(Label* L); |
| 1423 int max_reach_from(int pos); |
| 1424 void bind_to(Label* L, int pos); |
| 1425 void next(Label* L); |
| 1426 |
| 1427 class Trampoline { |
| 1428 public: |
| 1429 Trampoline() { |
| 1430 next_slot_ = 0; |
| 1431 free_slot_count_ = 0; |
| 1432 } |
| 1433 Trampoline(int start, int slot_count) { |
| 1434 next_slot_ = start; |
| 1435 free_slot_count_ = slot_count; |
| 1436 } |
| 1437 int take_slot() { |
| 1438 int trampoline_slot = kInvalidSlotPos; |
| 1439 if (free_slot_count_ <= 0) { |
| 1440 // We have run out of space on trampolines. |
| 1441 // Make sure we fail in debug mode, so we become aware of each case |
| 1442 // when this happens. |
| 1443 DCHECK(0); |
| 1444 // Internal exception will be caught. |
| 1445 } else { |
| 1446 trampoline_slot = next_slot_; |
| 1447 free_slot_count_--; |
| 1448 next_slot_ += kTrampolineSlotsSize; |
| 1449 } |
| 1450 return trampoline_slot; |
| 1451 } |
| 1452 |
| 1453 private: |
| 1454 int next_slot_; |
| 1455 int free_slot_count_; |
| 1456 }; |
| 1457 |
| 1458 int32_t get_trampoline_entry(); |
| 1459 int unbound_labels_count_; |
| 1460 // If the trampoline is emitted, the generated code is becoming large. As
| 1461 // this is already a slow case which can possibly break code generation in
| 1462 // the extreme case, we use this information to switch to a different mode
| 1463 // of branch instruction generation, in which we no longer use a single
| 1464 // branch instruction.
| 1465 bool trampoline_emitted_; |
| 1466 static const int kTrampolineSlotsSize = kInstrSize; |
| 1467 static const int kMaxCondBranchReach = (1 << (16 - 1)) - 1; |
| 1468 static const int kMaxBlockTrampolineSectionSize = 64 * kInstrSize; |
| 1469 static const int kInvalidSlotPos = -1; |
| 1470 |
| 1471 Trampoline trampoline_; |
| 1472 bool internal_trampoline_exception_; |
| 1473 |
| 1474 friend class RegExpMacroAssemblerPPC; |
| 1475 friend class RelocInfo; |
| 1476 friend class CodePatcher; |
| 1477 friend class BlockTrampolinePoolScope; |
| 1478 #if V8_OOL_CONSTANT_POOL |
| 1479 friend class FrameAndConstantPoolScope; |
| 1480 friend class ConstantPoolUnavailableScope; |
| 1481 #endif |
| 1482 |
| 1483 PositionsRecorder positions_recorder_; |
| 1484 friend class PositionsRecorder; |
| 1485 friend class EnsureSpace; |
| 1486 }; |
| 1487 |
| 1488 |
| 1489 class EnsureSpace BASE_EMBEDDED { |
| 1490 public: |
| 1491 explicit EnsureSpace(Assembler* assembler) { |
| 1492 assembler->CheckBuffer(); |
| 1493 } |
| 1494 }; |
| 1495 |
| 1496 } } // namespace v8::internal |
| 1497 |
| 1498 #endif // V8_PPC_ASSEMBLER_PPC_H_ |