| OLD | NEW |
| 1 // Copyright (c) 1994-2006 Sun Microsystems Inc. | 1 // Copyright (c) 1994-2006 Sun Microsystems Inc. |
| 2 // All Rights Reserved. | 2 // All Rights Reserved. |
| 3 // | 3 // |
| 4 // Redistribution and use in source and binary forms, with or without | 4 // Redistribution and use in source and binary forms, with or without |
| 5 // modification, are permitted provided that the following conditions are | 5 // modification, are permitted provided that the following conditions are |
| 6 // met: | 6 // met: |
| 7 // | 7 // |
| 8 // - Redistributions of source code must retain the above copyright notice, | 8 // - Redistributions of source code must retain the above copyright notice, |
| 9 // this list of conditions and the following disclaimer. | 9 // this list of conditions and the following disclaimer. |
| 10 // | 10 // |
| (...skipping 72 matching lines...) |
| 83 #elif defined(V8_TARGET_BIG_ENDIAN) | 83 #elif defined(V8_TARGET_BIG_ENDIAN) |
| 84 static const int kMantissaOffset = 4; | 84 static const int kMantissaOffset = 4; |
| 85 static const int kExponentOffset = 0; | 85 static const int kExponentOffset = 0; |
| 86 #else | 86 #else |
| 87 #error Unknown endianness | 87 #error Unknown endianness |
| 88 #endif | 88 #endif |
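Note (illustrative, not part of the CL): kMantissaOffset/kExponentOffset describe where the low (mantissa) and high (sign/exponent) 32-bit words of an IEEE-754 double sit relative to its base address, which flips with target endianness. A minimal standalone C++ sketch of the little-endian case; nothing here is taken from V8 beyond the two offsets shown above:

    // Reads the two 32-bit halves of a double via byte offsets
    // (little-endian layout: mantissa word first, exponent word second).
    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    int main() {
      const int kMantissaOffset = 0;
      const int kExponentOffset = 4;
      double d = 1.0;  // bit pattern 0x3FF0000000000000
      uint32_t lo, hi;
      std::memcpy(&lo, reinterpret_cast<const char*>(&d) + kMantissaOffset, 4);
      std::memcpy(&hi, reinterpret_cast<const char*>(&d) + kExponentOffset, 4);
      std::printf("mantissa word=0x%08x exponent word=0x%08x\n", lo, hi);
      // Expected on a little-endian host: 0x00000000 and 0x3ff00000.
      return 0;
    }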
| 89 | 89 |
| 90 inline static int NumAllocatableRegisters(); | 90 inline static int NumAllocatableRegisters(); |
| 91 | 91 |
| 92 static int ToAllocationIndex(Register reg) { | 92 static int ToAllocationIndex(Register reg) { |
| 93 ASSERT((reg.code() - 2) < (kMaxNumAllocatableRegisters - 1) || | 93 DCHECK((reg.code() - 2) < (kMaxNumAllocatableRegisters - 1) || |
| 94 reg.is(from_code(kCpRegister))); | 94 reg.is(from_code(kCpRegister))); |
| 95 return reg.is(from_code(kCpRegister)) ? | 95 return reg.is(from_code(kCpRegister)) ? |
| 96 kMaxNumAllocatableRegisters - 1 : // Return last index for 'cp'. | 96 kMaxNumAllocatableRegisters - 1 : // Return last index for 'cp'. |
| 97 reg.code() - 2; // zero_reg and 'at' are skipped. | 97 reg.code() - 2; // zero_reg and 'at' are skipped. |
| 98 } | 98 } |
| 99 | 99 |
| 100 static Register FromAllocationIndex(int index) { | 100 static Register FromAllocationIndex(int index) { |
| 101 ASSERT(index >= 0 && index < kMaxNumAllocatableRegisters); | 101 DCHECK(index >= 0 && index < kMaxNumAllocatableRegisters); |
| 102 return index == kMaxNumAllocatableRegisters - 1 ? | 102 return index == kMaxNumAllocatableRegisters - 1 ? |
| 103 from_code(kCpRegister) : // Last index is always the 'cp' register. | 103 from_code(kCpRegister) : // Last index is always the 'cp' register. |
| 104 from_code(index + 2); // zero_reg and 'at' are skipped. | 104 from_code(index + 2); // zero_reg and 'at' are skipped. |
| 105 } | 105 } |
| 106 | 106 |
| 107 static const char* AllocationIndexToString(int index) { | 107 static const char* AllocationIndexToString(int index) { |
| 108 ASSERT(index >= 0 && index < kMaxNumAllocatableRegisters); | 108 DCHECK(index >= 0 && index < kMaxNumAllocatableRegisters); |
| 109 const char* const names[] = { | 109 const char* const names[] = { |
| 110 "v0", | 110 "v0", |
| 111 "v1", | 111 "v1", |
| 112 "a0", | 112 "a0", |
| 113 "a1", | 113 "a1", |
| 114 "a2", | 114 "a2", |
| 115 "a3", | 115 "a3", |
| 116 "t0", | 116 "t0", |
| 117 "t1", | 117 "t1", |
| 118 "t2", | 118 "t2", |
| 119 "t3", | 119 "t3", |
| 120 "t4", | 120 "t4", |
| 121 "t5", | 121 "t5", |
| 122 "t6", | 122 "t6", |
| 123 "s7", | 123 "s7", |
| 124 }; | 124 }; |
| 125 return names[index]; | 125 return names[index]; |
| 126 } | 126 } |
| 127 | 127 |
| 128 static Register from_code(int code) { | 128 static Register from_code(int code) { |
| 129 Register r = { code }; | 129 Register r = { code }; |
| 130 return r; | 130 return r; |
| 131 } | 131 } |
| 132 | 132 |
| 133 bool is_valid() const { return 0 <= code_ && code_ < kNumRegisters; } | 133 bool is_valid() const { return 0 <= code_ && code_ < kNumRegisters; } |
| 134 bool is(Register reg) const { return code_ == reg.code_; } | 134 bool is(Register reg) const { return code_ == reg.code_; } |
| 135 int code() const { | 135 int code() const { |
| 136 ASSERT(is_valid()); | 136 DCHECK(is_valid()); |
| 137 return code_; | 137 return code_; |
| 138 } | 138 } |
| 139 int bit() const { | 139 int bit() const { |
| 140 ASSERT(is_valid()); | 140 DCHECK(is_valid()); |
| 141 return 1 << code_; | 141 return 1 << code_; |
| 142 } | 142 } |
| 143 | 143 |
| 144 // Unfortunately we can't make this private in a struct. | 144 // Unfortunately we can't make this private in a struct. |
| 145 int code_; | 145 int code_; |
| 146 }; | 146 }; |
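Note (illustrative, not part of the CL): ToAllocationIndex/FromAllocationIndex above implement a simple convention: codes 0 (zero_reg) and 1 ('at') are never allocatable, 'cp' is pinned to the last allocation slot, and every other allocatable code is shifted down by two. A standalone sketch of the round-trip; the constants (14 allocatable registers, 'cp' = hardware code 23, i.e. s7) are assumptions inferred from the names table above, not values quoted from this header:

    #include <cassert>

    const int kMaxNumAllocatableRegisters = 14;  // assumed for this port
    const int kCpRegister = 23;                  // assumed code of 'cp' (s7)

    int ToAllocationIndex(int reg_code) {
      // 'cp' takes the last slot; other allocatable codes start at 2.
      return reg_code == kCpRegister ? kMaxNumAllocatableRegisters - 1
                                     : reg_code - 2;
    }

    int FromAllocationIndex(int index) {
      assert(index >= 0 && index < kMaxNumAllocatableRegisters);
      return index == kMaxNumAllocatableRegisters - 1 ? kCpRegister : index + 2;
    }

    int main() {
      for (int i = 0; i < kMaxNumAllocatableRegisters; ++i) {
        assert(ToAllocationIndex(FromAllocationIndex(i)) == i);  // round-trip
      }
      return 0;
    }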
| 147 | 147 |
| 148 #define REGISTER(N, C) \ | 148 #define REGISTER(N, C) \ |
| 149 const int kRegister_ ## N ## _Code = C; \ | 149 const int kRegister_ ## N ## _Code = C; \ |
| 150 const Register N = { C } | 150 const Register N = { C } |
| (...skipping 68 matching lines...) |
| 219 static const int kNumReservedRegisters = 2; | 219 static const int kNumReservedRegisters = 2; |
| 220 static const int kMaxNumAllocatableRegisters = kMaxNumRegisters / 2 - | 220 static const int kMaxNumAllocatableRegisters = kMaxNumRegisters / 2 - |
| 221 kNumReservedRegisters; | 221 kNumReservedRegisters; |
| 222 | 222 |
| 223 inline static int NumRegisters(); | 223 inline static int NumRegisters(); |
| 224 inline static int NumAllocatableRegisters(); | 224 inline static int NumAllocatableRegisters(); |
| 225 inline static int ToAllocationIndex(FPURegister reg); | 225 inline static int ToAllocationIndex(FPURegister reg); |
| 226 static const char* AllocationIndexToString(int index); | 226 static const char* AllocationIndexToString(int index); |
| 227 | 227 |
| 228 static FPURegister FromAllocationIndex(int index) { | 228 static FPURegister FromAllocationIndex(int index) { |
| 229 ASSERT(index >= 0 && index < kMaxNumAllocatableRegisters); | 229 DCHECK(index >= 0 && index < kMaxNumAllocatableRegisters); |
| 230 return from_code(index * 2); | 230 return from_code(index * 2); |
| 231 } | 231 } |
| 232 | 232 |
| 233 static FPURegister from_code(int code) { | 233 static FPURegister from_code(int code) { |
| 234 FPURegister r = { code }; | 234 FPURegister r = { code }; |
| 235 return r; | 235 return r; |
| 236 } | 236 } |
| 237 | 237 |
| 238 bool is_valid() const { return 0 <= code_ && code_ < kMaxNumRegisters; } | 238 bool is_valid() const { return 0 <= code_ && code_ < kMaxNumRegisters; } |
| 239 bool is(FPURegister creg) const { return code_ == creg.code_; } | 239 bool is(FPURegister creg) const { return code_ == creg.code_; } |
| 240 FPURegister low() const { | 240 FPURegister low() const { |
| 241 // Find low reg of a Double-reg pair, which is the reg itself. | 241 // Find low reg of a Double-reg pair, which is the reg itself. |
| 242 ASSERT(code_ % 2 == 0); // Specified Double reg must be even. | 242 DCHECK(code_ % 2 == 0); // Specified Double reg must be even. |
| 243 FPURegister reg; | 243 FPURegister reg; |
| 244 reg.code_ = code_; | 244 reg.code_ = code_; |
| 245 ASSERT(reg.is_valid()); | 245 DCHECK(reg.is_valid()); |
| 246 return reg; | 246 return reg; |
| 247 } | 247 } |
| 248 FPURegister high() const { | 248 FPURegister high() const { |
| 249 // Find high reg of a Double-reg pair, which is reg + 1. | 249 // Find high reg of a Double-reg pair, which is reg + 1. |
| 250 ASSERT(code_ % 2 == 0); // Specified Double reg must be even. | 250 DCHECK(code_ % 2 == 0); // Specified Double reg must be even. |
| 251 FPURegister reg; | 251 FPURegister reg; |
| 252 reg.code_ = code_ + 1; | 252 reg.code_ = code_ + 1; |
| 253 ASSERT(reg.is_valid()); | 253 DCHECK(reg.is_valid()); |
| 254 return reg; | 254 return reg; |
| 255 } | 255 } |
| 256 | 256 |
| 257 int code() const { | 257 int code() const { |
| 258 ASSERT(is_valid()); | 258 DCHECK(is_valid()); |
| 259 return code_; | 259 return code_; |
| 260 } | 260 } |
| 261 int bit() const { | 261 int bit() const { |
| 262 ASSERT(is_valid()); | 262 DCHECK(is_valid()); |
| 263 return 1 << code_; | 263 return 1 << code_; |
| 264 } | 264 } |
| 265 void setcode(int f) { | 265 void setcode(int f) { |
| 266 code_ = f; | 266 code_ = f; |
| 267 ASSERT(is_valid()); | 267 DCHECK(is_valid()); |
| 268 } | 268 } |
| 269 // Unfortunately we can't make this private in a struct. | 269 // Unfortunately we can't make this private in a struct. |
| 270 int code_; | 270 int code_; |
| 271 }; | 271 }; |
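Note (illustrative, not part of the CL): FromAllocationIndex returning from_code(index * 2), together with low()/high() above, reflects the O32 pairing described in the comment that follows: a double occupies an even/odd FPU register pair. A standalone sketch of that mapping; the count of 14 allocatable double registers is derived from kMaxNumRegisters / 2 - kNumReservedRegisters as declared earlier, and is an assumption here:

    #include <cassert>
    #include <cstdio>

    int main() {
      const int kMaxNumAllocatableDoubles = 32 / 2 - 2;  // 14, per this header
      for (int index = 0; index < kMaxNumAllocatableDoubles; ++index) {
        int low = index * 2;   // even register: low word of the double
        int high = low + 1;    // odd partner: high word of the double
        assert(low % 2 == 0);
        std::printf("allocation index %2d -> f%d/f%d\n", index, low, high);
      }
      return 0;
    }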
| 272 | 272 |
| 273 // V8 now supports the O32 ABI, and the FPU Registers are organized as 32 | 273 // V8 now supports the O32 ABI, and the FPU Registers are organized as 32 |
| 274 // 32-bit registers, f0 through f31. When used as 'double' they are used | 274 // 32-bit registers, f0 through f31. When used as 'double' they are used |
| 275 // in pairs, starting with the even numbered register. So a double operation | 275 // in pairs, starting with the even numbered register. So a double operation |
| 276 // on f0 really uses f0 and f1. | 276 // on f0 really uses f0 and f1. |
| 277 // (Modern mips hardware also supports 32 64-bit registers, via setting | 277 // (Modern mips hardware also supports 32 64-bit registers, via setting |
| (...skipping 50 matching lines...) |
| 328 #define kLithiumScratchReg2 s4 | 328 #define kLithiumScratchReg2 s4 |
| 329 #define kLithiumScratchDouble f30 | 329 #define kLithiumScratchDouble f30 |
| 330 #define kDoubleRegZero f28 | 330 #define kDoubleRegZero f28 |
| 331 | 331 |
| 332 // FPU (coprocessor 1) control registers. | 332 // FPU (coprocessor 1) control registers. |
| 333 // Currently only FCSR (#31) is implemented. | 333 // Currently only FCSR (#31) is implemented. |
| 334 struct FPUControlRegister { | 334 struct FPUControlRegister { |
| 335 bool is_valid() const { return code_ == kFCSRRegister; } | 335 bool is_valid() const { return code_ == kFCSRRegister; } |
| 336 bool is(FPUControlRegister creg) const { return code_ == creg.code_; } | 336 bool is(FPUControlRegister creg) const { return code_ == creg.code_; } |
| 337 int code() const { | 337 int code() const { |
| 338 ASSERT(is_valid()); | 338 DCHECK(is_valid()); |
| 339 return code_; | 339 return code_; |
| 340 } | 340 } |
| 341 int bit() const { | 341 int bit() const { |
| 342 ASSERT(is_valid()); | 342 DCHECK(is_valid()); |
| 343 return 1 << code_; | 343 return 1 << code_; |
| 344 } | 344 } |
| 345 void setcode(int f) { | 345 void setcode(int f) { |
| 346 code_ = f; | 346 code_ = f; |
| 347 ASSERT(is_valid()); | 347 DCHECK(is_valid()); |
| 348 } | 348 } |
| 349 // Unfortunately we can't make this private in a struct. | 349 // Unfortunately we can't make this private in a struct. |
| 350 int code_; | 350 int code_; |
| 351 }; | 351 }; |
| 352 | 352 |
| 353 const FPUControlRegister no_fpucreg = { kInvalidFPUControlRegister }; | 353 const FPUControlRegister no_fpucreg = { kInvalidFPUControlRegister }; |
| 354 const FPUControlRegister FCSR = { kFCSRRegister }; | 354 const FPUControlRegister FCSR = { kFCSRRegister }; |
| 355 | 355 |
| 356 | 356 |
| 357 // ----------------------------------------------------------------------------- | 357 // ----------------------------------------------------------------------------- |
| (...skipping 12 matching lines...) |
| 370 explicit Operand(Handle<Object> handle); | 370 explicit Operand(Handle<Object> handle); |
| 371 INLINE(explicit Operand(Smi* value)); | 371 INLINE(explicit Operand(Smi* value)); |
| 372 | 372 |
| 373 // Register. | 373 // Register. |
| 374 INLINE(explicit Operand(Register rm)); | 374 INLINE(explicit Operand(Register rm)); |
| 375 | 375 |
| 376 // Return true if this is a register operand. | 376 // Return true if this is a register operand. |
| 377 INLINE(bool is_reg() const); | 377 INLINE(bool is_reg() const); |
| 378 | 378 |
| 379 inline int32_t immediate() const { | 379 inline int32_t immediate() const { |
| 380 ASSERT(!is_reg()); | 380 DCHECK(!is_reg()); |
| 381 return imm32_; | 381 return imm32_; |
| 382 } | 382 } |
| 383 | 383 |
| 384 Register rm() const { return rm_; } | 384 Register rm() const { return rm_; } |
| 385 | 385 |
| 386 private: | 386 private: |
| 387 Register rm_; | 387 Register rm_; |
| 388 int32_t imm32_; // Valid if rm_ == no_reg. | 388 int32_t imm32_; // Valid if rm_ == no_reg. |
| 389 RelocInfo::Mode rmode_; | 389 RelocInfo::Mode rmode_; |
| 390 | 390 |
| (...skipping 69 matching lines...) |
| 460 // Determines if Label is bound and near enough so that branch instruction | 460 // Determines if Label is bound and near enough so that branch instruction |
| 461 // can be used to reach it, instead of jump instruction. | 461 // can be used to reach it, instead of jump instruction. |
| 462 bool is_near(Label* L); | 462 bool is_near(Label* L); |
| 463 | 463 |
| 464 // Returns the branch offset to the given label from the current code | 464 // Returns the branch offset to the given label from the current code |
| 465 // position. Links the label to the current position if it is still unbound. | 465 // position. Links the label to the current position if it is still unbound. |
| 466 // Manages the jump elimination optimization if the second parameter is true. | 466 // Manages the jump elimination optimization if the second parameter is true. |
| 467 int32_t branch_offset(Label* L, bool jump_elimination_allowed); | 467 int32_t branch_offset(Label* L, bool jump_elimination_allowed); |
| 468 int32_t shifted_branch_offset(Label* L, bool jump_elimination_allowed) { | 468 int32_t shifted_branch_offset(Label* L, bool jump_elimination_allowed) { |
| 469 int32_t o = branch_offset(L, jump_elimination_allowed); | 469 int32_t o = branch_offset(L, jump_elimination_allowed); |
| 470 ASSERT((o & 3) == 0); // Assert the offset is aligned. | 470 DCHECK((o & 3) == 0); // Assert the offset is aligned. |
| 471 return o >> 2; | 471 return o >> 2; |
| 472 } | 472 } |
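Note (illustrative, not part of the CL): shifted_branch_offset turns a byte offset into the instruction-count offset that MIPS branch encodings take: every instruction is 4 bytes, so an aligned byte offset divides evenly by 4, done here with >> 2 after the alignment check. A tiny standalone sketch of the same arithmetic, independent of the Assembler class:

    #include <cassert>
    #include <cstdint>

    int32_t ShiftedBranchOffset(int32_t byte_offset) {
      assert((byte_offset & 3) == 0);  // must be 4-byte aligned
      return byte_offset >> 2;         // 4 bytes per MIPS instruction
    }

    int main() {
      assert(ShiftedBranchOffset(16) == 4);  // four instructions forward
      assert(ShiftedBranchOffset(0) == 0);
      return 0;
    }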
| 473 uint32_t jump_address(Label* L); | 473 uint32_t jump_address(Label* L); |
| 474 | 474 |
| 475 // Puts a label's target address at the given position. | 475 // Puts a label's target address at the given position. |
| 476 // The high 8 bits are set to zero. | 476 // The high 8 bits are set to zero. |
| 477 void label_at_put(Label* L, int at_offset); | 477 void label_at_put(Label* L, int at_offset); |
| 478 | 478 |
| 479 // Read/Modify the code target address in the branch/call instruction at pc. | 479 // Read/Modify the code target address in the branch/call instruction at pc. |
| 480 static Address target_address_at(Address pc); | 480 static Address target_address_at(Address pc); |
| (...skipping 118 matching lines...) |
| 599 FIRST_IC_MARKER = PROPERTY_ACCESS_INLINED, | 599 FIRST_IC_MARKER = PROPERTY_ACCESS_INLINED, |
| 600 // Code aging | 600 // Code aging |
| 601 CODE_AGE_MARKER_NOP = 6, | 601 CODE_AGE_MARKER_NOP = 6, |
| 602 CODE_AGE_SEQUENCE_NOP | 602 CODE_AGE_SEQUENCE_NOP |
| 603 }; | 603 }; |
| 604 | 604 |
| 605 // Type == 0 is the default non-marking nop. For mips this is a | 605 // Type == 0 is the default non-marking nop. For mips this is a |
| 606 // sll(zero_reg, zero_reg, 0). We use rt_reg == at for non-zero | 606 // sll(zero_reg, zero_reg, 0). We use rt_reg == at for non-zero |
| 607 // marking, to avoid conflict with ssnop and ehb instructions. | 607 // marking, to avoid conflict with ssnop and ehb instructions. |
| 608 void nop(unsigned int type = 0) { | 608 void nop(unsigned int type = 0) { |
| 609 ASSERT(type < 32); | 609 DCHECK(type < 32); |
| 610 Register nop_rt_reg = (type == 0) ? zero_reg : at; | 610 Register nop_rt_reg = (type == 0) ? zero_reg : at; |
| 611 sll(zero_reg, nop_rt_reg, type, true); | 611 sll(zero_reg, nop_rt_reg, type, true); |
| 612 } | 612 } |
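Note (illustrative, not part of the CL): the marker nop above is just sll with $zero as the destination, so it has no architectural effect; the 5-bit shift amount (and rt = 'at' for non-zero types) is what distinguishes marker kinds. A standalone sketch of the resulting instruction word, using the standard MIPS SLL field layout (general MIPS knowledge, not quoted from this header):

    #include <cassert>
    #include <cstdint>
    #include <cstdio>

    uint32_t EncodeMarkerNop(unsigned type) {
      assert(type < 32);  // sa field is 5 bits wide
      const uint32_t zero_reg = 0, at = 1;
      uint32_t rt = (type == 0) ? zero_reg : at;
      uint32_t rd = zero_reg;
      // SPECIAL opcode (0) | rs=0 | rt | rd | sa=type | SLL function code (0).
      return (rt << 16) | (rd << 11) | (static_cast<uint32_t>(type) << 6);
    }

    int main() {
      std::printf("plain nop:        0x%08x\n", EncodeMarkerNop(0));  // 0x00000000
      std::printf("code-age marker:  0x%08x\n", EncodeMarkerNop(6));  // 0x00010180
      return 0;
    }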
| 613 | 613 |
| 614 | 614 |
| 615 // --------Branch-and-jump-instructions---------- | 615 // --------Branch-and-jump-instructions---------- |
| 616 // We don't use likely variant of instructions. | 616 // We don't use likely variant of instructions. |
| 617 void b(int16_t offset); | 617 void b(int16_t offset); |
| 618 void b(Label* L) { b(branch_offset(L, false)>>2); } | 618 void b(Label* L) { b(branch_offset(L, false)>>2); } |
| 619 void bal(int16_t offset); | 619 void bal(int16_t offset); |
| (...skipping 234 matching lines...) |
| 854 | 854 |
| 855 // Mark address of the ExitJSFrame code. | 855 // Mark address of the ExitJSFrame code. |
| 856 void RecordJSReturn(); | 856 void RecordJSReturn(); |
| 857 | 857 |
| 858 // Mark address of a debug break slot. | 858 // Mark address of a debug break slot. |
| 859 void RecordDebugBreakSlot(); | 859 void RecordDebugBreakSlot(); |
| 860 | 860 |
| 861 // Record the AST id of the CallIC being compiled, so that it can be placed | 861 // Record the AST id of the CallIC being compiled, so that it can be placed |
| 862 // in the relocation information. | 862 // in the relocation information. |
| 863 void SetRecordedAstId(TypeFeedbackId ast_id) { | 863 void SetRecordedAstId(TypeFeedbackId ast_id) { |
| 864 ASSERT(recorded_ast_id_.IsNone()); | 864 DCHECK(recorded_ast_id_.IsNone()); |
| 865 recorded_ast_id_ = ast_id; | 865 recorded_ast_id_ = ast_id; |
| 866 } | 866 } |
| 867 | 867 |
| 868 TypeFeedbackId RecordedAstId() { | 868 TypeFeedbackId RecordedAstId() { |
| 869 ASSERT(!recorded_ast_id_.IsNone()); | 869 DCHECK(!recorded_ast_id_.IsNone()); |
| 870 return recorded_ast_id_; | 870 return recorded_ast_id_; |
| 871 } | 871 } |
| 872 | 872 |
| 873 void ClearRecordedAstId() { recorded_ast_id_ = TypeFeedbackId::None(); } | 873 void ClearRecordedAstId() { recorded_ast_id_ = TypeFeedbackId::None(); } |
| 874 | 874 |
| 875 // Record a comment relocation entry that can be used by a disassembler. | 875 // Record a comment relocation entry that can be used by a disassembler. |
| 876 // Use --code-comments to enable. | 876 // Use --code-comments to enable. |
| 877 void RecordComment(const char* msg); | 877 void RecordComment(const char* msg); |
| 878 | 878 |
| 879 static int RelocateInternalReference(byte* pc, intptr_t pc_delta); | 879 static int RelocateInternalReference(byte* pc, intptr_t pc_delta); |
| (...skipping 134 matching lines...) |
| 1014 } | 1014 } |
| 1015 | 1015 |
| 1016 void DoubleAsTwoUInt32(double d, uint32_t* lo, uint32_t* hi); | 1016 void DoubleAsTwoUInt32(double d, uint32_t* lo, uint32_t* hi); |
| 1017 | 1017 |
| 1018 bool is_trampoline_emitted() const { | 1018 bool is_trampoline_emitted() const { |
| 1019 return trampoline_emitted_; | 1019 return trampoline_emitted_; |
| 1020 } | 1020 } |
| 1021 | 1021 |
| 1022 // Temporarily block automatic assembly buffer growth. | 1022 // Temporarily block automatic assembly buffer growth. |
| 1023 void StartBlockGrowBuffer() { | 1023 void StartBlockGrowBuffer() { |
| 1024 ASSERT(!block_buffer_growth_); | 1024 DCHECK(!block_buffer_growth_); |
| 1025 block_buffer_growth_ = true; | 1025 block_buffer_growth_ = true; |
| 1026 } | 1026 } |
| 1027 | 1027 |
| 1028 void EndBlockGrowBuffer() { | 1028 void EndBlockGrowBuffer() { |
| 1029 ASSERT(block_buffer_growth_); | 1029 DCHECK(block_buffer_growth_); |
| 1030 block_buffer_growth_ = false; | 1030 block_buffer_growth_ = false; |
| 1031 } | 1031 } |
| 1032 | 1032 |
| 1033 bool is_buffer_growth_blocked() const { | 1033 bool is_buffer_growth_blocked() const { |
| 1034 return block_buffer_growth_; | 1034 return block_buffer_growth_; |
| 1035 } | 1035 } |
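Note (illustrative, not part of the CL): StartBlockGrowBuffer/EndBlockGrowBuffer must be called in balanced pairs, the kind of invariant an RAII guard expresses naturally. A self-contained sketch of such a guard; MockAssembler and BlockGrowBufferScope are stand-ins written for this note, not V8 API:

    #include <cassert>

    struct MockAssembler {
      bool block_buffer_growth_ = false;
      void StartBlockGrowBuffer() {
        assert(!block_buffer_growth_);
        block_buffer_growth_ = true;
      }
      void EndBlockGrowBuffer() {
        assert(block_buffer_growth_);
        block_buffer_growth_ = false;
      }
    };

    class BlockGrowBufferScope {
     public:
      explicit BlockGrowBufferScope(MockAssembler* assem) : assem_(assem) {
        assem_->StartBlockGrowBuffer();
      }
      ~BlockGrowBufferScope() { assem_->EndBlockGrowBuffer(); }

     private:
      MockAssembler* assem_;
    };

    int main() {
      MockAssembler assem;
      {
        BlockGrowBufferScope scope(&assem);  // growth blocked in this scope
        assert(assem.block_buffer_growth_);
      }
      assert(!assem.block_buffer_growth_);   // restored on scope exit
      return 0;
    }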
| 1036 | 1036 |
| 1037 private: | 1037 private: |
| 1038 // Buffer size and constant pool distance are checked together at regular | 1038 // Buffer size and constant pool distance are checked together at regular |
| 1039 // intervals of kBufferCheckInterval emitted bytes. | 1039 // intervals of kBufferCheckInterval emitted bytes. |
| (...skipping 141 matching lines...) |
| 1181 } | 1181 } |
| 1182 int end() { | 1182 int end() { |
| 1183 return end_; | 1183 return end_; |
| 1184 } | 1184 } |
| 1185 int take_slot() { | 1185 int take_slot() { |
| 1186 int trampoline_slot = kInvalidSlotPos; | 1186 int trampoline_slot = kInvalidSlotPos; |
| 1187 if (free_slot_count_ <= 0) { | 1187 if (free_slot_count_ <= 0) { |
| 1188 // We have run out of space on trampolines. | 1188 // We have run out of space on trampolines. |
| 1189 // Make sure we fail in debug mode, so we become aware of each case | 1189 // Make sure we fail in debug mode, so we become aware of each case |
| 1190 // when this happens. | 1190 // when this happens. |
| 1191 ASSERT(0); | 1191 DCHECK(0); |
| 1192 // Internal exception will be caught. | 1192 // Internal exception will be caught. |
| 1193 } else { | 1193 } else { |
| 1194 trampoline_slot = next_slot_; | 1194 trampoline_slot = next_slot_; |
| 1195 free_slot_count_--; | 1195 free_slot_count_--; |
| 1196 next_slot_ += kTrampolineSlotsSize; | 1196 next_slot_ += kTrampolineSlotsSize; |
| 1197 } | 1197 } |
| 1198 return trampoline_slot; | 1198 return trampoline_slot; |
| 1199 } | 1199 } |
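Note (illustrative, not part of the CL): take_slot() hands out fixed-stride trampoline slots until the pool is exhausted, asserting in debug builds when it runs dry. A standalone sketch of the same handout logic; the 16-byte stride is an assumed stand-in for kTrampolineSlotsSize, whose real value is not shown here:

    #include <cassert>

    class TrampolinePool {
     public:
      static const int kInvalidSlotPos = -1;
      static const int kSlotStride = 16;  // assumed stand-in for kTrampolineSlotsSize

      TrampolinePool(int start, int slot_count)
          : next_slot_(start), free_slot_count_(slot_count) {}

      int take_slot() {
        if (free_slot_count_ <= 0) return kInvalidSlotPos;  // pool exhausted
        int slot = next_slot_;
        --free_slot_count_;
        next_slot_ += kSlotStride;
        return slot;
      }

     private:
      int next_slot_;
      int free_slot_count_;
    };

    int main() {
      TrampolinePool pool(/*start=*/0x100, /*slot_count=*/2);
      assert(pool.take_slot() == 0x100);
      assert(pool.take_slot() == 0x110);
      assert(pool.take_slot() == TrampolinePool::kInvalidSlotPos);
      return 0;
    }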
| 1200 | 1200 |
| 1201 private: | 1201 private: |
| (...skipping 32 matching lines...) |
| 1234 class EnsureSpace BASE_EMBEDDED { | 1234 class EnsureSpace BASE_EMBEDDED { |
| 1235 public: | 1235 public: |
| 1236 explicit EnsureSpace(Assembler* assembler) { | 1236 explicit EnsureSpace(Assembler* assembler) { |
| 1237 assembler->CheckBuffer(); | 1237 assembler->CheckBuffer(); |
| 1238 } | 1238 } |
| 1239 }; | 1239 }; |
| 1240 | 1240 |
| 1241 } } // namespace v8::internal | 1241 } } // namespace v8::internal |
| 1242 | 1242 |
| 1243 #endif // V8_ARM_ASSEMBLER_MIPS_H_ | 1243 #endif // V8_ARM_ASSEMBLER_MIPS_H_ |
| OLD | NEW |