OLD | NEW |
1 // Copyright (c) 1994-2006 Sun Microsystems Inc. | 1 // Copyright (c) 1994-2006 Sun Microsystems Inc. |
2 // All Rights Reserved. | 2 // All Rights Reserved. |
3 // | 3 // |
4 // Redistribution and use in source and binary forms, with or without | 4 // Redistribution and use in source and binary forms, with or without |
5 // modification, are permitted provided that the following conditions are | 5 // modification, are permitted provided that the following conditions are |
6 // met: | 6 // met: |
7 // | 7 // |
8 // - Redistributions of source code must retain the above copyright notice, | 8 // - Redistributions of source code must retain the above copyright notice, |
9 // this list of conditions and the following disclaimer. | 9 // this list of conditions and the following disclaimer. |
10 // | 10 // |
(...skipping 61 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
72 // Core register. | 72 // Core register. |
73 struct Register { | 73 struct Register { |
74 static const int kNumRegisters = v8::internal::kNumRegisters; | 74 static const int kNumRegisters = v8::internal::kNumRegisters; |
75 static const int kMaxNumAllocatableRegisters = 14; // v0 through t6 and cp. | 75 static const int kMaxNumAllocatableRegisters = 14; // v0 through t6 and cp. |
76 static const int kSizeInBytes = 8; | 76 static const int kSizeInBytes = 8; |
77 static const int kCpRegister = 23; // cp (s7) is the 23rd register. | 77 static const int kCpRegister = 23; // cp (s7) is the 23rd register. |
78 | 78 |
79 inline static int NumAllocatableRegisters(); | 79 inline static int NumAllocatableRegisters(); |
80 | 80 |
81 static int ToAllocationIndex(Register reg) { | 81 static int ToAllocationIndex(Register reg) { |
82 ASSERT((reg.code() - 2) < (kMaxNumAllocatableRegisters - 1) || | 82 DCHECK((reg.code() - 2) < (kMaxNumAllocatableRegisters - 1) || |
83 reg.is(from_code(kCpRegister))); | 83 reg.is(from_code(kCpRegister))); |
84 return reg.is(from_code(kCpRegister)) ? | 84 return reg.is(from_code(kCpRegister)) ? |
85 kMaxNumAllocatableRegisters - 1 : // Return last index for 'cp'. | 85 kMaxNumAllocatableRegisters - 1 : // Return last index for 'cp'. |
86 reg.code() - 2; // zero_reg and 'at' are skipped. | 86 reg.code() - 2; // zero_reg and 'at' are skipped. |
87 } | 87 } |
88 | 88 |
89 static Register FromAllocationIndex(int index) { | 89 static Register FromAllocationIndex(int index) { |
90 ASSERT(index >= 0 && index < kMaxNumAllocatableRegisters); | 90 DCHECK(index >= 0 && index < kMaxNumAllocatableRegisters); |
91 return index == kMaxNumAllocatableRegisters - 1 ? | 91 return index == kMaxNumAllocatableRegisters - 1 ? |
92 from_code(kCpRegister) : // Last index is always the 'cp' register. | 92 from_code(kCpRegister) : // Last index is always the 'cp' register. |
93 from_code(index + 2); // zero_reg and 'at' are skipped. | 93 from_code(index + 2); // zero_reg and 'at' are skipped. |
94 } | 94 } |
95 | 95 |
96 static const char* AllocationIndexToString(int index) { | 96 static const char* AllocationIndexToString(int index) { |
97 ASSERT(index >= 0 && index < kMaxNumAllocatableRegisters); | 97 DCHECK(index >= 0 && index < kMaxNumAllocatableRegisters); |
98 const char* const names[] = { | 98 const char* const names[] = { |
99 "v0", | 99 "v0", |
100 "v1", | 100 "v1", |
101 "a0", | 101 "a0", |
102 "a1", | 102 "a1", |
103 "a2", | 103 "a2", |
104 "a3", | 104 "a3", |
105 "a4", | 105 "a4", |
106 "a5", | 106 "a5", |
107 "a6", | 107 "a6", |
108 "a7", | 108 "a7", |
109 "t0", | 109 "t0", |
110 "t1", | 110 "t1", |
111 "t2", | 111 "t2", |
112 "s7", | 112 "s7", |
113 }; | 113 }; |
114 return names[index]; | 114 return names[index]; |
115 } | 115 } |
116 | 116 |
117 static Register from_code(int code) { | 117 static Register from_code(int code) { |
118 Register r = { code }; | 118 Register r = { code }; |
119 return r; | 119 return r; |
120 } | 120 } |
121 | 121 |
122 bool is_valid() const { return 0 <= code_ && code_ < kNumRegisters; } | 122 bool is_valid() const { return 0 <= code_ && code_ < kNumRegisters; } |
123 bool is(Register reg) const { return code_ == reg.code_; } | 123 bool is(Register reg) const { return code_ == reg.code_; } |
124 int code() const { | 124 int code() const { |
125 ASSERT(is_valid()); | 125 DCHECK(is_valid()); |
126 return code_; | 126 return code_; |
127 } | 127 } |
128 int bit() const { | 128 int bit() const { |
129 ASSERT(is_valid()); | 129 DCHECK(is_valid()); |
130 return 1 << code_; | 130 return 1 << code_; |
131 } | 131 } |
132 | 132 |
133 // Unfortunately we can't make this private in a struct. | 133 // Unfortunately we can't make this private in a struct. |
134 int code_; | 134 int code_; |
135 }; | 135 }; |
136 | 136 |
137 #define REGISTER(N, C) \ | 137 #define REGISTER(N, C) \ |
138 const int kRegister_ ## N ## _Code = C; \ | 138 const int kRegister_ ## N ## _Code = C; \ |
139 const Register N = { C } | 139 const Register N = { C } |
(...skipping 68 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
208 static const int kNumReservedRegisters = 2; | 208 static const int kNumReservedRegisters = 2; |
209 static const int kMaxNumAllocatableRegisters = kMaxNumRegisters / 2 - | 209 static const int kMaxNumAllocatableRegisters = kMaxNumRegisters / 2 - |
210 kNumReservedRegisters; | 210 kNumReservedRegisters; |
211 | 211 |
212 inline static int NumRegisters(); | 212 inline static int NumRegisters(); |
213 inline static int NumAllocatableRegisters(); | 213 inline static int NumAllocatableRegisters(); |
214 inline static int ToAllocationIndex(FPURegister reg); | 214 inline static int ToAllocationIndex(FPURegister reg); |
215 static const char* AllocationIndexToString(int index); | 215 static const char* AllocationIndexToString(int index); |
216 | 216 |
217 static FPURegister FromAllocationIndex(int index) { | 217 static FPURegister FromAllocationIndex(int index) { |
218 ASSERT(index >= 0 && index < kMaxNumAllocatableRegisters); | 218 DCHECK(index >= 0 && index < kMaxNumAllocatableRegisters); |
219 return from_code(index * 2); | 219 return from_code(index * 2); |
220 } | 220 } |
221 | 221 |
222 static FPURegister from_code(int code) { | 222 static FPURegister from_code(int code) { |
223 FPURegister r = { code }; | 223 FPURegister r = { code }; |
224 return r; | 224 return r; |
225 } | 225 } |
226 | 226 |
227 bool is_valid() const { return 0 <= code_ && code_ < kMaxNumRegisters ; } | 227 bool is_valid() const { return 0 <= code_ && code_ < kMaxNumRegisters ; } |
228 bool is(FPURegister creg) const { return code_ == creg.code_; } | 228 bool is(FPURegister creg) const { return code_ == creg.code_; } |
229 FPURegister low() const { | 229 FPURegister low() const { |
230 // TODO(plind): Create ASSERT for FR=0 mode. This usage suspect for FR=1. | 230 // TODO(plind): Create DCHECK for FR=0 mode. This usage suspect for FR=1. |
231 // Find low reg of a Double-reg pair, which is the reg itself. | 231 // Find low reg of a Double-reg pair, which is the reg itself. |
232 ASSERT(code_ % 2 == 0); // Specified Double reg must be even. | 232 DCHECK(code_ % 2 == 0); // Specified Double reg must be even. |
233 FPURegister reg; | 233 FPURegister reg; |
234 reg.code_ = code_; | 234 reg.code_ = code_; |
235 ASSERT(reg.is_valid()); | 235 DCHECK(reg.is_valid()); |
236 return reg; | 236 return reg; |
237 } | 237 } |
238 FPURegister high() const { | 238 FPURegister high() const { |
239 // TODO(plind): Create ASSERT for FR=0 mode. This usage illegal in FR=1. | 239 // TODO(plind): Create DCHECK for FR=0 mode. This usage illegal in FR=1. |
240 // Find high reg of a Double-reg pair, which is reg + 1. | 240 // Find high reg of a Double-reg pair, which is reg + 1. |
241 ASSERT(code_ % 2 == 0); // Specified Double reg must be even. | 241 DCHECK(code_ % 2 == 0); // Specified Double reg must be even. |
242 FPURegister reg; | 242 FPURegister reg; |
243 reg.code_ = code_ + 1; | 243 reg.code_ = code_ + 1; |
244 ASSERT(reg.is_valid()); | 244 DCHECK(reg.is_valid()); |
245 return reg; | 245 return reg; |
246 } | 246 } |
247 | 247 |
248 int code() const { | 248 int code() const { |
249 ASSERT(is_valid()); | 249 DCHECK(is_valid()); |
250 return code_; | 250 return code_; |
251 } | 251 } |
252 int bit() const { | 252 int bit() const { |
253 ASSERT(is_valid()); | 253 DCHECK(is_valid()); |
254 return 1 << code_; | 254 return 1 << code_; |
255 } | 255 } |
256 void setcode(int f) { | 256 void setcode(int f) { |
257 code_ = f; | 257 code_ = f; |
258 ASSERT(is_valid()); | 258 DCHECK(is_valid()); |
259 } | 259 } |
260 // Unfortunately we can't make this private in a struct. | 260 // Unfortunately we can't make this private in a struct. |
261 int code_; | 261 int code_; |
262 }; | 262 }; |
263 | 263 |
264 // V8 now supports the O32 ABI, and the FPU Registers are organized as 32 | 264 // V8 now supports the O32 ABI, and the FPU Registers are organized as 32 |
265 // 32-bit registers, f0 through f31. When used as 'double' they are used | 265 // 32-bit registers, f0 through f31. When used as 'double' they are used |
266 // in pairs, starting with the even numbered register. So a double operation | 266 // in pairs, starting with the even numbered register. So a double operation |
267 // on f0 really uses f0 and f1. | 267 // on f0 really uses f0 and f1. |
268 // (Modern mips hardware also supports 32 64-bit registers, via setting | 268 // (Modern mips hardware also supports 32 64-bit registers, via setting |
(...skipping 50 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
319 #define kLithiumScratchReg2 s4 | 319 #define kLithiumScratchReg2 s4 |
320 #define kLithiumScratchDouble f30 | 320 #define kLithiumScratchDouble f30 |
321 #define kDoubleRegZero f28 | 321 #define kDoubleRegZero f28 |
322 | 322 |
323 // FPU (coprocessor 1) control registers. | 323 // FPU (coprocessor 1) control registers. |
324 // Currently only FCSR (#31) is implemented. | 324 // Currently only FCSR (#31) is implemented. |
325 struct FPUControlRegister { | 325 struct FPUControlRegister { |
326 bool is_valid() const { return code_ == kFCSRRegister; } | 326 bool is_valid() const { return code_ == kFCSRRegister; } |
327 bool is(FPUControlRegister creg) const { return code_ == creg.code_; } | 327 bool is(FPUControlRegister creg) const { return code_ == creg.code_; } |
328 int code() const { | 328 int code() const { |
329 ASSERT(is_valid()); | 329 DCHECK(is_valid()); |
330 return code_; | 330 return code_; |
331 } | 331 } |
332 int bit() const { | 332 int bit() const { |
333 ASSERT(is_valid()); | 333 DCHECK(is_valid()); |
334 return 1 << code_; | 334 return 1 << code_; |
335 } | 335 } |
336 void setcode(int f) { | 336 void setcode(int f) { |
337 code_ = f; | 337 code_ = f; |
338 ASSERT(is_valid()); | 338 DCHECK(is_valid()); |
339 } | 339 } |
340 // Unfortunately we can't make this private in a struct. | 340 // Unfortunately we can't make this private in a struct. |
341 int code_; | 341 int code_; |
342 }; | 342 }; |
343 | 343 |
344 const FPUControlRegister no_fpucreg = { kInvalidFPUControlRegister }; | 344 const FPUControlRegister no_fpucreg = { kInvalidFPUControlRegister }; |
345 const FPUControlRegister FCSR = { kFCSRRegister }; | 345 const FPUControlRegister FCSR = { kFCSRRegister }; |
346 | 346 |
347 | 347 |
348 // ----------------------------------------------------------------------------- | 348 // ----------------------------------------------------------------------------- |
(...skipping 13 matching lines...) Expand all Loading... |
362 explicit Operand(Handle<Object> handle); | 362 explicit Operand(Handle<Object> handle); |
363 INLINE(explicit Operand(Smi* value)); | 363 INLINE(explicit Operand(Smi* value)); |
364 | 364 |
365 // Register. | 365 // Register. |
366 INLINE(explicit Operand(Register rm)); | 366 INLINE(explicit Operand(Register rm)); |
367 | 367 |
368 // Return true if this is a register operand. | 368 // Return true if this is a register operand. |
369 INLINE(bool is_reg() const); | 369 INLINE(bool is_reg() const); |
370 | 370 |
371 inline int64_t immediate() const { | 371 inline int64_t immediate() const { |
372 ASSERT(!is_reg()); | 372 DCHECK(!is_reg()); |
373 return imm64_; | 373 return imm64_; |
374 } | 374 } |
375 | 375 |
376 Register rm() const { return rm_; } | 376 Register rm() const { return rm_; } |
377 | 377 |
378 private: | 378 private: |
379 Register rm_; | 379 Register rm_; |
380 int64_t imm64_; // Valid if rm_ == no_reg. | 380 int64_t imm64_; // Valid if rm_ == no_reg. |
381 RelocInfo::Mode rmode_; | 381 RelocInfo::Mode rmode_; |
382 | 382 |
(...skipping 72 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
455 | 455 |
456 // Returns the branch offset to the given label from the current code | 456 // Returns the branch offset to the given label from the current code |
457 // position. Links the label to the current position if it is still unbound. | 457 // position. Links the label to the current position if it is still unbound. |
458 // Manages the jump elimination optimization if the second parameter is true. | 458 // Manages the jump elimination optimization if the second parameter is true. |
459 int32_t branch_offset(Label* L, bool jump_elimination_allowed); | 459 int32_t branch_offset(Label* L, bool jump_elimination_allowed); |
460 int32_t branch_offset_compact(Label* L, bool jump_elimination_allowed); | 460 int32_t branch_offset_compact(Label* L, bool jump_elimination_allowed); |
461 int32_t branch_offset21(Label* L, bool jump_elimination_allowed); | 461 int32_t branch_offset21(Label* L, bool jump_elimination_allowed); |
462 int32_t branch_offset21_compact(Label* L, bool jump_elimination_allowed); | 462 int32_t branch_offset21_compact(Label* L, bool jump_elimination_allowed); |
463 int32_t shifted_branch_offset(Label* L, bool jump_elimination_allowed) { | 463 int32_t shifted_branch_offset(Label* L, bool jump_elimination_allowed) { |
464 int32_t o = branch_offset(L, jump_elimination_allowed); | 464 int32_t o = branch_offset(L, jump_elimination_allowed); |
465 ASSERT((o & 3) == 0); // Assert the offset is aligned. | 465 DCHECK((o & 3) == 0); // Assert the offset is aligned. |
466 return o >> 2; | 466 return o >> 2; |
467 } | 467 } |
468 int32_t shifted_branch_offset_compact(Label* L, | 468 int32_t shifted_branch_offset_compact(Label* L, |
469 bool jump_elimination_allowed) { | 469 bool jump_elimination_allowed) { |
470 int32_t o = branch_offset_compact(L, jump_elimination_allowed); | 470 int32_t o = branch_offset_compact(L, jump_elimination_allowed); |
471 ASSERT((o & 3) == 0); // Assert the offset is aligned. | 471 DCHECK((o & 3) == 0); // Assert the offset is aligned. |
472 return o >> 2; | 472 return o >> 2; |
473 } | 473 } |
474 uint64_t jump_address(Label* L); | 474 uint64_t jump_address(Label* L); |
475 | 475 |
476 // Puts a labels target address at the given position. | 476 // Puts a labels target address at the given position. |
477 // The high 8 bits are set to zero. | 477 // The high 8 bits are set to zero. |
478 void label_at_put(Label* L, int at_offset); | 478 void label_at_put(Label* L, int at_offset); |
479 | 479 |
480 // Read/Modify the code target address in the branch/call instruction at pc. | 480 // Read/Modify the code target address in the branch/call instruction at pc. |
481 static Address target_address_at(Address pc); | 481 static Address target_address_at(Address pc); |
(...skipping 119 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
601 FIRST_IC_MARKER = PROPERTY_ACCESS_INLINED, | 601 FIRST_IC_MARKER = PROPERTY_ACCESS_INLINED, |
602 // Code aging | 602 // Code aging |
603 CODE_AGE_MARKER_NOP = 6, | 603 CODE_AGE_MARKER_NOP = 6, |
604 CODE_AGE_SEQUENCE_NOP | 604 CODE_AGE_SEQUENCE_NOP |
605 }; | 605 }; |
606 | 606 |
607 // Type == 0 is the default non-marking nop. For mips this is a | 607 // Type == 0 is the default non-marking nop. For mips this is a |
608 // sll(zero_reg, zero_reg, 0). We use rt_reg == at for non-zero | 608 // sll(zero_reg, zero_reg, 0). We use rt_reg == at for non-zero |
609 // marking, to avoid conflict with ssnop and ehb instructions. | 609 // marking, to avoid conflict with ssnop and ehb instructions. |
610 void nop(unsigned int type = 0) { | 610 void nop(unsigned int type = 0) { |
611 ASSERT(type < 32); | 611 DCHECK(type < 32); |
612 Register nop_rt_reg = (type == 0) ? zero_reg : at; | 612 Register nop_rt_reg = (type == 0) ? zero_reg : at; |
613 sll(zero_reg, nop_rt_reg, type, true); | 613 sll(zero_reg, nop_rt_reg, type, true); |
614 } | 614 } |
615 | 615 |
616 | 616 |
617 // --------Branch-and-jump-instructions---------- | 617 // --------Branch-and-jump-instructions---------- |
618 // We don't use likely variant of instructions. | 618 // We don't use likely variant of instructions. |
619 void b(int16_t offset); | 619 void b(int16_t offset); |
620 void b(Label* L) { b(branch_offset(L, false)>>2); } | 620 void b(Label* L) { b(branch_offset(L, false)>>2); } |
621 void bal(int16_t offset); | 621 void bal(int16_t offset); |
(...skipping 401 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
1023 | 1023 |
1024 // Mark address of the ExitJSFrame code. | 1024 // Mark address of the ExitJSFrame code. |
1025 void RecordJSReturn(); | 1025 void RecordJSReturn(); |
1026 | 1026 |
1027 // Mark address of a debug break slot. | 1027 // Mark address of a debug break slot. |
1028 void RecordDebugBreakSlot(); | 1028 void RecordDebugBreakSlot(); |
1029 | 1029 |
1030 // Record the AST id of the CallIC being compiled, so that it can be placed | 1030 // Record the AST id of the CallIC being compiled, so that it can be placed |
1031 // in the relocation information. | 1031 // in the relocation information. |
1032 void SetRecordedAstId(TypeFeedbackId ast_id) { | 1032 void SetRecordedAstId(TypeFeedbackId ast_id) { |
1033 ASSERT(recorded_ast_id_.IsNone()); | 1033 DCHECK(recorded_ast_id_.IsNone()); |
1034 recorded_ast_id_ = ast_id; | 1034 recorded_ast_id_ = ast_id; |
1035 } | 1035 } |
1036 | 1036 |
1037 TypeFeedbackId RecordedAstId() { | 1037 TypeFeedbackId RecordedAstId() { |
1038 ASSERT(!recorded_ast_id_.IsNone()); | 1038 DCHECK(!recorded_ast_id_.IsNone()); |
1039 return recorded_ast_id_; | 1039 return recorded_ast_id_; |
1040 } | 1040 } |
1041 | 1041 |
1042 void ClearRecordedAstId() { recorded_ast_id_ = TypeFeedbackId::None(); } | 1042 void ClearRecordedAstId() { recorded_ast_id_ = TypeFeedbackId::None(); } |
1043 | 1043 |
1044 // Record a comment relocation entry that can be used by a disassembler. | 1044 // Record a comment relocation entry that can be used by a disassembler. |
1045 // Use --code-comments to enable. | 1045 // Use --code-comments to enable. |
1046 void RecordComment(const char* msg); | 1046 void RecordComment(const char* msg); |
1047 | 1047 |
1048 static int RelocateInternalReference(byte* pc, intptr_t pc_delta); | 1048 static int RelocateInternalReference(byte* pc, intptr_t pc_delta); |
(...skipping 134 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
1183 } | 1183 } |
1184 | 1184 |
1185 void DoubleAsTwoUInt32(double d, uint32_t* lo, uint32_t* hi); | 1185 void DoubleAsTwoUInt32(double d, uint32_t* lo, uint32_t* hi); |
1186 | 1186 |
1187 bool is_trampoline_emitted() const { | 1187 bool is_trampoline_emitted() const { |
1188 return trampoline_emitted_; | 1188 return trampoline_emitted_; |
1189 } | 1189 } |
1190 | 1190 |
1191 // Temporarily block automatic assembly buffer growth. | 1191 // Temporarily block automatic assembly buffer growth. |
1192 void StartBlockGrowBuffer() { | 1192 void StartBlockGrowBuffer() { |
1193 ASSERT(!block_buffer_growth_); | 1193 DCHECK(!block_buffer_growth_); |
1194 block_buffer_growth_ = true; | 1194 block_buffer_growth_ = true; |
1195 } | 1195 } |
1196 | 1196 |
1197 void EndBlockGrowBuffer() { | 1197 void EndBlockGrowBuffer() { |
1198 ASSERT(block_buffer_growth_); | 1198 DCHECK(block_buffer_growth_); |
1199 block_buffer_growth_ = false; | 1199 block_buffer_growth_ = false; |
1200 } | 1200 } |
1201 | 1201 |
1202 bool is_buffer_growth_blocked() const { | 1202 bool is_buffer_growth_blocked() const { |
1203 return block_buffer_growth_; | 1203 return block_buffer_growth_; |
1204 } | 1204 } |
1205 | 1205 |
1206 private: | 1206 private: |
1207 // Buffer size and constant pool distance are checked together at regular | 1207 // Buffer size and constant pool distance are checked together at regular |
1208 // intervals of kBufferCheckInterval emitted bytes. | 1208 // intervals of kBufferCheckInterval emitted bytes. |
(...skipping 142 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
1351 } | 1351 } |
1352 int end() { | 1352 int end() { |
1353 return end_; | 1353 return end_; |
1354 } | 1354 } |
1355 int take_slot() { | 1355 int take_slot() { |
1356 int trampoline_slot = kInvalidSlotPos; | 1356 int trampoline_slot = kInvalidSlotPos; |
1357 if (free_slot_count_ <= 0) { | 1357 if (free_slot_count_ <= 0) { |
1358 // We have run out of space on trampolines. | 1358 // We have run out of space on trampolines. |
1359 // Make sure we fail in debug mode, so we become aware of each case | 1359 // Make sure we fail in debug mode, so we become aware of each case |
1360 // when this happens. | 1360 // when this happens. |
1361 ASSERT(0); | 1361 DCHECK(0); |
1362 // Internal exception will be caught. | 1362 // Internal exception will be caught. |
1363 } else { | 1363 } else { |
1364 trampoline_slot = next_slot_; | 1364 trampoline_slot = next_slot_; |
1365 free_slot_count_--; | 1365 free_slot_count_--; |
1366 next_slot_ += kTrampolineSlotsSize; | 1366 next_slot_ += kTrampolineSlotsSize; |
1367 } | 1367 } |
1368 return trampoline_slot; | 1368 return trampoline_slot; |
1369 } | 1369 } |
1370 | 1370 |
1371 private: | 1371 private: |
(...skipping 32 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
1404 class EnsureSpace BASE_EMBEDDED { | 1404 class EnsureSpace BASE_EMBEDDED { |
1405 public: | 1405 public: |
1406 explicit EnsureSpace(Assembler* assembler) { | 1406 explicit EnsureSpace(Assembler* assembler) { |
1407 assembler->CheckBuffer(); | 1407 assembler->CheckBuffer(); |
1408 } | 1408 } |
1409 }; | 1409 }; |
1410 | 1410 |
1411 } } // namespace v8::internal | 1411 } } // namespace v8::internal |
1412 | 1412 |
1413 #endif // V8_ARM_ASSEMBLER_MIPS_H_ | 1413 #endif // V8_ARM_ASSEMBLER_MIPS_H_ |
OLD | NEW |