Chromium Code Reviews

Unified Diff: src/ppc/assembler-ppc.cc

Issue 1155703006: Revert of Embedded constant pools. (Closed) Base URL: https://chromium.googlesource.com/v8/v8.git@master
Patch Set: Created 5 years, 6 months ago
The patch below is shown as a unified diff: "-" lines were removed by this patch, "+" lines were added, and unmarked lines are unchanged context.

 // Copyright (c) 1994-2006 Sun Microsystems Inc.
 // All Rights Reserved.
 //
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions
 // are met:
 //
 // - Redistributions of source code must retain the above copyright notice,
 // this list of conditions and the following disclaimer.
 //
(...skipping 130 matching lines...)
 // -----------------------------------------------------------------------------
 // Implementation of RelocInfo

 const int RelocInfo::kApplyMask = 1 << RelocInfo::INTERNAL_REFERENCE |
                                   1 << RelocInfo::INTERNAL_REFERENCE_ENCODED;


 bool RelocInfo::IsCodedSpecially() {
   // The deserializer needs to know whether a pointer is specially
   // coded. Being specially coded on PPC means that it is a lis/ori
-  // instruction sequence or is a constant pool entry, and these are
-  // always the case inside code objects.
+  // instruction sequence, and these are always the case inside code
+  // objects.
   return true;
 }


 bool RelocInfo::IsInConstantPool() {
-  if (FLAG_enable_embedded_constant_pool) {
-    Address constant_pool = host_->constant_pool();
-    return (constant_pool && Assembler::IsConstantPoolLoadStart(pc_));
-  }
   return false;
 }


 // -----------------------------------------------------------------------------
 // Implementation of Operand and MemOperand
 // See assembler-ppc-inl.h for inlined constructors

 Operand::Operand(Handle<Object> handle) {
   AllowDeferredHandleDereference using_raw_address;
(...skipping 26 matching lines...)
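
Note on IsCodedSpecially() above: a "lis/ori instruction sequence" is how the PPC assembler materializes a full-width pointer in a register without a constant pool. A minimal standalone sketch of the arithmetic (illustrative only, not part of the patch; the target value is hypothetical):

#include <cstdint>
#include <cstdio>

int main() {
  uint32_t target = 0xDEADBEEF;   // hypothetical embedded pointer
  uint16_t hi = target >> 16;     // immediate for: lis dst, hi
  uint16_t lo = target & 0xFFFF;  // immediate for: ori dst, dst, lo
  // ori zero-extends its immediate, so the two halves recombine exactly.
  uint32_t rebuilt = (uint32_t{hi} << 16) | lo;
  printf("lis 0x%X / ori 0x%X rebuilds 0x%X\n", hi, lo, rebuilt);
  return rebuilt == target ? 0 : 1;
}
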
 }


 // -----------------------------------------------------------------------------
 // Specific instructions, constants, and masks.


 Assembler::Assembler(Isolate* isolate, void* buffer, int buffer_size)
     : AssemblerBase(isolate, buffer, buffer_size),
       recorded_ast_id_(TypeFeedbackId::None()),
-      constant_pool_builder_(kLoadPtrMaxReachBits, kLoadDoubleMaxReachBits),
       positions_recorder_(this) {
   reloc_info_writer.Reposition(buffer_ + buffer_size_, pc_);

   no_trampoline_pool_before_ = 0;
   trampoline_pool_blocked_nesting_ = 0;
-  constant_pool_entry_sharing_blocked_nesting_ = 0;
   // We leave space (kMaxBlockTrampolineSectionSize)
   // for BlockTrampolinePoolScope buffer.
   next_buffer_check_ =
       FLAG_force_long_branches ? kMaxInt : kMaxCondBranchReach -
                                                kMaxBlockTrampolineSectionSize;
   internal_trampoline_exception_ = false;
   last_bound_pos_ = 0;
   trampoline_emitted_ = FLAG_force_long_branches;
   unbound_labels_count_ = 0;
   ClearRecordedAstId();
   relocations_.reserve(128);
 }


 void Assembler::GetCode(CodeDesc* desc) {
-  // Emit constant pool if necessary.
-  int constant_pool_offset = EmitConstantPool();
-
   EmitRelocations();

   // Set up code descriptor.
   desc->buffer = buffer_;
   desc->buffer_size = buffer_size_;
   desc->instr_size = pc_offset();
   desc->reloc_size = (buffer_ + buffer_size_) - reloc_info_writer.pos();
-  desc->constant_pool_size =
-      (constant_pool_offset ? desc->instr_size - constant_pool_offset : 0);
   desc->origin = this;
 }


 void Assembler::Align(int m) {
 #if V8_TARGET_ARCH_PPC64
   DCHECK(m >= 4 && base::bits::IsPowerOfTwo64(m));
 #else
   DCHECK(m >= 4 && base::bits::IsPowerOfTwo32(m));
 #endif
-  // First ensure instruction alignment
-  while (pc_offset() & (kInstrSize - 1)) {
-    db(0);
-  }
-  // Then pad to requested alignment with nops
-  while (pc_offset() & (m - 1)) {
+  while ((pc_offset() & (m - 1)) != 0) {
     nop();
   }
 }


 void Assembler::CodeTargetAlign() { Align(8); }


 Condition Assembler::GetCondition(Instr instr) {
   switch (instr & kCondMask) {
(...skipping 212 matching lines...)
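
Note on Align() above: for a power-of-two m, (pc_offset() & (m - 1)) is the distance past the previous m-byte boundary, so the loop emits nops until that reaches zero. A standalone sketch of the invariant (assumes the 4-byte PPC instruction size; not patch code):

#include <cassert>

int nops_needed(int offset, int m) {
  assert(m >= 4 && (m & (m - 1)) == 0);  // power of two, as the DCHECK requires
  int nops = 0;
  while ((offset & (m - 1)) != 0) {  // same condition Align() loops on
    offset += 4;                     // each nop advances pc by one instruction
    ++nops;
  }
  return nops;
}

int main() {
  assert(nops_needed(20, 8) == 1);  // 20 -> 24: one nop
  assert(nops_needed(16, 8) == 0);  // already aligned
}
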
       int32_t offset = target_pos + SIGN_EXT_IMM16(operands & kImm16Mask);
       CodePatcher patcher(reinterpret_cast<byte*>(buffer_ + pos), 2,
                           CodePatcher::DONT_FLUSH);
       patcher.masm()->bitwise_add32(dst, base, offset);
       break;
     }
     case kUnboundMovLabelAddrOpcode: {
       // Load the address of the label in a register.
       Register dst = Register::from_code(instr_at(pos + kInstrSize));
       CodePatcher patcher(reinterpret_cast<byte*>(buffer_ + pos),
-                          kMovInstructionsNoConstantPool,
-                          CodePatcher::DONT_FLUSH);
+                          kMovInstructions, CodePatcher::DONT_FLUSH);
       // Keep internal references relative until EmitRelocations.
       patcher.masm()->bitwise_mov(dst, target_pos);
       break;
     }
     case kUnboundJumpTableEntryOpcode: {
       CodePatcher patcher(reinterpret_cast<byte*>(buffer_ + pos),
                           kPointerSize / kInstrSize, CodePatcher::DONT_FLUSH);
       // Keep internal references relative until EmitRelocations.
-      patcher.masm()->dp(target_pos);
+      patcher.masm()->emit_ptr(target_pos);
       break;
     }
     default:
       DCHECK(false);
       break;
   }
 }


 int Assembler::max_reach_from(int pos) {
(...skipping 991 matching lines...)


 // Function descriptor for AIX.
 // Code address skips the function descriptor "header".
 // TOC and static chain are ignored and set to 0.
 void Assembler::function_descriptor() {
 #if ABI_USES_FUNCTION_DESCRIPTORS
   Label instructions;
   DCHECK(pc_offset() == 0);
   emit_label_addr(&instructions);
-  dp(0);
-  dp(0);
+  emit_ptr(0);
+  emit_ptr(0);
   bind(&instructions);
 #endif
 }


-int Assembler::instructions_required_for_mov(Register dst,
-                                             const Operand& src) const {
-  bool canOptimize =
-      !(src.must_output_reloc_info(this) || is_trampoline_pool_blocked());
-  if (use_constant_pool_for_mov(dst, src, canOptimize)) {
-    if (ConstantPoolAccessIsInOverflow()) {
-      return kMovInstructionsConstantPool + 1;
-    }
-    return kMovInstructionsConstantPool;
-  }
-  DCHECK(!canOptimize);
-  return kMovInstructionsNoConstantPool;
-}
-
-
-bool Assembler::use_constant_pool_for_mov(Register dst, const Operand& src,
-                                          bool canOptimize) const {
-  if (!FLAG_enable_embedded_constant_pool || !is_constant_pool_available()) {
-    // If there is no constant pool available, we must use a mov
-    // immediate sequence.
-    return false;
-  }
-
-  intptr_t value = src.immediate();
-#if V8_TARGET_ARCH_PPC64
-  bool allowOverflow = !((canOptimize && is_int32(value)) || dst.is(r0));
-#else
-  bool allowOverflow = !(canOptimize || dst.is(r0));
-#endif
-  if (canOptimize && is_int16(value)) {
-    // Prefer a single-instruction load-immediate.
-    return false;
-  }
-  if (!allowOverflow && ConstantPoolAccessIsInOverflow()) {
-    // Prefer non-relocatable two-instruction bitwise-mov32 over
-    // overflow sequence.
-    return false;
-  }
-
-  return true;
-}
-
-
 void Assembler::EnsureSpaceFor(int space_needed) {
   if (buffer_space() <= (kGap + space_needed)) {
     GrowBuffer(space_needed);
   }
 }


 bool Operand::must_output_reloc_info(const Assembler* assembler) const {
   if (rmode_ == RelocInfo::EXTERNAL_REFERENCE) {
     if (assembler != NULL && assembler->predictable_code_size()) return true;
(...skipping 12 matching lines...)
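
Note on the two functions deleted above: they implemented the heuristic for when a mov should go through the embedded constant pool. A boolean sketch of that decision, with the predicates flattened into parameters (hypothetical helper, for illustration only):

// Mirrors the shape of the deleted use_constant_pool_for_mov().
bool use_pool(bool pool_available, bool can_optimize, bool fits_int16,
              bool allow_overflow, bool in_overflow) {
  if (!pool_available) return false;             // must use a mov-immediate sequence
  if (can_optimize && fits_int16) return false;  // a single li is cheaper
  if (!allow_overflow && in_overflow) return false;  // prefer bitwise mov32
  return true;
}
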
 // Todo - break this dependency so we can optimize mov() in general
 // and only use the generic version when we require a fixed sequence
 void Assembler::mov(Register dst, const Operand& src) {
   intptr_t value = src.immediate();
   bool relocatable = src.must_output_reloc_info(this);
   bool canOptimize;

   canOptimize =
       !(relocatable || (is_trampoline_pool_blocked() && !is_int16(value)));

-  if (use_constant_pool_for_mov(dst, src, canOptimize)) {
-    DCHECK(is_constant_pool_available());
-    if (relocatable) {
-      RecordRelocInfo(src.rmode_);
-    }
-    ConstantPoolEntry::Access access = ConstantPoolAddEntry(src.rmode_, value);
-#if V8_TARGET_ARCH_PPC64
-    if (access == ConstantPoolEntry::OVERFLOWED) {
-      addis(dst, kConstantPoolRegister, Operand::Zero());
-      ld(dst, MemOperand(dst, 0));
-    } else {
-      ld(dst, MemOperand(kConstantPoolRegister, 0));
-    }
-#else
-    if (access == ConstantPoolEntry::OVERFLOWED) {
-      addis(dst, kConstantPoolRegister, Operand::Zero());
-      lwz(dst, MemOperand(dst, 0));
-    } else {
-      lwz(dst, MemOperand(kConstantPoolRegister, 0));
-    }
-#endif
-    return;
-  }
-
   if (canOptimize) {
     if (is_int16(value)) {
       li(dst, Operand(value));
     } else {
       uint16_t u16;
 #if V8_TARGET_ARCH_PPC64
       if (is_int32(value)) {
 #endif
         lis(dst, Operand(value >> 16));
 #if V8_TARGET_ARCH_PPC64
(...skipping 145 matching lines...)
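
Note on the surviving fast paths in mov() above: the instruction count is driven purely by the immediate's range. A sketch of that selection (the five-instruction 64-bit count is an assumption about the generic lis/ori/shift/oris/ori sequence, not taken from this patch):

#include <cstdint>

bool fits_int16(intptr_t v) { return v == static_cast<int16_t>(v); }
bool fits_int32(intptr_t v) { return v == static_cast<int32_t>(v); }

int mov_instruction_count(intptr_t value) {
  if (fits_int16(value)) return 1;  // li dst, value
  if (fits_int32(value)) return 2;  // lis + ori
  return 5;                         // assumed full 64-bit materialization
}
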
     DCHECK(is_int26(link));

     // When the label is bound, these instructions will be patched
     // with a multi-instruction mov sequence that will load the
     // destination register with the address of the label.
     //
     // target_at extracts the link and target_at_put patches the instructions.
     BlockTrampolinePoolScope block_trampoline_pool(this);
     emit(kUnboundMovLabelAddrOpcode | (link & kImm26Mask));
     emit(dst.code());
-    DCHECK(kMovInstructionsNoConstantPool >= 2);
-    for (int i = 0; i < kMovInstructionsNoConstantPool - 2; i++) nop();
+    DCHECK(kMovInstructions >= 2);
+    for (int i = 0; i < kMovInstructions - 2; i++) nop();
   }
 }


 void Assembler::emit_label_addr(Label* label) {
   CheckBuffer();
   RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE);
   int position = link(label);
   if (label->is_bound()) {
     // Keep internal references relative until EmitRelocations.
-    dp(position);
+    emit_ptr(position);
   } else {
     // Encode internal reference to unbound label. We use a dummy opcode
     // such that it won't collide with any opcode that might appear in the
     // label's chain.
     int link = position - pc_offset();
     DCHECK_EQ(0, link & 3);
     link >>= 2;
     DCHECK(is_int26(link));

     // When the label is bound, the instruction(s) will be patched
(...skipping 110 matching lines...)
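
Note on the encoding above: label links are instruction offsets, so they are stored divided by 4 (link >>= 2) to fit a 26-bit immediate field. A small standalone check of the round trip (assumed field width matching kImm26Mask; not patch code):

#include <cassert>
#include <cstdint>

const int32_t kImm26Mask = (1 << 26) - 1;

int32_t store_link(int byte_offset) {
  assert((byte_offset & 3) == 0);  // instruction-aligned, as DCHECK'd above
  return (byte_offset >> 2) & kImm26Mask;
}

int extract_link(int32_t field) {
  int32_t link = field & kImm26Mask;
  if (link & (1 << 25)) link -= (1 << 26);  // sign-extend the 26-bit field
  return link << 2;                         // back to a byte offset
}

int main() {
  assert(extract_link(store_link(-64)) == -64);  // backward link round-trips
  assert(extract_link(store_link(128)) == 128);
}
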


 void Assembler::isync() { emit(EXT1 | ISYNC); }


 // Floating point support

 void Assembler::lfd(const DoubleRegister frt, const MemOperand& src) {
   int offset = src.offset();
   Register ra = src.ra();
-  DCHECK(!ra.is(r0));
   DCHECK(is_int16(offset));
   int imm16 = offset & kImm16Mask;
   // could be x_form instruction with some casting magic
   emit(LFD | frt.code() * B21 | ra.code() * B16 | imm16);
 }


 void Assembler::lfdu(const DoubleRegister frt, const MemOperand& src) {
   int offset = src.offset();
   Register ra = src.ra();
-  DCHECK(!ra.is(r0));
   DCHECK(is_int16(offset));
   int imm16 = offset & kImm16Mask;
   // could be x_form instruction with some casting magic
   emit(LFDU | frt.code() * B21 | ra.code() * B16 | imm16);
 }


 void Assembler::lfdx(const DoubleRegister frt, const MemOperand& src) {
   Register ra = src.ra();
   Register rb = src.rb();
(...skipping 379 matching lines...)
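
Note on the emit() expressions above: they pack a D-form instruction word from an opcode, a target FP register, a base register, and a 16-bit displacement. A standalone sketch of the field packing, taking B16/B21 to be bit positions 16 and 21 as in the V8 constants (the opcode argument here is a placeholder, not the real LFD value):

#include <cstdint>

const uint32_t B16 = 1u << 16;
const uint32_t B21 = 1u << 21;
const uint32_t kImm16Mask = 0xFFFF;

// Packs opcode | FRT | RA | displacement exactly as lfd() does.
uint32_t d_form(uint32_t opcode, int frt, int ra, int offset) {
  return opcode | frt * B21 | ra * B16 | (offset & kImm16Mask);
}
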
 }


 void Assembler::dd(uint32_t data) {
   CheckBuffer();
   *reinterpret_cast<uint32_t*>(pc_) = data;
   pc_ += sizeof(uint32_t);
 }


-void Assembler::dq(uint64_t value) {
+void Assembler::emit_ptr(intptr_t data) {
   CheckBuffer();
-  *reinterpret_cast<uint64_t*>(pc_) = value;
-  pc_ += sizeof(uint64_t);
+  *reinterpret_cast<intptr_t*>(pc_) = data;
+  pc_ += sizeof(intptr_t);
 }


-void Assembler::dp(uintptr_t data) {
+void Assembler::emit_double(double value) {
   CheckBuffer();
-  *reinterpret_cast<uintptr_t*>(pc_) = data;
-  pc_ += sizeof(uintptr_t);
+  *reinterpret_cast<double*>(pc_) = value;
+  pc_ += sizeof(double);
 }


 void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
-  if (RelocInfo::IsNone(rmode) ||
-      // Don't record external references unless the heap will be serialized.
-      (rmode == RelocInfo::EXTERNAL_REFERENCE && !serializer_enabled() &&
-       !emit_debug_code())) {
-    return;
-  }
-  if (rmode == RelocInfo::CODE_TARGET_WITH_ID) {
-    data = RecordedAstId().ToInt();
-    ClearRecordedAstId();
-  }
   DeferredRelocInfo rinfo(pc_offset(), rmode, data);
-  relocations_.push_back(rinfo);
+  RecordRelocInfo(rinfo);
 }


+void Assembler::RecordRelocInfo(const DeferredRelocInfo& rinfo) {
+  if (rinfo.rmode() >= RelocInfo::JS_RETURN &&
+      rinfo.rmode() <= RelocInfo::DEBUG_BREAK_SLOT) {
+    // Adjust code for new modes.
+    DCHECK(RelocInfo::IsDebugBreakSlot(rinfo.rmode()) ||
+           RelocInfo::IsJSReturn(rinfo.rmode()) ||
+           RelocInfo::IsComment(rinfo.rmode()) ||
+           RelocInfo::IsPosition(rinfo.rmode()));
+  }
+  if (!RelocInfo::IsNone(rinfo.rmode())) {
+    // Don't record external references unless the heap will be serialized.
+    if (rinfo.rmode() == RelocInfo::EXTERNAL_REFERENCE) {
+      if (!serializer_enabled() && !emit_debug_code()) {
+        return;
+      }
+    }
+    if (rinfo.rmode() == RelocInfo::CODE_TARGET_WITH_ID) {
+      DeferredRelocInfo reloc_info_with_ast_id(rinfo.position(), rinfo.rmode(),
+                                               RecordedAstId().ToInt());
+      ClearRecordedAstId();
+      relocations_.push_back(reloc_info_with_ast_id);
+    } else {
+      relocations_.push_back(rinfo);
+    }
+  }
+}
+
+
 void Assembler::EmitRelocations() {
   EnsureSpaceFor(relocations_.size() * kMaxRelocSize);

   for (std::vector<DeferredRelocInfo>::iterator it = relocations_.begin();
        it != relocations_.end(); it++) {
     RelocInfo::Mode rmode = it->rmode();
     Address pc = buffer_ + it->position();
     Code* code = NULL;
     RelocInfo rinfo(pc, rmode, it->data(), code);

(...skipping 62 matching lines...)
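
Note on the relocation flow above: RecordRelocInfo() only buffers a (position, mode, data) record; nothing is written to the reloc stream until EmitRelocations() runs from GetCode(). A simplified standalone sketch of that deferral (hypothetical types, not the real DeferredRelocInfo):

#include <cstdint>
#include <vector>

struct DeferredReloc {
  int position;   // pc_offset() when the reloc was recorded
  int rmode;      // relocation mode (an enum in the real code)
  intptr_t data;  // payload, e.g. an AST id for CODE_TARGET_WITH_ID
};

std::vector<DeferredReloc> relocations;

void record(int pc_offset, int rmode, intptr_t data) {
  relocations.push_back({pc_offset, rmode, data});  // buffered, not emitted
}
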
   } else {
     // Number of branches to unbound label at this point is zero, so we can
     // move next buffer check to maximum.
     next_buffer_check_ =
         pc_offset() + kMaxCondBranchReach - kMaxBlockTrampolineSectionSize;
   }
   return;
 }


+Handle<ConstantPoolArray> Assembler::NewConstantPool(Isolate* isolate) {
+  DCHECK(!FLAG_enable_ool_constant_pool);
+  return isolate->factory()->empty_constant_pool_array();
+}
+
+
+void Assembler::PopulateConstantPool(ConstantPoolArray* constant_pool) {
+  DCHECK(!FLAG_enable_ool_constant_pool);
+}
 }  // namespace internal
 }  // namespace v8

 #endif  // V8_TARGET_ARCH_PPC