
Unified diff: src/ppc/assembler-ppc.cc

Issue 986553005: Contribution of PowerPC port (continuation of 422063005) - serialize.cc cleanup (Closed)
Base URL: https://chromium.googlesource.com/v8/v8.git@master
Patch Set: Created 5 years, 9 months ago
 // Copyright (c) 1994-2006 Sun Microsystems Inc.
 // All Rights Reserved.
 //
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions
 // are met:
 //
 // - Redistributions of source code must retain the above copyright notice,
 // this list of conditions and the following disclaimer.
 //
(...skipping 124 matching lines...)
135 "d1", "d2", "d3", "d4", "d5", "d6", "d7", "d8", "d9", "d10", 135 "d1", "d2", "d3", "d4", "d5", "d6", "d7", "d8", "d9", "d10",
136 "d11", "d12", "d15", "d16", "d17", "d18", "d19", "d20", "d21", "d22", 136 "d11", "d12", "d15", "d16", "d17", "d18", "d19", "d20", "d21", "d22",
137 "d23", "d24", "d25", "d26", "d27", "d28", "d29", "d30", "d31"}; 137 "d23", "d24", "d25", "d26", "d27", "d28", "d29", "d30", "d31"};
138 return names[index]; 138 return names[index];
139 } 139 }
140 140
141 141
142 // ----------------------------------------------------------------------------- 142 // -----------------------------------------------------------------------------
143 // Implementation of RelocInfo 143 // Implementation of RelocInfo
144 144
145 const int RelocInfo::kApplyMask = 1 << RelocInfo::INTERNAL_REFERENCE | 145 const int RelocInfo::kInternalReferenceMask =
146 1 << RelocInfo::INTERNAL_REFERENCE_ENCODED; 146 1 << RelocInfo::INTERNAL_REFERENCE |
147 1 << RelocInfo::INTERNAL_REFERENCE_ENCODED;
148 const int RelocInfo::kApplyMask = RelocInfo::kInternalReferenceMask;
147 149
148 150
149 bool RelocInfo::IsCodedSpecially() { 151 bool RelocInfo::IsCodedSpecially() {
150 // The deserializer needs to know whether a pointer is specially 152 // The deserializer needs to know whether a pointer is specially
151 // coded. Being specially coded on PPC means that it is a lis/ori 153 // coded. Being specially coded on PPC means that it is a lis/ori
152 // instruction sequence, and these are always the case inside code 154 // instruction sequence, and these are always the case inside code
153 // objects. 155 // objects.
154 return true; 156 return true;
155 } 157 }
156 158
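A note on the lis/ori form that IsCodedSpecially() refers to: pointer constants inside code objects are always materialized as a fixed sequence of immediate instructions, with the value split across 16-bit instruction fields. An illustrative 32-bit example (values invented; PPC64 uses a longer multi-instruction variant of the same idea):

    lis  r4, 0x1234       # r4 = 0x12340000  (load upper halfword)
    ori  r4, r4, 0x5678   # r4 = 0x12345678  (or in lower halfword)

Because the target value is scattered across instruction immediates rather than stored as a raw word, the serializer has to decode and re-encode the sequence, which is what "specially coded" means here.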
(...skipping 53 matching lines...)
   // We leave space (kMaxBlockTrampolineSectionSize)
   // for BlockTrampolinePoolScope buffer.
   next_buffer_check_ =
       FLAG_force_long_branches ? kMaxInt : kMaxCondBranchReach -
                                            kMaxBlockTrampolineSectionSize;
   internal_trampoline_exception_ = false;
   last_bound_pos_ = 0;
   trampoline_emitted_ = FLAG_force_long_branches;
   unbound_labels_count_ = 0;
   ClearRecordedAstId();
+  relocations_.reserve(128);
 }

 void Assembler::GetCode(CodeDesc* desc) {
-  reloc_info_writer.Finish();
+  EmitRelocations();

   // Set up code descriptor.
   desc->buffer = buffer_;
   desc->buffer_size = buffer_size_;
   desc->instr_size = pc_offset();
   desc->reloc_size = (buffer_ + buffer_size_) - reloc_info_writer.pos();
   desc->origin = this;
 }

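GetCode() now flushes relocation records through EmitRelocations() (defined near the end of this file) instead of finalizing an incrementally written stream. The entries accumulated in relocations_ are DeferredRelocInfo values; that class is declared in assembler-ppc.h, which is outside this diff, but a minimal sketch consistent with how it is used in this file would be:

    // Sketch only -- the real declaration lives in assembler-ppc.h.
    // position is a pc offset into the code buffer, not an absolute
    // address, which is what makes buffer moves cheap.
    class DeferredRelocInfo {
     public:
      DeferredRelocInfo() {}
      DeferredRelocInfo(int position, RelocInfo::Mode rmode, intptr_t data)
          : position_(position), rmode_(rmode), data_(data) {}

      int position() const { return position_; }
      RelocInfo::Mode rmode() const { return rmode_; }
      intptr_t data() const { return data_; }

     private:
      int position_;
      RelocInfo::Mode rmode_;
      intptr_t data_;
    };

Deferring the writes means GetCode() can emit all relocation records in one pass once the code is final, instead of interleaving reloc_info_writer.Write() with instruction emission.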
(...skipping 136 matching lines...)
 // instruction using the label.

 // The link chain is terminated by a negative code position (must be aligned)
 const int kEndOfChain = -4;

 // Dummy opcodes for unbound label mov instructions or jump table entries.
 enum {
   kUnboundMovLabelOffsetOpcode = 0 << 26,
-  kUnboundMovLabelAddrOpcode = 1 << 26,
-  kUnboundJumpTableEntryOpcode = 2 << 26
+  kUnboundAddLabelOffsetOpcode = 1 << 26,
+  kUnboundMovLabelAddrOpcode = 2 << 26,
+  kUnboundJumpTableEntryOpcode = 3 << 26
 };

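Each of these placeholders occupies the 6-bit primary-opcode field of a PPC instruction (hence the << 26), leaving the low 26 bits free to hold the link to the previous entry in the label's chain; that is what lets target_at() below handle placeholders and real BX/BCX branches uniformly. A worked example with invented offsets:

    // Emitting a placeholder at pc offset 0x40, previous chain entry at 0x20:
    int link = 0x20 - 0x40;  // -0x20 bytes back along the chain
    link >>= 2;              // -8; instructions are 4-byte aligned
    emit(kUnboundMovLabelOffsetOpcode | (link & kImm26Mask));

    // target_at() inverts this:
    //   link = SIGN_EXT_IMM26(instr & kImm26Mask);  // -8
    //   link <<= 2;                                 // -0x20 again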
 int Assembler::target_at(int pos) {
   Instr instr = instr_at(pos);
   // check which type of branch this is 16 or 26 bit offset
   int opcode = instr & kOpcodeMask;
   int link;
   switch (opcode) {
     case BX:
       link = SIGN_EXT_IMM26(instr & kImm26Mask);
       link &= ~(kAAMask | kLKMask);  // discard AA|LK bits if present
       break;
     case BCX:
       link = SIGN_EXT_IMM16((instr & kImm16Mask));
       link &= ~(kAAMask | kLKMask);  // discard AA|LK bits if present
       break;
     case kUnboundMovLabelOffsetOpcode:
+    case kUnboundAddLabelOffsetOpcode:
     case kUnboundMovLabelAddrOpcode:
     case kUnboundJumpTableEntryOpcode:
       link = SIGN_EXT_IMM26(instr & kImm26Mask);
       link <<= 2;
       break;
     default:
       DCHECK(false);
       return -1;
   }

(...skipping 36 matching lines...)
     case kUnboundMovLabelOffsetOpcode: {
       // Load the position of the label relative to the generated code object
       // pointer in a register.
       Register dst = Register::from_code(instr_at(pos + kInstrSize));
       int32_t offset = target_pos + (Code::kHeaderSize - kHeapObjectTag);
       CodePatcher patcher(reinterpret_cast<byte*>(buffer_ + pos), 2,
                           CodePatcher::DONT_FLUSH);
       patcher.masm()->bitwise_mov32(dst, offset);
       break;
     }
+    case kUnboundAddLabelOffsetOpcode: {
+      // dst = base + position + immediate
+      Instr operands = instr_at(pos + kInstrSize);
+      Register dst = Register::from_code((operands >> 21) & 0x1f);
+      Register base = Register::from_code((operands >> 16) & 0x1f);
+      int32_t offset = target_pos + SIGN_EXT_IMM16(operands & kImm16Mask);
+      CodePatcher patcher(reinterpret_cast<byte*>(buffer_ + pos), 2,
+                          CodePatcher::DONT_FLUSH);
+      patcher.masm()->bitwise_add32(dst, base, offset);
+      break;
+    }
     case kUnboundMovLabelAddrOpcode: {
       // Load the address of the label in a register.
       Register dst = Register::from_code(instr_at(pos + kInstrSize));
-      intptr_t addr = reinterpret_cast<uintptr_t>(buffer_ + target_pos);
       CodePatcher patcher(reinterpret_cast<byte*>(buffer_ + pos),
                           kMovInstructions, CodePatcher::DONT_FLUSH);
-      AddBoundInternalReferenceLoad(pos);
-      patcher.masm()->bitwise_mov(dst, addr);
+      // Keep internal references relative until EmitRelocations.
+      patcher.masm()->bitwise_mov(dst, target_pos);
       break;
     }
     case kUnboundJumpTableEntryOpcode: {
-      intptr_t addr = reinterpret_cast<uintptr_t>(buffer_ + target_pos);
       CodePatcher patcher(reinterpret_cast<byte*>(buffer_ + pos),
                           kPointerSize / kInstrSize, CodePatcher::DONT_FLUSH);
-      AddBoundInternalReference(pos);
-      patcher.masm()->emit_ptr(addr);
+      // Keep internal references relative until EmitRelocations.
+      patcher.masm()->emit_ptr(target_pos);
       break;
     }
     default:
       DCHECK(false);
       break;
   }
 }

 int Assembler::max_reach_from(int pos) {
   Instr instr = instr_at(pos);
   int opcode = instr & kOpcodeMask;

   // check which type of branch this is 16 or 26 bit offset
   switch (opcode) {
     case BX:
       return 26;
     case BCX:
       return 16;
     case kUnboundMovLabelOffsetOpcode:
+    case kUnboundAddLabelOffsetOpcode:
     case kUnboundMovLabelAddrOpcode:
     case kUnboundJumpTableEntryOpcode:
       return 0;  // no limit on reach
   }

   DCHECK(false);
   return 0;
 }

(...skipping 977 matching lines...)
   Label instructions;
   DCHECK(pc_offset() == 0);
   emit_label_addr(&instructions);
   emit_ptr(0);
   emit_ptr(0);
   bind(&instructions);
 #endif
 }

-void Assembler::RelocateInternalReference(Address pc, intptr_t delta,
-                                          Address code_start,
-                                          RelocInfo::Mode rmode,
-                                          ICacheFlushMode icache_flush_mode) {
-  if (RelocInfo::IsInternalReference(rmode)) {
-    // Jump table entry
-    DCHECK(delta || code_start);
-    uintptr_t* entry = reinterpret_cast<uintptr_t*>(pc);
-    if (delta) {
-      *entry += delta;
-    } else {
-      // remove when serializer properly supports internal references
-      *entry = reinterpret_cast<uintptr_t>(code_start) + 3 * kPointerSize;
-    }
-  } else {
-    // mov sequence
-    DCHECK(delta || code_start);
-    DCHECK(RelocInfo::IsInternalReferenceEncoded(rmode));
-    ConstantPoolArray* constant_pool = NULL;
-    Address addr;
-    if (delta) {
-      addr = target_address_at(pc, constant_pool) + delta;
-    } else {
-      // remove when serializer properly supports internal references
-      addr = code_start;
-    }
-    set_target_address_at(pc, constant_pool, addr, icache_flush_mode);
-  }
-}
-
 void Assembler::EnsureSpaceFor(int space_needed) {
   if (buffer_space() <= (kGap + space_needed)) {
     GrowBuffer(space_needed);
   }
 }

 bool Operand::must_output_reloc_info(const Assembler* assembler) const {
   if (rmode_ == RelocInfo::EXTERNAL_REFERENCE) {
     if (assembler != NULL && assembler->predictable_code_size()) return true;
     return assembler->serializer_enabled();
   } else if (RelocInfo::IsNone(rmode_)) {
     return false;
   }
   return true;
 }

 // Primarily used for loading constants
 // This should really move to be in macro-assembler as it
 // is really a pseudo instruction
 // Some usages of this intend for a FIXED_SEQUENCE to be used
 // Todo - break this dependency so we can optimize mov() in general
 // and only use the generic version when we require a fixed sequence
 void Assembler::mov(Register dst, const Operand& src) {
   intptr_t value = src.immediate();
+  bool relocatable = src.must_output_reloc_info(this);
   bool canOptimize;
-  RelocInfo rinfo(pc_, src.rmode_, value, NULL);

-  canOptimize = !(src.must_output_reloc_info(this) ||
-                  (is_trampoline_pool_blocked() && !is_int16(value)));
+  canOptimize =
+      !(relocatable || (is_trampoline_pool_blocked() && !is_int16(value)));

   if (canOptimize) {
     if (is_int16(value)) {
       li(dst, Operand(value));
     } else {
       uint16_t u16;
 #if V8_TARGET_ARCH_PPC64
       if (is_int32(value)) {
 #endif
         lis(dst, Operand(value >> 16));
(...skipping 17 matching lines...)
 #endif
       u16 = (value & 0xffff);
       if (u16) {
         ori(dst, dst, Operand(u16));
       }
     }
     return;
   }

   DCHECK(!canOptimize);
-  if (src.must_output_reloc_info(this)) {
-    RecordRelocInfo(rinfo);
+  if (relocatable) {
+    RecordRelocInfo(src.rmode_);
   }
   bitwise_mov(dst, value);
 }

 void Assembler::bitwise_mov(Register dst, intptr_t value) {
   BlockTrampolinePoolScope block_trampoline_pool(this);
 #if V8_TARGET_ARCH_PPC64
   int32_t hi_32 = static_cast<int32_t>(value >> 32);
   int32_t lo_32 = static_cast<int32_t>(value);
(...skipping 17 matching lines...)

 void Assembler::bitwise_mov32(Register dst, int32_t value) {
   BlockTrampolinePoolScope block_trampoline_pool(this);
   int hi_word = static_cast<int>(value >> 16);
   int lo_word = static_cast<int>(value & 0xffff);
   lis(dst, Operand(SIGN_EXT_IMM16(hi_word)));
   ori(dst, dst, Operand(lo_word));
 }

+void Assembler::bitwise_add32(Register dst, Register src, int32_t value) {
+  BlockTrampolinePoolScope block_trampoline_pool(this);
+  if (is_int16(value)) {
+    addi(dst, src, Operand(value));
+    nop();
+  } else {
+    int hi_word = static_cast<int>(value >> 16);
+    int lo_word = static_cast<int>(value & 0xffff);
+    if (lo_word & 0x8000) hi_word++;
+    addis(dst, src, Operand(SIGN_EXT_IMM16(hi_word)));
+    addic(dst, dst, Operand(SIGN_EXT_IMM16(lo_word)));
+  }
+}

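Two details of bitwise_add32 deserve a note. The is_int16 path pads with a nop so that both paths are exactly two instructions long, matching the two-instruction CodePatcher used for kUnboundAddLabelOffsetOpcode in target_at_put(). And since addic sign-extends its 16-bit immediate, the high halfword is pre-incremented whenever the low halfword has its top bit set. Worked through with an invented value:

    // value = 0x00018000: hi_word = 0x0001, lo_word = 0x8000 (top bit set),
    // so hi_word is bumped to 0x0002:
    //   addis dst, src, 0x0002   // dst = src + 0x00020000
    //   addic dst, dst, 0x8000   // immediate sign-extends to -0x00008000:
    //                            // dst = src + 0x00020000 - 0x00008000
    //                            //     = src + 0x00018000, as required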
 void Assembler::mov_label_offset(Register dst, Label* label) {
   int position = link(label);
   if (label->is_bound()) {
     // Load the position of the label relative to the generated code object.
     mov(dst, Operand(position + Code::kHeaderSize - kHeapObjectTag));
   } else {
     // Encode internal reference to unbound label. We use a dummy opcode
     // such that it won't collide with any opcode that might appear in the
     // label's chain. Encode the destination register in the 2nd instruction.
     int link = position - pc_offset();
     DCHECK_EQ(0, link & 3);
     link >>= 2;
     DCHECK(is_int26(link));

     // When the label is bound, these instructions will be patched
     // with a 2 instruction mov sequence that will load the
     // destination register with the position of the label from the
     // beginning of the code.
     //
     // target_at extracts the link and target_at_put patches the instructions.
     BlockTrampolinePoolScope block_trampoline_pool(this);
     emit(kUnboundMovLabelOffsetOpcode | (link & kImm26Mask));
     emit(dst.code());
   }
 }

+void Assembler::add_label_offset(Register dst, Register base, Label* label,
+                                 int delta) {
+  int position = link(label);
+  if (label->is_bound()) {
+    // dst = base + position + delta
+    position += delta;
+    bitwise_add32(dst, base, position);
+  } else {
+    // Encode internal reference to unbound label. We use a dummy opcode
+    // such that it won't collide with any opcode that might appear in the
+    // label's chain. Encode the operands in the 2nd instruction.
+    int link = position - pc_offset();
+    DCHECK_EQ(0, link & 3);
+    link >>= 2;
+    DCHECK(is_int26(link));
+    DCHECK(is_int16(delta));
+
+    BlockTrampolinePoolScope block_trampoline_pool(this);
+    emit(kUnboundAddLabelOffsetOpcode | (link & kImm26Mask));
+    emit(dst.code() * B21 | base.code() * B16 | (delta & kImm16Mask));
+  }
+}

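The operand word emitted in the unbound case packs dst into bits 21-25, base into bits 16-20, and delta into the low 16 bits, mirroring a D-form instruction layout so that the decode in target_at_put() is the exact inverse. An illustrative round trip (register numbers invented):

    // Encode, as above, with dst = r5, base = r8, delta = 12:
    //   operands = 5 * B21 | 8 * B16 | (12 & kImm16Mask);
    // Decode, as in target_at_put():
    //   (operands >> 21) & 0x1f;                // 5  -> dst
    //   (operands >> 16) & 0x1f;                // 8  -> base
    //   SIGN_EXT_IMM16(operands & kImm16Mask);  // 12 -> delta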
 void Assembler::mov_label_addr(Register dst, Label* label) {
   CheckBuffer();
   RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE_ENCODED);
   int position = link(label);
   if (label->is_bound()) {
-    // CheckBuffer() is called too frequently. This will pre-grow
-    // the buffer if needed to avoid spliting the relocation and instructions
-    EnsureSpaceFor(kMovInstructions * kInstrSize);
-
-    intptr_t addr = reinterpret_cast<uintptr_t>(buffer_ + position);
-    AddBoundInternalReferenceLoad(pc_offset());
-    bitwise_mov(dst, addr);
+    // Keep internal references relative until EmitRelocations.
+    bitwise_mov(dst, position);
   } else {
     // Encode internal reference to unbound label. We use a dummy opcode
     // such that it won't collide with any opcode that might appear in the
     // label's chain. Encode the destination register in the 2nd instruction.
     int link = position - pc_offset();
     DCHECK_EQ(0, link & 3);
     link >>= 2;
     DCHECK(is_int26(link));

     // When the label is bound, these instructions will be patched
     // with a multi-instruction mov sequence that will load the
     // destination register with the address of the label.
     //
     // target_at extracts the link and target_at_put patches the instructions.
     BlockTrampolinePoolScope block_trampoline_pool(this);
     emit(kUnboundMovLabelAddrOpcode | (link & kImm26Mask));
     emit(dst.code());
     DCHECK(kMovInstructions >= 2);
     for (int i = 0; i < kMovInstructions - 2; i++) nop();
   }
 }

 void Assembler::emit_label_addr(Label* label) {
   CheckBuffer();
   RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE);
   int position = link(label);
   if (label->is_bound()) {
-    // CheckBuffer() is called too frequently. This will pre-grow
-    // the buffer if needed to avoid spliting the relocation and entry.
-    EnsureSpaceFor(kPointerSize);
-
-    intptr_t addr = reinterpret_cast<uintptr_t>(buffer_ + position);
-    AddBoundInternalReference(pc_offset());
-    emit_ptr(addr);
+    // Keep internal references relative until EmitRelocations.
+    emit_ptr(position);
   } else {
     // Encode internal reference to unbound label. We use a dummy opcode
     // such that it won't collide with any opcode that might appear in the
     // label's chain.
     int link = position - pc_offset();
     DCHECK_EQ(0, link & 3);
     link >>= 2;
     DCHECK(is_int26(link));

     // When the label is bound, the instruction(s) will be patched
(...skipping 499 matching lines...)
             desc.reloc_size);

   // Switch buffers.
   DeleteArray(buffer_);
   buffer_ = desc.buffer;
   buffer_size_ = desc.buffer_size;
   pc_ += pc_delta;
   reloc_info_writer.Reposition(reloc_info_writer.pos() + rc_delta,
                                reloc_info_writer.last_pc() + pc_delta);

-  // Relocate internal references
-  for (int pos : internal_reference_positions_) {
-    RelocateInternalReference(buffer_ + pos, pc_delta, 0,
-                              RelocInfo::INTERNAL_REFERENCE);
-  }
-  for (int pos : internal_reference_load_positions_) {
-    RelocateInternalReference(buffer_ + pos, pc_delta, 0,
-                              RelocInfo::INTERNAL_REFERENCE_ENCODED);
-  }
+  // Nothing else to do here since we keep all internal references and
+  // deferred relocation entries relative to the buffer (until
+  // EmitRelocations).
 }

 void Assembler::db(uint8_t data) {
   CheckBuffer();
   *reinterpret_cast<uint8_t*>(pc_) = data;
   pc_ += sizeof(uint8_t);
 }

 void Assembler::dd(uint32_t data) {
   CheckBuffer();
   *reinterpret_cast<uint32_t*>(pc_) = data;
   pc_ += sizeof(uint32_t);
 }

 void Assembler::emit_ptr(intptr_t data) {
   CheckBuffer();
   *reinterpret_cast<intptr_t*>(pc_) = data;
   pc_ += sizeof(intptr_t);
 }

+void Assembler::emit_double(double value) {
+  CheckBuffer();
+  *reinterpret_cast<double*>(pc_) = value;
+  pc_ += sizeof(double);
+}

 void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
-  RelocInfo rinfo(pc_, rmode, data, NULL);
+  DeferredRelocInfo rinfo(pc_offset(), rmode, data);
   RecordRelocInfo(rinfo);
 }

-void Assembler::RecordRelocInfo(const RelocInfo& rinfo) {
+void Assembler::RecordRelocInfo(const DeferredRelocInfo& rinfo) {
   if (rinfo.rmode() >= RelocInfo::JS_RETURN &&
       rinfo.rmode() <= RelocInfo::DEBUG_BREAK_SLOT) {
     // Adjust code for new modes.
     DCHECK(RelocInfo::IsDebugBreakSlot(rinfo.rmode()) ||
            RelocInfo::IsJSReturn(rinfo.rmode()) ||
            RelocInfo::IsComment(rinfo.rmode()) ||
            RelocInfo::IsPosition(rinfo.rmode()));
   }
   if (!RelocInfo::IsNone(rinfo.rmode())) {
     // Don't record external references unless the heap will be serialized.
     if (rinfo.rmode() == RelocInfo::EXTERNAL_REFERENCE) {
       if (!serializer_enabled() && !emit_debug_code()) {
         return;
       }
     }
-    DCHECK(buffer_space() >= kMaxRelocSize);  // too late to grow buffer here
     if (rinfo.rmode() == RelocInfo::CODE_TARGET_WITH_ID) {
-      RelocInfo reloc_info_with_ast_id(rinfo.pc(), rinfo.rmode(),
-                                       RecordedAstId().ToInt(), NULL);
+      DeferredRelocInfo reloc_info_with_ast_id(rinfo.position(), rinfo.rmode(),
+                                               RecordedAstId().ToInt());
       ClearRecordedAstId();
-      reloc_info_writer.Write(&reloc_info_with_ast_id);
+      relocations_.push_back(reloc_info_with_ast_id);
     } else {
-      reloc_info_writer.Write(&rinfo);
+      relocations_.push_back(rinfo);
     }
   }
 }

+void Assembler::EmitRelocations() {
+  EnsureSpaceFor(relocations_.size() * kMaxRelocSize);
+
+  for (std::vector<DeferredRelocInfo>::iterator it = relocations_.begin();
+       it != relocations_.end(); it++) {
+    RelocInfo::Mode rmode = it->rmode();
+    RelocInfo rinfo(buffer_ + it->position(), rmode, it->data(), NULL);
+
+    // Fix up internal references now that they are guaranteed to be bound.
+    if (RelocInfo::IsInternalReference(rmode) ||
+        RelocInfo::IsInternalReferenceEncoded(rmode)) {
+      intptr_t pos =
+          reinterpret_cast<intptr_t>(rinfo.target_internal_reference());
+      rinfo.set_target_internal_reference(buffer_ + pos, SKIP_ICACHE_FLUSH);
+    }
+
+    reloc_info_writer.Write(&rinfo);
+  }
+
+  reloc_info_writer.Finish();
+}

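Taken together, the new scheme works like this: while code is being emitted, every relocation is recorded as a buffer-relative DeferredRelocInfo; GrowBuffer() can therefore move buffer_ without fixing anything up; and EmitRelocations(), invoked from GetCode(), converts each entry into an absolute RelocInfo in a single pass, rewriting bound internal references from relative positions to real addresses. A hedged usage sketch (register choice and label placement invented):

    // Assembler masm(isolate, buffer, buffer_size);
    // Label target;
    // masm.mov_label_addr(r3, &target);  // records INTERNAL_REFERENCE_ENCODED;
    //                                    // the mov holds a *relative* position
    // ...                                // the buffer may grow and move here
    // masm.bind(&target);
    // CodeDesc desc;
    // masm.GetCode(&desc);               // EmitRelocations() rewrites the mov
    //                                    // target to buffer_ + position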
 void Assembler::BlockTrampolinePoolFor(int instructions) {
   BlockTrampolinePoolBefore(pc_offset() + instructions * kInstrSize);
 }

 void Assembler::CheckTrampolinePool() {
   // Some small sequences of instructions must not be broken up by the
   // insertion of a trampoline pool; such sequences are protected by setting
   // either trampoline_pool_blocked_nesting_ or no_trampoline_pool_before_,
   // which are both checked here. Also, recursive calls to CheckTrampolinePool
(...skipping 47 matching lines...)
 }

 void Assembler::PopulateConstantPool(ConstantPoolArray* constant_pool) {
   DCHECK(!FLAG_enable_ool_constant_pool);
 }
 }
 }  // namespace v8::internal

 #endif  // V8_TARGET_ARCH_PPC
