Index: src/assembler.cc
diff --git a/src/assembler.cc b/src/assembler.cc
index 2555bbbfaa6620187e7e3ca56345ac7ceb1f49a0..90d08bb6821177d179fbc0c161c5e7eae448a698 100644
--- a/src/assembler.cc
+++ b/src/assembler.cc
@@ -135,7 +135,7 @@ AssemblerBase::AssemblerBase(Isolate* isolate, void* buffer, int buffer_size)
       predictable_code_size_(false),
       // We may use the assembler without an isolate.
       serializer_enabled_(isolate && isolate->serializer_enabled()),
-      ool_constant_pool_available_(false) {
+      constant_pool_available_(false) {
   if (FLAG_mask_constants_with_cookie && isolate != NULL) {
     jit_cookie_ = isolate->random_number_generator()->NextInt();
   }
@@ -1635,6 +1635,208 @@ bool PositionsRecorder::WriteRecordedPositions() {
 }
 
 
+ConstantPoolBuilder::ConstantPoolBuilder(int ptr_reach_bits,
+                                         int double_reach_bits) {
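+  // Pointer entries are expected to dominate, so reserve their backing
+  // store up front.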
+  info_[ConstantPoolEntry::INTPTR].entries.reserve(64);
+  info_[ConstantPoolEntry::INTPTR].regular_reach_bits = ptr_reach_bits;
+  info_[ConstantPoolEntry::DOUBLE].regular_reach_bits = double_reach_bits;
+}
+
+
+ConstantPoolEntry::Access ConstantPoolBuilder::NextAccess(
+    ConstantPoolEntry::Type type) const {
+  const PerTypeEntryInfo& info = info_[type];
+
+  if (info.overflow()) return ConstantPoolEntry::OVERFLOWED;
+
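+  // The pool lays out doubles first, then pointers, so a pointer entry's
+  // offset includes the entire double section.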
+  int dbl_count = info_[ConstantPoolEntry::DOUBLE].regular_count;
+  int dbl_offset = dbl_count * kDoubleSize;
+  int ptr_count = info_[ConstantPoolEntry::INTPTR].regular_count;
+  int ptr_offset = ptr_count * kPointerSize + dbl_offset;
+
+  if (type == ConstantPoolEntry::DOUBLE) {
+    // Double overflow detection must take into account the reach of both
+    // types: emitting another double shifts every pointer entry back by
+    // kDoubleSize, so the last pointer entry must stay reachable as well.
+    int ptr_reach_bits = info_[ConstantPoolEntry::INTPTR].regular_reach_bits;
+    if (!is_uintn(dbl_offset, info.regular_reach_bits) ||
+        (ptr_count > 0 &&
+         !is_uintn(ptr_offset + kDoubleSize - kPointerSize, ptr_reach_bits))) {
+      return ConstantPoolEntry::OVERFLOWED;
+    }
+  } else {
+    DCHECK(type == ConstantPoolEntry::INTPTR);
+    if (!is_uintn(ptr_offset, info.regular_reach_bits)) {
+      return ConstantPoolEntry::OVERFLOWED;
+    }
+  }
+
+  return ConstantPoolEntry::REGULAR;
+}
+
+
+ConstantPoolEntry::Access ConstantPoolBuilder::AddEntry(
+    ConstantPoolEntry& entry, ConstantPoolEntry::Type type) {
+  DCHECK(!emitted_label_.is_bound());
+  PerTypeEntryInfo& info = info_[type];
+  const int entry_size = ConstantPoolEntry::size(type);
+  bool merged = false;
+
+  if (entry.sharing_ok()) {
+    // Try to merge with an existing shared entry of the same value.
+    std::vector<ConstantPoolEntry>::iterator it = info.shared_entries.begin();
+    int end = static_cast<int>(info.shared_entries.size());
+    for (int i = 0; i < end; i++, it++) {
+      if ((entry_size == kPointerSize) ? entry.value() == it->value()
+                                       : entry.value64() == it->value64()) {
+        // Merge with found entry.
+        entry.set_merged_index(i);
+        merged = true;
+        break;
+      }
+    }
+  }
+
+  // By definition, merged entries have regular access.
+  DCHECK(!merged || entry.merged_index() < info.regular_count);
+  ConstantPoolEntry::Access access =
+      (merged ? ConstantPoolEntry::REGULAR : NextAccess(type));
+
+  // Enforce an upper bound on search time by limiting the search to
+  // unique sharable entries which fit in the regular section.
+  if (entry.sharing_ok() && !merged && access == ConstantPoolEntry::REGULAR) {
+    info.shared_entries.push_back(entry);
+  } else {
+    info.entries.push_back(entry);
+  }
+
+  // We're done if we found a match or have already triggered the
+  // overflow state.
+  if (merged || info.overflow()) return access;
+
+  if (access == ConstantPoolEntry::REGULAR) {
+    info.regular_count++;
+  } else {
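+    // Record where the overflow section begins; this entry is the first
+    // one that no longer fits the regular reach.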
+    info.overflow_start = static_cast<int>(info.entries.size()) - 1;
+  }
+
+  return access;
+}
+
+
+void ConstantPoolBuilder::EmitSharedEntries(Assembler* assm,
+                                            ConstantPoolEntry::Type type) {
+  PerTypeEntryInfo& info = info_[type];
+  std::vector<ConstantPoolEntry>& shared_entries = info.shared_entries;
+  const int entry_size = ConstantPoolEntry::size(type);
+  int base = emitted_label_.pos();
+  DCHECK(base > 0);
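+  // Shared entries head the regular section of their type; merged entries
+  // reuse the offsets recorded here.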
+  int shared_end = static_cast<int>(shared_entries.size());
+  std::vector<ConstantPoolEntry>::iterator shared_it = shared_entries.begin();
+  for (int i = 0; i < shared_end; i++, shared_it++) {
+    int offset = assm->pc_offset() - base;
+    shared_it->set_offset(offset);  // Save offset for merged entries.
+    if (entry_size == kPointerSize) {
+      assm->dp(shared_it->value());
+    } else {
+      assm->dq(shared_it->value64());
+    }
+    DCHECK(is_uintn(offset, info.regular_reach_bits));
+
+    // Patch load sequence with correct offset.
+    assm->PatchConstantPoolAccessInstruction(shared_it->position(), offset,
+                                             ConstantPoolEntry::REGULAR, type);
+  }
+}
+
+
+void ConstantPoolBuilder::EmitGroup(Assembler* assm,
+                                    ConstantPoolEntry::Access access,
+                                    ConstantPoolEntry::Type type) {
+  PerTypeEntryInfo& info = info_[type];
+  const bool overflow = info.overflow();
+  std::vector<ConstantPoolEntry>& entries = info.entries;
+  std::vector<ConstantPoolEntry>& shared_entries = info.shared_entries;
+  const int entry_size = ConstantPoolEntry::size(type);
+  int base = emitted_label_.pos();
+  DCHECK(base > 0);
+  int begin;
+  int end;
+
+  if (access == ConstantPoolEntry::REGULAR) {
+    // Emit any shared entries first.
+    EmitSharedEntries(assm, type);
+    begin = 0;
+    end = overflow ? info.overflow_start : static_cast<int>(entries.size());
+  } else {
+    DCHECK(access == ConstantPoolEntry::OVERFLOWED);
+    if (!overflow) return;
+    begin = info.overflow_start;
+    end = static_cast<int>(entries.size());
+  }
| +
|
| + std::vector<ConstantPoolEntry>::iterator it = entries.begin();
|
| + if (begin > 0) std::advance(it, begin);
|
| + for (int i = begin; i < end; i++, it++) {
|
| + // Update constant pool if necessary and get the entry's offset.
|
| + int offset;
|
| + ConstantPoolEntry::Access entry_access;
|
| + if (!it->is_merged()) {
|
| + // Emit new entry
|
| + offset = assm->pc_offset() - base;
|
| + entry_access = access;
|
| + if (entry_size == kPointerSize) {
|
| + assm->dp(it->value());
|
| + } else {
|
| + assm->dq(it->value64());
|
| + }
|
| + } else {
|
| + // Retrieve offset from shared entry.
|
| + offset = shared_entries[it->merged_index()].offset();
|
| + entry_access = ConstantPoolEntry::REGULAR;
|
| + }
|
| +
|
| + DCHECK(entry_access == ConstantPoolEntry::OVERFLOWED ||
|
| + is_uintn(offset, info.regular_reach_bits));
|
| +
|
| + // Patch load sequence with correct offset.
|
| + assm->PatchConstantPoolAccessInstruction(it->position(), offset,
|
| + entry_access, type);
|
| + }
|
| +}
|
+
+
+// Emit and return position of pool. Zero implies no constant pool.
+int ConstantPoolBuilder::Emit(Assembler* assm) {
+  bool emitted = emitted_label_.is_bound();
+  bool empty = IsEmpty();
+
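+  // Emitting is idempotent: once the label is bound, later calls only
+  // report the pool's position.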
+  if (!emitted) {
+    // Mark start of constant pool. Align if necessary.
+    if (!empty) assm->Align(kDoubleSize);
+    assm->bind(&emitted_label_);
+    if (!empty) {
+      // Emit in groups based on access and type.
+      // Emit doubles first for alignment purposes.
+      EmitGroup(assm, ConstantPoolEntry::REGULAR, ConstantPoolEntry::DOUBLE);
+      EmitGroup(assm, ConstantPoolEntry::REGULAR, ConstantPoolEntry::INTPTR);
+      if (info_[ConstantPoolEntry::DOUBLE].overflow()) {
+        assm->Align(kDoubleSize);
+        EmitGroup(assm, ConstantPoolEntry::OVERFLOWED,
+                  ConstantPoolEntry::DOUBLE);
+      }
+      if (info_[ConstantPoolEntry::INTPTR].overflow()) {
+        EmitGroup(assm, ConstantPoolEntry::OVERFLOWED,
+                  ConstantPoolEntry::INTPTR);
+      }
+    }
+  }
+
+  return !empty ? emitted_label_.pos() : 0;
+}
+
+
 // Platform specific but identical code for all the platforms.
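
For reviewers: a minimal sketch of how a platform port might drive the new
builder. This is illustrative only; the ConstantPoolEntry constructor
arguments and the surrounding load-emission code are assumptions, since only
AddEntry, Emit, and PatchConstantPoolAccessInstruction are defined by this
patch.

  // At each constant load site, record the entry and emit a load whose
  // offset field is patched once the pool layout is final.
  ConstantPoolEntry entry(assm->pc_offset(), value, true /* sharing_ok */);
  ConstantPoolEntry::Access access =
      builder->AddEntry(entry, ConstantPoolEntry::INTPTR);
  // access (REGULAR vs. OVERFLOWED) selects the short or extended load
  // sequence to emit here.

  // At the end of code generation, emit the pool once; this walks every
  // recorded entry and calls PatchConstantPoolAccessInstruction for it.
  int pool_position = builder->Emit(assm);  // 0 implies no constant pool.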
|