Chromium Code Reviews

Side by Side Diff: src/arm64/assembler-arm64.cc

Issue 338523005: ARM64: updated literal pool implementation. (Closed) Base URL: https://v8.googlecode.com/svn/branches/bleeding_edge
Patch Set: Addressed review comments Created 6 years, 6 months ago
1 // Copyright 2013 the V8 project authors. All rights reserved. 1 // Copyright 2013 the V8 project authors. All rights reserved.
2 // 2 //
3 // Redistribution and use in source and binary forms, with or without 3 // Redistribution and use in source and binary forms, with or without
4 // modification, are permitted provided that the following conditions are 4 // modification, are permitted provided that the following conditions are
5 // met: 5 // met:
6 // 6 //
7 // * Redistributions of source code must retain the above copyright 7 // * Redistributions of source code must retain the above copyright
8 // notice, this list of conditions and the following disclaimer. 8 // notice, this list of conditions and the following disclaimer.
9 // * Redistributions in binary form must reproduce the above 9 // * Redistributions in binary form must reproduce the above
10 // copyright notice, this list of conditions and the following 10 // copyright notice, this list of conditions and the following
(...skipping 278 matching lines...)
289 RelocInfo::Mode rmode = immediate_.rmode(); 289 RelocInfo::Mode rmode = immediate_.rmode();
290 290
291 if (rmode == RelocInfo::EXTERNAL_REFERENCE) { 291 if (rmode == RelocInfo::EXTERNAL_REFERENCE) {
292 return assembler->serializer_enabled(); 292 return assembler->serializer_enabled();
293 } 293 }
294 294
295 return !RelocInfo::IsNone(rmode); 295 return !RelocInfo::IsNone(rmode);
296 } 296 }
297 297
298 298
299 // Constant Pool.
300 void ConstPool::RecordEntry(intptr_t data,
301 RelocInfo::Mode mode) {
302 ASSERT(mode != RelocInfo::COMMENT &&
303 mode != RelocInfo::POSITION &&
304 mode != RelocInfo::STATEMENT_POSITION &&
305 mode != RelocInfo::CONST_POOL &&
306 mode != RelocInfo::VENEER_POOL &&
307 mode != RelocInfo::CODE_AGE_SEQUENCE);
308
309 uint64_t raw_data = static_cast<uint64_t>(data);
310 int offset = assm_->pc_offset();
311 if (IsEmpty()) {
312 first_use_ = offset;
313 }
314
315 std::pair<uint64_t, int> entry = std::make_pair(raw_data, offset);
316 if (CanBeShared(mode)) {
317 shared_entries_.insert(entry);
318 if (shared_entries_.count(entry.first) == 1) {
319 shared_entries_count++;
320 }
321 } else {
322 unique_entries_.push_back(entry);
323 }
324
325 if (EntryCount() > Assembler::kApproxMaxPoolEntryCount) {
326 // Request constant pool emission after the next instruction.
327 assm_->SetNextConstPoolCheckIn(1);
328 }
329 }
330
331
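The shared/unique split above leans on std::multimap to count how many distinct constants will end up needing a pool slot. A self-contained sketch of that bookkeeping, outside the assembler (all names below are illustrative, not part of the patch):

  #include <cstdint>
  #include <map>
  #include <utility>
  #include <vector>

  // Shareable constants (no relocation attached, or relocation that does not
  // matter for the serializer) are keyed by value, so repeated loads of the
  // same 64-bit constant reuse one pool slot; everything else gets its own.
  static std::multimap<uint64_t, int> shared_entries;            // value -> use offset
  static std::vector<std::pair<uint64_t, int> > unique_entries;
  static int shared_entries_count = 0;

  void RecordEntrySketch(uint64_t value, int pc_offset, bool can_be_shared) {
    if (can_be_shared) {
      shared_entries.insert(std::make_pair(value, pc_offset));
      // count() == 1 right after the insert means this value is new, i.e. it
      // adds one more 64-bit slot to the pool.
      if (shared_entries.count(value) == 1) shared_entries_count++;
    } else {
      unique_entries.push_back(std::make_pair(value, pc_offset));
    }
  }

  int EntryCountSketch() {
    return shared_entries_count + static_cast<int>(unique_entries.size());
  }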
332 int ConstPool::DistanceToFirstUse() {
333 ASSERT(first_use_ >= 0);
334 return assm_->pc_offset() - first_use_;
335 }
336
337
338 int ConstPool::MaxPcOffset() {
339 // There are no pending entries in the pool so we can never get out of
340 // range.
341 if (IsEmpty()) return kMaxInt;
342
343 // Entries are not necessarily emitted in the order they are added so in the
344 // worst case the first constant pool use will be accessing the last entry.
345 return first_use_ + kMaxLoadLiteralRange - WorstCaseSize();
346 }
347
348
349 int ConstPool::WorstCaseSize() {
350 if (IsEmpty()) return 0;
351
352 // Max size prologue:
353 // b over
354 // ldr xzr, #pool_size
355 // blr xzr
356 // nop
357 // All entries are 64-bit for now.
358 return 4 * kInstructionSize + EntryCount() * kPointerSize;
359 }
360
361
362 int ConstPool::SizeIfEmittedAtCurrentPc(bool require_jump) {
363 if (IsEmpty()) return 0;
364
365 // Prologue is:
366 // b over ;; if require_jump
367 // ldr xzr, #pool_size
368 // blr xzr
369 // nop ;; if not 64-bit aligned
370 int prologue_size = require_jump ? kInstructionSize : 0;
371 prologue_size += 2 * kInstructionSize;
372 prologue_size += IsAligned(assm_->pc_offset() + prologue_size, 8) ?
373 0 : kInstructionSize;
374
375 // All entries are 64-bit for now.
376 return prologue_size + EntryCount() * kPointerSize;
377 }
378
379
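To make the two size functions concrete, a worked example (the entry count is invented; kInstructionSize is 4 bytes and kPointerSize is 8 on ARM64):

  // Hypothetical pool with 4 pending 64-bit entries.
  //
  // WorstCaseSize():  b + ldr + blr + nop, then the data:
  //   4 * 4 + 4 * 8 = 48 bytes
  //
  // SizeIfEmittedAtCurrentPc(require_jump = true), with the data already
  // landing on an 8-byte boundary, drops the trailing nop:
  //   (1 + 2) * 4 + 4 * 8 = 44 bytes
  //
  // The 4-byte difference is exactly the optional alignment nop, which is why
  // WorstCaseSize() is a safe upper bound when checking buffer space below.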
380 void ConstPool::Emit(bool require_jump) {
381 ASSERT(!assm_->is_const_pool_blocked());
382 // Prevent recursive pool emission and protect from veneer pools.
383 Assembler::BlockPoolsScope block_pools(assm_);
384
385 int size = SizeIfEmittedAtCurrentPc(require_jump);
386 Label size_check;
387 assm_->bind(&size_check);
388
389 assm_->RecordConstPool(size);
390 // Emit the constant pool. It is preceded by an optional branch if
391 // require_jump and a header which will:
392 // 1) Encode the size of the constant pool, for use by the disassembler.
393 // 2) Terminate the program, to try to prevent execution from accidentally
394 // flowing into the constant pool.
 395 // 3) Align the pool entries to 64 bits.
396 // The header is therefore made of up to three arm64 instructions:
397 // ldr xzr, #<size of the constant pool in 32-bit words>
398 // blr xzr
399 // nop
400 //
401 // If executed, the header will likely segfault and lr will point to the
402 // instruction following the offending blr.
403 // TODO(all): Make the alignment part less fragile. Currently code is
404 // allocated as a byte array so there are no guarantees the alignment will
405 // be preserved on compaction. Currently it works as allocation seems to be
406 // 64-bit aligned.
407
408 // Emit branch if required
409 Label after_pool;
410 if (require_jump) {
411 assm_->b(&after_pool);
412 }
413
414 // Emit the header.
415 assm_->RecordComment("[ Constant Pool");
416 EmitMarker();
417 EmitGuard();
418 assm_->Align(8);
419
420 // Emit constant pool entries.
421 // TODO(all): currently each relocated constant is 64 bits, consider adding
422 // support for 32-bit entries.
423 EmitEntries();
424 assm_->RecordComment("]");
425
426 if (after_pool.is_linked()) {
427 assm_->bind(&after_pool);
428 }
429
430 ASSERT(assm_->SizeOfCodeGeneratedSince(&size_check) ==
431 static_cast<unsigned>(size));
432 }
433
434
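Putting Emit() together, the region it produces for a hypothetical 3-entry pool whose data needs no padding looks roughly like this (reconstructed from the comments above, not actual disassembler output):

  //     b     after_pool        ; only when require_jump
  //     ldr   xzr, #7           ; marker: pool size in 32-bit words
  //                             ;   3 entries * 2 words + 1 guard word = 7
  //     blr   xzr               ; guard: would fault if ever executed
  //     .quad <constant 0>      ; 64-bit slots targeted by the literal loads
  //     .quad <constant 1>
  //     .quad <constant 2>
  //   after_pool: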
435 void ConstPool::Clear() {
436 shared_entries_.clear();
437 shared_entries_count = 0;
438 unique_entries_.clear();
439 first_use_ = -1;
440 }
441
442
443 bool ConstPool::CanBeShared(RelocInfo::Mode mode) {
444 // Constant pool currently does not support 32-bit entries.
445 ASSERT(mode != RelocInfo::NONE32);
446
447 return RelocInfo::IsNone(mode) ||
448 (!assm_->serializer_enabled() && (mode >= RelocInfo::CELL));
449 }
450
451
452 void ConstPool::EmitMarker() {
 453 // A constant pool size is expressed in a number of 32-bit words.
454 // Currently all entries are 64-bit.
455 // + 1 is for the crash guard.
456 // + 0/1 for alignment.
 457 int word_count = EntryCount() * 2 + 1 +
 458 (IsAligned(assm_->pc_offset(), 8) ? 0 : 1);
459 assm_->Emit(LDR_x_lit |
460 Assembler::ImmLLiteral(word_count) |
461 Assembler::Rt(xzr));
462 }
463
464
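As a worked check of the marker value (entry count assumed for illustration): with 3 pending entries and the marker emitted at a pc that is only 4-byte aligned, the pool needs one padding nop, and the helper further down that returns instr->ImmLLiteral() for a pool marker reads this count back when skipping the pool.

  // 3 entries, marker emitted at a pc offset that is 4 mod 8:
  //   data words     : 3 * 2 = 6
  //   crash guard    : + 1
  //   alignment nop  : + 1
  //   word_count     : 8   -> encoded in the ldr's ImmLLiteral field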
465 void ConstPool::EmitGuard() {
466 #ifdef DEBUG
467 Instruction* instr = reinterpret_cast<Instruction*>(assm_->pc());
468 ASSERT(instr->preceding()->IsLdrLiteralX() &&
469 instr->preceding()->Rt() == xzr.code());
470 #endif
471 assm_->EmitPoolGuard();
472 }
473
474
475 void ConstPool::EmitEntries() {
476 ASSERT(IsAligned(assm_->pc_offset(), 8));
477
478 typedef std::multimap<uint64_t, int>::const_iterator SharedEntriesIterator;
479 SharedEntriesIterator value_it;
480 // Iterate through the keys (constant pool values).
481 for (value_it = shared_entries_.begin();
482 value_it != shared_entries_.end();
483 value_it = shared_entries_.upper_bound(value_it->first)) {
484 std::pair<SharedEntriesIterator, SharedEntriesIterator> range;
485 uint64_t data = value_it->first;
486 range = shared_entries_.equal_range(data);
487 SharedEntriesIterator offset_it;
488 // Iterate through the offsets of a given key.
489 for (offset_it = range.first; offset_it != range.second; offset_it++) {
490 Instruction* instr = assm_->InstructionAt(offset_it->second);
491
492 // Instruction to patch must be 'ldr rd, [pc, #offset]' with offset == 0.
493 ASSERT(instr->IsLdrLiteral() && instr->ImmLLiteral() == 0);
494 instr->SetImmPCOffsetTarget(assm_->pc());
495 }
496 assm_->dc64(data);
497 }
498 shared_entries_.clear();
499 shared_entries_count = 0;
500
501 // Emit unique entries.
502 std::vector<std::pair<uint64_t, int> >::const_iterator unique_it;
503 for (unique_it = unique_entries_.begin();
504 unique_it != unique_entries_.end();
505 unique_it++) {
506 Instruction* instr = assm_->InstructionAt(unique_it->second);
507
508 // Instruction to patch must be 'ldr rd, [pc, #offset]' with offset == 0.
509 ASSERT(instr->IsLdrLiteral() && instr->ImmLLiteral() == 0);
510 instr->SetImmPCOffsetTarget(assm_->pc());
511 assm_->dc64(unique_it->first);
512 }
513 unique_entries_.clear();
514 first_use_ = -1;
515 }
516
517
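The outer loop of EmitEntries() visits each distinct multimap key exactly once by jumping with upper_bound(), then patches every use of that key against a single data word. A standalone sketch of that iteration idiom (the values and offsets are made up):

  #include <cstdint>
  #include <cstdio>
  #include <map>
  #include <utility>

  int main() {
    std::multimap<uint64_t, int> entries;
    entries.insert(std::make_pair(static_cast<uint64_t>(42), 8));   // value 42 used at offset 8...
    entries.insert(std::make_pair(static_cast<uint64_t>(42), 24));  // ...and at offset 24
    entries.insert(std::make_pair(static_cast<uint64_t>(7), 16));

    typedef std::multimap<uint64_t, int>::const_iterator It;
    // Outer loop: one iteration per distinct value (one pool slot each).
    for (It it = entries.begin(); it != entries.end();
         it = entries.upper_bound(it->first)) {
      std::pair<It, It> range = entries.equal_range(it->first);
      std::printf("value %llu patched at offsets:",
                  static_cast<unsigned long long>(it->first));
      // Inner loop: one patch per recorded use of that value.
      for (It use = range.first; use != range.second; ++use) {
        std::printf(" %d", use->second);
      }
      std::printf("\n");
    }
    return 0;
  }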
299 // Assembler 518 // Assembler
300
301 Assembler::Assembler(Isolate* isolate, void* buffer, int buffer_size) 519 Assembler::Assembler(Isolate* isolate, void* buffer, int buffer_size)
302 : AssemblerBase(isolate, buffer, buffer_size), 520 : AssemblerBase(isolate, buffer, buffer_size),
521 constpool_(this),
303 recorded_ast_id_(TypeFeedbackId::None()), 522 recorded_ast_id_(TypeFeedbackId::None()),
304 unresolved_branches_(), 523 unresolved_branches_(),
305 positions_recorder_(this) { 524 positions_recorder_(this) {
306 const_pool_blocked_nesting_ = 0; 525 const_pool_blocked_nesting_ = 0;
307 veneer_pool_blocked_nesting_ = 0; 526 veneer_pool_blocked_nesting_ = 0;
308 Reset(); 527 Reset();
309 } 528 }
310 529
311 530
312 Assembler::~Assembler() { 531 Assembler::~Assembler() {
313 ASSERT(num_pending_reloc_info_ == 0); 532 ASSERT(constpool_.IsEmpty());
314 ASSERT(const_pool_blocked_nesting_ == 0); 533 ASSERT(const_pool_blocked_nesting_ == 0);
315 ASSERT(veneer_pool_blocked_nesting_ == 0); 534 ASSERT(veneer_pool_blocked_nesting_ == 0);
316 } 535 }
317 536
318 537
319 void Assembler::Reset() { 538 void Assembler::Reset() {
320 #ifdef DEBUG 539 #ifdef DEBUG
321 ASSERT((pc_ >= buffer_) && (pc_ < buffer_ + buffer_size_)); 540 ASSERT((pc_ >= buffer_) && (pc_ < buffer_ + buffer_size_));
322 ASSERT(const_pool_blocked_nesting_ == 0); 541 ASSERT(const_pool_blocked_nesting_ == 0);
323 ASSERT(veneer_pool_blocked_nesting_ == 0); 542 ASSERT(veneer_pool_blocked_nesting_ == 0);
324 ASSERT(unresolved_branches_.empty()); 543 ASSERT(unresolved_branches_.empty());
325 memset(buffer_, 0, pc_ - buffer_); 544 memset(buffer_, 0, pc_ - buffer_);
326 #endif 545 #endif
327 pc_ = buffer_; 546 pc_ = buffer_;
328 reloc_info_writer.Reposition(reinterpret_cast<byte*>(buffer_ + buffer_size_), 547 reloc_info_writer.Reposition(reinterpret_cast<byte*>(buffer_ + buffer_size_),
329 reinterpret_cast<byte*>(pc_)); 548 reinterpret_cast<byte*>(pc_));
330 num_pending_reloc_info_ = 0; 549 constpool_.Clear();
331 next_constant_pool_check_ = 0; 550 next_constant_pool_check_ = 0;
332 next_veneer_pool_check_ = kMaxInt; 551 next_veneer_pool_check_ = kMaxInt;
333 no_const_pool_before_ = 0; 552 no_const_pool_before_ = 0;
334 first_const_pool_use_ = -1;
335 ClearRecordedAstId(); 553 ClearRecordedAstId();
336 } 554 }
337 555
338 556
339 void Assembler::GetCode(CodeDesc* desc) { 557 void Assembler::GetCode(CodeDesc* desc) {
340 // Emit constant pool if necessary. 558 // Emit constant pool if necessary.
341 CheckConstPool(true, false); 559 CheckConstPool(true, false);
342 ASSERT(num_pending_reloc_info_ == 0); 560 ASSERT(constpool_.IsEmpty());
343 561
344 // Set up code descriptor. 562 // Set up code descriptor.
345 if (desc) { 563 if (desc) {
346 desc->buffer = reinterpret_cast<byte*>(buffer_); 564 desc->buffer = reinterpret_cast<byte*>(buffer_);
347 desc->buffer_size = buffer_size_; 565 desc->buffer_size = buffer_size_;
348 desc->instr_size = pc_offset(); 566 desc->instr_size = pc_offset();
349 desc->reloc_size = (reinterpret_cast<byte*>(buffer_) + buffer_size_) - 567 desc->reloc_size = (reinterpret_cast<byte*>(buffer_) + buffer_size_) -
350 reloc_info_writer.pos(); 568 reloc_info_writer.pos();
351 desc->origin = this; 569 desc->origin = this;
352 } 570 }
(...skipping 262 matching lines...)
615 // Prevent constant pool checks happening by setting the next check to 833 // Prevent constant pool checks happening by setting the next check to
616 // the biggest possible offset. 834 // the biggest possible offset.
617 next_constant_pool_check_ = kMaxInt; 835 next_constant_pool_check_ = kMaxInt;
618 } 836 }
619 } 837 }
620 838
621 839
622 void Assembler::EndBlockConstPool() { 840 void Assembler::EndBlockConstPool() {
623 if (--const_pool_blocked_nesting_ == 0) { 841 if (--const_pool_blocked_nesting_ == 0) {
624 // Check the constant pool hasn't been blocked for too long. 842 // Check the constant pool hasn't been blocked for too long.
625 ASSERT((num_pending_reloc_info_ == 0) || 843 ASSERT(pc_offset() < constpool_.MaxPcOffset());
626 (pc_offset() < (first_const_pool_use_ + kMaxDistToConstPool)));
627 // Two cases: 844 // Two cases:
628 // * no_const_pool_before_ >= next_constant_pool_check_ and the emission is 845 // * no_const_pool_before_ >= next_constant_pool_check_ and the emission is
629 // still blocked 846 // still blocked
630 // * no_const_pool_before_ < next_constant_pool_check_ and the next emit 847 // * no_const_pool_before_ < next_constant_pool_check_ and the next emit
631 // will trigger a check. 848 // will trigger a check.
632 next_constant_pool_check_ = no_const_pool_before_; 849 next_constant_pool_check_ = no_const_pool_before_;
633 } 850 }
634 } 851 }
635 852
636 853
(...skipping 38 matching lines...)
675 } 892 }
676 #endif 893 #endif
677 if (IsConstantPoolAt(instr)) { 894 if (IsConstantPoolAt(instr)) {
678 return instr->ImmLLiteral(); 895 return instr->ImmLLiteral();
679 } else { 896 } else {
680 return -1; 897 return -1;
681 } 898 }
682 } 899 }
683 900
684 901
685 void Assembler::ConstantPoolMarker(uint32_t size) {
686 ASSERT(is_const_pool_blocked());
687 // + 1 is for the crash guard.
688 Emit(LDR_x_lit | ImmLLiteral(size + 1) | Rt(xzr));
689 }
690
691
692 void Assembler::EmitPoolGuard() { 902 void Assembler::EmitPoolGuard() {
693 // We must generate only one instruction as this is used in scopes that 903 // We must generate only one instruction as this is used in scopes that
694 // control the size of the code generated. 904 // control the size of the code generated.
695 Emit(BLR | Rn(xzr)); 905 Emit(BLR | Rn(xzr));
696 } 906 }
697 907
698 908
699 void Assembler::ConstantPoolGuard() {
700 #ifdef DEBUG
701 // Currently this is only used after a constant pool marker.
702 ASSERT(is_const_pool_blocked());
703 Instruction* instr = reinterpret_cast<Instruction*>(pc_);
704 ASSERT(instr->preceding()->IsLdrLiteralX() &&
705 instr->preceding()->Rt() == xzr.code());
706 #endif
707 EmitPoolGuard();
708 }
709
710
711 void Assembler::StartBlockVeneerPool() { 909 void Assembler::StartBlockVeneerPool() {
712 ++veneer_pool_blocked_nesting_; 910 ++veneer_pool_blocked_nesting_;
713 } 911 }
714 912
715 913
716 void Assembler::EndBlockVeneerPool() { 914 void Assembler::EndBlockVeneerPool() {
717 if (--veneer_pool_blocked_nesting_ == 0) { 915 if (--veneer_pool_blocked_nesting_ == 0) {
718 // Check the veneer pool hasn't been blocked for too long. 916 // Check the veneer pool hasn't been blocked for too long.
719 ASSERT(unresolved_branches_.empty() || 917 ASSERT(unresolved_branches_.empty() ||
720 (pc_offset() < unresolved_branches_first_limit())); 918 (pc_offset() < unresolved_branches_first_limit()));
(...skipping 1738 matching lines...)
2459 buffer_ = desc.buffer; 2657 buffer_ = desc.buffer;
2460 buffer_size_ = desc.buffer_size; 2658 buffer_size_ = desc.buffer_size;
2461 pc_ = reinterpret_cast<byte*>(pc_) + pc_delta; 2659 pc_ = reinterpret_cast<byte*>(pc_) + pc_delta;
2462 reloc_info_writer.Reposition(reloc_info_writer.pos() + rc_delta, 2660 reloc_info_writer.Reposition(reloc_info_writer.pos() + rc_delta,
2463 reloc_info_writer.last_pc() + pc_delta); 2661 reloc_info_writer.last_pc() + pc_delta);
2464 2662
2465 // None of our relocation types are pc relative pointing outside the code 2663 // None of our relocation types are pc relative pointing outside the code
2466 // buffer nor pc absolute pointing inside the code buffer, so there is no need 2664 // buffer nor pc absolute pointing inside the code buffer, so there is no need
2467 // to relocate any emitted relocation entries. 2665 // to relocate any emitted relocation entries.
2468 2666
2469 // Relocate pending relocation entries. 2667 // Pending relocation entries are also relative, no need to relocate.
2470 for (int i = 0; i < num_pending_reloc_info_; i++) {
2471 RelocInfo& rinfo = pending_reloc_info_[i];
2472 ASSERT(rinfo.rmode() != RelocInfo::COMMENT &&
2473 rinfo.rmode() != RelocInfo::POSITION);
2474 if (rinfo.rmode() != RelocInfo::JS_RETURN) {
2475 rinfo.set_pc(rinfo.pc() + pc_delta);
2476 }
2477 }
2478 } 2668 }
2479 2669
2480 2670
2481 void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) { 2671 void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
2482 // We do not try to reuse pool constants. 2672 // We do not try to reuse pool constants.
2483 RelocInfo rinfo(reinterpret_cast<byte*>(pc_), rmode, data, NULL); 2673 RelocInfo rinfo(reinterpret_cast<byte*>(pc_), rmode, data, NULL);
2484 if (((rmode >= RelocInfo::JS_RETURN) && 2674 if (((rmode >= RelocInfo::JS_RETURN) &&
2485 (rmode <= RelocInfo::DEBUG_BREAK_SLOT)) || 2675 (rmode <= RelocInfo::DEBUG_BREAK_SLOT)) ||
2486 (rmode == RelocInfo::CONST_POOL) || 2676 (rmode == RelocInfo::CONST_POOL) ||
2487 (rmode == RelocInfo::VENEER_POOL)) { 2677 (rmode == RelocInfo::VENEER_POOL)) {
2488 // Adjust code for new modes. 2678 // Adjust code for new modes.
2489 ASSERT(RelocInfo::IsDebugBreakSlot(rmode) 2679 ASSERT(RelocInfo::IsDebugBreakSlot(rmode)
2490 || RelocInfo::IsJSReturn(rmode) 2680 || RelocInfo::IsJSReturn(rmode)
2491 || RelocInfo::IsComment(rmode) 2681 || RelocInfo::IsComment(rmode)
2492 || RelocInfo::IsPosition(rmode) 2682 || RelocInfo::IsPosition(rmode)
2493 || RelocInfo::IsConstPool(rmode) 2683 || RelocInfo::IsConstPool(rmode)
2494 || RelocInfo::IsVeneerPool(rmode)); 2684 || RelocInfo::IsVeneerPool(rmode));
2495 // These modes do not need an entry in the constant pool. 2685 // These modes do not need an entry in the constant pool.
2496 } else { 2686 } else {
2497 ASSERT(num_pending_reloc_info_ < kMaxNumPendingRelocInfo); 2687 constpool_.RecordEntry(data, rmode);
2498 if (num_pending_reloc_info_ == 0) {
2499 first_const_pool_use_ = pc_offset();
2500 }
2501 pending_reloc_info_[num_pending_reloc_info_++] = rinfo;
2502 // Make sure the constant pool is not emitted in place of the next 2688 // Make sure the constant pool is not emitted in place of the next
2503 // instruction for which we just recorded relocation info. 2689 // instruction for which we just recorded relocation info.
2504 BlockConstPoolFor(1); 2690 BlockConstPoolFor(1);
2505 } 2691 }
2506 2692
2507 if (!RelocInfo::IsNone(rmode)) { 2693 if (!RelocInfo::IsNone(rmode)) {
2508 // Don't record external references unless the heap will be serialized. 2694 // Don't record external references unless the heap will be serialized.
2509 if (rmode == RelocInfo::EXTERNAL_REFERENCE && 2695 if (rmode == RelocInfo::EXTERNAL_REFERENCE &&
2510 !serializer_enabled() && !emit_debug_code()) { 2696 !serializer_enabled() && !emit_debug_code()) {
2511 return; 2697 return;
2512 } 2698 }
2513 ASSERT(buffer_space() >= kMaxRelocSize); // too late to grow buffer here 2699 ASSERT(buffer_space() >= kMaxRelocSize); // too late to grow buffer here
2514 if (rmode == RelocInfo::CODE_TARGET_WITH_ID) { 2700 if (rmode == RelocInfo::CODE_TARGET_WITH_ID) {
2515 RelocInfo reloc_info_with_ast_id( 2701 RelocInfo reloc_info_with_ast_id(
2516 reinterpret_cast<byte*>(pc_), rmode, RecordedAstId().ToInt(), NULL); 2702 reinterpret_cast<byte*>(pc_), rmode, RecordedAstId().ToInt(), NULL);
2517 ClearRecordedAstId(); 2703 ClearRecordedAstId();
2518 reloc_info_writer.Write(&reloc_info_with_ast_id); 2704 reloc_info_writer.Write(&reloc_info_with_ast_id);
2519 } else { 2705 } else {
2520 reloc_info_writer.Write(&rinfo); 2706 reloc_info_writer.Write(&rinfo);
2521 } 2707 }
2522 } 2708 }
2523 } 2709 }
2524 2710
2525 2711
2526 void Assembler::BlockConstPoolFor(int instructions) { 2712 void Assembler::BlockConstPoolFor(int instructions) {
2527 int pc_limit = pc_offset() + instructions * kInstructionSize; 2713 int pc_limit = pc_offset() + instructions * kInstructionSize;
2528 if (no_const_pool_before_ < pc_limit) { 2714 if (no_const_pool_before_ < pc_limit) {
2529 // If there are some pending entries, the constant pool cannot be blocked
2530 // further than first_const_pool_use_ + kMaxDistToConstPool
2531 ASSERT((num_pending_reloc_info_ == 0) ||
2532 (pc_limit < (first_const_pool_use_ + kMaxDistToConstPool)));
2533 no_const_pool_before_ = pc_limit; 2715 no_const_pool_before_ = pc_limit;
2716 // Make sure the pool won't be blocked for too long.
2717 ASSERT(pc_limit < constpool_.MaxPcOffset());
2534 } 2718 }
2535 2719
2536 if (next_constant_pool_check_ < no_const_pool_before_) { 2720 if (next_constant_pool_check_ < no_const_pool_before_) {
2537 next_constant_pool_check_ = no_const_pool_before_; 2721 next_constant_pool_check_ = no_const_pool_before_;
2538 } 2722 }
2539 } 2723 }
2540 2724
2541 2725
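Both BlockConstPoolFor() and the RAII scope used earlier in this file exist to keep fixed-size sequences contiguous. A minimal usage sketch, assuming a valid Assembler* named assm (the emitted payload below is only a placeholder):

  {
    // While the scope is open, neither a constant pool nor a veneer pool can
    // be emitted, so the words below stay back to back.
    Assembler::BlockPoolsScope scope(assm);
    assm->RecordComment("[ fixed-size sequence");
    assm->EmitPoolGuard();                     // placeholder single instruction
    assm->dc64(0x0123456789abcdefULL);         // placeholder data word
    assm->RecordComment("]");
  }
  // Leaving the scope re-enables both pools (see EndBlockConstPool() above),
  // so a pending pool can be emitted at the next check point.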
2542 void Assembler::CheckConstPool(bool force_emit, bool require_jump) { 2726 void Assembler::CheckConstPool(bool force_emit, bool require_jump) {
2543 // Some short sequence of instruction mustn't be broken up by constant pool 2727 // Some short sequence of instruction mustn't be broken up by constant pool
2544 // emission, such sequences are protected by calls to BlockConstPoolFor and 2728 // emission, such sequences are protected by calls to BlockConstPoolFor and
2545 // BlockConstPoolScope. 2729 // BlockConstPoolScope.
2546 if (is_const_pool_blocked()) { 2730 if (is_const_pool_blocked()) {
2547 // Something is wrong if emission is forced and blocked at the same time. 2731 // Something is wrong if emission is forced and blocked at the same time.
2548 ASSERT(!force_emit); 2732 ASSERT(!force_emit);
2549 return; 2733 return;
2550 } 2734 }
2551 2735
2552 // There is nothing to do if there are no pending constant pool entries. 2736 // There is nothing to do if there are no pending constant pool entries.
2553 if (num_pending_reloc_info_ == 0) { 2737 if (constpool_.IsEmpty()) {
2554 // Calculate the offset of the next check. 2738 // Calculate the offset of the next check.
2555 next_constant_pool_check_ = pc_offset() + kCheckConstPoolInterval; 2739 SetNextConstPoolCheckIn(kCheckConstPoolInterval);
2556 return; 2740 return;
2557 } 2741 }
2558 2742
2559 // We emit a constant pool when: 2743 // We emit a constant pool when:
2560 // * requested to do so by parameter force_emit (e.g. after each function). 2744 // * requested to do so by parameter force_emit (e.g. after each function).
2561 // * the distance to the first instruction accessing the constant pool is 2745 // * the distance to the first instruction accessing the constant pool is
2562 // kAvgDistToConstPool or more. 2746 // kApproxMaxDistToConstPool or more.
2563 // * no jump is required and the distance to the first instruction accessing 2747 // * the number of entries in the pool is kApproxMaxPoolEntryCount or more.
2564 // the constant pool is at least kMaxDistToPConstool / 2. 2748 int dist = constpool_.DistanceToFirstUse();
2565 ASSERT(first_const_pool_use_ >= 0); 2749 int count = constpool_.EntryCount();
2566 int dist = pc_offset() - first_const_pool_use_; 2750 if (!force_emit &&
2567 if (!force_emit && dist < kAvgDistToConstPool && 2751 (dist < kApproxMaxDistToConstPool) &&
2568 (require_jump || (dist < (kMaxDistToConstPool / 2)))) { 2752 (count < kApproxMaxPoolEntryCount)) {
2569 return; 2753 return;
2570 } 2754 }
2571 2755
2572 int jump_instr = require_jump ? kInstructionSize : 0;
2573 int size_pool_marker = kInstructionSize;
2574 int size_pool_guard = kInstructionSize;
2575 int pool_size = jump_instr + size_pool_marker + size_pool_guard +
2576 num_pending_reloc_info_ * kPointerSize;
2577 int needed_space = pool_size + kGap;
2578 2756
2579 // Emit veneers for branches that would go out of range during emission of the 2757 // Emit veneers for branches that would go out of range during emission of the
2580 // constant pool. 2758 // constant pool.
2581 CheckVeneerPool(false, require_jump, kVeneerDistanceMargin + pool_size); 2759 int worst_case_size = constpool_.WorstCaseSize();
2582 2760 CheckVeneerPool(false, require_jump,
2583 Label size_check; 2761 kVeneerDistanceMargin + worst_case_size);
2584 bind(&size_check);
2585 2762
2586 // Check that the code buffer is large enough before emitting the constant 2763 // Check that the code buffer is large enough before emitting the constant
2587 // pool (include the jump over the pool, the constant pool marker, the 2764 // pool (this includes the gap to the relocation information).
2588 // constant pool guard, and the gap to the relocation information). 2765 int needed_space = worst_case_size + kGap + 1 * kInstructionSize;
2589 while (buffer_space() <= needed_space) { 2766 while (buffer_space() <= needed_space) {
2590 GrowBuffer(); 2767 GrowBuffer();
2591 } 2768 }
2592 2769
2593 { 2770 Label size_check;
2594 // Block recursive calls to CheckConstPool and protect from veneer pools. 2771 bind(&size_check);
2595 BlockPoolsScope block_pools(this); 2772 constpool_.Emit(require_jump);
2596 RecordConstPool(pool_size); 2773 ASSERT(SizeOfCodeGeneratedSince(&size_check) <=
2597 2774 static_cast<unsigned>(worst_case_size));
2598 // Emit jump over constant pool if necessary.
2599 Label after_pool;
2600 if (require_jump) {
2601 b(&after_pool);
2602 }
2603
2604 // Emit a constant pool header. The header has two goals:
2605 // 1) Encode the size of the constant pool, for use by the disassembler.
2606 // 2) Terminate the program, to try to prevent execution from accidentally
2607 // flowing into the constant pool.
2608 // The header is therefore made of two arm64 instructions:
2609 // ldr xzr, #<size of the constant pool in 32-bit words>
2610 // blr xzr
2611 // If executed the code will likely segfault and lr will point to the
2612 // beginning of the constant pool.
2613 // TODO(all): currently each relocated constant is 64 bits, consider adding
2614 // support for 32-bit entries.
2615 RecordComment("[ Constant Pool");
2616 ConstantPoolMarker(2 * num_pending_reloc_info_);
2617 ConstantPoolGuard();
2618
2619 // Emit constant pool entries.
2620 for (int i = 0; i < num_pending_reloc_info_; i++) {
2621 RelocInfo& rinfo = pending_reloc_info_[i];
2622 ASSERT(rinfo.rmode() != RelocInfo::COMMENT &&
2623 rinfo.rmode() != RelocInfo::POSITION &&
2624 rinfo.rmode() != RelocInfo::STATEMENT_POSITION &&
2625 rinfo.rmode() != RelocInfo::CONST_POOL &&
2626 rinfo.rmode() != RelocInfo::VENEER_POOL);
2627
2628 Instruction* instr = reinterpret_cast<Instruction*>(rinfo.pc());
2629 // Instruction to patch must be 'ldr rd, [pc, #offset]' with offset == 0.
2630 ASSERT(instr->IsLdrLiteral() &&
2631 instr->ImmLLiteral() == 0);
2632
2633 instr->SetImmPCOffsetTarget(reinterpret_cast<Instruction*>(pc_));
2634 dc64(rinfo.data());
2635 }
2636
2637 num_pending_reloc_info_ = 0;
2638 first_const_pool_use_ = -1;
2639
2640 RecordComment("]");
2641
2642 if (after_pool.is_linked()) {
2643 bind(&after_pool);
2644 }
2645 }
2646 2775
2647 // Since a constant pool was just emitted, move the check offset forward by 2776 // Since a constant pool was just emitted, move the check offset forward by
2648 // the standard interval. 2777 // the standard interval.
2649 next_constant_pool_check_ = pc_offset() + kCheckConstPoolInterval; 2778 SetNextConstPoolCheckIn(kCheckConstPoolInterval);
2650
2651 ASSERT(SizeOfCodeGeneratedSince(&size_check) ==
2652 static_cast<unsigned>(pool_size));
2653 } 2779 }
2654 2780
2655 2781
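The one call site that must always flush the pool is code finalization: GetCode() above invokes CheckConstPool(true, false) before filling in the descriptor. A typical end-of-generation sequence therefore looks like this (desc is caller-owned):

  CodeDesc desc;
  assm->GetCode(&desc);
  // After this returns, constpool_.IsEmpty() holds: every pending literal
  // load has been patched against its pool slot, and the marker/guard are
  // already in the instruction stream.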
2656 bool Assembler::ShouldEmitVeneer(int max_reachable_pc, int margin) { 2782 bool Assembler::ShouldEmitVeneer(int max_reachable_pc, int margin) {
2657 // Account for the branch around the veneers and the guard. 2783 // Account for the branch around the veneers and the guard.
2658 int protection_offset = 2 * kInstructionSize; 2784 int protection_offset = 2 * kInstructionSize;
2659 return pc_offset() > max_reachable_pc - margin - protection_offset - 2785 return pc_offset() > max_reachable_pc - margin - protection_offset -
2660 static_cast<int>(unresolved_branches_.size() * kMaxVeneerCodeSize); 2786 static_cast<int>(unresolved_branches_.size() * kMaxVeneerCodeSize);
2661 } 2787 }
2662 2788
(...skipping 220 matching lines...)
2883 adr(rd, 0); 3009 adr(rd, 0);
2884 MovInt64(scratch, target_offset); 3010 MovInt64(scratch, target_offset);
2885 add(rd, rd, scratch); 3011 add(rd, rd, scratch);
2886 } 3012 }
2887 } 3013 }
2888 3014
2889 3015
2890 } } // namespace v8::internal 3016 } } // namespace v8::internal
2891 3017
2892 #endif // V8_TARGET_ARCH_ARM64 3018 #endif // V8_TARGET_ARCH_ARM64