Chromium Code Reviews

| OLD | NEW |
|---|---|
| 1 // Copyright 2013 the V8 project authors. All rights reserved. | 1 // Copyright 2013 the V8 project authors. All rights reserved. |
| 2 // | 2 // |
| 3 // Redistribution and use in source and binary forms, with or without | 3 // Redistribution and use in source and binary forms, with or without |
| 4 // modification, are permitted provided that the following conditions are | 4 // modification, are permitted provided that the following conditions are |
| 5 // met: | 5 // met: |
| 6 // | 6 // |
| 7 // * Redistributions of source code must retain the above copyright | 7 // * Redistributions of source code must retain the above copyright |
| 8 // notice, this list of conditions and the following disclaimer. | 8 // notice, this list of conditions and the following disclaimer. |
| 9 // * Redistributions in binary form must reproduce the above | 9 // * Redistributions in binary form must reproduce the above |
| 10 // copyright notice, this list of conditions and the following | 10 // copyright notice, this list of conditions and the following |
| (...skipping 278 matching lines...) | |
| 289 RelocInfo::Mode rmode = immediate_.rmode(); | 289 RelocInfo::Mode rmode = immediate_.rmode(); |
| 290 | 290 |
| 291 if (rmode == RelocInfo::EXTERNAL_REFERENCE) { | 291 if (rmode == RelocInfo::EXTERNAL_REFERENCE) { |
| 292 return assembler->serializer_enabled(); | 292 return assembler->serializer_enabled(); |
| 293 } | 293 } |
| 294 | 294 |
| 295 return !RelocInfo::IsNone(rmode); | 295 return !RelocInfo::IsNone(rmode); |
| 296 } | 296 } |
| 297 | 297 |
| 298 | 298 |
| 299 // Constant Pool. | |
| 300 void ConstPool::RecordEntry(intptr_t data, | |
| 301 RelocInfo::Mode mode) { | |
| 302 ASSERT(mode != RelocInfo::COMMENT && | |
| 303 mode != RelocInfo::POSITION && | |
| 304 mode != RelocInfo::STATEMENT_POSITION && | |
| 305 mode != RelocInfo::CONST_POOL && | |
| 306 mode != RelocInfo::VENEER_POOL && | |
| 307 mode != RelocInfo::CODE_AGE_SEQUENCE); | |
| 308 | |
| 309 uint64_t raw_data = static_cast<uint64_t>(data); | |
| 310 int offset = assm_->pc_offset(); | |
| 311 if (IsEmpty()) { | |
| 312 first_use_ = offset; | |
| 313 } | |
| 314 | |
| 315 std::pair<uint64_t, int> entry = std::make_pair(raw_data, offset); | |
| 316 if (CanBeShared(mode)) { | |
| 317 shared_entries_.insert(entry); | |
| 318 if (shared_entries_.count(entry.first) == 1) { | |
| 319 shared_entries_count++; | |
| 320 } | |
| 321 } else { | |
| 322 unique_entries_.push_back(entry); | |
| 323 } | |
| 324 } | |
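A note on the bookkeeping above: `shared_entries_` is a `std::multimap<uint64_t, int>`, so duplicate constants collapse onto one key with several use sites, and `count(key) == 1` detects the first occurrence, the one that actually consumes a pool slot. A minimal standalone sketch of that logic, using plain integers for pc offsets (this models the class in the review, it is not the V8 code):

```cpp
#include <cstdint>
#include <map>
#include <utility>
#include <vector>

// Simplified model of the pool's two containers.
struct PoolModel {
  std::multimap<uint64_t, int> shared_entries_;
  std::vector<std::pair<uint64_t, int> > unique_entries_;
  int shared_entries_count = 0;
  int first_use_ = -1;

  void RecordEntry(uint64_t raw_data, int pc_offset, bool can_be_shared) {
    if (first_use_ < 0) first_use_ = pc_offset;
    if (can_be_shared) {
      shared_entries_.insert(std::make_pair(raw_data, pc_offset));
      // Only the first use of a given constant adds a pool slot; later
      // uses will be patched to load from the same 64-bit word.
      if (shared_entries_.count(raw_data) == 1) shared_entries_count++;
    } else {
      // Relocated constants get their own slot even if the bits match.
      unique_entries_.push_back(std::make_pair(raw_data, pc_offset));
    }
  }

  int EntryCount() const {
    return shared_entries_count + static_cast<int>(unique_entries_.size());
  }
};
```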
| 325 | |
| 326 | |
| 327 int ConstPool::DistanceToFirstUse() { | |
| 328 ASSERT(first_use_ >= 0); | |
| 329 return assm_->pc_offset() - first_use_; | |
| 330 } | |
| 331 | |
| 332 | |
| 333 int ConstPool::MaxPcOffset() { | |
| 334 // There are no pending entries in the pool so we can never get out of | |
| 335 // range. | |
| 336 if (IsEmpty()) return kMaxInt; | |
| 337 | |
| 338 // Entries can be reshuffled, so in the worst case the instruction that first | |
| 339 // uses the constant pool will access the pool's last entry. | |
|
rmcilroy
2014/06/17 13:09:56
nit - this comment was a bit unclear to me first (
Rodolph Perfetta (ARM)
2014/06/18 16:53:22
Done.
| |
| 340 return first_use_ + kMaxLoadLiteralRange - WorstCaseSize(); | |
| 341 } | |
| 342 | |
| 343 | |
| 344 int ConstPool::WorstCaseSize() { | |
| 345 if (IsEmpty()) return 0; | |
| 346 | |
| 347 // Max size prologue: | |
| 348 // b over | |
| 349 // ldr xzr, #pool_size | |
| 350 // blr xzr | |
| 351 // nop | |
| 352 // All entries are 64-bit for now. | |
| 353 return 4 * kInstructionSize + EntryCount() * kPointerSize; | |
| 354 } | |
| 355 | |
| 356 | |
| 357 int ConstPool::SizeIfEmittedAtCurrentPc(bool require_jump) { | |
| 358 if (IsEmpty()) return 0; | |
| 359 | |
| 360 // Prologue is: | |
| 361 // b over ;; if require_jump | |
| 362 // ldr xzr, #pool_size | |
| 363 // blr xzr | |
| 364 // nop ;; if not 64-bit aligned | |
| 365 int prologue_size = require_jump ? kInstructionSize : 0; | |
| 366 prologue_size += 2 * kInstructionSize; | |
| 367 prologue_size += IsAligned(assm_->pc_offset() + prologue_size, 8) ? | |
| 368 0 : kInstructionSize; | |
| 369 | |
| 370 // All entries are 64-bit for now. | |
| 371 return prologue_size + EntryCount() * kPointerSize; | |
| 372 } | |
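The three size helpers above are pure arithmetic and can be checked in isolation. A sketch under assumed arm64 constants (`kInstructionSize == 4`, `kPointerSize == 8`; the diff uses these names but does not show their values):

```cpp
#include <cassert>

const int kInstructionSize = 4;  // assumed: arm64 instructions are 4 bytes
const int kPointerSize = 8;      // assumed: one 64-bit word per pool entry

// Worst case: branch + marker + guard + alignment nop, then the entries.
int WorstCaseSize(int entry_count) {
  if (entry_count == 0) return 0;
  return 4 * kInstructionSize + entry_count * kPointerSize;
}

// Exact size if emitted now: the branch is only needed if require_jump, and
// the nop only if the entries would otherwise start misaligned.
int SizeIfEmittedAt(int pc_offset, int entry_count, bool require_jump) {
  if (entry_count == 0) return 0;
  int prologue = (require_jump ? kInstructionSize : 0) + 2 * kInstructionSize;
  if ((pc_offset + prologue) % 8 != 0) prologue += kInstructionSize;
  return prologue + entry_count * kPointerSize;
}

int main() {
  // Three entries, jump required: header ends at 20 + 12 = 32, which is
  // 8-byte aligned, so the total is 3 * 4 + 3 * 8 = 36 bytes.
  assert(SizeIfEmittedAt(20, 3, true) == 36);
  // At pc offset 16 the header would end at 28, so a nop pads it to 32.
  assert(SizeIfEmittedAt(16, 3, true) == 40);
  assert(WorstCaseSize(3) == 4 * kInstructionSize + 3 * kPointerSize);
  return 0;
}
```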
| 373 | |
| 374 | |
| 375 void ConstPool::Emit(bool require_jump) { | |
| 376 ASSERT(assm_->is_const_pool_blocked()); | |
|
rmcilroy
2014/06/17 13:09:57
This is a bit counter intuitive - maybe have the B
Rodolph Perfetta (ARM)
2014/06/18 16:53:23
Done.
| |
| 377 | |
| 378 int size = SizeIfEmittedAtCurrentPc(require_jump); | |
| 379 Label size_check; | |
| 380 assm_->bind(&size_check); | |
| 381 | |
| 382 assm_->RecordConstPool(size); | |
| 383 // Emit the constant pool. It is preceded by an optional branch if | |
| 384 // require_jump and a header which will: | |
| 385 // 1) Encode the size of the constant pool, for use by the disassembler. | |
| 386 // 2) Terminate the program, to try to prevent execution from accidentally | |
| 387 // flowing into the constant pool. | |
| 388 // 3) Align the pool entries to 64 bits. | |
| 389 // The header is therefore made of up to three arm64 instructions: | |
| 390 // ldr xzr, #<size of the constant pool in 32-bit words> | |
| 391 // blr xzr | |
| 392 // nop | |
| 393 // | |
| 394 // If executed, the header will likely segfault and lr will point to the | |
| 395 // instruction following the offending blr. | |
| 396 // TODO(all): Make the alignment part less fragile. Currently code is | |
| 397 // allocated as a byte array so there are no guarantees the alignment will | |
| 398 // be preserved on compaction. Currently it works as allocation seems to be | |
| 399 // 64-bit aligned. | |
| 400 | |
| 401 // Emit branch if required | |
| 402 Label after_pool; | |
| 403 if (require_jump) { | |
| 404 assm_->b(&after_pool); | |
| 405 } | |
| 406 | |
| 407 // Emit the header. | |
| 408 assm_->RecordComment("[ Constant Pool"); | |
| 409 EmitMarker(); | |
| 410 EmitGuard(); | |
| 411 assm_->Align(8); | |
| 412 | |
| 413 // Emit constant pool entries. | |
| 414 // TODO(all): currently each relocated constant is 64 bits, consider adding | |
| 415 // support for 32-bit entries. | |
| 416 EmitEntries(); | |
| 417 assm_->RecordComment("]"); | |
| 418 | |
| 419 if (after_pool.is_linked()) { | |
| 420 assm_->bind(&after_pool); | |
| 421 } | |
| 422 | |
| 423 ASSERT(assm_->SizeOfCodeGeneratedSince(&size_check) == | |
| 424 static_cast<unsigned>(size)); | |
| 425 } | |
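For reference, the layout `Emit` produces, sketched as data rather than encodings (a simplified illustration of the emission order, not generated output):

```cpp
#include <vector>

// Order of material emitted by ConstPool::Emit. Only the shape is shown;
// the real encodings come from EmitMarker, EmitGuard and Align above.
std::vector<const char*> PoolLayout(bool require_jump, bool needs_padding) {
  std::vector<const char*> out;
  if (require_jump) out.push_back("b after_pool");
  out.push_back("ldr xzr, #<pool size in 32-bit words>");  // marker
  out.push_back("blr xzr");  // guard: traps if execution falls through
  if (needs_padding) out.push_back("nop");  // pad entries to 64-bit alignment
  out.push_back("<one 64-bit word per entry>");
  out.push_back("after_pool:");
  return out;
}
```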
| 426 | |
| 427 | |
| 428 void ConstPool::Clear() { | |
| 429 shared_entries_.clear(); | |
| 430 shared_entries_count = 0; | |
| 431 unique_entries_.clear(); | |
| 432 first_use_ = -1; | |
| 433 } | |
| 434 | |
| 435 | |
| 436 bool ConstPool::CanBeShared(RelocInfo::Mode mode) { | |
| 437 // Constant pool currently does not support 32-bit entries. | |
| 438 ASSERT(mode != RelocInfo::NONE32); | |
| 439 | |
| 440 return RelocInfo::IsNone(mode) || | |
| 441 (!assm_->serializer_enabled() && (mode >= RelocInfo::CELL)); | |
| 442 } | |
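Restated outside the class, the sharing rule is: constants with no relocation always share, and relocated constants share only when the serializer is off and the mode sits in the shareable tail of the mode list. A sketch with a mock enum (the real `RelocInfo::Mode` ordering is assumed, not reproduced):

```cpp
// Mock of the relevant RelocInfo::Mode shape: modes at or past CELL are
// treated as shareable by the review's CanBeShared. Values are illustrative.
enum Mode { CODE_TARGET = 0, EMBEDDED_OBJECT = 1, CELL = 2, NONE64 = 3 };

bool IsNone(Mode mode) { return mode == NONE64; }

bool CanBeShared(Mode mode, bool serializer_enabled) {
  // Unrelocated constants are just bits, so identical bits may share a
  // slot. Relocated constants are only shared when the serializer is off,
  // matching the rule in the review.
  return IsNone(mode) || (!serializer_enabled && mode >= CELL);
}
```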
| 443 | |
| 444 | |
| 445 void ConstPool::EmitMarker() { | |
| 446 // A constant pool size is expressed as a number of 32-bit words. | |
| 447 // Currently all entries are 64-bit. | |
| 448 // + 1 is for the crash guard. | |
| 449 // + 0/1 for alignment. | |
| 450 int word_count = EntryCount() * 2 + 1 + | |
| 451 (IsAligned(assm_->pc_offset(), 8) ? 0 : 1); | |
| 452 assm_->Emit(LDR_x_lit | | |
| 453 Assembler::ImmLLiteral(word_count) | | |
| 454 Assembler::Rt(xzr)); | |
| 455 } | |
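One detail worth calling out in `EmitMarker`: the word count mixes `+` with the conditional operator, and `?:` binds lower than `+`, so the alignment term must be parenthesised as a unit. A quick standalone check of the arithmetic:

```cpp
#include <cassert>

int main() {
  int entry_count = 3;   // illustrative pool with three 64-bit entries
  bool aligned = false;  // pc not 8-byte aligned: one padding word needed

  // Intended: 2 words per entry, +1 for the guard, +1 padding word.
  int word_count = entry_count * 2 + 1 + (aligned ? 0 : 1);
  assert(word_count == 8);

  // Without the parentheses the ?: swallows the whole sum:
  // (6 + 1 + 0) is nonzero, so the expression collapses to 0.
  int broken = entry_count * 2 + 1 + aligned ? 0 : 1;
  assert(broken == 0);
  return 0;
}
```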
| 456 | |
| 457 | |
| 458 void ConstPool::EmitGuard() { | |
| 459 #ifdef DEBUG | |
| 460 Instruction* instr = reinterpret_cast<Instruction*>(assm_->pc()); | |
| 461 ASSERT(instr->preceding()->IsLdrLiteralX() && | |
| 462 instr->preceding()->Rt() == xzr.code()); | |
| 463 #endif | |
| 464 assm_->EmitPoolGuard(); | |
| 465 } | |
| 466 | |
| 467 | |
| 468 void ConstPool::EmitEntries() { | |
| 469 ASSERT(IsAligned(assm_->pc_offset(), 8)); | |
| 470 | |
| 471 // Emit shared entries. | |
| 472 while (!shared_entries_.empty()) { | |
| 473 typedef std::multimap<uint64_t, int>::const_iterator shared_entries_it; | |
|
rmcilroy
2014/06/17 13:09:57
Use Type style name for shared_entries_it (e.g., C
Rodolph Perfetta (ARM)
2014/06/18 16:53:22
Done.
| |
| 474 std::pair<shared_entries_it, shared_entries_it> range; | |
| 475 uint64_t data = shared_entries_.begin()->first; | |
| 476 range = shared_entries_.equal_range(data); | |
| 477 shared_entries_it shared_it; | |
| 478 for (shared_it = range.first; shared_it != range.second; shared_it++) { | |
| 479 Instruction* instr = assm_->InstructionAt(shared_it->second); | |
| 480 | |
| 481 // Instruction to patch must be 'ldr rd, [pc, #offset]' with offset == 0. | |
| 482 ASSERT(instr->IsLdrLiteral() && instr->ImmLLiteral() == 0); | |
| 483 instr->SetImmPCOffsetTarget(assm_->pc()); | |
| 484 } | |
| 485 assm_->dc64(data); | |
| 486 shared_entries_.erase(data); | |
|
rmcilroy
2014/06/17 13:09:57
nit - could you leave the entry here and just do s
Rodolph Perfetta (ARM)
2014/06/18 16:53:23
Loop updated to iterate through literals and then
| |
| 487 } | |
| 488 shared_entries_count = 0; | |
| 489 | |
| 490 // Emit unique entries. | |
| 491 std::vector<std::pair<uint64_t, int> >::const_iterator unique_it; | |
| 492 for (unique_it = unique_entries_.begin(); | |
| 493 unique_it != unique_entries_.end(); | |
| 494 unique_it++) { | |
| 495 Instruction* instr = assm_->InstructionAt(unique_it->second); | |
| 496 | |
| 497 // Instruction to patch must be 'ldr rd, [pc, #offset]' with offset == 0. | |
| 498 ASSERT(instr->IsLdrLiteral() && instr->ImmLLiteral() == 0); | |
| 499 instr->SetImmPCOffsetTarget(assm_->pc()); | |
| 500 assm_->dc64(unique_it->first); | |
| 501 } | |
| 502 unique_entries_.clear(); | |
| 503 first_use_ = -1; | |
| 504 } | |
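The shared-entry loop above drains the multimap one constant at a time: `equal_range` yields every use site of the front key, each site is patched to point at the data word about to be emitted, and the key is erased. A standalone model of that traversal with plain offsets (not the Assembler API):

```cpp
#include <cstdint>
#include <cstdio>
#include <map>
#include <utility>

typedef std::multimap<uint64_t, int> SharedEntryMap;

// Drain the map one distinct constant at a time, visiting all of its use
// sites via equal_range before emitting the 64-bit value once.
void EmitShared(SharedEntryMap* shared, int pool_offset) {
  while (!shared->empty()) {
    uint64_t data = shared->begin()->first;
    std::pair<SharedEntryMap::const_iterator, SharedEntryMap::const_iterator>
        range = shared->equal_range(data);
    for (SharedEntryMap::const_iterator it = range.first;
         it != range.second; ++it) {
      // The real loop rewrites an ldr-literal's pc-relative offset; here we
      // just report which load now targets the slot being emitted.
      std::printf("patch ldr at pc %d -> pool +%d\n", it->second, pool_offset);
    }
    pool_offset += 8;  // one 64-bit slot per distinct constant
    shared->erase(data);
  }
}
```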
| 505 | |
| 506 | |
| 299 // Assembler | 507 // Assembler |
| 300 | |
| 301 Assembler::Assembler(Isolate* isolate, void* buffer, int buffer_size) | 508 Assembler::Assembler(Isolate* isolate, void* buffer, int buffer_size) |
| 302 : AssemblerBase(isolate, buffer, buffer_size), | 509 : AssemblerBase(isolate, buffer, buffer_size), |
| 510 constpool_(this), | |
| 303 recorded_ast_id_(TypeFeedbackId::None()), | 511 recorded_ast_id_(TypeFeedbackId::None()), |
| 304 unresolved_branches_(), | 512 unresolved_branches_(), |
| 305 positions_recorder_(this) { | 513 positions_recorder_(this) { |
| 306 const_pool_blocked_nesting_ = 0; | 514 const_pool_blocked_nesting_ = 0; |
| 307 veneer_pool_blocked_nesting_ = 0; | 515 veneer_pool_blocked_nesting_ = 0; |
| 308 Reset(); | 516 Reset(); |
| 309 } | 517 } |
| 310 | 518 |
| 311 | 519 |
| 312 Assembler::~Assembler() { | 520 Assembler::~Assembler() { |
| 313 ASSERT(num_pending_reloc_info_ == 0); | 521 ASSERT(constpool_.IsEmpty()); |
| 314 ASSERT(const_pool_blocked_nesting_ == 0); | 522 ASSERT(const_pool_blocked_nesting_ == 0); |
| 315 ASSERT(veneer_pool_blocked_nesting_ == 0); | 523 ASSERT(veneer_pool_blocked_nesting_ == 0); |
| 316 } | 524 } |
| 317 | 525 |
| 318 | 526 |
| 319 void Assembler::Reset() { | 527 void Assembler::Reset() { |
| 320 #ifdef DEBUG | 528 #ifdef DEBUG |
| 321 ASSERT((pc_ >= buffer_) && (pc_ < buffer_ + buffer_size_)); | 529 ASSERT((pc_ >= buffer_) && (pc_ < buffer_ + buffer_size_)); |
| 322 ASSERT(const_pool_blocked_nesting_ == 0); | 530 ASSERT(const_pool_blocked_nesting_ == 0); |
| 323 ASSERT(veneer_pool_blocked_nesting_ == 0); | 531 ASSERT(veneer_pool_blocked_nesting_ == 0); |
| 324 ASSERT(unresolved_branches_.empty()); | 532 ASSERT(unresolved_branches_.empty()); |
| 325 memset(buffer_, 0, pc_ - buffer_); | 533 memset(buffer_, 0, pc_ - buffer_); |
| 326 #endif | 534 #endif |
| 327 pc_ = buffer_; | 535 pc_ = buffer_; |
| 328 reloc_info_writer.Reposition(reinterpret_cast<byte*>(buffer_ + buffer_size_), | 536 reloc_info_writer.Reposition(reinterpret_cast<byte*>(buffer_ + buffer_size_), |
| 329 reinterpret_cast<byte*>(pc_)); | 537 reinterpret_cast<byte*>(pc_)); |
| 330 num_pending_reloc_info_ = 0; | 538 constpool_.Clear(); |
| 331 next_constant_pool_check_ = 0; | 539 next_constant_pool_check_ = 0; |
| 332 next_veneer_pool_check_ = kMaxInt; | 540 next_veneer_pool_check_ = kMaxInt; |
| 333 no_const_pool_before_ = 0; | 541 no_const_pool_before_ = 0; |
| 334 first_const_pool_use_ = -1; | |
| 335 ClearRecordedAstId(); | 542 ClearRecordedAstId(); |
| 336 } | 543 } |
| 337 | 544 |
| 338 | 545 |
| 339 void Assembler::GetCode(CodeDesc* desc) { | 546 void Assembler::GetCode(CodeDesc* desc) { |
| 340 // Emit constant pool if necessary. | 547 // Emit constant pool if necessary. |
| 341 CheckConstPool(true, false); | 548 CheckConstPool(true, false); |
| 342 ASSERT(num_pending_reloc_info_ == 0); | 549 ASSERT(constpool_.IsEmpty()); |
| 343 | 550 |
| 344 // Set up code descriptor. | 551 // Set up code descriptor. |
| 345 if (desc) { | 552 if (desc) { |
| 346 desc->buffer = reinterpret_cast<byte*>(buffer_); | 553 desc->buffer = reinterpret_cast<byte*>(buffer_); |
| 347 desc->buffer_size = buffer_size_; | 554 desc->buffer_size = buffer_size_; |
| 348 desc->instr_size = pc_offset(); | 555 desc->instr_size = pc_offset(); |
| 349 desc->reloc_size = (reinterpret_cast<byte*>(buffer_) + buffer_size_) - | 556 desc->reloc_size = (reinterpret_cast<byte*>(buffer_) + buffer_size_) - |
| 350 reloc_info_writer.pos(); | 557 reloc_info_writer.pos(); |
| 351 desc->origin = this; | 558 desc->origin = this; |
| 352 } | 559 } |
| (...skipping 262 matching lines...) | |
| 615 // Prevent constant pool checks happening by setting the next check to | 822 // Prevent constant pool checks happening by setting the next check to |
| 616 // the biggest possible offset. | 823 // the biggest possible offset. |
| 617 next_constant_pool_check_ = kMaxInt; | 824 next_constant_pool_check_ = kMaxInt; |
| 618 } | 825 } |
| 619 } | 826 } |
| 620 | 827 |
| 621 | 828 |
| 622 void Assembler::EndBlockConstPool() { | 829 void Assembler::EndBlockConstPool() { |
| 623 if (--const_pool_blocked_nesting_ == 0) { | 830 if (--const_pool_blocked_nesting_ == 0) { |
| 624 // Check the constant pool hasn't been blocked for too long. | 831 // Check the constant pool hasn't been blocked for too long. |
| 625 ASSERT((num_pending_reloc_info_ == 0) || | 832 ASSERT(pc_offset() < constpool_.MaxPcOffset()); |
| 626 (pc_offset() < (first_const_pool_use_ + kMaxDistToConstPool))); | |
| 627 // Two cases: | 833 // Two cases: |
| 628 // * no_const_pool_before_ >= next_constant_pool_check_ and the emission is | 834 // * no_const_pool_before_ >= next_constant_pool_check_ and the emission is |
| 629 // still blocked | 835 // still blocked |
| 630 // * no_const_pool_before_ < next_constant_pool_check_ and the next emit | 836 // * no_const_pool_before_ < next_constant_pool_check_ and the next emit |
| 631 // will trigger a check. | 837 // will trigger a check. |
| 632 next_constant_pool_check_ = no_const_pool_before_; | 838 next_constant_pool_check_ = no_const_pool_before_; |
| 633 } | 839 } |
| 634 } | 840 } |
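StartBlockConstPool and EndBlockConstPool form a plain nesting counter; in practice it is driven by an RAII scope so early returns cannot leave the pool blocked. A minimal sketch of the pattern (BlockConstPoolScope itself lives in the header and is not shown in this diff):

```cpp
// Nesting-counter model: pool checks resume only when the outermost
// blocking scope closes, mirroring EndBlockConstPool above.
class PoolBlocker {
 public:
  void StartBlock() { ++nesting_; }
  void EndBlock() {
    if (--nesting_ == 0) next_check_ = no_pool_before_;
  }
  bool blocked() const { return nesting_ > 0; }

 private:
  int nesting_ = 0;
  int no_pool_before_ = 0;
  int next_check_ = 0;
};

// RAII wrapper in the spirit of BlockConstPoolScope: the destructor always
// runs, so the Start/End calls stay balanced on every control-flow path.
class BlockScope {
 public:
  explicit BlockScope(PoolBlocker* blocker) : blocker_(blocker) {
    blocker_->StartBlock();
  }
  ~BlockScope() { blocker_->EndBlock(); }

 private:
  PoolBlocker* blocker_;
};
```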
| 635 | 841 |
| 636 | 842 |
| (...skipping 38 matching lines...) | |
| 675 } | 881 } |
| 676 #endif | 882 #endif |
| 677 if (IsConstantPoolAt(instr)) { | 883 if (IsConstantPoolAt(instr)) { |
| 678 return instr->ImmLLiteral(); | 884 return instr->ImmLLiteral(); |
| 679 } else { | 885 } else { |
| 680 return -1; | 886 return -1; |
| 681 } | 887 } |
| 682 } | 888 } |
| 683 | 889 |
| 684 | 890 |
| 685 void Assembler::ConstantPoolMarker(uint32_t size) { | |
| 686 ASSERT(is_const_pool_blocked()); | |
| 687 // + 1 is for the crash guard. | |
| 688 Emit(LDR_x_lit | ImmLLiteral(size + 1) | Rt(xzr)); | |
| 689 } | |
| 690 | |
| 691 | |
| 692 void Assembler::EmitPoolGuard() { | 891 void Assembler::EmitPoolGuard() { |
| 693 // We must generate only one instruction as this is used in scopes that | 892 // We must generate only one instruction as this is used in scopes that |
| 694 // control the size of the code generated. | 893 // control the size of the code generated. |
| 695 Emit(BLR | Rn(xzr)); | 894 Emit(BLR | Rn(xzr)); |
| 696 } | 895 } |
| 697 | 896 |
| 698 | 897 |
| 699 void Assembler::ConstantPoolGuard() { | |
| 700 #ifdef DEBUG | |
| 701 // Currently this is only used after a constant pool marker. | |
| 702 ASSERT(is_const_pool_blocked()); | |
| 703 Instruction* instr = reinterpret_cast<Instruction*>(pc_); | |
| 704 ASSERT(instr->preceding()->IsLdrLiteralX() && | |
| 705 instr->preceding()->Rt() == xzr.code()); | |
| 706 #endif | |
| 707 EmitPoolGuard(); | |
| 708 } | |
| 709 | |
| 710 | |
| 711 void Assembler::StartBlockVeneerPool() { | 898 void Assembler::StartBlockVeneerPool() { |
| 712 ++veneer_pool_blocked_nesting_; | 899 ++veneer_pool_blocked_nesting_; |
| 713 } | 900 } |
| 714 | 901 |
| 715 | 902 |
| 716 void Assembler::EndBlockVeneerPool() { | 903 void Assembler::EndBlockVeneerPool() { |
| 717 if (--veneer_pool_blocked_nesting_ == 0) { | 904 if (--veneer_pool_blocked_nesting_ == 0) { |
| 718 // Check the veneer pool hasn't been blocked for too long. | 905 // Check the veneer pool hasn't been blocked for too long. |
| 719 ASSERT(unresolved_branches_.empty() || | 906 ASSERT(unresolved_branches_.empty() || |
| 720 (pc_offset() < unresolved_branches_first_limit())); | 907 (pc_offset() < unresolved_branches_first_limit())); |
| (...skipping 1738 matching lines...) | |
| 2459 buffer_ = desc.buffer; | 2646 buffer_ = desc.buffer; |
| 2460 buffer_size_ = desc.buffer_size; | 2647 buffer_size_ = desc.buffer_size; |
| 2461 pc_ = reinterpret_cast<byte*>(pc_) + pc_delta; | 2648 pc_ = reinterpret_cast<byte*>(pc_) + pc_delta; |
| 2462 reloc_info_writer.Reposition(reloc_info_writer.pos() + rc_delta, | 2649 reloc_info_writer.Reposition(reloc_info_writer.pos() + rc_delta, |
| 2463 reloc_info_writer.last_pc() + pc_delta); | 2650 reloc_info_writer.last_pc() + pc_delta); |
| 2464 | 2651 |
| 2465 // None of our relocation types are pc relative pointing outside the code | 2652 // None of our relocation types are pc relative pointing outside the code |
| 2466 // buffer nor pc absolute pointing inside the code buffer, so there is no need | 2653 // buffer nor pc absolute pointing inside the code buffer, so there is no need |
| 2467 // to relocate any emitted relocation entries. | 2654 // to relocate any emitted relocation entries. |
| 2468 | 2655 |
| 2469 // Relocate pending relocation entries. | 2656 // Pending relocation entries are also pc-relative; no need to relocate them. |
| 2470 for (int i = 0; i < num_pending_reloc_info_; i++) { | |
| 2471 RelocInfo& rinfo = pending_reloc_info_[i]; | |
| 2472 ASSERT(rinfo.rmode() != RelocInfo::COMMENT && | |
| 2473 rinfo.rmode() != RelocInfo::POSITION); | |
| 2474 if (rinfo.rmode() != RelocInfo::JS_RETURN) { | |
| 2475 rinfo.set_pc(rinfo.pc() + pc_delta); | |
| 2476 } | |
| 2477 } | |
| 2478 } | 2657 } |
| 2479 | 2658 |
| 2480 | 2659 |
| 2481 void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) { | 2660 void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) { |
| 2482 // We do not try to reuse pool constants. | 2661 // We do not try to reuse pool constants. |
| 2483 RelocInfo rinfo(reinterpret_cast<byte*>(pc_), rmode, data, NULL); | 2662 RelocInfo rinfo(reinterpret_cast<byte*>(pc_), rmode, data, NULL); |
| 2484 if (((rmode >= RelocInfo::JS_RETURN) && | 2663 if (((rmode >= RelocInfo::JS_RETURN) && |
| 2485 (rmode <= RelocInfo::DEBUG_BREAK_SLOT)) || | 2664 (rmode <= RelocInfo::DEBUG_BREAK_SLOT)) || |
| 2486 (rmode == RelocInfo::CONST_POOL) || | 2665 (rmode == RelocInfo::CONST_POOL) || |
| 2487 (rmode == RelocInfo::VENEER_POOL)) { | 2666 (rmode == RelocInfo::VENEER_POOL)) { |
| 2488 // Adjust code for new modes. | 2667 // Adjust code for new modes. |
| 2489 ASSERT(RelocInfo::IsDebugBreakSlot(rmode) | 2668 ASSERT(RelocInfo::IsDebugBreakSlot(rmode) |
| 2490 || RelocInfo::IsJSReturn(rmode) | 2669 || RelocInfo::IsJSReturn(rmode) |
| 2491 || RelocInfo::IsComment(rmode) | 2670 || RelocInfo::IsComment(rmode) |
| 2492 || RelocInfo::IsPosition(rmode) | 2671 || RelocInfo::IsPosition(rmode) |
| 2493 || RelocInfo::IsConstPool(rmode) | 2672 || RelocInfo::IsConstPool(rmode) |
| 2494 || RelocInfo::IsVeneerPool(rmode)); | 2673 || RelocInfo::IsVeneerPool(rmode)); |
| 2495 // These modes do not need an entry in the constant pool. | 2674 // These modes do not need an entry in the constant pool. |
| 2496 } else { | 2675 } else { |
| 2497 ASSERT(num_pending_reloc_info_ < kMaxNumPendingRelocInfo); | 2676 if (constpool_.EntryCount() > kApproximatePoolEntryCount) { |
|
rmcilroy
2014/06/17 13:09:57
Could we just do both this check and the check for
Rodolph Perfetta (ARM)
2014/06/18 16:53:23
moving the this code in RecordEntry is a good idea
| |
| 2498 if (num_pending_reloc_info_ == 0) { | 2677 // It is time to emit the constant pool after this instruction. |
| 2499 first_const_pool_use_ = pc_offset(); | 2678 next_constant_pool_check_ = pc_offset() + kInstructionSize; |
| 2500 } | 2679 } |
| 2501 pending_reloc_info_[num_pending_reloc_info_++] = rinfo; | 2680 |
| 2681 constpool_.RecordEntry(data, rmode); | |
| 2502 // Make sure the constant pool is not emitted in place of the next | 2682 // Make sure the constant pool is not emitted in place of the next |
| 2503 // instruction for which we just recorded relocation info. | 2683 // instruction for which we just recorded relocation info. |
| 2504 BlockConstPoolFor(1); | 2684 BlockConstPoolFor(1); |
| 2505 } | 2685 } |
| 2506 | 2686 |
| 2507 if (!RelocInfo::IsNone(rmode)) { | 2687 if (!RelocInfo::IsNone(rmode)) { |
| 2508 // Don't record external references unless the heap will be serialized. | 2688 // Don't record external references unless the heap will be serialized. |
| 2509 if (rmode == RelocInfo::EXTERNAL_REFERENCE && | 2689 if (rmode == RelocInfo::EXTERNAL_REFERENCE && |
| 2510 !serializer_enabled() && !emit_debug_code()) { | 2690 !serializer_enabled() && !emit_debug_code()) { |
| 2511 return; | 2691 return; |
| 2512 } | 2692 } |
| 2513 ASSERT(buffer_space() >= kMaxRelocSize); // too late to grow buffer here | 2693 ASSERT(buffer_space() >= kMaxRelocSize); // too late to grow buffer here |
| 2514 if (rmode == RelocInfo::CODE_TARGET_WITH_ID) { | 2694 if (rmode == RelocInfo::CODE_TARGET_WITH_ID) { |
| 2515 RelocInfo reloc_info_with_ast_id( | 2695 RelocInfo reloc_info_with_ast_id( |
| 2516 reinterpret_cast<byte*>(pc_), rmode, RecordedAstId().ToInt(), NULL); | 2696 reinterpret_cast<byte*>(pc_), rmode, RecordedAstId().ToInt(), NULL); |
| 2517 ClearRecordedAstId(); | 2697 ClearRecordedAstId(); |
| 2518 reloc_info_writer.Write(&reloc_info_with_ast_id); | 2698 reloc_info_writer.Write(&reloc_info_with_ast_id); |
| 2519 } else { | 2699 } else { |
| 2520 reloc_info_writer.Write(&rinfo); | 2700 reloc_info_writer.Write(&rinfo); |
| 2521 } | 2701 } |
| 2522 } | 2702 } |
| 2523 } | 2703 } |
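The `BlockConstPoolFor(1)` call in the reloc path above is subtle: the caller is about to emit the very instruction the pool entry will patch, so the pool must not be emitted between the two. A standalone model of that hand-off, with assumed names and an assumed 4-byte instruction width:

```cpp
const int kInstructionSize = 4;  // assumed arm64 instruction width

// Minimal state for the blocking window (names are illustrative).
struct PoolState {
  int pc_offset;
  int no_const_pool_before;
};

// After recording an entry, block the pool until one instruction past the
// current pc so the upcoming ldr-literal cannot be displaced by the pool.
void RecordEntryAndBlock(PoolState* s) {
  int pc_limit = s->pc_offset + 1 * kInstructionSize;  // BlockConstPoolFor(1)
  if (s->no_const_pool_before < pc_limit) s->no_const_pool_before = pc_limit;
}

bool PoolAllowedAt(const PoolState& s) {
  return s.pc_offset >= s.no_const_pool_before;
}
```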
| 2524 | 2704 |
| 2525 | 2705 |
| 2526 void Assembler::BlockConstPoolFor(int instructions) { | 2706 void Assembler::BlockConstPoolFor(int instructions) { |
| 2527 int pc_limit = pc_offset() + instructions * kInstructionSize; | 2707 int pc_limit = pc_offset() + instructions * kInstructionSize; |
| 2528 if (no_const_pool_before_ < pc_limit) { | 2708 if (no_const_pool_before_ < pc_limit) { |
| 2529 // If there are some pending entries, the constant pool cannot be blocked | |
| 2530 // further than first_const_pool_use_ + kMaxDistToConstPool | |
| 2531 ASSERT((num_pending_reloc_info_ == 0) || | |
| 2532 (pc_limit < (first_const_pool_use_ + kMaxDistToConstPool))); | |
| 2533 no_const_pool_before_ = pc_limit; | 2709 no_const_pool_before_ = pc_limit; |
| 2710 // Make sure the pool won't be blocked for too long. | |
| 2711 ASSERT(pc_limit < constpool_.MaxPcOffset()); | |
| 2534 } | 2712 } |
| 2535 | 2713 |
| 2536 if (next_constant_pool_check_ < no_const_pool_before_) { | 2714 if (next_constant_pool_check_ < no_const_pool_before_) { |
| 2537 next_constant_pool_check_ = no_const_pool_before_; | 2715 next_constant_pool_check_ = no_const_pool_before_; |
| 2538 } | 2716 } |
| 2539 } | 2717 } |
| 2540 | 2718 |
| 2541 | 2719 |
| 2542 void Assembler::CheckConstPool(bool force_emit, bool require_jump) { | 2720 void Assembler::CheckConstPool(bool force_emit, bool require_jump) { |
| 2543 // Some short sequences of instructions must not be broken up by constant pool | 2721 // Some short sequences of instructions must not be broken up by constant pool |
| 2544 // emission; such sequences are protected by calls to BlockConstPoolFor and | 2722 // emission; such sequences are protected by calls to BlockConstPoolFor and |
| 2545 // BlockConstPoolScope. | 2723 // BlockConstPoolScope. |
| 2546 if (is_const_pool_blocked()) { | 2724 if (is_const_pool_blocked()) { |
| 2547 // Something is wrong if emission is forced and blocked at the same time. | 2725 // Something is wrong if emission is forced and blocked at the same time. |
| 2548 ASSERT(!force_emit); | 2726 ASSERT(!force_emit); |
| 2549 return; | 2727 return; |
| 2550 } | 2728 } |
| 2551 | 2729 |
| 2552 // There is nothing to do if there are no pending constant pool entries. | 2730 // There is nothing to do if there are no pending constant pool entries. |
| 2553 if (num_pending_reloc_info_ == 0) { | 2731 if (constpool_.IsEmpty()) { |
| 2554 // Calculate the offset of the next check. | 2732 // Calculate the offset of the next check. |
| 2555 next_constant_pool_check_ = pc_offset() + kCheckConstPoolInterval; | 2733 next_constant_pool_check_ = pc_offset() + kCheckConstPoolInterval; |
| 2556 return; | 2734 return; |
| 2557 } | 2735 } |
| 2558 | 2736 |
| 2559 // We emit a constant pool when: | 2737 // We emit a constant pool when: |
| 2560 // * requested to do so by parameter force_emit (e.g. after each function). | 2738 // * requested to do so by parameter force_emit (e.g. after each function). |
| 2561 // * the distance to the first instruction accessing the constant pool is | 2739 // * the distance to the first instruction accessing the constant pool is |
| 2562 // kAvgDistToConstPool or more. | 2740 // kApproximateDistToConstPool or more. |
| 2563 // * no jump is required and the distance to the first instruction accessing | 2741 // * the number of entries in the pool is kApproximatePoolEntryCount or more. |
| 2564 // the constant pool is at least kMaxDistToPConstool / 2. | 2742 int dist = constpool_.DistanceToFirstUse(); |
| 2565 ASSERT(first_const_pool_use_ >= 0); | 2743 int count = constpool_.EntryCount(); |
| 2566 int dist = pc_offset() - first_const_pool_use_; | 2744 if (!force_emit && |
| 2567 if (!force_emit && dist < kAvgDistToConstPool && | 2745 (dist < kApproximateDistToConstPool) && |
| 2568 (require_jump || (dist < (kMaxDistToConstPool / 2)))) { | 2746 (count < kApproximatePoolEntryCount)) { |
| 2569 return; | 2747 return; |
| 2570 } | 2748 } |
| 2571 | 2749 |
| 2572 int jump_instr = require_jump ? kInstructionSize : 0; | |
| 2573 int size_pool_marker = kInstructionSize; | |
| 2574 int size_pool_guard = kInstructionSize; | |
| 2575 int pool_size = jump_instr + size_pool_marker + size_pool_guard + | |
| 2576 num_pending_reloc_info_ * kPointerSize; | |
| 2577 int needed_space = pool_size + kGap; | |
| 2578 | 2750 |
| 2579 // Emit veneers for branches that would go out of range during emission of the | 2751 // Emit veneers for branches that would go out of range during emission of the |
| 2580 // constant pool. | 2752 // constant pool. |
| 2581 CheckVeneerPool(false, require_jump, kVeneerDistanceMargin + pool_size); | 2753 int size = constpool_.WorstCaseSize(); |
|
rmcilroy
2014/06/17 13:09:57
s/size/worst_case_size
Rodolph Perfetta (ARM)
2014/06/18 16:53:22
Done.
| |
| 2754 CheckVeneerPool(false, require_jump, kVeneerDistanceMargin + size); | |
| 2582 | 2755 |
| 2583 Label size_check; | 2756 // Buffer checks happen after an emit hence the 2 * kInstructionSize. |
|
rmcilroy
2014/06/17 13:09:57
I'm not sure what the buffer checks you mention he
Rodolph Perfetta (ARM)
2014/06/18 16:53:23
My comment is clumsy and inaccurate. What I meant
| |
| 2584 bind(&size_check); | 2757 int needed_space = size + kGap + 2 * kInstructionSize; |
| 2585 | |
| 2586 // Check that the code buffer is large enough before emitting the constant | 2758 // Check that the code buffer is large enough before emitting the constant |
| 2587 // pool (include the jump over the pool, the constant pool marker, the | 2759 // pool (include the jump over the pool, the constant pool marker, the |
| 2588 // constant pool guard, and the gap to the relocation information). | 2760 // constant pool guard, and the gap to the relocation information). |
| 2589 while (buffer_space() <= needed_space) { | 2761 while (buffer_space() < needed_space) { |
| 2590 GrowBuffer(); | 2762 GrowBuffer(); |
| 2591 } | 2763 } |
| 2592 | 2764 |
| 2593 { | 2765 { |
| 2594 // Block recursive calls to CheckConstPool and protect from veneer pools. | 2766 // Block recursive calls to CheckConstPool and protect from veneer pools. |
| 2595 BlockPoolsScope block_pools(this); | 2767 BlockPoolsScope block_pools(this); |
| 2596 RecordConstPool(pool_size); | 2768 constpool_.Emit(require_jump); |
| 2597 | |
| 2598 // Emit jump over constant pool if necessary. | |
| 2599 Label after_pool; | |
| 2600 if (require_jump) { | |
| 2601 b(&after_pool); | |
| 2602 } | |
| 2603 | |
| 2604 // Emit a constant pool header. The header has two goals: | |
| 2605 // 1) Encode the size of the constant pool, for use by the disassembler. | |
| 2606 // 2) Terminate the program, to try to prevent execution from accidentally | |
| 2607 // flowing into the constant pool. | |
| 2608 // The header is therefore made of two arm64 instructions: | |
| 2609 // ldr xzr, #<size of the constant pool in 32-bit words> | |
| 2610 // blr xzr | |
| 2611 // If executed the code will likely segfault and lr will point to the | |
| 2612 // beginning of the constant pool. | |
| 2613 // TODO(all): currently each relocated constant is 64 bits, consider adding | |
| 2614 // support for 32-bit entries. | |
| 2615 RecordComment("[ Constant Pool"); | |
| 2616 ConstantPoolMarker(2 * num_pending_reloc_info_); | |
| 2617 ConstantPoolGuard(); | |
| 2618 | |
| 2619 // Emit constant pool entries. | |
| 2620 for (int i = 0; i < num_pending_reloc_info_; i++) { | |
| 2621 RelocInfo& rinfo = pending_reloc_info_[i]; | |
| 2622 ASSERT(rinfo.rmode() != RelocInfo::COMMENT && | |
| 2623 rinfo.rmode() != RelocInfo::POSITION && | |
| 2624 rinfo.rmode() != RelocInfo::STATEMENT_POSITION && | |
| 2625 rinfo.rmode() != RelocInfo::CONST_POOL && | |
| 2626 rinfo.rmode() != RelocInfo::VENEER_POOL); | |
| 2627 | |
| 2628 Instruction* instr = reinterpret_cast<Instruction*>(rinfo.pc()); | |
| 2629 // Instruction to patch must be 'ldr rd, [pc, #offset]' with offset == 0. | |
| 2630 ASSERT(instr->IsLdrLiteral() && | |
| 2631 instr->ImmLLiteral() == 0); | |
| 2632 | |
| 2633 instr->SetImmPCOffsetTarget(reinterpret_cast<Instruction*>(pc_)); | |
| 2634 dc64(rinfo.data()); | |
| 2635 } | |
| 2636 | |
| 2637 num_pending_reloc_info_ = 0; | |
| 2638 first_const_pool_use_ = -1; | |
| 2639 | |
| 2640 RecordComment("]"); | |
| 2641 | |
| 2642 if (after_pool.is_linked()) { | |
| 2643 bind(&after_pool); | |
| 2644 } | |
| 2645 } | 2769 } |
| 2646 | 2770 |
| 2647 // Since a constant pool was just emitted, move the check offset forward by | 2771 // Since a constant pool was just emitted, move the check offset forward by |
| 2648 // the standard interval. | 2772 // the standard interval. |
| 2649 next_constant_pool_check_ = pc_offset() + kCheckConstPoolInterval; | 2773 next_constant_pool_check_ = pc_offset() + kCheckConstPoolInterval; |
| 2650 | |
| 2651 ASSERT(SizeOfCodeGeneratedSince(&size_check) == | |
| 2652 static_cast<unsigned>(pool_size)); | |
| 2653 } | 2774 } |
|
rmcilroy
2014/06/17 13:09:57
Add an assert that the size of code generated was
Rodolph Perfetta (ARM)
2014/06/18 16:53:22
Done.
| |
| 2654 | 2775 |
| 2655 | 2776 |
| 2656 bool Assembler::ShouldEmitVeneer(int max_reachable_pc, int margin) { | 2777 bool Assembler::ShouldEmitVeneer(int max_reachable_pc, int margin) { |
| 2657 // Account for the branch around the veneers and the guard. | 2778 // Account for the branch around the veneers and the guard. |
| 2658 int protection_offset = 2 * kInstructionSize; | 2779 int protection_offset = 2 * kInstructionSize; |
| 2659 return pc_offset() > max_reachable_pc - margin - protection_offset - | 2780 return pc_offset() > max_reachable_pc - margin - protection_offset - |
| 2660 static_cast<int>(unresolved_branches_.size() * kMaxVeneerCodeSize); | 2781 static_cast<int>(unresolved_branches_.size() * kMaxVeneerCodeSize); |
| 2661 } | 2782 } |
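The veneer budget above reads: stay clear of the branch's maximum reach by the caller's margin, a two-instruction protection zone, and worst-case veneer space for every unresolved branch. Restated standalone, with the veneer size assumed:

```cpp
const int kInstructionSize = 4;                       // assumed
const int kMaxVeneerCodeSize = 4 * kInstructionSize;  // assumed worst case

bool ShouldEmitVeneer(int pc_offset, int max_reachable_pc, int margin,
                      int unresolved_branch_count) {
  // Branch around the veneers plus the pool guard.
  int protection_offset = 2 * kInstructionSize;
  return pc_offset > max_reachable_pc - margin - protection_offset -
                         unresolved_branch_count * kMaxVeneerCodeSize;
}
```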
| 2662 | 2783 |
| 2663 | 2784 |
| (...skipping 219 matching lines...) | |
| 2883 adr(rd, 0); | 3004 adr(rd, 0); |
| 2884 MovInt64(scratch, target_offset); | 3005 MovInt64(scratch, target_offset); |
| 2885 add(rd, rd, scratch); | 3006 add(rd, rd, scratch); |
| 2886 } | 3007 } |
| 2887 } | 3008 } |
| 2888 | 3009 |
| 2889 | 3010 |
| 2890 } } // namespace v8::internal | 3011 } } // namespace v8::internal |
| 2891 | 3012 |
| 2892 #endif // V8_TARGET_ARCH_ARM64 | 3013 #endif // V8_TARGET_ARCH_ARM64 |