Chromium Code Reviews

Side by Side Diff: src/arm/assembler-arm.cc

Issue 7021007: Optimise the deoptimisation check to improve performance on modern ARM cores.... (Closed) Base URL: http://v8.googlecode.com/svn/branches/bleeding_edge/
Patch Set: Created 9 years, 7 months ago
1 // Copyright (c) 1994-2006 Sun Microsystems Inc. 1 // Copyright (c) 1994-2006 Sun Microsystems Inc.
2 // All Rights Reserved. 2 // All Rights Reserved.
3 // 3 //
4 // Redistribution and use in source and binary forms, with or without 4 // Redistribution and use in source and binary forms, with or without
5 // modification, are permitted provided that the following conditions 5 // modification, are permitted provided that the following conditions
6 // are met: 6 // are met:
7 // 7 //
8 // - Redistributions of source code must retain the above copyright notice, 8 // - Redistributions of source code must retain the above copyright notice,
9 // this list of conditions and the following disclaimer. 9 // this list of conditions and the following disclaimer.
10 // 10 //
(...skipping 303 matching lines...)
314 buffer_ = static_cast<byte*>(buffer); 314 buffer_ = static_cast<byte*>(buffer);
315 buffer_size_ = buffer_size; 315 buffer_size_ = buffer_size;
316 own_buffer_ = false; 316 own_buffer_ = false;
317 } 317 }
318 318
319 // Setup buffer pointers. 319 // Setup buffer pointers.
320 ASSERT(buffer_ != NULL); 320 ASSERT(buffer_ != NULL);
321 pc_ = buffer_; 321 pc_ = buffer_;
322 reloc_info_writer.Reposition(buffer_ + buffer_size, pc_); 322 reloc_info_writer.Reposition(buffer_ + buffer_size, pc_);
323 num_prinfo_ = 0; 323 num_prinfo_ = 0;
324 num_deopt_jump_entries_ = 0;
324 next_buffer_check_ = 0; 325 next_buffer_check_ = 0;
325 const_pool_blocked_nesting_ = 0; 326 const_pool_blocked_nesting_ = 0;
326 no_const_pool_before_ = 0; 327 no_const_pool_before_ = 0;
327 last_const_pool_end_ = 0; 328 last_const_pool_end_ = 0;
328 last_bound_pos_ = 0; 329 last_bound_pos_ = 0;
329 ast_id_for_reloc_info_ = kNoASTId; 330 ast_id_for_reloc_info_ = kNoASTId;
330 } 331 }
331 332
332 333
333 Assembler::~Assembler() { 334 Assembler::~Assembler() {
(...skipping 1166 matching lines...)
1500 svc(kStopCode + code, cond); 1501 svc(kStopCode + code, cond);
1501 } else { 1502 } else {
1502 svc(kStopCode + kMaxStopCode, cond); 1503 svc(kStopCode + kMaxStopCode, cond);
1503 } 1504 }
1504 emit(reinterpret_cast<Instr>(msg)); 1505 emit(reinterpret_cast<Instr>(msg));
1505 #else // def __arm__ 1506 #else // def __arm__
1506 #ifdef CAN_USE_ARMV5_INSTRUCTIONS 1507 #ifdef CAN_USE_ARMV5_INSTRUCTIONS
1507 if (cond != al) { 1508 if (cond != al) {
1508 Label skip; 1509 Label skip;
1509 b(&skip, NegateCondition(cond)); 1510 b(&skip, NegateCondition(cond));
1510 bkpt(0); 1511 bkpt(kBkptStopCode);
1511 bind(&skip); 1512 bind(&skip);
1512 } else { 1513 } else {
1513 bkpt(0); 1514 bkpt(kBkptStopCode);
1514 } 1515 }
1515 #else // ndef CAN_USE_ARMV5_INSTRUCTIONS 1516 #else // ndef CAN_USE_ARMV5_INSTRUCTIONS
1516 svc(0x9f0001, cond); 1517 svc(0x9f0001, cond);
1517 #endif // ndef CAN_USE_ARMV5_INSTRUCTIONS 1518 #endif // ndef CAN_USE_ARMV5_INSTRUCTIONS
1518 #endif // def __arm__ 1519 #endif // def __arm__
1519 } 1520 }
1520 1521
1521 1522
1522 void Assembler::bkpt(uint32_t imm16) { // v5 and above 1523 void Assembler::bkpt(uint32_t imm16) { // v5 and above
1523 ASSERT(is_uint16(imm16)); 1524 ASSERT(is_uint16(imm16));
(...skipping 868 matching lines...)
2392 } 2393 }
2393 2394
2394 2395
2395 bool Assembler::IsNop(Instr instr, int type) { 2396 bool Assembler::IsNop(Instr instr, int type) {
2396 // Check for mov rx, rx where x = type. 2397 // Check for mov rx, rx where x = type.
2397 ASSERT(0 <= type && type <= 14); // mov pc, pc is not a nop. 2398 ASSERT(0 <= type && type <= 14); // mov pc, pc is not a nop.
2398 return instr == (al | 13*B21 | type*B12 | type); 2399 return instr == (al | 13*B21 | type*B12 | type);
2399 } 2400 }
2400 2401
2401 2402
2403 bool Assembler::IsBkpt(Instr instr, int code) {
2404 ASSERT(is_uint16(code));
2405 return instr == (al | B24 | B21 | (code >> 4)*B8 | BKPT | (code & 0xf));
2406 }
2407
2408
2402 bool Assembler::ImmediateFitsAddrMode1Instruction(int32_t imm32) { 2409 bool Assembler::ImmediateFitsAddrMode1Instruction(int32_t imm32) {
2403 uint32_t dummy1; 2410 uint32_t dummy1;
2404 uint32_t dummy2; 2411 uint32_t dummy2;
2405 return fits_shifter(imm32, &dummy1, &dummy2, NULL); 2412 return fits_shifter(imm32, &dummy1, &dummy2, NULL);
2406 } 2413 }
2407 2414
2408 2415
2409 void Assembler::BlockConstPoolFor(int instructions) { 2416 void Assembler::BlockConstPoolFor(int instructions) {
2410 BlockConstPoolBefore(pc_offset() + instructions * kInstrSize); 2417 BlockConstPoolBefore(pc_offset() + instructions * kInstrSize);
2411 } 2418 }
(...skipping 129 matching lines...)
2541 RelocInfo reloc_info_with_ast_id(pc_, rmode, ast_id_for_reloc_info_); 2548 RelocInfo reloc_info_with_ast_id(pc_, rmode, ast_id_for_reloc_info_);
2542 ast_id_for_reloc_info_ = kNoASTId; 2549 ast_id_for_reloc_info_ = kNoASTId;
2543 reloc_info_writer.Write(&reloc_info_with_ast_id); 2550 reloc_info_writer.Write(&reloc_info_with_ast_id);
2544 } else { 2551 } else {
2545 reloc_info_writer.Write(&rinfo); 2552 reloc_info_writer.Write(&rinfo);
2546 } 2553 }
2547 } 2554 }
2548 } 2555 }
2549 2556
2550 2557
2558 void Assembler::RecordDeoptJumpEntry(Address entry, Condition cond) {
2559 DeoptJumpEntry deopt_jump_entry(pc_offset(), cond, entry);
2560 deopt_jump_entries_[num_deopt_jump_entries_++]= deopt_jump_entry;
2561 }
2562
2563
2551 void Assembler::CheckConstPool(bool force_emit, bool require_jump) { 2564 void Assembler::CheckConstPool(bool force_emit, bool require_jump) {
2552 // Calculate the offset of the next check. It will be overwritten 2565 // Calculate the offset of the next check. It will be overwritten
2553 // when a const pool is generated or when const pools are being 2566 // when a const pool is generated or when const pools are being
2554 // blocked for a specific range. 2567 // blocked for a specific range.
2555 next_buffer_check_ = pc_offset() + kCheckConstInterval; 2568 next_buffer_check_ = pc_offset() + kCheckConstInterval;
2556 2569
2557 // There is nothing to do if there are no pending relocation info entries. 2570 // There is nothing to do if there are no pending relocation info nor
2558 if (num_prinfo_ == 0) return; 2571 // deoptimization entries.
2572 if ((num_prinfo_ == 0) && (num_deopt_jump_entries_ == 0)) return;
2559 2573
2560 // We emit a constant pool at regular intervals of about kDistBetweenPools 2574 // We emit a constant pool at regular intervals of about kDistBetweenPools
2561 // or when requested by parameter force_emit (e.g. after each function). 2575 // or when requested by parameter force_emit (e.g. after each function).
2562 // We prefer not to emit a jump unless the max distance is reached or if we 2576 // We prefer not to emit a jump unless the max distance is reached or if we
2563 // are running low on slots, which can happen if a lot of constants are being 2577 // are running low on slots, which can happen if a lot of constants are being
2564 // emitted (e.g. --debug-code and many static references). 2578 // emitted (e.g. --debug-code and many static references).
2565 int dist = pc_offset() - last_const_pool_end_; 2579 int jump_instr = require_jump ? kInstrSize : 0;
2580 int reloc_info_size = num_prinfo_ * kPointerSize;
2581 int deopt_jump_size = num_deopt_jump_entries_ * DeoptJumpEntry::kTotalSize;
2582 int needed_space = jump_instr +
2583 kInstrSize + // For the constant pool marker.
2584 reloc_info_size + deopt_jump_size;
2585 int dist = pc_offset() - last_const_pool_end_ + needed_space;
2586 // TODO(1236125): Cleanup the "magic" number below. We know that
2587 // the code generation will test every kCheckConstIntervalInst.
2588 // Thus we are safe as long as we generate less than 7 constant
2589 // entries per instruction.
2590 int max_dist_at_next_check =
2591 dist + kCheckConstIntervalInst * (kInstrSize + 7 * kInstrSize);
2592
2593 // The distance between the first instruction after the last constant pool
2594 // and the end of this constant pool must be less than the addressing range.
2566 if (!force_emit && dist < kMaxDistBetweenPools && 2595 if (!force_emit && dist < kMaxDistBetweenPools &&
2567 (require_jump || dist < kDistBetweenPools) && 2596 (require_jump || dist < kDistBetweenPools) &&
2568 // TODO(1236125): Cleanup the "magic" number below. We know that 2597 // We are safe as long as we are certain that we will not generate too
2569 // the code generation will test every kCheckConstIntervalInst. 2598 // many reloc info or deopt entries in the next kCheckConstIntervalInst.
2570 // Thus we are safe as long as we generate less than 7 constant 2599 (max_dist_at_next_check < kMaxDistBetweenPools)) {
2571 // entries per instruction.
2572 (num_prinfo_ < (kMaxNumPRInfo - (7 * kCheckConstIntervalInst)))) {
2573 return; 2600 return;
2574 } 2601 }
2575 2602
2576 // If we did not return by now, we need to emit the constant pool soon. 2603 // If we did not return by now, we need to emit the constant pool soon.
2577 2604
2578 // However, some small sequences of instructions must not be broken up by the 2605 // However, some small sequences of instructions must not be broken up by the
2579 // insertion of a constant pool; such sequences are protected by setting 2606 // insertion of a constant pool; such sequences are protected by setting
2580 // either const_pool_blocked_nesting_ or no_const_pool_before_, which are 2607 // either const_pool_blocked_nesting_ or no_const_pool_before_, which are
2581 // both checked here. Also, recursive calls to CheckConstPool are blocked by 2608 // both checked here. Also, recursive calls to CheckConstPool are blocked by
2582 // no_const_pool_before_. 2609 // no_const_pool_before_.
2583 if (const_pool_blocked_nesting_ > 0 || pc_offset() < no_const_pool_before_) { 2610 if (const_pool_blocked_nesting_ > 0 || pc_offset() < no_const_pool_before_) {
2584 // Emission is currently blocked; make sure we try again as soon as 2611 // Emission is currently blocked; make sure we try again as soon as
2585 // possible. 2612 // possible.
2586 if (const_pool_blocked_nesting_ > 0) { 2613 if (const_pool_blocked_nesting_ > 0) {
2587 next_buffer_check_ = pc_offset() + kInstrSize; 2614 next_buffer_check_ = pc_offset() + kInstrSize;
2588 } else { 2615 } else {
2589 next_buffer_check_ = no_const_pool_before_; 2616 next_buffer_check_ = no_const_pool_before_;
2590 } 2617 }
2591 2618
2592 // Something is wrong if emission is forced and blocked at the same time. 2619 // Something is wrong if emission is forced and blocked at the same time.
2593 ASSERT(!force_emit); 2620 ASSERT(!force_emit);
2594 return; 2621 return;
2595 } 2622 }
2596 2623
2597 int jump_instr = require_jump ? kInstrSize : 0;
2598
2599 // Check that the code buffer is large enough before emitting the constant 2624 // Check that the code buffer is large enough before emitting the constant
2600 // pool and relocation information (include the jump over the pool and the 2625 // pool and relocation information (include the jump over the pool and the
2601 // constant pool marker). 2626 // constant pool marker).
2602 int max_needed_space = 2627 while (buffer_space() <= (needed_space + kGap)) GrowBuffer();
2603 jump_instr + kInstrSize + num_prinfo_*(kInstrSize + kMaxRelocSize);
2604 while (buffer_space() <= (max_needed_space + kGap)) GrowBuffer();
2605 2628
2606 // Block recursive calls to CheckConstPool. 2629 // Block recursive calls to CheckConstPool.
2607 BlockConstPoolBefore(pc_offset() + jump_instr + kInstrSize + 2630 BlockConstPoolScope block_const_pool(this);
2608 num_prinfo_*kInstrSize);
2609 // Don't bother to check for the emit calls below. 2631 // Don't bother to check for the emit calls below.
2610 next_buffer_check_ = no_const_pool_before_; 2632 next_buffer_check_ = no_const_pool_before_;
2611 2633
2612 // Emit jump over constant pool if necessary. 2634 // Emit jump over constant pool if necessary.
2613 Label after_pool; 2635 Label after_pool;
2614 if (require_jump) b(&after_pool); 2636 if (require_jump) b(&after_pool);
2615 2637
2616 RecordComment("[ Constant Pool"); 2638 RecordComment("[ Constant Pool");
2617 2639
2618 // Put down constant pool marker "Undefined instruction" as specified by 2640 // Put down constant pool marker "Undefined instruction" as specified by
2619 // A5.6 (ARMv7) Instruction set encoding. 2641 // A5.6 (ARMv7) Instruction set encoding.
2620 emit(kConstantPoolMarker | num_prinfo_); 2642 int constant_pool_size = reloc_info_size + deopt_jump_size;
2643 ASSERT(((constant_pool_size / kPointerSize) & ~kConstantPoolLengthMask) == 0);
2644 emit(kConstantPoolMarker | (constant_pool_size / kPointerSize));
2645
2646 int start_of_const_pool = pc_offset();
2647 USE(start_of_const_pool);
2648
2649 // Emit the deoptimization jump table.
Søren Thygesen Gjesse 2011/05/16 07:26:39 This emitting of the deopt-jump table in parts ins
2650 RecordComment("[ Deoptimization jump table");
2651 for (int i = 0; i < num_deopt_jump_entries_; i++) {
2652 // Patch the code at the deoptimization site.
2653 DeoptJumpEntry& deopt_jump_entry = deopt_jump_entries_[i];
2654 // Get the offset to the current pc.
2655 int new_offset =
2656 (pc_offset() - deopt_jump_entry.pc_offset() - kPcLoadDelta);
2657 // Compute the location of the deoptimization site.
2658 Instr* deopt_site =
2659 reinterpret_cast<Instr*>(buffer_ + deopt_jump_entry.pc_offset());
2660 // The code to patch is (See LCodeGen::DeoptimizeIf()):
2661 // bkpt kBkptUninitializedCode
2662
2663 // Check that the instruction to patch is indeed
2664 // a bkpt kBkptUninitializedCode.
2665 ASSERT(IsBkpt(*deopt_site, kBkptUninitializedCode));
2666 // We need to patch the instruction with a branch jumping here.
2667 ASSERT((new_offset & 3) == 0);
2668 int imm24 = new_offset >> 2;
2669 ASSERT(is_int24(imm24));
Søren Thygesen Gjesse 2011/05/16 07:26:39 Please use the CodePatcher class for this. CodePa
2670 *deopt_site = (deopt_jump_entry.cond() | B27 | B25 | (imm24 & kImm24Mask));
2671
2672 // Emit the jump to the corresponding deoptimization entry.
2673 // We need to manually register this relocation information, because we need
2674 // it to be emitted in this constant pool after this jump table.
2675 RelocInfo local_rinfo(pc_,
2676 RelocInfo::RUNTIME_ENTRY,
2677 reinterpret_cast<intptr_t>(deopt_jump_entry.entry()));
2678 // We are generating instructions in the constant pool.
2679 // We are sure the instruction cache will be flushed for these instructions:
2680 // the constant pools (and deoptimization jump tables) are interleaved with
2681 // the generated code, and will be flushed along with it when needed.
2682 ldr(pc, MemOperand(pc, 0));
2683 prinfo_[num_prinfo_++] = local_rinfo;
2684 reloc_info_writer.Write(&local_rinfo);
2685 }
2686 num_deopt_jump_entries_ = 0;
2687 RecordComment("]");
2621 2688
2622 // Emit constant pool entries. 2689 // Emit constant pool entries.
2690 RecordComment("[ Constant pool entries");
2623 for (int i = 0; i < num_prinfo_; i++) { 2691 for (int i = 0; i < num_prinfo_; i++) {
2624 RelocInfo& rinfo = prinfo_[i]; 2692 RelocInfo& rinfo = prinfo_[i];
2625 ASSERT(rinfo.rmode() != RelocInfo::COMMENT && 2693 ASSERT(rinfo.rmode() != RelocInfo::COMMENT &&
2626 rinfo.rmode() != RelocInfo::POSITION && 2694 rinfo.rmode() != RelocInfo::POSITION &&
2627 rinfo.rmode() != RelocInfo::STATEMENT_POSITION); 2695 rinfo.rmode() != RelocInfo::STATEMENT_POSITION);
2628 Instr instr = instr_at(rinfo.pc()); 2696 Instr instr = instr_at(rinfo.pc());
2629 2697
2630 // Instruction to patch must be a ldr/str [pc, #offset]. 2698 // Instruction to patch must be a ldr/str [pc, #offset].
2631 // P and U set, B and W clear, Rn == pc, offset12 still 0. 2699 // P and U set, B and W clear, Rn == pc, offset12 still 0.
2632 ASSERT((instr & (7*B25 | P | U | B | W | 15*B16 | kOff12Mask)) == 2700 ASSERT((instr & (7*B25 | P | U | B | W | 15*B16 | kOff12Mask)) ==
2633 (2*B25 | P | U | pc.code()*B16)); 2701 (2*B25 | P | U | pc.code()*B16));
2634 int delta = pc_ - rinfo.pc() - 8; 2702 int delta = pc_ - rinfo.pc() - 8;
2635 ASSERT(delta >= -4); // instr could be ldr pc, [pc, #-4] followed by targ32 2703 ASSERT(delta >= -4); // instr could be ldr pc, [pc, #-4] followed by targ32
2636 if (delta < 0) { 2704 if (delta < 0) {
2637 instr &= ~U; 2705 instr &= ~U;
2638 delta = -delta; 2706 delta = -delta;
2639 } 2707 }
2640 ASSERT(is_uint12(delta)); 2708 ASSERT(is_uint12(delta));
2641 instr_at_put(rinfo.pc(), instr + delta); 2709 instr_at_put(rinfo.pc(), instr + delta);
2642 emit(rinfo.data()); 2710 emit(rinfo.data());
2643 } 2711 }
2644 num_prinfo_ = 0; 2712 num_prinfo_ = 0;
2713 RecordComment("]");
2645 last_const_pool_end_ = pc_offset(); 2714 last_const_pool_end_ = pc_offset();
2646 2715
2647 RecordComment("]"); 2716 RecordComment("]");
2648 2717
2649 if (after_pool.is_linked()) { 2718 if (after_pool.is_linked()) {
2650 bind(&after_pool); 2719 bind(&after_pool);
2651 } 2720 }
2721 ASSERT(constant_pool_size == (pc_offset() - start_of_const_pool));
2652 2722
2653 // Since a constant pool was just emitted, move the check offset forward by 2723 // Since a constant pool was just emitted, move the check offset forward by
2654 // the standard interval. 2724 // the standard interval.
2655 next_buffer_check_ = pc_offset() + kCheckConstInterval; 2725 next_buffer_check_ = pc_offset() + kCheckConstInterval;
2656 } 2726 }
2657 2727
2658 2728
2659 } } // namespace v8::internal 2729 } } // namespace v8::internal
2660 2730
2661 #endif // V8_TARGET_ARCH_ARM 2731 #endif // V8_TARGET_ARCH_ARM