OLD | NEW |
1 // Copyright (c) 1994-2006 Sun Microsystems Inc. | 1 // Copyright (c) 1994-2006 Sun Microsystems Inc. |
2 // All Rights Reserved. | 2 // All Rights Reserved. |
3 // | 3 // |
4 // Redistribution and use in source and binary forms, with or without | 4 // Redistribution and use in source and binary forms, with or without |
5 // modification, are permitted provided that the following conditions | 5 // modification, are permitted provided that the following conditions |
6 // are met: | 6 // are met: |
7 // | 7 // |
8 // - Redistributions of source code must retain the above copyright notice, | 8 // - Redistributions of source code must retain the above copyright notice, |
9 // this list of conditions and the following disclaimer. | 9 // this list of conditions and the following disclaimer. |
10 // | 10 // |
(...skipping 302 matching lines...)
313 ASSERT(buffer_size > 0); | 313 ASSERT(buffer_size > 0); |
314 buffer_ = static_cast<byte*>(buffer); | 314 buffer_ = static_cast<byte*>(buffer); |
315 buffer_size_ = buffer_size; | 315 buffer_size_ = buffer_size; |
316 own_buffer_ = false; | 316 own_buffer_ = false; |
317 } | 317 } |
318 | 318 |
319 // Setup buffer pointers. | 319 // Setup buffer pointers. |
320 ASSERT(buffer_ != NULL); | 320 ASSERT(buffer_ != NULL); |
321 pc_ = buffer_; | 321 pc_ = buffer_; |
322 reloc_info_writer.Reposition(buffer_ + buffer_size, pc_); | 322 reloc_info_writer.Reposition(buffer_ + buffer_size, pc_); |
323 num_prinfo_ = 0; | 323 num_pending_reloc_info_ = 0; |
324 next_buffer_check_ = 0; | 324 next_buffer_check_ = 0; |
325 const_pool_blocked_nesting_ = 0; | 325 const_pool_blocked_nesting_ = 0; |
326 no_const_pool_before_ = 0; | 326 no_const_pool_before_ = 0; |
327 last_const_pool_end_ = 0; | 327 first_const_pool_use_ = -1; |
328 last_bound_pos_ = 0; | 328 last_bound_pos_ = 0; |
329 ast_id_for_reloc_info_ = kNoASTId; | 329 ast_id_for_reloc_info_ = kNoASTId; |
330 } | 330 } |
331 | 331 |
332 | 332 |
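Note on the fields initialized above: num_pending_reloc_info_ and first_const_pool_use_ replace last_const_pool_end_, so instead of remembering where the previous pool ended, the assembler now remembers the pc offset of the first instruction that still needs a pool entry, which is the quantity the ldr reach actually constrains. The declarations live in assembler-arm.h, which this hunk does not show; the following is only a rough sketch inferred from the names and initial values used here, and the constant values are assumptions rather than text from this CL:

    // Sketch only; exact values and comments in assembler-arm.h may differ.
    static const int kMaxDistToPool = 4 * KB;  // reach of ldr rd, [pc, #imm12]
    static const int kMaxNumPendingRelocInfo = kMaxDistToPool / kInstrSize;
    static const int kCheckPoolIntervalInst = 32;
    static const int kCheckPoolInterval = kCheckPoolIntervalInst * kInstrSize;
    static const int kAvgDistToPool = kMaxDistToPool - kCheckPoolInterval;

    // Relocation records waiting for a constant pool slot, plus the pc offset
    // of the first instruction that needs one (-1 when nothing is pending).
    RelocInfo pending_reloc_info_[kMaxNumPendingRelocInfo];
    int num_pending_reloc_info_;
    int first_const_pool_use_;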
333 Assembler::~Assembler() { | 333 Assembler::~Assembler() { |
334 ASSERT(const_pool_blocked_nesting_ == 0); | 334 ASSERT(const_pool_blocked_nesting_ == 0); |
335 if (own_buffer_) { | 335 if (own_buffer_) { |
336 if (isolate()->assembler_spare_buffer() == NULL && | 336 if (isolate()->assembler_spare_buffer() == NULL && |
337 buffer_size_ == kMinimalBufferSize) { | 337 buffer_size_ == kMinimalBufferSize) { |
338 isolate()->set_assembler_spare_buffer(buffer_); | 338 isolate()->set_assembler_spare_buffer(buffer_); |
339 } else { | 339 } else { |
340 DeleteArray(buffer_); | 340 DeleteArray(buffer_); |
341 } | 341 } |
342 } | 342 } |
343 } | 343 } |
344 | 344 |
345 | 345 |
346 void Assembler::GetCode(CodeDesc* desc) { | 346 void Assembler::GetCode(CodeDesc* desc) { |
347 // Emit constant pool if necessary. | 347 // Emit constant pool if necessary. |
348 CheckConstPool(true, false); | 348 CheckConstPool(true, false); |
349 ASSERT(num_prinfo_ == 0); | 349 ASSERT(num_pending_reloc_info_ == 0); |
350 | 350 |
351 // Setup code descriptor. | 351 // Setup code descriptor. |
352 desc->buffer = buffer_; | 352 desc->buffer = buffer_; |
353 desc->buffer_size = buffer_size_; | 353 desc->buffer_size = buffer_size_; |
354 desc->instr_size = pc_offset(); | 354 desc->instr_size = pc_offset(); |
355 desc->reloc_size = (buffer_ + buffer_size_) - reloc_info_writer.pos(); | 355 desc->reloc_size = (buffer_ + buffer_size_) - reloc_info_writer.pos(); |
356 } | 356 } |
357 | 357 |
358 | 358 |
359 void Assembler::Align(int m) { | 359 void Assembler::Align(int m) { |
(...skipping 506 matching lines...)
866 // Immediate shift. | 866 // Immediate shift. |
867 instr |= x.shift_imm_*B7 | x.shift_op_ | x.rm_.code(); | 867 instr |= x.shift_imm_*B7 | x.shift_op_ | x.rm_.code(); |
868 } else { | 868 } else { |
869 // Register shift. | 869 // Register shift. |
870 ASSERT(!rn.is(pc) && !rd.is(pc) && !x.rm_.is(pc) && !x.rs_.is(pc)); | 870 ASSERT(!rn.is(pc) && !rd.is(pc) && !x.rm_.is(pc) && !x.rs_.is(pc)); |
871 instr |= x.rs_.code()*B8 | x.shift_op_ | B4 | x.rm_.code(); | 871 instr |= x.rs_.code()*B8 | x.shift_op_ | B4 | x.rm_.code(); |
872 } | 872 } |
873 emit(instr | rn.code()*B16 | rd.code()*B12); | 873 emit(instr | rn.code()*B16 | rd.code()*B12); |
874 if (rn.is(pc) || x.rm_.is(pc)) { | 874 if (rn.is(pc) || x.rm_.is(pc)) { |
875 // Block constant pool emission for one instruction after reading pc. | 875 // Block constant pool emission for one instruction after reading pc. |
876 BlockConstPoolBefore(pc_offset() + kInstrSize); | 876 BlockConstPoolFor(1); |
877 } | 877 } |
878 } | 878 } |
879 | 879 |
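Replacing BlockConstPoolBefore(pc_offset() + kInstrSize) with BlockConstPoolFor(1) states the same constraint in instruction counts: after an instruction that reads pc, the next instruction must follow it immediately, because the value just computed from pc assumes nothing is inserted in between. An illustrative listing of the hazard (not V8 output, just a sketch):

    ; addr + 0: add r0, pc, #4     ; r0 = (addr + 8) + 4 = addr + 12
    ; addr + 4: ldr r1, [r0]       ; expects the word that sits at addr + 12
    ; If a constant pool were emitted between these two instructions, the code
    ; meant to sit at addr + 12 would be displaced and r0 would point into
    ; pool data, so pool emission is blocked for exactly one instruction here.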
880 | 880 |
881 void Assembler::addrmod2(Instr instr, Register rd, const MemOperand& x) { | 881 void Assembler::addrmod2(Instr instr, Register rd, const MemOperand& x) { |
882 ASSERT((instr & ~(kCondMask | B | L)) == B26); | 882 ASSERT((instr & ~(kCondMask | B | L)) == B26); |
883 int am = x.am_; | 883 int am = x.am_; |
884 if (!x.rm_.is_valid()) { | 884 if (!x.rm_.is_valid()) { |
885 // Immediate offset. | 885 // Immediate offset. |
886 int offset_12 = x.offset_; | 886 int offset_12 = x.offset_; |
(...skipping 103 matching lines...)
990 if (L->is_linked()) { | 990 if (L->is_linked()) { |
991 target_pos = L->pos(); // L's link | 991 target_pos = L->pos(); // L's link |
992 } else { | 992 } else { |
993 target_pos = kEndOfChain; | 993 target_pos = kEndOfChain; |
994 } | 994 } |
995 L->link_to(pc_offset()); | 995 L->link_to(pc_offset()); |
996 } | 996 } |
997 | 997 |
998 // Block the emission of the constant pool, since the branch instruction must | 998 // Block the emission of the constant pool, since the branch instruction must |
999 // be emitted at the pc offset recorded by the label. | 999 // be emitted at the pc offset recorded by the label. |
1000 BlockConstPoolBefore(pc_offset() + kInstrSize); | 1000 BlockConstPoolFor(1); |
1001 return target_pos - (pc_offset() + kPcLoadDelta); | 1001 return target_pos - (pc_offset() + kPcLoadDelta); |
1002 } | 1002 } |
1003 | 1003 |
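For reference, a worked example of the offset returned here (kPcLoadDelta is 8 on ARM, because reading pc yields the address of the current instruction plus 8):

    // Branch emitted at pc_offset() == 100, label bound at offset 40:
    //   branch_offset = 40 - (100 + 8) = -68
    // The b/bl encoders later shift this right by 2 to form the signed imm24
    // field, since ARM branch targets are word-aligned.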
1004 | 1004 |
1005 void Assembler::label_at_put(Label* L, int at_offset) { | 1005 void Assembler::label_at_put(Label* L, int at_offset) { |
1006 int target_pos; | 1006 int target_pos; |
1007 if (L->is_bound()) { | 1007 if (L->is_bound()) { |
1008 target_pos = L->pos(); | 1008 target_pos = L->pos(); |
1009 } else { | 1009 } else { |
1010 if (L->is_linked()) { | 1010 if (L->is_linked()) { |
(...skipping 475 matching lines...)
1486 addrmod4(cond | B27 | am, base, src); | 1486 addrmod4(cond | B27 | am, base, src); |
1487 } | 1487 } |
1488 | 1488 |
1489 | 1489 |
1490 // Exception-generating instructions and debugging support. | 1490 // Exception-generating instructions and debugging support. |
1491 // Stops with a non-negative code less than kNumOfWatchedStops support | 1491 // Stops with a non-negative code less than kNumOfWatchedStops support |
1492 // enabling/disabling and a counter feature. See simulator-arm.h . | 1492 // enabling/disabling and a counter feature. See simulator-arm.h . |
1493 void Assembler::stop(const char* msg, Condition cond, int32_t code) { | 1493 void Assembler::stop(const char* msg, Condition cond, int32_t code) { |
1494 #ifndef __arm__ | 1494 #ifndef __arm__ |
1495 ASSERT(code >= kDefaultStopCode); | 1495 ASSERT(code >= kDefaultStopCode); |
1496 // The Simulator will handle the stop instruction and get the message address. | 1496 { |
1497 // It expects to find the address just after the svc instruction. | 1497 // The Simulator will handle the stop instruction and get the message |
1498 BlockConstPoolFor(2); | 1498 // address. It expects to find the address just after the svc instruction. |
1499 if (code >= 0) { | 1499 BlockConstPoolScope block_const_pool(this); |
1500 svc(kStopCode + code, cond); | 1500 if (code >= 0) { |
1501 } else { | 1501 svc(kStopCode + code, cond); |
1502 svc(kStopCode + kMaxStopCode, cond); | 1502 } else { |
| 1503 svc(kStopCode + kMaxStopCode, cond); |
| 1504 } |
| 1505 emit(reinterpret_cast<Instr>(msg)); |
1503 } | 1506 } |
1504 emit(reinterpret_cast<Instr>(msg)); | |
1505 #else // def __arm__ | 1507 #else // def __arm__ |
1506 #ifdef CAN_USE_ARMV5_INSTRUCTIONS | 1508 #ifdef CAN_USE_ARMV5_INSTRUCTIONS |
1507 if (cond != al) { | 1509 if (cond != al) { |
1508 Label skip; | 1510 Label skip; |
1509 b(&skip, NegateCondition(cond)); | 1511 b(&skip, NegateCondition(cond)); |
1510 bkpt(0); | 1512 bkpt(0); |
1511 bind(&skip); | 1513 bind(&skip); |
1512 } else { | 1514 } else { |
1513 bkpt(0); | 1515 bkpt(0); |
1514 } | 1516 } |
(...skipping 884 matching lines...)
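In the simulator build, the scope introduced above keeps the svc and the message word adjacent; the old code achieved the same effect by counting (BlockConstPoolFor(2)), while BlockConstPoolScope makes the grouping explicit and nestable. The emitted layout the simulator relies on, as a sketch:

    ; addr + 0: svc kStopCode + code   ; trap handled by the simulator
    ; addr + 4: <address of msg>       ; read by the simulator as plain data
    ; A constant pool emitted between the two words would be misread as the
    ; message address, hence the blocking scope around both emits.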
2399 } | 2401 } |
2400 | 2402 |
2401 | 2403 |
2402 bool Assembler::ImmediateFitsAddrMode1Instruction(int32_t imm32) { | 2404 bool Assembler::ImmediateFitsAddrMode1Instruction(int32_t imm32) { |
2403 uint32_t dummy1; | 2405 uint32_t dummy1; |
2404 uint32_t dummy2; | 2406 uint32_t dummy2; |
2405 return fits_shifter(imm32, &dummy1, &dummy2, NULL); | 2407 return fits_shifter(imm32, &dummy1, &dummy2, NULL); |
2406 } | 2408 } |
2407 | 2409 |
2408 | 2410 |
2409 void Assembler::BlockConstPoolFor(int instructions) { | |
2410 BlockConstPoolBefore(pc_offset() + instructions * kInstrSize); | |
2411 } | |
2412 | |
2413 | |
2414 // Debugging. | 2411 // Debugging. |
2415 void Assembler::RecordJSReturn() { | 2412 void Assembler::RecordJSReturn() { |
2416 positions_recorder()->WriteRecordedPositions(); | 2413 positions_recorder()->WriteRecordedPositions(); |
2417 CheckBuffer(); | 2414 CheckBuffer(); |
2418 RecordRelocInfo(RelocInfo::JS_RETURN); | 2415 RecordRelocInfo(RelocInfo::JS_RETURN); |
2419 } | 2416 } |
2420 | 2417 |
2421 | 2418 |
2422 void Assembler::RecordDebugBreakSlot() { | 2419 void Assembler::RecordDebugBreakSlot() { |
2423 positions_recorder()->WriteRecordedPositions(); | 2420 positions_recorder()->WriteRecordedPositions(); |
(...skipping 43 matching lines...)
2467 buffer_size_ = desc.buffer_size; | 2464 buffer_size_ = desc.buffer_size; |
2468 pc_ += pc_delta; | 2465 pc_ += pc_delta; |
2469 reloc_info_writer.Reposition(reloc_info_writer.pos() + rc_delta, | 2466 reloc_info_writer.Reposition(reloc_info_writer.pos() + rc_delta, |
2470 reloc_info_writer.last_pc() + pc_delta); | 2467 reloc_info_writer.last_pc() + pc_delta); |
2471 | 2468 |
2472 // None of our relocation types are pc relative pointing outside the code | 2469 // None of our relocation types are pc relative pointing outside the code |
2473 // buffer nor pc absolute pointing inside the code buffer, so there is no need | 2470 // buffer nor pc absolute pointing inside the code buffer, so there is no need |
2474 // to relocate any emitted relocation entries. | 2471 // to relocate any emitted relocation entries. |
2475 | 2472 |
2476 // Relocate pending relocation entries. | 2473 // Relocate pending relocation entries. |
2477 for (int i = 0; i < num_prinfo_; i++) { | 2474 for (int i = 0; i < num_pending_reloc_info_; i++) { |
2478 RelocInfo& rinfo = prinfo_[i]; | 2475 RelocInfo& rinfo = pending_reloc_info_[i]; |
2479 ASSERT(rinfo.rmode() != RelocInfo::COMMENT && | 2476 ASSERT(rinfo.rmode() != RelocInfo::COMMENT && |
2480 rinfo.rmode() != RelocInfo::POSITION); | 2477 rinfo.rmode() != RelocInfo::POSITION); |
2481 if (rinfo.rmode() != RelocInfo::JS_RETURN) { | 2478 if (rinfo.rmode() != RelocInfo::JS_RETURN) { |
2482 rinfo.set_pc(rinfo.pc() + pc_delta); | 2479 rinfo.set_pc(rinfo.pc() + pc_delta); |
2483 } | 2480 } |
2484 } | 2481 } |
2485 } | 2482 } |
2486 | 2483 |
2487 | 2484 |
2488 void Assembler::db(uint8_t data) { | 2485 void Assembler::db(uint8_t data) { |
2489 // No relocation info should be pending while using db. db is used | 2486 // No relocation info should be pending while using db. db is used |
2490 // to write pure data with no pointers and the constant pool should | 2487 // to write pure data with no pointers and the constant pool should |
2491 // be emitted before using db. | 2488 // be emitted before using db. |
2492 ASSERT(num_prinfo_ == 0); | 2489 ASSERT(num_pending_reloc_info_ == 0); |
2493 CheckBuffer(); | 2490 CheckBuffer(); |
2494 *reinterpret_cast<uint8_t*>(pc_) = data; | 2491 *reinterpret_cast<uint8_t*>(pc_) = data; |
2495 pc_ += sizeof(uint8_t); | 2492 pc_ += sizeof(uint8_t); |
2496 } | 2493 } |
2497 | 2494 |
2498 | 2495 |
2499 void Assembler::dd(uint32_t data) { | 2496 void Assembler::dd(uint32_t data) { |
2500 // No relocation info should be pending while using dd. dd is used | 2497 // No relocation info should be pending while using dd. dd is used |
2501 // to write pure data with no pointers and the constant pool should | 2498 // to write pure data with no pointers and the constant pool should |
2502 // be emitted before using dd. | 2499 // be emitted before using dd. |
2503 ASSERT(num_prinfo_ == 0); | 2500 ASSERT(num_pending_reloc_info_ == 0); |
2504 CheckBuffer(); | 2501 CheckBuffer(); |
2505 *reinterpret_cast<uint32_t*>(pc_) = data; | 2502 *reinterpret_cast<uint32_t*>(pc_) = data; |
2506 pc_ += sizeof(uint32_t); | 2503 pc_ += sizeof(uint32_t); |
2507 } | 2504 } |
2508 | 2505 |
2509 | 2506 |
2510 void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) { | 2507 void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) { |
2511 RelocInfo rinfo(pc_, rmode, data); // we do not try to reuse pool constants | 2508 RelocInfo rinfo(pc_, rmode, data); // we do not try to reuse pool constants |
2512 if (rmode >= RelocInfo::JS_RETURN && rmode <= RelocInfo::DEBUG_BREAK_SLOT) { | 2509 if (rmode >= RelocInfo::JS_RETURN && rmode <= RelocInfo::DEBUG_BREAK_SLOT) { |
2513 // Adjust code for new modes. | 2510 // Adjust code for new modes. |
2514 ASSERT(RelocInfo::IsDebugBreakSlot(rmode) | 2511 ASSERT(RelocInfo::IsDebugBreakSlot(rmode) |
2515 || RelocInfo::IsJSReturn(rmode) | 2512 || RelocInfo::IsJSReturn(rmode) |
2516 || RelocInfo::IsComment(rmode) | 2513 || RelocInfo::IsComment(rmode) |
2517 || RelocInfo::IsPosition(rmode)); | 2514 || RelocInfo::IsPosition(rmode)); |
2518 // These modes do not need an entry in the constant pool. | 2515 // These modes do not need an entry in the constant pool. |
2519 } else { | 2516 } else { |
2520 ASSERT(num_prinfo_ < kMaxNumPRInfo); | 2517 ASSERT(num_pending_reloc_info_ < kMaxNumPendingRelocInfo); |
2521 prinfo_[num_prinfo_++] = rinfo; | 2518 if (num_pending_reloc_info_ == 0) { |
| 2519 first_const_pool_use_ = pc_offset(); |
| 2520 } |
| 2521 pending_reloc_info_[num_pending_reloc_info_++] = rinfo; |
2522 // Make sure the constant pool is not emitted in place of the next | 2522 // Make sure the constant pool is not emitted in place of the next |
2523 // instruction for which we just recorded relocation info. | 2523 // instruction for which we just recorded relocation info. |
2524 BlockConstPoolBefore(pc_offset() + kInstrSize); | 2524 BlockConstPoolFor(1); |
2525 } | 2525 } |
2526 if (rinfo.rmode() != RelocInfo::NONE) { | 2526 if (rinfo.rmode() != RelocInfo::NONE) { |
2527 // Don't record external references unless the heap will be serialized. | 2527 // Don't record external references unless the heap will be serialized. |
2528 if (rmode == RelocInfo::EXTERNAL_REFERENCE) { | 2528 if (rmode == RelocInfo::EXTERNAL_REFERENCE) { |
2529 #ifdef DEBUG | 2529 #ifdef DEBUG |
2530 if (!Serializer::enabled()) { | 2530 if (!Serializer::enabled()) { |
2531 Serializer::TooLateToEnableNow(); | 2531 Serializer::TooLateToEnableNow(); |
2532 } | 2532 } |
2533 #endif | 2533 #endif |
2534 if (!Serializer::enabled() && !emit_debug_code()) { | 2534 if (!Serializer::enabled() && !emit_debug_code()) { |
2535 return; | 2535 return; |
2536 } | 2536 } |
2537 } | 2537 } |
2538 ASSERT(buffer_space() >= kMaxRelocSize); // too late to grow buffer here | 2538 ASSERT(buffer_space() >= kMaxRelocSize); // too late to grow buffer here |
2539 if (rmode == RelocInfo::CODE_TARGET_WITH_ID) { | 2539 if (rmode == RelocInfo::CODE_TARGET_WITH_ID) { |
2540 ASSERT(ast_id_for_reloc_info_ != kNoASTId); | 2540 ASSERT(ast_id_for_reloc_info_ != kNoASTId); |
2541 RelocInfo reloc_info_with_ast_id(pc_, rmode, ast_id_for_reloc_info_); | 2541 RelocInfo reloc_info_with_ast_id(pc_, rmode, ast_id_for_reloc_info_); |
2542 ast_id_for_reloc_info_ = kNoASTId; | 2542 ast_id_for_reloc_info_ = kNoASTId; |
2543 reloc_info_writer.Write(&reloc_info_with_ast_id); | 2543 reloc_info_writer.Write(&reloc_info_with_ast_id); |
2544 } else { | 2544 } else { |
2545 reloc_info_writer.Write(&rinfo); | 2545 reloc_info_writer.Write(&rinfo); |
2546 } | 2546 } |
2547 } | 2547 } |
2548 } | 2548 } |
2549 | 2549 |
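The first_const_pool_use_ update above records the pc offset of the placeholder load that will be patched for the first pending entry; later entries are always closer to the eventual pool, so this single offset bounds the distance the 12-bit ldr offset must cover. As a sketch (imm12 gives a 4095-byte reach; the exact kMaxDistToPool value is not shown in this hunk):

    // For every pending entry i, the placeholder at pending_reloc_info_[i].pc()
    // is 'ldr rd, [pc, #0]', so when the pool is finally emitted:
    //   delta_i = pool_entry_pc_i - (pending_reloc_info_[i].pc() + kPcLoadDelta)
    // must satisfy is_uint12(delta_i), i.e. delta_i <= 4095. The oldest entry,
    // at first_const_pool_use_, has the largest delta, so checking it suffices.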
2550 | 2550 |
| 2551 void Assembler::BlockConstPoolFor(int instructions) { |
| 2552 int pc_limit = pc_offset() + instructions * kInstrSize; |
| 2553 if (no_const_pool_before_ < pc_limit) { |
 | 2554 // If there are some pending entries, the constant pool cannot be blocked
 | 2555 // further than first_const_pool_use_ + kMaxDistToPool.
| 2556 ASSERT((num_pending_reloc_info_ == 0) || |
| 2557 (pc_limit < (first_const_pool_use_ + kMaxDistToPool))); |
| 2558 no_const_pool_before_ = pc_limit; |
| 2559 } |
| 2560 |
| 2561 if (next_buffer_check_ < no_const_pool_before_) { |
| 2562 next_buffer_check_ = no_const_pool_before_; |
| 2563 } |
| 2564 } |
| 2565 |
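BlockConstPoolScope and is_const_pool_blocked() are declared in the header, which this hunk does not show. Judging from their use in this file, the scope is presumably a small RAII wrapper over the existing nesting counter, roughly like the sketch below (member function names are assumptions):

    class BlockConstPoolScope {
     public:
      explicit BlockConstPoolScope(Assembler* assem) : assem_(assem) {
        assem_->StartBlockConstPool();  // presumably ++const_pool_blocked_nesting_
      }
      ~BlockConstPoolScope() {
        assem_->EndBlockConstPool();    // presumably --const_pool_blocked_nesting_
      }
     private:
      Assembler* assem_;
    };

is_const_pool_blocked() presumably returns true while that counter is positive or while pc_offset() is still below no_const_pool_before_.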
| 2566 |
2551 void Assembler::CheckConstPool(bool force_emit, bool require_jump) { | 2567 void Assembler::CheckConstPool(bool force_emit, bool require_jump) { |
2552 // Calculate the offset of the next check. It will be overwritten | 2568 // Some short sequences of instructions must not be broken up by constant
2553 // when a const pool is generated or when const pools are being | 2569 // pool emission; such sequences are protected by calls to BlockConstPoolFor
2554 // blocked for a specific range. | 2570 // and BlockConstPoolScope.
2555 next_buffer_check_ = pc_offset() + kCheckConstInterval; | 2571 if (is_const_pool_blocked()) { |
2556 | |
2557 // There is nothing to do if there are no pending relocation info entries. | |
2558 if (num_prinfo_ == 0) return; | |
2559 | |
2560 // We emit a constant pool at regular intervals of about kDistBetweenPools | |
2561 // or when requested by parameter force_emit (e.g. after each function). | |
2562 // We prefer not to emit a jump unless the max distance is reached or if we | |
2563 // are running low on slots, which can happen if a lot of constants are being | |
2564 // emitted (e.g. --debug-code and many static references). | |
2565 int dist = pc_offset() - last_const_pool_end_; | |
2566 if (!force_emit && dist < kMaxDistBetweenPools && | |
2567 (require_jump || dist < kDistBetweenPools) && | |
2568 // TODO(1236125): Cleanup the "magic" number below. We know that | |
2569 // the code generation will test every kCheckConstIntervalInst. | |
2570 // Thus we are safe as long as we generate less than 7 constant | |
2571 // entries per instruction. | |
2572 (num_prinfo_ < (kMaxNumPRInfo - (7 * kCheckConstIntervalInst)))) { | |
2573 return; | |
2574 } | |
2575 | |
2576 // If we did not return by now, we need to emit the constant pool soon. | |
2577 | |
2578 // However, some small sequences of instructions must not be broken up by the | |
2579 // insertion of a constant pool; such sequences are protected by setting | |
2580 // either const_pool_blocked_nesting_ or no_const_pool_before_, which are | |
2581 // both checked here. Also, recursive calls to CheckConstPool are blocked by | |
2582 // no_const_pool_before_. | |
2583 if (const_pool_blocked_nesting_ > 0 || pc_offset() < no_const_pool_before_) { | |
2584 // Emission is currently blocked; make sure we try again as soon as | |
2585 // possible. | |
2586 if (const_pool_blocked_nesting_ > 0) { | |
2587 next_buffer_check_ = pc_offset() + kInstrSize; | |
2588 } else { | |
2589 next_buffer_check_ = no_const_pool_before_; | |
2590 } | |
2591 | |
2592 // Something is wrong if emission is forced and blocked at the same time. | 2572 // Something is wrong if emission is forced and blocked at the same time. |
2593 ASSERT(!force_emit); | 2573 ASSERT(!force_emit); |
2594 return; | 2574 return; |
2595 } | 2575 } |
2596 | 2576 |
2597 int jump_instr = require_jump ? kInstrSize : 0; | 2577 // There is nothing to do if there are no pending constant pool entries. |
| 2578 if (num_pending_reloc_info_ == 0) { |
| 2579 // Calculate the offset of the next check. |
| 2580 next_buffer_check_ = pc_offset() + kCheckPoolInterval; |
| 2581 return; |
| 2582 } |
| 2583 |
| 2584 // We emit a constant pool when: |
| 2585 // * requested to do so by parameter force_emit (e.g. after each function). |
| 2586 // * the distance to the first instruction accessing the constant pool is |
| 2587 // kAvgDistToPool or more. |
| 2588 // * no jump is required and the distance to the first instruction accessing |
| 2589 // the constant pool is at least kMaxDistToPool / 2. |
| 2590 ASSERT(first_const_pool_use_ >= 0); |
| 2591 int dist = pc_offset() - first_const_pool_use_; |
| 2592 if (!force_emit && dist < kAvgDistToPool && |
| 2593 (require_jump || (dist < (kMaxDistToPool / 2)))) { |
| 2594 return; |
| 2595 } |
2598 | 2596 |
2599 // Check that the code buffer is large enough before emitting the constant | 2597 // Check that the code buffer is large enough before emitting the constant |
2600 // pool and relocation information (include the jump over the pool and the | 2598 // pool (including the jump over the pool, the constant pool marker, and
2601 // constant pool marker). | 2599 // the gap to the relocation information).
2602 int max_needed_space = | 2600 int jump_instr = require_jump ? kInstrSize : 0; |
2603 jump_instr + kInstrSize + num_prinfo_*(kInstrSize + kMaxRelocSize); | 2601 int needed_space = jump_instr + kInstrSize + |
2604 while (buffer_space() <= (max_needed_space + kGap)) GrowBuffer(); | 2602 num_pending_reloc_info_ * kInstrSize + kGap; |
| 2603 while (buffer_space() <= needed_space) GrowBuffer(); |
2605 | 2604 |
2606 // Block recursive calls to CheckConstPool. | 2605 { |
2607 BlockConstPoolBefore(pc_offset() + jump_instr + kInstrSize + | 2606 // Block recursive calls to CheckConstPool. |
2608 num_prinfo_*kInstrSize); | 2607 BlockConstPoolScope block_const_pool(this); |
2609 // Don't bother to check for the emit calls below. | |
2610 next_buffer_check_ = no_const_pool_before_; | |
2611 | 2608 |
2612 // Emit jump over constant pool if necessary. | 2609 // Emit jump over constant pool if necessary. |
2613 Label after_pool; | 2610 Label after_pool; |
2614 if (require_jump) b(&after_pool); | 2611 if (require_jump) { |
| 2612 b(&after_pool); |
| 2613 } |
2615 | 2614 |
2616 RecordComment("[ Constant Pool"); | 2615 RecordComment("[ Constant Pool"); |
2617 | 2616 |
2618 // Put down constant pool marker "Undefined instruction" as specified by | 2617 // Put down constant pool marker "Undefined instruction" as specified by |
2619 // A5.6 (ARMv7) Instruction set encoding. | 2618 // A5.6 (ARMv7) Instruction set encoding. |
2620 emit(kConstantPoolMarker | num_prinfo_); | 2619 emit(kConstantPoolMarker | num_pending_reloc_info_); |
2621 | 2620 |
2622 // Emit constant pool entries. | 2621 // Emit constant pool entries. |
2623 for (int i = 0; i < num_prinfo_; i++) { | 2622 for (int i = 0; i < num_pending_reloc_info_; i++) { |
2624 RelocInfo& rinfo = prinfo_[i]; | 2623 RelocInfo& rinfo = pending_reloc_info_[i]; |
2625 ASSERT(rinfo.rmode() != RelocInfo::COMMENT && | 2624 ASSERT(rinfo.rmode() != RelocInfo::COMMENT && |
2626 rinfo.rmode() != RelocInfo::POSITION && | 2625 rinfo.rmode() != RelocInfo::POSITION && |
2627 rinfo.rmode() != RelocInfo::STATEMENT_POSITION); | 2626 rinfo.rmode() != RelocInfo::STATEMENT_POSITION); |
2628 Instr instr = instr_at(rinfo.pc()); | |
2629 | 2627 |
2630 // Instruction to patch must be a ldr/str [pc, #offset]. | 2628 Instr instr = instr_at(rinfo.pc()); |
2631 // P and U set, B and W clear, Rn == pc, offset12 still 0. | 2629 // Instruction to patch must be 'ldr rd, [pc, #offset]' with offset == 0. |
2632 ASSERT((instr & (7*B25 | P | U | B | W | 15*B16 | kOff12Mask)) == | 2630 ASSERT(IsLdrPcImmediateOffset(instr) && |
2633 (2*B25 | P | U | pc.code()*B16)); | 2631 GetLdrRegisterImmediateOffset(instr) == 0); |
2634 int delta = pc_ - rinfo.pc() - 8; | 2632 |
2635 ASSERT(delta >= -4); // instr could be ldr pc, [pc, #-4] followed by targ32 | 2633 int delta = pc_ - rinfo.pc() - kPcLoadDelta; |
2636 if (delta < 0) { | 2634 // 0 is the smallest delta: |
2637 instr &= ~U; | 2635 // ldr rd, [pc, #0] |
2638 delta = -delta; | 2636 // constant pool marker |
| 2637 // data |
| 2638 ASSERT(is_uint12(delta)); |
| 2639 |
| 2640 instr_at_put(rinfo.pc(), SetLdrRegisterImmediateOffset(instr, delta)); |
| 2641 emit(rinfo.data()); |
2639 } | 2642 } |
2640 ASSERT(is_uint12(delta)); | |
2641 instr_at_put(rinfo.pc(), instr + delta); | |
2642 emit(rinfo.data()); | |
2643 } | |
2644 num_prinfo_ = 0; | |
2645 last_const_pool_end_ = pc_offset(); | |
2646 | 2643 |
2647 RecordComment("]"); | 2644 num_pending_reloc_info_ = 0; |
| 2645 first_const_pool_use_ = -1; |
2648 | 2646 |
2649 if (after_pool.is_linked()) { | 2647 RecordComment("]"); |
2650 bind(&after_pool); | 2648 |
| 2649 if (after_pool.is_linked()) { |
| 2650 bind(&after_pool); |
| 2651 } |
2651 } | 2652 } |
2652 | 2653 |
2653 // Since a constant pool was just emitted, move the check offset forward by | 2654 // Since a constant pool was just emitted, move the check offset forward by |
2654 // the standard interval. | 2655 // the standard interval. |
2655 next_buffer_check_ = pc_offset() + kCheckConstInterval; | 2656 next_buffer_check_ = pc_offset() + kCheckPoolInterval; |
2656 } | 2657 } |
2657 | 2658 |
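To make the patch step concrete, a worked example of the layout CheckConstPool produces (addresses are illustrative buffer offsets; kPcLoadDelta is 8):

    //   1000: ldr r0, [pc, #0]       ; placeholder recorded in pending_reloc_info_
    //   ...
    //   2000: b after_pool           ; emitted only when require_jump
    //   2004: <constant pool marker> ; kConstantPoolMarker | num_pending_reloc_info_
    //   2008: 0xdeadbeef             ; rinfo.data() for the entry above (illustrative)
    // delta = 2008 - 1000 - 8 = 1000, which passes is_uint12(), so
    // SetLdrRegisterImmediateOffset rewrites the placeholder as
    //   ldr r0, [pc, #1000]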
2658 | 2659 |
2659 } } // namespace v8::internal | 2660 } } // namespace v8::internal |
2660 | 2661 |
2661 #endif // V8_TARGET_ARCH_ARM | 2662 #endif // V8_TARGET_ARCH_ARM |