Chromium Code Reviews

Unified Diff: src/arm/assembler-arm.cc

Issue 61763025: ARM: Merge redundant entries in literal pool. (Closed) Base URL: https://v8.googlecode.com/svn/branches/bleeding_edge
Patch Set: Review comments addressed. Created 7 years, 1 month ago
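This patch splits the assembler's pending constant-pool entries into separate 32-bit and 64-bit lists and, when the pool is emitted, points loads of an already-pooled value at the existing pool slot instead of emitting a duplicate. As rough orientation, a minimal standalone sketch of that sharing idea (illustration only, not V8 code; PendingLoad, AssignPoolSlots and the vector-based pool are invented for this sketch):

#include <cstddef>
#include <cstdint>
#include <vector>

// Hypothetical illustration only: each pending load wants `value` placed in
// the literal pool; loads of equal values should end up sharing one slot.
struct PendingLoad { uint64_t value; };

// Returns, for each pending load, the pool slot it should reference.
// Values seen before reuse the earlier slot, so duplicates add no pool space.
std::vector<int> AssignPoolSlots(const std::vector<PendingLoad>& loads,
                                 std::vector<uint64_t>* pool) {
  std::vector<int> slot_of_load(loads.size());
  for (std::size_t i = 0; i < loads.size(); ++i) {
    int slot = -1;
    for (std::size_t j = 0; j < pool->size(); ++j) {
      if ((*pool)[j] == loads[i].value) {
        slot = static_cast<int>(j);  // Reuse the slot of an equal value.
        break;
      }
    }
    if (slot < 0) {  // First occurrence: append a new slot.
      slot = static_cast<int>(pool->size());
      pool->push_back(loads[i].value);
    }
    slot_of_load[i] = slot;
  }
  return slot_of_load;
}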
@@ -1,10 +1,10 @@
 // Copyright (c) 1994-2006 Sun Microsystems Inc.
 // All Rights Reserved.
 //
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions
 // are met:
 //
 // - Redistributions of source code must retain the above copyright notice,
 // this list of conditions and the following disclaimer.
 //
(...skipping 499 matching lines...)
@@ -510,40 +510,41 @@
 const Instr kLdrStrInstrTypeMask = 0xffff0000;
 const Instr kLdrStrInstrArgumentMask = 0x0000ffff;
 const Instr kLdrStrOffsetMask = 0x00000fff;
 
 
 Assembler::Assembler(Isolate* isolate, void* buffer, int buffer_size)
     : AssemblerBase(isolate, buffer, buffer_size),
       recorded_ast_id_(TypeFeedbackId::None()),
       positions_recorder_(this) {
   reloc_info_writer.Reposition(buffer_ + buffer_size_, pc_);
-  num_pending_reloc_info_ = 0;
+  num_pending_32_bit_reloc_info_ = 0;
   num_pending_64_bit_reloc_info_ = 0;
   next_buffer_check_ = 0;
   const_pool_blocked_nesting_ = 0;
   no_const_pool_before_ = 0;
-  first_const_pool_use_ = -1;
+  first_const_pool_32_use_ = -1;
+  first_const_pool_64_use_ = -1;
   last_bound_pos_ = 0;
   ClearRecordedAstId();
 }
 
 
 Assembler::~Assembler() {
   ASSERT(const_pool_blocked_nesting_ == 0);
 }
 
 
 void Assembler::GetCode(CodeDesc* desc) {
   // Emit constant pool if necessary.
   CheckConstPool(true, false);
-  ASSERT(num_pending_reloc_info_ == 0);
+  ASSERT(num_pending_32_bit_reloc_info_ == 0);
   ASSERT(num_pending_64_bit_reloc_info_ == 0);
 
   // Set up code descriptor.
   desc->buffer = buffer_;
   desc->buffer_size = buffer_size_;
   desc->instr_size = pc_offset();
   desc->reloc_size = (buffer_ + buffer_size_) - reloc_info_writer.pos();
 }
 
 
(...skipping 2592 matching lines...)
@@ -3142,48 +3143,53 @@
   buffer_size_ = desc.buffer_size;
   pc_ += pc_delta;
   reloc_info_writer.Reposition(reloc_info_writer.pos() + rc_delta,
                                reloc_info_writer.last_pc() + pc_delta);
 
   // None of our relocation types are pc relative pointing outside the code
   // buffer nor pc absolute pointing inside the code buffer, so there is no need
   // to relocate any emitted relocation entries.
 
   // Relocate pending relocation entries.
-  for (int i = 0; i < num_pending_reloc_info_; i++) {
-    RelocInfo& rinfo = pending_reloc_info_[i];
+  for (int i = 0; i < num_pending_32_bit_reloc_info_; i++) {
+    RelocInfo& rinfo = pending_32_bit_reloc_info_[i];
     ASSERT(rinfo.rmode() != RelocInfo::COMMENT &&
            rinfo.rmode() != RelocInfo::POSITION);
     if (rinfo.rmode() != RelocInfo::JS_RETURN) {
       rinfo.set_pc(rinfo.pc() + pc_delta);
     }
   }
+  for (int i = 0; i < num_pending_64_bit_reloc_info_; i++) {
+    RelocInfo& rinfo = pending_64_bit_reloc_info_[i];
+    ASSERT(rinfo.rmode() == RelocInfo::NONE64);
+    rinfo.set_pc(rinfo.pc() + pc_delta);
+  }
 }
 
 
 void Assembler::db(uint8_t data) {
   // No relocation info should be pending while using db. db is used
   // to write pure data with no pointers and the constant pool should
   // be emitted before using db.
-  ASSERT(num_pending_reloc_info_ == 0);
+  ASSERT(num_pending_32_bit_reloc_info_ == 0);
   ASSERT(num_pending_64_bit_reloc_info_ == 0);
   CheckBuffer();
   *reinterpret_cast<uint8_t*>(pc_) = data;
   pc_ += sizeof(uint8_t);
 }
 
 
 void Assembler::dd(uint32_t data) {
   // No relocation info should be pending while using dd. dd is used
   // to write pure data with no pointers and the constant pool should
   // be emitted before using dd.
-  ASSERT(num_pending_reloc_info_ == 0);
+  ASSERT(num_pending_32_bit_reloc_info_ == 0);
   ASSERT(num_pending_64_bit_reloc_info_ == 0);
   CheckBuffer();
   *reinterpret_cast<uint32_t*>(pc_) = data;
   pc_ += sizeof(uint32_t);
 }
 
 
 void Assembler::emit_code_stub_address(Code* stub) {
   CheckBuffer();
   *reinterpret_cast<uint32_t*>(pc_) =
(...skipping 49 matching lines...)
@@ -3239,23 +3245,27 @@
 
 
 void Assembler::RecordRelocInfo(double data) {
   // We do not try to reuse pool constants.
   RelocInfo rinfo(pc_, data);
   RecordRelocInfoConstantPoolEntryHelper(rinfo);
 }
 
 
 void Assembler::RecordRelocInfoConstantPoolEntryHelper(const RelocInfo& rinfo) {
-  ASSERT(num_pending_reloc_info_ < kMaxNumPendingRelocInfo);
-  if (num_pending_reloc_info_ == 0) {
-    first_const_pool_use_ = pc_offset();
+  if (rinfo.rmode() == RelocInfo::NONE64) {
+    ASSERT(num_pending_64_bit_reloc_info_ < kMaxNumPending64RelocInfo);
+    if (num_pending_64_bit_reloc_info_ == 0) {
+      first_const_pool_64_use_ = pc_offset();
+    }
+    pending_64_bit_reloc_info_[num_pending_64_bit_reloc_info_++] = rinfo;
+  } else {
+    ASSERT(num_pending_32_bit_reloc_info_ < kMaxNumPending32RelocInfo);
+    if (num_pending_32_bit_reloc_info_ == 0) {
+      first_const_pool_32_use_ = pc_offset();
+    }
+    pending_32_bit_reloc_info_[num_pending_32_bit_reloc_info_++] = rinfo;
   }
-  pending_reloc_info_[num_pending_reloc_info_++] = rinfo;
-  if (rinfo.rmode() == RelocInfo::NONE64) {
-    ++num_pending_64_bit_reloc_info_;
-  }
-  ASSERT(num_pending_64_bit_reloc_info_ <= num_pending_reloc_info_);
   // Make sure the constant pool is not emitted in place of the next
   // instruction for which we just recorded relocation info.
   BlockConstPoolFor(1);
 }
@@ -3262,18 +3272,21 @@
 
 
 void Assembler::BlockConstPoolFor(int instructions) {
   int pc_limit = pc_offset() + instructions * kInstrSize;
   if (no_const_pool_before_ < pc_limit) {
-    // If there are some pending entries, the constant pool cannot be blocked
-    // further than constant pool instruction's reach.
-    ASSERT((num_pending_reloc_info_ == 0) ||
-           (pc_limit - first_const_pool_use_ < kMaxDistToIntPool));
-    // TODO(jfb) Also check 64-bit entries are in range (requires splitting
-    // them up from 32-bit entries).
+    // Max pool start (if we need a jump and an alignment).
+#ifdef DEBUG
+    int start = pc_limit + kInstrSize + 2 * kPointerSize;
+    ASSERT((num_pending_32_bit_reloc_info_ == 0) ||
+           (start - first_const_pool_32_use_ +
+            num_pending_64_bit_reloc_info_ * kDoubleSize < kMaxDistToIntPool));
+    ASSERT((num_pending_64_bit_reloc_info_ == 0) ||
+           (start - first_const_pool_64_use_ < kMaxDistToFPPool));
+#endif
     no_const_pool_before_ = pc_limit;
   }
 
   if (next_buffer_check_ < no_const_pool_before_) {
     next_buffer_check_ = no_const_pool_before_;
   }
 }
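The new DEBUG-only check bounds how far the pool could drift from its first use if blocking is extended to pc_limit: start = pc_limit + kInstrSize + 2 * kPointerSize presumably allows for a branch over the pool, the pool marker, and one word of alignment padding, and the 32-bit bound additionally adds num_pending_64_bit_reloc_info_ * kDoubleSize because the 64-bit entries are emitted ahead of the 32-bit ones (see the emission code further down). A worked instance of that arithmetic, assuming the usual 32-bit ARM sizes; MaxPoolStart and the numbers are illustrative only:

#include <cassert>

// Worked instance of the DEBUG bound above, under the usual 32-bit ARM sizes.
// The constants are assumed here, not taken from this diff.
const int kInstrSize = 4;    // one ARM instruction
const int kPointerSize = 4;  // one 32-bit pool entry
const int kDoubleSize = 8;   // one 64-bit pool entry

// Farthest point at which the pool data could start: the blocked region,
// plus a branch over the pool, the marker word and one alignment word.
int MaxPoolStart(int pc_limit) {
  return pc_limit + kInstrSize + 2 * kPointerSize;
}

int main() {
  // Blocking emission up to pc_limit = 1000 means the pool can start no
  // later than offset 1012.
  assert(MaxPoolStart(1000) == 1012);
  // With, say, 3 pending doubles emitted first, the first 32-bit entry can
  // land another 3 * 8 = 24 bytes further out, i.e. at offset 1036, which is
  // the extra term the first ASSERT above adds to its distance check.
  assert(MaxPoolStart(1000) + 3 * kDoubleSize == 1036);
  return 0;
}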
@@ -3280,67 +3293,67 @@
 
 
 void Assembler::CheckConstPool(bool force_emit, bool require_jump) {
   // Some short sequence of instruction mustn't be broken up by constant pool
   // emission, such sequences are protected by calls to BlockConstPoolFor and
   // BlockConstPoolScope.
   if (is_const_pool_blocked()) {
     // Something is wrong if emission is forced and blocked at the same time.
     ASSERT(!force_emit);
     return;
   }
 
   // There is nothing to do if there are no pending constant pool entries.
-  if (num_pending_reloc_info_ == 0) {
-    ASSERT(num_pending_64_bit_reloc_info_ == 0);
+  if ((num_pending_32_bit_reloc_info_ == 0) &&
+      (num_pending_64_bit_reloc_info_ == 0)) {
     // Calculate the offset of the next check.
     next_buffer_check_ = pc_offset() + kCheckPoolInterval;
     return;
   }
 
   // Check that the code buffer is large enough before emitting the constant
   // pool (include the jump over the pool and the constant pool marker and
   // the gap to the relocation information).
-  // Note 64-bit values are wider, and the first one needs to be 64-bit aligned.
   int jump_instr = require_jump ? kInstrSize : 0;
   int size_up_to_marker = jump_instr + kInstrSize;
-  int size_after_marker = num_pending_reloc_info_ * kPointerSize;
+  int size_after_marker = num_pending_32_bit_reloc_info_ * kPointerSize;
   bool has_fp_values = (num_pending_64_bit_reloc_info_ > 0);
-  // 64-bit values must be 64-bit aligned.
-  // We'll start emitting at PC: branch+marker, then 32-bit values, then
-  // 64-bit values which might need to be aligned.
-  bool require_64_bit_align = has_fp_values &&
-      (((uintptr_t)pc_ + size_up_to_marker + size_after_marker) & 0x3);
-  if (require_64_bit_align) {
-    size_after_marker += kInstrSize;
+  bool require_64_bit_align = false;
+  if (has_fp_values) {
+    require_64_bit_align = (((uintptr_t)pc_ + size_up_to_marker) & 0x7);
+    if (require_64_bit_align) {
+      size_after_marker += kInstrSize;
+    }
+    size_after_marker += num_pending_64_bit_reloc_info_ * kDoubleSize;
   }
-  // num_pending_reloc_info_ also contains 64-bit entries, the above code
-  // therefore already counted half of the size for 64-bit entries. Add the
-  // remaining size.
-  STATIC_ASSERT(kPointerSize == kDoubleSize / 2);
-  size_after_marker += num_pending_64_bit_reloc_info_ * (kDoubleSize / 2);
 
   int size = size_up_to_marker + size_after_marker;
 
   // We emit a constant pool when:
   //  * requested to do so by parameter force_emit (e.g. after each function).
   //  * the distance from the first instruction accessing the constant pool to
   //    any of the constant pool entries will exceed its limit the next
   //    time the pool is checked. This is overly restrictive, but we don't emit
   //    constant pool entries in-order so it's conservatively correct.
   //  * the instruction doesn't require a jump after itself to jump over the
   //    constant pool, and we're getting close to running out of range.
   if (!force_emit) {
-    ASSERT((first_const_pool_use_ >= 0) && (num_pending_reloc_info_ > 0));
-    int dist = pc_offset() + size - first_const_pool_use_;
+    ASSERT((first_const_pool_32_use_ >= 0) || (first_const_pool_64_use_ >= 0));
+    bool need_emit = false;
     if (has_fp_values) {
-      if ((dist < kMaxDistToFPPool - kCheckPoolInterval) &&
-          (require_jump || (dist < kMaxDistToFPPool / 2))) {
-        return;
-      }
-    } else {
-      if ((dist < kMaxDistToIntPool - kCheckPoolInterval) &&
-          (require_jump || (dist < kMaxDistToIntPool / 2))) {
-        return;
+      int dist64 = pc_offset() +
+                   size -
+                   num_pending_32_bit_reloc_info_ * kPointerSize -
+                   first_const_pool_64_use_;
+      if ((dist64 >= kMaxDistToFPPool - kCheckPoolInterval) ||
+          (!require_jump && (dist64 >= kMaxDistToFPPool / 2))) {
+        need_emit = true;
       }
     }
+    int dist32 =
+        pc_offset() + size - first_const_pool_32_use_;
+    if ((dist32 >= kMaxDistToIntPool - kCheckPoolInterval) ||
+        (!require_jump && (dist32 >= kMaxDistToIntPool / 2))) {
+      need_emit = true;
+    }
+    if (!need_emit) return;
   }
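To make the new size and distance arithmetic concrete: size_after_marker now counts the 32-bit words, an optional alignment word, and the doubles explicitly (the old code relied on every 64-bit entry also occupying a slot in the shared list); dist64 measures from the first double-using instruction to the end of the 64-bit region (the pool end minus the trailing 32-bit entries), and dist32 from the first 32-bit-using instruction to the end of the whole pool. A small worked example that mirrors the size computation above, assuming the usual 32-bit ARM sizes; PoolSize and its inputs are made up for illustration:

#include <cassert>
#include <cstdint>

// Worked instance of the pool-size computation above. The three sizes are the
// usual 32-bit ARM values; pc and the entry counts are made-up inputs.
const int kInstrSize = 4;
const int kPointerSize = 4;
const int kDoubleSize = 8;

int PoolSize(uintptr_t pc, bool require_jump,
             int num_32_bit_entries, int num_64_bit_entries) {
  int jump_instr = require_jump ? kInstrSize : 0;
  int size_up_to_marker = jump_instr + kInstrSize;  // optional jump + marker
  int size_after_marker = num_32_bit_entries * kPointerSize;
  if (num_64_bit_entries > 0) {
    // Doubles follow the marker directly and must be 8-byte aligned.
    if ((pc + size_up_to_marker) & 0x7) size_after_marker += kInstrSize;
    size_after_marker += num_64_bit_entries * kDoubleSize;
  }
  return size_up_to_marker + size_after_marker;
}

int main() {
  // pc = 0x100 is 8-byte aligned; jump + marker keep the doubles aligned, so
  // no padding word is needed: 8 (jump + marker) + 2*8 (doubles) + 3*4 = 36.
  assert(PoolSize(0x100, true, 3, 2) == 36);
  // Without the jump the doubles would start misaligned, so one padding word
  // is inserted: 4 (marker) + 4 (padding) + 2*8 + 3*4 = 36 as well.
  assert(PoolSize(0x100, false, 3, 2) == 36);
  return 0;
}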
@@ -3347,53 +3360,63 @@
 
   int needed_space = size + kGap;
   while (buffer_space() <= needed_space) GrowBuffer();
 
   {
     // Block recursive calls to CheckConstPool.
     BlockConstPoolScope block_const_pool(this);
     RecordComment("[ Constant Pool");
     RecordConstPool(size);
 
     // Emit jump over constant pool if necessary.
     Label after_pool;
     if (require_jump) {
       b(&after_pool);
     }
 
     // Put down constant pool marker "Undefined instruction".
     // The data size helps disassembly know what to print.
     emit(kConstantPoolMarker |
          EncodeConstantPoolLength(size_after_marker / kPointerSize));
 
     if (require_64_bit_align) {
       emit(kConstantPoolMarker);
     }
 
     // Emit 64-bit constant pool entries first: their range is smaller than
     // 32-bit entries.
-    for (int i = 0; i < num_pending_reloc_info_; i++) {
-      RelocInfo& rinfo = pending_reloc_info_[i];
+    for (int i = 0; i < num_pending_64_bit_reloc_info_; i++) {
+      RelocInfo& rinfo = pending_64_bit_reloc_info_[i];
 
-      if (rinfo.rmode() != RelocInfo::NONE64) {
-        // 32-bit values emitted later.
-        continue;
-      }
-
-      ASSERT(!((uintptr_t)pc_ & 0x3));  // Check 64-bit alignment.
+      ASSERT(!((uintptr_t)pc_ & 0x7));  // Check 64-bit alignment.
 
       Instr instr = instr_at(rinfo.pc());
       // Instruction to patch must be 'vldr rd, [pc, #offset]' with offset == 0.
       ASSERT((IsVldrDPcImmediateOffset(instr) &&
               GetVldrDRegisterImmediateOffset(instr) == 0));
 
       int delta = pc_ - rinfo.pc() - kPcLoadDelta;
       ASSERT(is_uint10(delta));
 
+      bool found = false;
+      uint64_t value = rinfo.raw_data64();
+      for (int j = 0; j < i; j++) {
+        RelocInfo& rinfo2 = pending_64_bit_reloc_info_[j];
+        if (value == rinfo2.raw_data64()) {
+          found = true;
+          ASSERT(rinfo2.rmode() == RelocInfo::NONE64);
+          Instr instr2 = instr_at(rinfo2.pc());
+          ASSERT(IsVldrDPcImmediateOffset(instr2));
+          delta = GetVldrDRegisterImmediateOffset(instr2);
+          delta += rinfo2.pc() - rinfo.pc();
+          break;
+        }
+      }
+
       instr_at_put(rinfo.pc(), SetVldrDRegisterImmediateOffset(instr, delta));
 
-      const double double_data = rinfo.data64();
-      uint64_t uint_data = 0;
-      OS::MemCopy(&uint_data, &double_data, sizeof(double_data));
-      emit(uint_data & 0xFFFFFFFF);
-      emit(uint_data >> 32);
+      if (!found) {
+        uint64_t uint_data = rinfo.raw_data64();
+        emit(uint_data & 0xFFFFFFFF);
+        emit(uint_data >> 32);
+      }
     }
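The 64-bit merging above scans the entries already processed in this pool for one with the same raw bit pattern; when it finds one, the current vldr is patched to point at the earlier entry's slot and no new data words are emitted. Because entry j was patched in an earlier iteration, its offset plus the pc difference gives the offset the current instruction needs. A standalone sketch of that offset arithmetic (simplified: it ignores the real vldr/ldr encodings and uses plain ints; kPcLoadDelta = 8 matches the usual ARM pipeline offset, the rest is made up):

#include <cassert>
#include <cstdint>

// Simplified model of the offset sharing above: a pc-relative load at `pc`
// reads from pc + kPcLoadDelta + offset.
const int kPcLoadDelta = 8;

struct PatchedLoad {
  int pc;         // address of the already-patched vldr/ldr
  int offset;     // its immediate offset into the pool
  uint64_t bits;  // raw constant it loads
};

// Offset a later load at `pc` needs to hit the same pool slot as `earlier`.
int SharedOffset(const PatchedLoad& earlier, int pc) {
  // earlier.pc + kPcLoadDelta + earlier.offset == pc + kPcLoadDelta + offset
  return earlier.offset + (earlier.pc - pc);
}

int main() {
  // An earlier load of 1.0 at pc = 100 was patched to reach its slot at
  // 100 + 8 + 40 = 148.
  PatchedLoad earlier = {100, 40, 0x3ff0000000000000ull};
  // A later load of the same bits at pc = 120 reuses that slot with offset
  // 20, since 120 + 8 + 20 == 148; no second copy of the value is emitted.
  assert(SharedOffset(earlier, 120) == 20);
  return 0;
}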
@@ -3400,56 +3423,73 @@
 
     // Emit 32-bit constant pool entries.
-    for (int i = 0; i < num_pending_reloc_info_; i++) {
-      RelocInfo& rinfo = pending_reloc_info_[i];
+    for (int i = 0; i < num_pending_32_bit_reloc_info_; i++) {
+      RelocInfo& rinfo = pending_32_bit_reloc_info_[i];
       ASSERT(rinfo.rmode() != RelocInfo::COMMENT &&
              rinfo.rmode() != RelocInfo::POSITION &&
              rinfo.rmode() != RelocInfo::STATEMENT_POSITION &&
-             rinfo.rmode() != RelocInfo::CONST_POOL);
-
-      if (rinfo.rmode() == RelocInfo::NONE64) {
-        // 64-bit values emitted earlier.
-        continue;
-      }
+             rinfo.rmode() != RelocInfo::CONST_POOL &&
+             rinfo.rmode() != RelocInfo::NONE64);
 
       Instr instr = instr_at(rinfo.pc());
 
       // 64-bit loads shouldn't get here.
       ASSERT(!IsVldrDPcImmediateOffset(instr));
 
-      int delta = pc_ - rinfo.pc() - kPcLoadDelta;
-      // 0 is the smallest delta:
-      //   ldr rd, [pc, #0]
-      //   constant pool marker
-      //   data
-
       if (IsLdrPcImmediateOffset(instr) &&
           GetLdrRegisterImmediateOffset(instr) == 0) {
+        int delta = pc_ - rinfo.pc() - kPcLoadDelta;
         ASSERT(is_uint12(delta));
+        // 0 is the smallest delta:
+        //   ldr rd, [pc, #0]
+        //   constant pool marker
+        //   data
+
+        bool found = false;
+        if (!Serializer::enabled() && (rinfo.rmode() >= RelocInfo::CELL)) {
+          for (int j = 0; j < i; j++) {
+            RelocInfo& rinfo2 = pending_32_bit_reloc_info_[j];
+
+            if ((rinfo2.data() == rinfo.data()) &&
+                (rinfo2.rmode() == rinfo.rmode())) {
+              Instr instr2 = instr_at(rinfo2.pc());
+              if (IsLdrPcImmediateOffset(instr2)) {
+                delta = GetLdrRegisterImmediateOffset(instr2);
+                delta += rinfo2.pc() - rinfo.pc();
+                found = true;
+                break;
+              }
+            }
+          }
+        }
+
         instr_at_put(rinfo.pc(), SetLdrRegisterImmediateOffset(instr, delta));
-        emit(rinfo.data());
+
+        if (!found) {
+          emit(rinfo.data());
+        }
       } else {
         ASSERT(IsMovW(instr));
-        emit(rinfo.data());
       }
     }
 
-    num_pending_reloc_info_ = 0;
+    num_pending_32_bit_reloc_info_ = 0;
     num_pending_64_bit_reloc_info_ = 0;
-    first_const_pool_use_ = -1;
+    first_const_pool_32_use_ = -1;
+    first_const_pool_64_use_ = -1;
 
     RecordComment("]");
 
     if (after_pool.is_linked()) {
       bind(&after_pool);
     }
   }
 
   // Since a constant pool was just emitted, move the check offset forward by
   // the standard interval.
   next_buffer_check_ = pc_offset() + kCheckPoolInterval;
 }
 
 
 } }  // namespace v8::internal
 
 #endif  // V8_TARGET_ARCH_ARM