| OLD | NEW |
| 1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include "src/codegen.h" | 5 #include "src/codegen.h" |
| 6 #include "src/deoptimizer.h" | 6 #include "src/deoptimizer.h" |
| 7 #include "src/full-codegen/full-codegen.h" | 7 #include "src/full-codegen/full-codegen.h" |
| 8 #include "src/register-configuration.h" | 8 #include "src/register-configuration.h" |
| 9 #include "src/safepoint-table.h" | 9 #include "src/safepoint-table.h" |
| 10 | 10 |
| (...skipping 48 matching lines...) |
| 59 Address prev_call_address = NULL; | 59 Address prev_call_address = NULL; |
| 60 #endif | 60 #endif |
| 61 // For each LLazyBailout instruction insert a call to the corresponding | 61 // For each LLazyBailout instruction insert a call to the corresponding |
| 62 // deoptimization entry. | 62 // deoptimization entry. |
| 63 for (int i = 0; i < deopt_data->DeoptCount(); i++) { | 63 for (int i = 0; i < deopt_data->DeoptCount(); i++) { |
| 64 if (deopt_data->Pc(i)->value() == -1) continue; | 64 if (deopt_data->Pc(i)->value() == -1) continue; |
| 65 Address call_address = code_start_address + deopt_data->Pc(i)->value(); | 65 Address call_address = code_start_address + deopt_data->Pc(i)->value(); |
| 66 Address deopt_entry = GetDeoptimizationEntry(isolate, i, LAZY); | 66 Address deopt_entry = GetDeoptimizationEntry(isolate, i, LAZY); |
| 67 // We need calls to have a predictable size in the unoptimized code, but | 67 // We need calls to have a predictable size in the unoptimized code, but |
| 68 // this is optimized code, so we don't have to have a predictable size. | 68 // this is optimized code, so we don't have to have a predictable size. |
| 69 int call_size_in_bytes = | 69 int call_size_in_bytes = MacroAssembler::CallDeoptimizerSize(); |
| 70 MacroAssembler::CallSizeNotPredictableCodeSize(isolate, | |
| 71 deopt_entry, | |
| 72 RelocInfo::NONE32); | |
| 73 int call_size_in_words = call_size_in_bytes / Assembler::kInstrSize; | 70 int call_size_in_words = call_size_in_bytes / Assembler::kInstrSize; |
| 74 DCHECK(call_size_in_bytes % Assembler::kInstrSize == 0); | 71 DCHECK(call_size_in_bytes % Assembler::kInstrSize == 0); |
| 75 DCHECK(call_size_in_bytes <= patch_size()); | 72 DCHECK(call_size_in_bytes <= patch_size()); |
| 76 CodePatcher patcher(isolate, call_address, call_size_in_words); | 73 CodePatcher patcher(isolate, call_address, call_size_in_words); |
| 77 patcher.masm()->Call(deopt_entry, RelocInfo::NONE32); | 74 patcher.masm()->CallDeoptimizer(deopt_entry); |
| 78 DCHECK(prev_call_address == NULL || | 75 DCHECK(prev_call_address == NULL || |
| 79 call_address >= prev_call_address + patch_size()); | 76 call_address >= prev_call_address + patch_size()); |
| 80 DCHECK(call_address + patch_size() <= code->instruction_end()); | 77 DCHECK(call_address + patch_size() <= code->instruction_end()); |
| 81 #ifdef DEBUG | 78 #ifdef DEBUG |
| 82 prev_call_address = call_address; | 79 prev_call_address = call_address; |
| 83 #endif | 80 #endif |
| 84 } | 81 } |
| 85 } | 82 } |
| 86 | 83 |
| 87 | 84 |
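Note on the hunk above: the per-site call size, previously computed via CallSizeNotPredictableCodeSize, becomes the fixed MacroAssembler::CallDeoptimizerSize(), and the call is emitted with CallDeoptimizer. The surrounding DCHECKs state the patching invariants: the call is a whole number of instructions, fits within patch_size(), and successive patch sites do not overlap. A minimal standalone sketch of those invariants follows (not V8 code; kInstrSize and kPatchSize here are illustrative assumptions):

```cpp
#include <cassert>
#include <cstdint>
#include <vector>

// Standalone model of the patching invariants asserted above. The constants
// are illustrative assumptions, not the real V8 values.
constexpr int kInstrSize = 4;               // ARM instructions are 4 bytes
constexpr int kPatchSize = 3 * kInstrSize;  // assumed space reserved per site

void CheckPatchSites(uintptr_t code_start, const std::vector<int>& pc_offsets,
                     int call_size_in_bytes) {
  // The emitted call must be a whole number of instructions and must fit in
  // the region reserved for patching.
  assert(call_size_in_bytes % kInstrSize == 0);
  assert(call_size_in_bytes <= kPatchSize);
  uintptr_t prev_call_address = 0;
  for (int pc : pc_offsets) {
    if (pc == -1) continue;  // entries with no recorded pc are skipped
    uintptr_t call_address = code_start + static_cast<uintptr_t>(pc);
    // Successive patch sites may not overlap.
    assert(prev_call_address == 0 ||
           call_address >= prev_call_address + kPatchSize);
    prev_call_address = call_address;
  }
}

int main() {
  // Example: three patch sites spaced kPatchSize apart, one skipped entry.
  CheckPatchSites(0x1000, {0, -1, 12, 24}, 2 * kInstrSize);
  return 0;
}
```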
| (...skipping 212 matching lines...) |
| 300 __ pop(ip); // get continuation, leave pc on stack | 297 __ pop(ip); // get continuation, leave pc on stack |
| 301 __ pop(lr); | 298 __ pop(lr); |
| 302 __ Jump(ip); | 299 __ Jump(ip); |
| 303 __ stop("Unreachable."); | 300 __ stop("Unreachable."); |
| 304 } | 301 } |
| 305 | 302 |
| 306 | 303 |
| 307 void Deoptimizer::TableEntryGenerator::GeneratePrologue() { | 304 void Deoptimizer::TableEntryGenerator::GeneratePrologue() { |
| 308 // Create a sequence of deoptimization entries. | 305 // Create a sequence of deoptimization entries. |
| 309 // Note that registers are still live when jumping to an entry. | 306 // Note that registers are still live when jumping to an entry. |
| 310 Label done; | 307 |
| 311 for (int i = 0; i < count(); i++) { | 308 // We need to be able to generate immediates up to kMaxNumberOfEntries. On |
| 312 int start = masm()->pc_offset(); | 309 // ARMv7, we can use movw (with a maximum immediate of 0xffff). On ARMv6, we |
| 313 USE(start); | 310 // need two instructions. |
| 314 __ mov(ip, Operand(i)); | 311 STATIC_ASSERT((kMaxNumberOfEntries - 1) <= 0xffff); |
| 315 __ b(&done); | 312 if (CpuFeatures::IsSupported(ARMv7)) { |
| 316 DCHECK(masm()->pc_offset() - start == table_entry_size_); | 313 CpuFeatureScope scope(masm(), ARMv7); |
| | 314 Label done; |
| | 315 for (int i = 0; i < count(); i++) { |
| | 316 int start = masm()->pc_offset(); |
| | 317 USE(start); |
| | 318 __ movw(ip, i); |
| | 319 __ b(&done); |
| | 320 DCHECK_EQ(table_entry_size_, masm()->pc_offset() - start); |
| | 321 } |
| | 322 __ bind(&done); |
| | 323 } else { |
| | 324 // We want to keep table_entry_size_ == 8 (since this is the common case), |
| | 325 // but we need two instructions to load most immediates over 0xff. To handle |
| | 326 // this, we set the low byte in the main table, and then set the high byte |
| | 327 // in a separate table if necessary. |
| | 328 Label high_fixes[256]; |
| | 329 int high_fix_max = (count() - 1) >> 8; |
| | 330 DCHECK_GT(arraysize(high_fixes), high_fix_max); |
| | 331 for (int i = 0; i < count(); i++) { |
| | 332 int start = masm()->pc_offset(); |
| | 333 USE(start); |
| | 334 __ mov(ip, Operand(i & 0xff)); // Set the low byte. |
| | 335 __ b(&high_fixes[i >> 8]); // Jump to the secondary table. |
| | 336 DCHECK_EQ(table_entry_size_, masm()->pc_offset() - start); |
| | 337 } |
| | 338 // Generate the secondary table, to set the high byte. |
| | 339 for (int high = 1; high <= high_fix_max; high++) { |
| | 340 __ bind(&high_fixes[high]); |
| | 341 __ orr(ip, ip, Operand(high << 8)); |
| | 342 // If this isn't the last entry, emit a branch to the end of the table. |
| | 343 // The last entry can just fall through. |
| | 344 if (high < high_fix_max) __ b(&high_fixes[0]); |
| | 345 } |
| | 346 // Bind high_fixes[0] last, for indices like 0x00**. This case requires no |
| | 347 // fix-up, so for (common) small tables we can jump here, then just fall |
| | 348 // through with no additional branch. |
| | 349 __ bind(&high_fixes[0]); |
| 317 } | 350 } |
| 318 __ bind(&done); | |
| 319 __ push(ip); | 351 __ push(ip); |
| 320 } | 352 } |
| 321 | 353 |
| 322 | 354 |
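Note on the new GeneratePrologue: in the ARMv6 path, each 8-byte main-table entry materializes only the low byte of its index (mov ip, i & 0xff) and branches into a per-high-byte fix-up table that ORs in high << 8, so ip holds the full entry index before it is pushed. Binding high_fixes[0] last lets indices below 0x100 fall through with no extra fix-up. A minimal standalone sketch of that encoding (not V8 code; plain C++ stands in for the emitted instructions):

```cpp
#include <cassert>
#include <cstdint>

// Standalone model of the ARMv6 two-level entry encoding above: the main
// table sets only the low byte of the entry index, and the fix-up table
// ORs in the high byte.
uint32_t DecodeEntryIndex(uint32_t i) {
  uint32_t ip = i & 0xff;          // mov ip, Operand(i & 0xff)
  uint32_t high = i >> 8;          // which high_fixes[high] label we branch to
  if (high != 0) ip |= high << 8;  // orr ip, ip, Operand(high << 8)
  return ip;                       // high_fixes[0] applies no fix-up
}

int main() {
  // Every index the STATIC_ASSERT allows (up to 0xffff) must round-trip.
  for (uint32_t i = 0; i < (1u << 16); i++) {
    assert(DecodeEntryIndex(i) == i);
  }
  return 0;
}
```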
| 323 void FrameDescription::SetCallerPc(unsigned offset, intptr_t value) { | 355 void FrameDescription::SetCallerPc(unsigned offset, intptr_t value) { |
| 324 SetFrameSlot(offset, value); | 356 SetFrameSlot(offset, value); |
| 325 } | 357 } |
| 326 | 358 |
| 327 | 359 |
| 328 void FrameDescription::SetCallerFp(unsigned offset, intptr_t value) { | 360 void FrameDescription::SetCallerFp(unsigned offset, intptr_t value) { |
| 329 SetFrameSlot(offset, value); | 361 SetFrameSlot(offset, value); |
| 330 } | 362 } |
| 331 | 363 |
| 332 | 364 |
| 333 void FrameDescription::SetCallerConstantPool(unsigned offset, intptr_t value) { | 365 void FrameDescription::SetCallerConstantPool(unsigned offset, intptr_t value) { |
| 334 DCHECK(FLAG_enable_embedded_constant_pool); | 366 DCHECK(FLAG_enable_embedded_constant_pool); |
| 335 SetFrameSlot(offset, value); | 367 SetFrameSlot(offset, value); |
| 336 } | 368 } |
| 337 | 369 |
| 338 | 370 |
| 339 #undef __ | 371 #undef __ |
| 340 | 372 |
| 341 } // namespace internal | 373 } // namespace internal |
| 342 } // namespace v8 | 374 } // namespace v8 |