| OLD | NEW |
| 1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include "src/v8.h" | 5 #include "src/v8.h" |
| 6 | 6 |
| 7 #include "src/arm/lithium-codegen-arm.h" | 7 #include "src/arm/lithium-codegen-arm.h" |
| 8 #include "src/arm/lithium-gap-resolver-arm.h" | 8 #include "src/arm/lithium-gap-resolver-arm.h" |
| 9 #include "src/base/bits.h" | 9 #include "src/base/bits.h" |
| 10 #include "src/code-factory.h" | 10 #include "src/code-factory.h" |
| (...skipping 34 matching lines...) | |
| 45 bool LCodeGen::GenerateCode() { | 45 bool LCodeGen::GenerateCode() { |
| 46 LPhase phase("Z_Code generation", chunk()); | 46 LPhase phase("Z_Code generation", chunk()); |
| 47 DCHECK(is_unused()); | 47 DCHECK(is_unused()); |
| 48 status_ = GENERATING; | 48 status_ = GENERATING; |
| 49 | 49 |
| 50 // Open a frame scope to indicate that there is a frame on the stack. The | 50 // Open a frame scope to indicate that there is a frame on the stack. The |
| 51 // NONE indicates that the scope shouldn't actually generate code to set up | 51 // NONE indicates that the scope shouldn't actually generate code to set up |
| 52 // the frame (that is done in GeneratePrologue). | 52 // the frame (that is done in GeneratePrologue). |
| 53 FrameScope frame_scope(masm_, StackFrame::NONE); | 53 FrameScope frame_scope(masm_, StackFrame::NONE); |
| 54 | 54 |
| 55 return GeneratePrologue() && | 55 return GeneratePrologue() && GenerateBody() && GenerateDeferredCode() && |
| 56 GenerateBody() && | 56 GenerateJumpTable() && GenerateSafepointTable(); |
| 57 GenerateDeferredCode() && | |
| 58 GenerateDeoptJumpTable() && | |
| 59 GenerateSafepointTable(); | |
| 60 } | 57 } |
| 61 | 58 |
| 62 | 59 |
| 63 void LCodeGen::FinishCode(Handle<Code> code) { | 60 void LCodeGen::FinishCode(Handle<Code> code) { |
| 64 DCHECK(is_done()); | 61 DCHECK(is_done()); |
| 65 code->set_stack_slots(GetStackSlotCount()); | 62 code->set_stack_slots(GetStackSlotCount()); |
| 66 code->set_safepoint_table_offset(safepoints_.GetCodeOffset()); | 63 code->set_safepoint_table_offset(safepoints_.GetCodeOffset()); |
| 67 if (code->is_optimized_code()) RegisterWeakObjectsInOptimizedCode(code); | 64 if (code->is_optimized_code()) RegisterWeakObjectsInOptimizedCode(code); |
| 68 PopulateDeoptimizationData(code); | 65 PopulateDeoptimizationData(code); |
| 69 } | 66 } |
| (...skipping 236 matching lines...) | |
| 306 } | 303 } |
| 307 | 304 |
| 308 // Force constant pool emission at the end of the deferred code to make | 305 // Force constant pool emission at the end of the deferred code to make |
| 309 // sure that no constant pools are emitted after. | 306 // sure that no constant pools are emitted after. |
| 310 masm()->CheckConstPool(true, false); | 307 masm()->CheckConstPool(true, false); |
| 311 | 308 |
| 312 return !is_aborted(); | 309 return !is_aborted(); |
| 313 } | 310 } |
| 314 | 311 |
| 315 | 312 |
| 316 bool LCodeGen::GenerateDeoptJumpTable() { | 313 bool LCodeGen::GenerateJumpTable() { |
| 317 // Check that the jump table is accessible from everywhere in the function | 314 // Check that the jump table is accessible from everywhere in the function |
| 318 // code, i.e. that offsets to the table can be encoded in the 24bit signed | 315 // code, i.e. that offsets to the table can be encoded in the 24bit signed |
| 319 // immediate of a branch instruction. | 316 // immediate of a branch instruction. |
| 320 // To simplify we consider the code size from the first instruction to the | 317 // To simplify we consider the code size from the first instruction to the |
| 321 // end of the jump table. We also don't consider the pc load delta. | 318 // end of the jump table. We also don't consider the pc load delta. |
| 322 // Each entry in the jump table generates one instruction and inlines one | 319 // Each entry in the jump table generates one instruction and inlines one |
| 323 // 32bit data after it. | 320 // 32bit data after it. |
| 324 if (!is_int24((masm()->pc_offset() / Assembler::kInstrSize) + | 321 if (!is_int24((masm()->pc_offset() / Assembler::kInstrSize) + |
| 325 deopt_jump_table_.length() * 7)) { | 322 deopt_jump_table_.length() * 7)) { |
| 326 Abort(kGeneratedCodeIsTooLarge); | 323 Abort(kGeneratedCodeIsTooLarge); |
| 327 } | 324 } |
| 328 | 325 |
| 329 if (deopt_jump_table_.length() > 0) { | 326 if (deopt_jump_table_.length() > 0) { |
| 330 Label needs_frame, call_deopt_entry; | 327 Label needs_frame, call_deopt_entry; |
| 331 | 328 |
| 332 Comment(";;; -------------------- Jump table --------------------"); | 329 Comment(";;; -------------------- Jump table --------------------"); |
| 333 Address base = deopt_jump_table_[0].address; | 330 Address base = deopt_jump_table_[0].address; |
| 334 | 331 |
| 335 Register entry_offset = scratch0(); | 332 Register entry_offset = scratch0(); |
| 336 | 333 |
| 337 int length = deopt_jump_table_.length(); | 334 int length = deopt_jump_table_.length(); |
| 338 for (int i = 0; i < length; i++) { | 335 for (int i = 0; i < length; i++) { |
| 339 Deoptimizer::JumpTableEntry* table_entry = &deopt_jump_table_[i]; | 336 Deoptimizer::JumpTableEntry* table_entry = &deopt_jump_table_[i]; |
| 340 __ bind(&table_entry->label); | 337 __ bind(&table_entry->label); |
| 341 | 338 |
| 342 Deoptimizer::BailoutType type = table_entry->bailout_type; | 339 Deoptimizer::BailoutType type = table_entry->bailout_type; |
| 343 DCHECK(type == deopt_jump_table_[0].bailout_type); | 340 DCHECK(type == deopt_jump_table_[0].bailout_type); |
| 344 Address entry = table_entry->address; | 341 Address entry = table_entry->address; |
| 345 int id = Deoptimizer::GetDeoptimizationId(isolate(), entry, type); | 342 int id = Deoptimizer::GetDeoptimizationId(isolate(), entry, type); |
| 346 DCHECK(id != Deoptimizer::kNotDeoptimizationEntry); | 343 DCHECK_NE(Deoptimizer::kNotDeoptimizationEntry, id); |
| 347 Comment(";;; jump table entry %d: deoptimization bailout %d.", i, id); | 344 Comment(";;; jump table entry %d: deoptimization bailout %d.", i, id); |
| 348 DeoptComment(table_entry->mnemonic, table_entry->reason); | 345 DeoptComment(table_entry->mnemonic, table_entry->reason); |
| 349 | 346 |
| 350 // Second-level deopt table entries are contiguous and small, so instead | 347 // Second-level deopt table entries are contiguous and small, so instead |
| 351 // of loading the full, absolute address of each one, load an immediate | 348 // of loading the full, absolute address of each one, load an immediate |
| 352 // offset which will be added to the base address later. | 349 // offset which will be added to the base address later. |
| 353 __ mov(entry_offset, Operand(entry - base)); | 350 __ mov(entry_offset, Operand(entry - base)); |
| 354 | 351 |
| 355 if (table_entry->needs_frame) { | 352 if (table_entry->needs_frame) { |
| 356 DCHECK(!info()->saves_caller_doubles()); | 353 DCHECK(!info()->saves_caller_doubles()); |
| (...skipping 5556 matching lines...) | |
| 5913 __ Push(scope_info); | 5910 __ Push(scope_info); |
| 5914 __ push(ToRegister(instr->function())); | 5911 __ push(ToRegister(instr->function())); |
| 5915 CallRuntime(Runtime::kPushBlockContext, 2, instr); | 5912 CallRuntime(Runtime::kPushBlockContext, 2, instr); |
| 5916 RecordSafepoint(Safepoint::kNoLazyDeopt); | 5913 RecordSafepoint(Safepoint::kNoLazyDeopt); |
| 5917 } | 5914 } |
| 5918 | 5915 |
| 5919 | 5916 |
| 5920 #undef __ | 5917 #undef __ |
| 5921 | 5918 |
| 5922 } } // namespace v8::internal | 5919 } } // namespace v8::internal |
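The only non-obvious arithmetic in the hunk above is the reachability check at the top of GenerateJumpTable (formerly GenerateDeoptJumpTable). Below is a minimal standalone sketch of that check, assuming the numbers stated in the diff's comments: 4-byte ARM instructions, a 24-bit signed word offset in branch instructions, and a budget of 7 instruction slots per table entry. `is_int24`, `kInstrSize` and `JumpTableIsReachable` here are illustrative stand-ins, not the actual V8 helpers.

```cpp
// Sketch of the jump table range check, under the assumptions above.
#include <cstdint>

constexpr int kInstrSize = 4;  // bytes per ARM instruction (assumption from the diff)

bool is_int24(int64_t value) {
  // A 24-bit signed field can encode -2^23 .. 2^23 - 1.
  return value >= -(int64_t{1} << 23) && value < (int64_t{1} << 23);
}

// True if a branch emitted anywhere before the jump table can still reach
// its last entry with a 24-bit signed word offset.
bool JumpTableIsReachable(int pc_offset_bytes, int table_length) {
  // Code emitted so far, in words, plus roughly 7 words per table entry.
  int64_t distance_in_words =
      pc_offset_bytes / kInstrSize + int64_t{7} * table_length;
  return is_int24(distance_in_words);
}
```

Under these assumptions the limit works out to roughly 2^23 words between the first instruction and the table's last entry, i.e. about 32 MB of generated code, beyond which the Abort(kGeneratedCodeIsTooLarge) path above is taken.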
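A second pattern worth spelling out is the `entry - base` trick a few lines further down: second-level deopt entries are contiguous, so each jump table slot materializes only a small offset and the shared call sequence adds the base address back once. A minimal sketch of that addressing scheme, assuming contiguous entries; `JumpTableEntry`, `EntryOffset` and `AbsoluteTarget` are hypothetical names for illustration, not V8 identifiers.

```cpp
// Model of the base-plus-offset addressing used for second-level deopt
// entries; names are hypothetical.
#include <cstdint>

struct JumpTableEntry {
  uintptr_t address;  // absolute address of the second-level deopt entry
};

// Per-entry code materializes only this small offset from the first entry
// instead of the full absolute address...
uint32_t EntryOffset(const JumpTableEntry& entry, uintptr_t base) {
  return static_cast<uint32_t>(entry.address - base);
}

// ...and the shared call sequence rebuilds the absolute target once, so the
// base address is loaded a single time for the whole table.
uintptr_t AbsoluteTarget(uint32_t entry_offset, uintptr_t base) {
  return base + entry_offset;
}
```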