// Copyright (c) 2016, the Dart project authors. Please see the AUTHORS file
// for details. All rights reserved. Use of this source code is governed by a
// BSD-style license that can be found in the LICENSE file.

#include "vm/globals.h"  // Needed here to get TARGET_ARCH_DBC.
#if defined(TARGET_ARCH_DBC)

#include "vm/flow_graph_compiler.h"

#include "vm/ast_printer.h"
#include "vm/compiler.h"
#include "vm/cpu.h"
#include "vm/dart_entry.h"
#include "vm/deopt_instructions.h"
#include "vm/il_printer.h"
#include "vm/instructions.h"
#include "vm/locations.h"
#include "vm/object_store.h"
#include "vm/parser.h"
#include "vm/stack_frame.h"
#include "vm/stub_code.h"
#include "vm/symbols.h"
#include "vm/verified_memory.h"

namespace dart {

DEFINE_FLAG(bool, trap_on_deoptimization, false, "Trap on deoptimization.");
DEFINE_FLAG(bool, unbox_mints, true, "Optimize 64-bit integer arithmetic.");
DEFINE_FLAG(bool, unbox_doubles, true, "Optimize double arithmetic.");
DECLARE_FLAG(bool, enable_simd_inline);
DECLARE_FLAG(bool, use_megamorphic_stub);
DECLARE_FLAG(charp, optimization_filter);

void MegamorphicSlowPath::EmitNativeCode(FlowGraphCompiler* compiler) {
  // Review comment (zra, 2016/04/14 18:27:48): not needed?
  // Reply (Vyacheslav Egorov (Google), 2016/04/18 15:56:41): Done.
  UNIMPLEMENTED();
}


FlowGraphCompiler::~FlowGraphCompiler() {
  // BlockInfos are zone-allocated, so their destructors are not called.
  // Verify the labels explicitly here.
  for (int i = 0; i < block_info_.length(); ++i) {
    ASSERT(!block_info_[i]->jump_label()->IsLinked());
  }
}


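// DBC is a stack-based interpreter: values live as boxed objects on the
// interpreter stack, so none of the unboxed representations used by the
// register-based backends are supported here.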
bool FlowGraphCompiler::SupportsUnboxedDoubles() {
  return false;
}


bool FlowGraphCompiler::SupportsUnboxedMints() {
  return false;
}


bool FlowGraphCompiler::SupportsUnboxedSimd128() {
  return false;
}


bool FlowGraphCompiler::SupportsSinCos() {
  return false;
}


bool FlowGraphCompiler::SupportsHardwareDivision() {
  return true;
}


bool FlowGraphCompiler::CanConvertUnboxedMintToDouble() {
  return false;
}


void FlowGraphCompiler::EnterIntrinsicMode() {
  ASSERT(!intrinsic_mode());
  intrinsic_mode_ = true;
}


void FlowGraphCompiler::ExitIntrinsicMode() {
  ASSERT(intrinsic_mode());
  intrinsic_mode_ = false;
}


RawTypedData* CompilerDeoptInfo::CreateDeoptInfo(FlowGraphCompiler* compiler,
                                                 DeoptInfoBuilder* builder,
                                                 const Array& deopt_table) {
  UNIMPLEMENTED();
  return TypedData::null();
}


void CompilerDeoptInfoWithStub::GenerateCode(FlowGraphCompiler* compiler,
                                             intptr_t stub_ix) {
  // Calls do not need stubs, they share a deoptimization trampoline.
  ASSERT(reason() != ICData::kDeoptAtCall);
  // Review comment (zra, 2016/04/14 18:27:48): ditto
  // Reply (Vyacheslav Egorov (Google), 2016/04/18 15:56:41): Done.
  UNIMPLEMENTED();
}


#define __ assembler()->


void FlowGraphCompiler::GenerateAssertAssignable(TokenPosition token_pos,
                                                 intptr_t deopt_id,
                                                 const AbstractType& dst_type,
                                                 const String& dst_name,
                                                 LocationSummary* locs) {
  ASSERT(!is_optimizing());
  SubtypeTestCache& test_cache = SubtypeTestCache::Handle();
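  // A subtype test cache is only allocated for instantiated, non-void
  // destination types; other checks presumably cannot benefit from caching.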
  if (!dst_type.IsVoidType() && dst_type.IsInstantiated()) {
    test_cache = SubtypeTestCache::New();
  }

  __ PushConstant(dst_type);
  __ PushConstant(dst_name);
  __ AssertAssignable(__ AddConstant(test_cache));
  AddCurrentDescriptor(RawPcDescriptors::kOther, deopt_id, token_pos);
}


void FlowGraphCompiler::EmitInstructionEpilogue(Instruction* instr) {
  if (!is_optimizing()) {
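    // In unoptimized code, a definition leaves its result on the stack.
    // Drop that result here unless the instruction either has no result to
    // drop (the excluded tags below) or allocated a temp slot for it.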
    Definition* defn = instr->AsDefinition();
    if ((defn != NULL) &&
        (defn->tag() != Instruction::kPushArgument) &&
        (defn->tag() != Instruction::kStoreIndexed) &&
        (defn->tag() != Instruction::kStoreStaticField) &&
        (defn->tag() != Instruction::kStoreLocal) &&
        (defn->tag() != Instruction::kStoreInstanceField) &&
        (defn->tag() != Instruction::kDropTemps) &&
        (defn->tag() != Instruction::kPushTemp) &&
        !defn->HasTemp()) {
      __ Drop1();
    }
  }
}


void FlowGraphCompiler::GenerateInlinedGetter(intptr_t offset) {
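  // The receiver is the getter's sole argument; copy it into register 0,
  // load the field at the given offset from it, and return the result.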
  __ Move(0, -(1 + kParamEndSlotFromFp));
  __ LoadField(0, 0, offset / kWordSize);
  __ Return(0);
}


void FlowGraphCompiler::GenerateInlinedSetter(intptr_t offset) {
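  // Arguments are (receiver, value): store value into the receiver's field,
  // then load null into register 0 and return it (setters return null).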
  __ Move(0, -(2 + kParamEndSlotFromFp));
  __ Move(1, -(1 + kParamEndSlotFromFp));
  __ StoreField(0, offset / kWordSize, 1);
  __ LoadConstant(0, Object::Handle());
  __ Return(0);
}


void FlowGraphCompiler::EmitFrameEntry() {
  const Function& function = parsed_function().function();
  const intptr_t num_fixed_params = function.num_fixed_parameters();
  const int num_opt_pos_params = function.NumOptionalPositionalParameters();
  const int num_opt_named_params = function.NumOptionalNamedParameters();
  const int num_params =
      num_fixed_params + num_opt_pos_params + num_opt_named_params;
  const bool has_optional_params = (num_opt_pos_params != 0) ||
  // Review comment (zra, 2016/04/14 18:27:48): parens around clauses.
  // Reply (Vyacheslav Egorov (Google), 2016/04/18 15:56:41): Done.
      (num_opt_named_params != 0);
  const int num_locals = parsed_function().num_stack_locals();
  const intptr_t context_index =
      -parsed_function().current_context_var()->index() - 1;

  if (has_optional_params) {
    __ EntryOpt(num_fixed_params, num_opt_pos_params, num_opt_named_params);
  } else {
    __ Entry(num_fixed_params, num_locals, context_index);
  }

  if (num_opt_named_params != 0) {
    LocalScope* scope = parsed_function().node_sequence()->scope();

    // Start by alphabetically sorting the names of the optional parameters.
    LocalVariable** opt_param =
        zone()->Alloc<LocalVariable*>(num_opt_named_params);
    int* opt_param_position = zone()->Alloc<int>(num_opt_named_params);
    for (int pos = num_fixed_params; pos < num_params; pos++) {
      LocalVariable* parameter = scope->VariableAt(pos);
      const String& opt_param_name = parameter->name();
      int i = pos - num_fixed_params;
      while (--i >= 0) {
        LocalVariable* param_i = opt_param[i];
        const intptr_t result = opt_param_name.CompareTo(param_i->name());
        ASSERT(result != 0);
        if (result > 0) break;
        // Review comment (zra, 2016/04/14 18:27:48): {} please
        // Reply (Vyacheslav Egorov (Google), 2016/04/18 15:56:41): This code
        // exists at least in 5 copies in the code
        opt_param[i + 1] = opt_param[i];
        opt_param_position[i + 1] = opt_param_position[i];
      }
      opt_param[i + 1] = parameter;
      opt_param_position[i + 1] = pos;
    }

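    // Emit a (name, default value) constant pair for each named parameter,
    // in sorted order. The EntryOpt sequence emitted above presumably
    // consumes these pairs when matching and binding the actual arguments.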
    for (intptr_t i = 0; i < num_opt_named_params; i++) {
      const int param_pos = opt_param_position[i];
      const Instance& value = parsed_function().DefaultParameterValueAt(
          param_pos - num_fixed_params);
      __ LoadConstant(param_pos, opt_param[i]->name());
      __ LoadConstant(param_pos, value);
    }
  } else if (num_opt_pos_params != 0) {
    for (intptr_t i = 0; i < num_opt_pos_params; i++) {
      const Object& value = parsed_function().DefaultParameterValueAt(i);
      __ LoadConstant(num_fixed_params + i, value);
    }
  }


  ASSERT(num_locals > 0);  // There is always at least context_var.
  if (has_optional_params) {
    ASSERT(!is_optimizing());
    __ Frame(num_locals);  // Reserve space for locals.
  }

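  // For closures, load the context out of the closure object. Otherwise, if
  // the frame was set up via EntryOpt (which, unlike Entry, takes no context
  // slot argument), install the empty context explicitly.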
  if (function.IsClosureFunction()) {
    Register reg = context_index;
    Register closure_reg = reg;
    LocalScope* scope = parsed_function().node_sequence()->scope();
    LocalVariable* local = scope->VariableAt(0);
    if (local->index() > 0) {
      __ Move(reg, -local->index());
    } else {
      closure_reg = -local->index() - 1;
    }
    __ LoadField(reg, closure_reg, Closure::context_offset() / kWordSize);
  } else if (has_optional_params) {
    __ LoadConstant(context_index,
        Object::Handle(isolate()->object_store()->empty_context()));
  }
}


void FlowGraphCompiler::CompileGraph() {
  InitCompiler();

  if (TryIntrinsify()) {
    // Skip regular code generation.
    return;
  }

  EmitFrameEntry();
  VisitBlocks();
}


#undef __
#define __ compiler_->assembler()->

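// Parallel moves are produced by the register allocator, i.e. only when
// compiling optimized code; the DBC port currently compiles unoptimized code
// only, so the resolver hooks below are left unimplemented for now.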
void ParallelMoveResolver::EmitMove(int index) {
  UNIMPLEMENTED();
}


void ParallelMoveResolver::EmitSwap(int index) {
  UNIMPLEMENTED();
}


void ParallelMoveResolver::MoveMemoryToMemory(const Address& dst,
                                              const Address& src) {
  UNREACHABLE();
}


void ParallelMoveResolver::StoreObject(const Address& dst, const Object& obj) {
  UNREACHABLE();
}


// Do not call or implement this function. Instead, use the form below that
// uses an offset from the frame pointer instead of an Address.
void ParallelMoveResolver::Exchange(Register reg, const Address& mem) {
  UNREACHABLE();
}


// Do not call or implement this function. Instead, use the form below that
// uses offsets from the frame pointer instead of Addresses.
void ParallelMoveResolver::Exchange(const Address& mem1, const Address& mem2) {
  UNREACHABLE();
}


void ParallelMoveResolver::Exchange(Register reg,
                                    Register base_reg,
                                    intptr_t stack_offset) {
  UNIMPLEMENTED();
}


void ParallelMoveResolver::Exchange(Register base_reg1,
                                    intptr_t stack_offset1,
                                    Register base_reg2,
                                    intptr_t stack_offset2) {
  UNIMPLEMENTED();
}


void ParallelMoveResolver::SpillScratch(Register reg) {
  UNIMPLEMENTED();
}


void ParallelMoveResolver::RestoreScratch(Register reg) {
  UNIMPLEMENTED();
}


void ParallelMoveResolver::SpillFpuScratch(FpuRegister reg) {
  UNIMPLEMENTED();
}


void ParallelMoveResolver::RestoreFpuScratch(FpuRegister reg) {
  UNIMPLEMENTED();
}


#undef __

}  // namespace dart

#endif  // defined TARGET_ARCH_DBC