Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(283)

Side by Side Diff: runtime/vm/flow_graph_compiler_dbc.cc

Issue 1858283002: Initial SIMDBC interpreter. (Closed) Base URL: git@github.com:dart-lang/sdk.git@master
Patch Set: Address comments Created 4 years, 8 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch
OLDNEW
(Empty)
1 // Copyright (c) 2016, the Dart project authors. Please see the AUTHORS file
2 // for details. All rights reserved. Use of this source code is governed by a
3 // BSD-style license that can be found in the LICENSE file.
4
5 #include "vm/globals.h" // Needed here to get TARGET_ARCH_DBC.
6 #if defined(TARGET_ARCH_DBC)
7
8 #include "vm/flow_graph_compiler.h"
9
10 #include "vm/ast_printer.h"
11 #include "vm/compiler.h"
12 #include "vm/cpu.h"
13 #include "vm/dart_entry.h"
14 #include "vm/deopt_instructions.h"
15 #include "vm/il_printer.h"
16 #include "vm/instructions.h"
17 #include "vm/locations.h"
18 #include "vm/object_store.h"
19 #include "vm/parser.h"
20 #include "vm/stack_frame.h"
21 #include "vm/stub_code.h"
22 #include "vm/symbols.h"
23 #include "vm/verified_memory.h"
24
25 namespace dart {
26
27 DEFINE_FLAG(bool, trap_on_deoptimization, false, "Trap on deoptimization.");
28 DEFINE_FLAG(bool, unbox_mints, true, "Optimize 64-bit integer arithmetic.");
29 DEFINE_FLAG(bool, unbox_doubles, true, "Optimize double arithmetic.");
30 DECLARE_FLAG(bool, enable_simd_inline);
31 DECLARE_FLAG(bool, use_megamorphic_stub);
32 DECLARE_FLAG(charp, optimization_filter);
33
// Slow-path code for megamorphic call sites is not implemented for the DBC
// target yet; hitting this path is a fatal error.
void MegamorphicSlowPath::EmitNativeCode(FlowGraphCompiler* compiler) {
#define __ assembler->
  UNIMPLEMENTED();
#undef __
}
39
40
41 FlowGraphCompiler::~FlowGraphCompiler() {
42 // BlockInfos are zone-allocated, so their destructors are not called.
43 // Verify the labels explicitly here.
44 for (int i = 0; i < block_info_.length(); ++i) {
45 ASSERT(!block_info_[i]->jump_label()->IsLinked());
46 }
47 }
48
49
// The DBC backend keeps all doubles boxed; no unboxed double support.
bool FlowGraphCompiler::SupportsUnboxedDoubles() {
  return false;
}
53
54
// The DBC backend keeps all 64-bit integers boxed; no unboxed mint support.
bool FlowGraphCompiler::SupportsUnboxedMints() {
  return false;
}
58
59
// No SIMD128 unboxing on DBC.
bool FlowGraphCompiler::SupportsUnboxedSimd128() {
  return false;
}
63
64
// No fused sin/cos intrinsic on DBC.
bool FlowGraphCompiler::SupportsSinCos() {
  return false;
}
68
69
// Division is always available to the interpreter, so report support.
bool FlowGraphCompiler::SupportsHardwareDivision() {
  return true;
}
73
74
// Follows from the lack of unboxed mint support above.
bool FlowGraphCompiler::CanConvertUnboxedMintToDouble() {
  return false;
}
78
79
// Marks the start of intrinsic code emission; must not already be active.
void FlowGraphCompiler::EnterIntrinsicMode() {
  ASSERT(!intrinsic_mode());
  intrinsic_mode_ = true;
}
84
85
// Marks the end of intrinsic code emission; must currently be active.
void FlowGraphCompiler::ExitIntrinsicMode() {
  ASSERT(intrinsic_mode());
  intrinsic_mode_ = false;
}
90
91
// Deoptimization info is not materialized for DBC yet (unoptimized-only
// compilation); reaching this is a fatal error.  Returns null to satisfy
// the signature.
RawTypedData* CompilerDeoptInfo::CreateDeoptInfo(FlowGraphCompiler* compiler,
                                                 DeoptInfoBuilder* builder,
                                                 const Array& deopt_table) {
  UNIMPLEMENTED();
  return TypedData::null();
}
98
99
// Per-deopt stub generation is not implemented for DBC.
void CompilerDeoptInfoWithStub::GenerateCode(FlowGraphCompiler* compiler,
                                             intptr_t stub_ix) {
  // Calls do not need stubs, they share a deoptimization trampoline.
  ASSERT(reason() != ICData::kDeoptAtCall);
#define __ assembler->
  UNIMPLEMENTED();
#undef __
}
108
109
110 #define __ assembler()->
111
112
// Emits an AssertAssignable type check against 'dst_type' for the value on
// top of the stack.  Pushes the type and name as constants, then emits the
// AssertAssignable bytecode referencing a (possibly null) subtype test cache.
// 'locs' is unused on DBC.  Only reached in unoptimized code.
void FlowGraphCompiler::GenerateAssertAssignable(TokenPosition token_pos,
                                                 intptr_t deopt_id,
                                                 const AbstractType& dst_type,
                                                 const String& dst_name,
                                                 LocationSummary* locs) {
  ASSERT(!is_optimizing());
  SubtypeTestCache& test_cache = SubtypeTestCache::Handle();
  // A cache only helps for instantiated, non-void destination types; for
  // everything else a null cache is passed.
  if (!dst_type.IsVoidType() && dst_type.IsInstantiated()) {
    test_cache = SubtypeTestCache::New();
  }

  __ PushConstant(dst_type);
  __ PushConstant(dst_name);
  __ AssertAssignable(__ AddConstant(test_cache));
  // Record a PC descriptor so the runtime can map this check back to its
  // deopt id and source position.
  AddCurrentDescriptor(RawPcDescriptors::kOther, deopt_id, token_pos);
}
129
130
131 void FlowGraphCompiler::EmitInstructionEpilogue(Instruction* instr) {
132 if (!is_optimizing()) {
133 Definition* defn = instr->AsDefinition();
134 if ((defn != NULL) &&
135 (defn->tag() != Instruction::kPushArgument) &&
136 (defn->tag() != Instruction::kStoreIndexed) &&
137 (defn->tag() != Instruction::kStoreStaticField) &&
138 (defn->tag() != Instruction::kStoreLocal) &&
139 (defn->tag() != Instruction::kStoreInstanceField) &&
140 (defn->tag() != Instruction::kDropTemps) &&
141 (defn->tag() != Instruction::kPushTemp) &&
142 !defn->HasTemp()) {
143 __ Drop1();
144 }
145 }
146 }
147
148
// Emits the bytecode body of an inlined field getter: load the receiver
// from its parameter slot, read the field at 'offset' (in bytes), return it.
void FlowGraphCompiler::GenerateInlinedGetter(intptr_t offset) {
  __ Move(0, -(1 + kParamEndSlotFromFp));  // Receiver -> register 0.
  __ LoadField(0, 0, offset / kWordSize);  // Field value -> register 0.
  __ Return(0);
}
154
155
// Emits the bytecode body of an inlined field setter: load receiver and
// value from their parameter slots, store the value into the field at
// 'offset' (in bytes), and return null.
void FlowGraphCompiler::GenerateInlinedSetter(intptr_t offset) {
  __ Move(0, -(2 + kParamEndSlotFromFp));  // Receiver -> register 0.
  __ Move(1, -(1 + kParamEndSlotFromFp));  // New value -> register 1.
  __ StoreField(0, offset / kWordSize, 1);
  __ LoadConstant(0, Object::Handle());    // Setters return null.
  __ Return(0);
}
163
164
165 void FlowGraphCompiler::EmitFrameEntry() {
166 const Function& function = parsed_function().function();
167 const intptr_t num_fixed_params = function.num_fixed_parameters();
168 const int num_opt_pos_params = function.NumOptionalPositionalParameters();
169 const int num_opt_named_params = function.NumOptionalNamedParameters();
170 const int num_params =
171 num_fixed_params + num_opt_pos_params + num_opt_named_params;
172 const bool has_optional_params = num_opt_pos_params != 0 ||
173 num_opt_named_params != 0;
174 const int num_locals = parsed_function().num_stack_locals();
175 const intptr_t context_index =
176 -parsed_function().current_context_var()->index() - 1;
177
178 if (has_optional_params) {
179 __ EntryOpt(num_fixed_params, num_opt_pos_params, num_opt_named_params);
180 } else {
181 __ Entry(num_fixed_params, num_locals, context_index);
182 }
183
184 if (num_opt_named_params != 0) {
185 LocalScope* scope = parsed_function().node_sequence()->scope();
186
187 // Start by alphabetically sorting the names of the optional parameters.
188 LocalVariable** opt_param = new LocalVariable*[num_opt_named_params];
189 int* opt_param_position = new int[num_opt_named_params];
190 for (int pos = num_fixed_params; pos < num_params; pos++) {
191 LocalVariable* parameter = scope->VariableAt(pos);
192 const String& opt_param_name = parameter->name();
193 int i = pos - num_fixed_params;
194 while (--i >= 0) {
195 LocalVariable* param_i = opt_param[i];
196 const intptr_t result = opt_param_name.CompareTo(param_i->name());
197 ASSERT(result != 0);
198 if (result > 0) break;
199 opt_param[i + 1] = opt_param[i];
200 opt_param_position[i + 1] = opt_param_position[i];
201 }
202 opt_param[i + 1] = parameter;
203 opt_param_position[i + 1] = pos;
204 }
205
206 for (intptr_t i = 0; i < num_opt_named_params; i++) {
207 const int param_pos = opt_param_position[i];
208 const Instance& value = parsed_function().DefaultParameterValueAt(
209 param_pos - num_fixed_params);
210 __ LoadConstant(param_pos, opt_param[i]->name());
211 __ LoadConstant(param_pos, value);
212 }
Florian Schneider 2016/04/12 18:01:42 delete[] opt_param; delete[] opt_param_position;
Vyacheslav Egorov (Google) 2016/04/14 13:05:27 Switched to Zone allocation.
213 } else if (num_opt_pos_params != 0) {
214 for (intptr_t i = 0; i < num_opt_pos_params; i++) {
215 const Object& value = parsed_function().DefaultParameterValueAt(i);
216 __ LoadConstant(num_fixed_params + i, value);
217 }
218 }
219
220
221 ASSERT(num_locals > 0); // There is always at least context_var.
222 if (has_optional_params) {
223 ASSERT(!is_optimizing());
224 __ Frame(num_locals); // Reserve space for locals.
225 }
226
227 if (function.IsClosureFunction()) {
228 Register reg = context_index;
229 Register closure_reg = reg;
230 LocalScope* scope = parsed_function().node_sequence()->scope();
231 LocalVariable* local = scope->VariableAt(0);
232 if (local->index() > 0) {
233 __ Move(reg, -local->index());
234 } else {
235 closure_reg = -local->index() - 1;
236 }
237 __ LoadField(reg, closure_reg, Closure::context_offset() / kWordSize);
238 } else if (has_optional_params) {
239 __ LoadConstant(context_index,
240 Object::Handle(isolate()->object_store()->empty_context()));
241 }
242 }
243
244
// Top-level entry point: compiles the whole flow graph to DBC bytecode.
// Tries a recognized intrinsic first; otherwise emits the frame entry
// followed by each basic block.
void FlowGraphCompiler::CompileGraph() {
  InitCompiler();

  if (TryIntrinsify()) {
    // Skip regular code generation.
    return;
  }

  EmitFrameEntry();
  VisitBlocks();
}
256
257
258 #undef __
259 #define __ compiler_->assembler()->
260
261
// Parallel moves are not yet implemented for DBC (no register allocator).
void ParallelMoveResolver::EmitMove(int index) {
  UNIMPLEMENTED();
}
265
266
// Parallel-move swaps are not yet implemented for DBC.
void ParallelMoveResolver::EmitSwap(int index) {
  UNIMPLEMENTED();
}
270
271
// Address-based moves have no meaning on DBC; must never be called.
void ParallelMoveResolver::MoveMemoryToMemory(const Address& dst,
                                              const Address& src) {
  UNREACHABLE();
}
276
277
// Address-based object stores have no meaning on DBC; must never be called.
void ParallelMoveResolver::StoreObject(const Address& dst, const Object& obj) {
  UNREACHABLE();
}
281
282
// Do not call or implement this function. Instead, use the form below that
// uses an offset from the frame pointer instead of an Address.
void ParallelMoveResolver::Exchange(Register reg, const Address& mem) {
  UNREACHABLE();
}
288
289
// Do not call or implement this function. Instead, use the form below that
// uses offsets from the frame pointer instead of Addresses.
void ParallelMoveResolver::Exchange(const Address& mem1, const Address& mem2) {
  UNREACHABLE();
}
295
296
// Register/stack-slot exchange; not yet implemented for DBC.
void ParallelMoveResolver::Exchange(Register reg,
                                    Register base_reg,
                                    intptr_t stack_offset) {
  UNIMPLEMENTED();
}
302
303
// Stack-slot/stack-slot exchange; not yet implemented for DBC.
void ParallelMoveResolver::Exchange(Register base_reg1,
                                    intptr_t stack_offset1,
                                    Register base_reg2,
                                    intptr_t stack_offset2) {
  UNIMPLEMENTED();
}
310
311
// Scratch-register spilling; not yet implemented for DBC.
void ParallelMoveResolver::SpillScratch(Register reg) {
  UNIMPLEMENTED();
}
315
316
// Scratch-register restore; not yet implemented for DBC.
void ParallelMoveResolver::RestoreScratch(Register reg) {
  UNIMPLEMENTED();
}
320
321
// FPU scratch-register spilling; not yet implemented for DBC.
void ParallelMoveResolver::SpillFpuScratch(FpuRegister reg) {
  UNIMPLEMENTED();
}
325
326
// FPU scratch-register restore; not yet implemented for DBC.
void ParallelMoveResolver::RestoreFpuScratch(FpuRegister reg) {
  UNIMPLEMENTED();
}
330
331
332 #undef __
333
334 } // namespace dart
335
336 #endif // defined TARGET_ARCH_DBC
OLDNEW

Powered by Google App Engine
This is Rietveld 408576698