Chromium Code Reviews
Unified Diff: runtime/vm/flow_graph_allocator.h

Issue 10800037: New linear scan allocator. (Closed) Base URL: https://dart.googlecode.com/svn/branches/bleeding_edge/dart
Patch Set: fix one comment Created 8 years, 5 months ago
// Copyright (c) 2012, the Dart project authors. Please see the AUTHORS file
// for details. All rights reserved. Use of this source code is governed by a
// BSD-style license that can be found in the LICENSE file.

#ifndef VM_FLOW_GRAPH_ALLOCATOR_H_
#define VM_FLOW_GRAPH_ALLOCATOR_H_

#include "vm/growable_array.h"
#include "vm/intermediate_language.h"

namespace dart {

+class AllocationFinger;
class FlowGraphBuilder;
class LiveRange;
class UseInterval;
+class UsePosition;

class FlowGraphAllocator : public ValueObject {
 public:
  FlowGraphAllocator(const GrowableArray<BlockEntryInstr*>& block_order,
                     FlowGraphBuilder* builder);

  void AllocateRegisters();

  // Build live-in and live-out sets for each block.
  void AnalyzeLiveness();
(...skipping 15 matching lines...)

  // Perform fix-point iteration updating live-out and live-in sets
  // for blocks until they stop changing.
  void ComputeLiveInAndLiveOutSets();

  // Print results of liveness analysis.
  void DumpLiveness();

  // Visit blocks in the code generation order (reverse post order) and
  // linearly assign consecutive lifetime positions to every instruction.
-  // Each instruction gets two positions:
+  // We assign positions as follows:
  //
-  //   2 * n     - even one corresponding to instruction's start
+  //   2 * n     - even position corresponding to an implicit parallel move
+  //               preceding the instruction;
  //
-  //   2 * n + 1 - odd one corresponding to instruction's end
+  //   2 * n + 1 - odd position corresponding to the instruction itself;
  //
-  // Having two positions allows us to capture non-trivial register
-  // constraints in use intervals: for example we can declare that
-  // an input value is only used at the start of the instruction and
-  // this might allow register allocator to allocate both this input
-  // and output (or temp) to the same register if this is the last
-  // use of the value.
+  // Having positions corresponding to parallel moves between every two
+  // instructions allows us to capture non-trivial shapes of use intervals.
+  // For specific examples see comments inside ProcessOneInstruction.
  // Additionally creates parallel moves at the joins' predecessors
  // that will be used for phi resolution.
  void NumberInstructions();
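
For illustration, the even/odd numbering reduces to a little arithmetic; a self-contained sketch (the helper names are hypothetical, not part of this patch):

#include <cassert>

// Instruction n in the linear order owns odd position 2 * n + 1; the
// implicit parallel move preceding it owns even position 2 * n.
static int MovePosition(int n) { return 2 * n; }
static int InstructionPosition(int n) { return 2 * n + 1; }

static bool IsParallelMovePosition(int pos) { return (pos & 1) == 0; }
static int InstructionIndexAt(int pos) { return pos / 2; }

int main() {
  assert(MovePosition(3) == 6);          // move slot before instruction 3
  assert(InstructionPosition(3) == 7);   // instruction 3 itself
  assert(IsParallelMovePosition(6) && !IsParallelMovePosition(7));
  assert(InstructionIndexAt(6) == 3 && InstructionIndexAt(7) == 3);
  return 0;
}
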
+  Instruction* InstructionAt(intptr_t pos) const;
+  bool IsBlockEntry(intptr_t pos) const;

  LiveRange* GetLiveRange(intptr_t vreg);
+
+  // Visit instructions in the postorder and build live ranges for
+  // all SSA values.
  void BuildLiveRanges();
-  void PrintLiveRanges();
+  Instruction* ConnectOutgoingPhiMoves(BlockEntryInstr* block);
+  void ProcessOneInstruction(BlockEntryInstr* block, Instruction* instr);
+  void ConnectIncomingPhiMoves(BlockEntryInstr* block);
+  void BlockLocation(Location loc, intptr_t from, intptr_t to);
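
Conceptually, the phi-resolution moves mentioned earlier live at the end of each join predecessor: the i-th operand of every phi is copied there, and all copies for one edge later execute together as a single parallel move. A sketch with hypothetical stand-in types (this is not the VM's IR):

#include <vector>

struct Move { int from_vreg; int to_vreg; };
struct Phi { int result_vreg; std::vector<int> operand_vregs; };
struct Block {
  std::vector<Phi> phis;             // non-empty only at join blocks
  std::vector<Block*> predecessors;
  std::vector<Move> edge_moves;      // parallel move at the end of this block
};

static void ConnectPhiMoves(Block* join) {
  for (size_t i = 0; i < join->predecessors.size(); ++i) {
    Block* pred = join->predecessors[i];
    for (const Phi& phi : join->phis) {
      // The value flowing in from predecessor i is copied into the phi
      // result; all moves appended to one block form one parallel move.
      pred->edge_moves.push_back({phi.operand_vregs[i], phi.result_vreg});
    }
  }
}

int main() {
  Block pred_a, pred_b, join;
  join.predecessors = {&pred_a, &pred_b};
  join.phis = {{/*result_vreg=*/5, /*operand_vregs=*/{3, 4}}};
  ConnectPhiMoves(&join);
  // pred_a now copies v3 -> v5, pred_b copies v4 -> v5.
  return 0;
}
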

-  // Register use of the given virtual register at lifetime position use_pos.
-  // If definition position is unknown then start of the block containing
-  // use_pos will be passed.
-  void UseValue(Instruction* instr,
-                intptr_t def_pos,   // Lifetime position for the definition.
-                intptr_t use_pos,   // Lifetime position for the use.
-                intptr_t vreg,
-                Location* loc,
-                bool use_at_end);
-
-  // Register definition of the given virtual register at lifetime position
-  // def_pos. Existing use interval will be shortened to start at def_pos.
-  void Define(Instruction* instr,
-              intptr_t def_pos,
-              intptr_t vreg,
-              Location* loc);
-
-  void AddToUnallocated(UseInterval* chain);
-  void BlockLocation(Location loc, intptr_t pos);
-
-  bool AllocateFreeRegister(UseInterval* unallocated);
-  void AssignFreeRegister(UseInterval* unallocated, Register reg);
-
-  void FinalizeInterval(UseInterval* interval, Location loc);
+  // Process live ranges sorted by their start and assign registers
+  // to them.
+  void AllocateCPURegisters();
  void AdvanceActiveIntervals(const intptr_t start);
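
The two methods above are the skeleton of the classic linear scan loop: take the unallocated range with the smallest start, retire active ranges that have already ended, then try to find it a register. A deliberately simplified, runnable toy (no holes, no splitting, no eviction, unlike the real allocator):

#include <algorithm>
#include <cstdio>
#include <vector>

struct Range { int start, end, reg; };  // [start, end), reg < 0 => spilled

void ToyLinearScan(std::vector<Range>& ranges, int num_regs) {
  std::sort(ranges.begin(), ranges.end(),
            [](const Range& a, const Range& b) { return a.start < b.start; });
  std::vector<Range*> active;
  std::vector<bool> used(num_regs, false);
  for (Range& r : ranges) {
    // Retire active ranges ending at or before r.start
    // (cf. AdvanceActiveIntervals).
    active.erase(std::remove_if(active.begin(), active.end(),
                                [&](Range* a) {
                                  if (a->end <= r.start) {
                                    used[a->reg] = false;
                                    return true;
                                  }
                                  return false;
                                }),
                 active.end());
    // Try to find a free register (cf. AllocateFreeRegister); else spill.
    r.reg = -1;
    for (int reg = 0; reg < num_regs; ++reg) {
      if (!used[reg]) {
        used[reg] = true;
        r.reg = reg;
        active.push_back(&r);
        break;
      }
    }
  }
}

int main() {
  std::vector<Range> ranges = {{0, 10, -1}, {2, 6, -1}, {3, 5, -1}, {7, 9, -1}};
  ToyLinearScan(ranges, 2);
  for (const Range& r : ranges)
    std::printf("[%d,%d) -> %d\n", r.start, r.end, r.reg);  // [3,5) spills
  return 0;
}
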

+  // Connect split siblings over non-linear control flow edges.
+  void ResolveControlFlow();
+  void ConnectSplitSiblings(LiveRange* range,
+                            BlockEntryInstr* source_block,
+                            BlockEntryInstr* target_block);
+
+
+  // Update the location slot corresponding to the use with the location
+  // allocated for the use's live range.
+  void ConvertUseTo(UsePosition* use, Location loc);
+  void ConvertAllUses(LiveRange* range);
+
+  // Add live range to the list of unallocated live ranges to be processed
+  // by the allocator.
+  void AddToUnallocated(LiveRange* range);
+#ifdef DEBUG
  bool UnallocatedIsSorted();
-  void AllocateCPURegisters();
+#endif
+
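
The sorted-worklist invariant that UnallocatedIsSorted checks under DEBUG can be pictured as ordered insertion. A small sketch assuming, hypothetically, that ShouldBeAllocatedBefore simply compares start positions (the real predicate may be richer):

#include <algorithm>
#include <cassert>
#include <vector>

struct Range { int start; };

// Assumption for this sketch: earlier start => allocated first.
static bool ShouldBeAllocatedBefore(const Range& a, const Range& b) {
  return a.start < b.start;
}

static void AddToUnallocated(std::vector<Range>& unallocated, const Range& r) {
  // Ordered insert keeps the worklist sorted at all times.
  auto it = std::upper_bound(unallocated.begin(), unallocated.end(), r,
                             ShouldBeAllocatedBefore);
  unallocated.insert(it, r);
}

static bool UnallocatedIsSorted(const std::vector<Range>& unallocated) {
  for (size_t i = 1; i < unallocated.size(); ++i) {
    if (ShouldBeAllocatedBefore(unallocated[i], unallocated[i - 1])) {
      return false;
    }
  }
  return true;
}

int main() {
  std::vector<Range> unallocated;
  const int starts[] = {7, 2, 5};
  for (int start : starts) AddToUnallocated(unallocated, Range{start});
  assert(UnallocatedIsSorted(unallocated));  // 2, 5, 7
  return 0;
}
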
+  // Try to find a free register for an unallocated live range.
+  bool AllocateFreeRegister(LiveRange* unallocated);
+
+  // Try to find a register that can be used by a given live range.
+  // If all registers are occupied consider evicting interference for
+  // a register that is going to be used as far from the start of
+  // the unallocated live range as possible.
+  void AllocateAnyRegister(LiveRange* unallocated);
+
+  // Assign selected non-free register to an unallocated live range and
+  // evict any interference that can be evicted by spliting and spilling
+  // parts of interfering live ranges. Place non-spilled parts into
Kevin Millikin (Google) 2012/07/24 15:20:51  'spliting' -> 'splitting'. Last sentence is a fragment.
Vyacheslav Egorov (Google) 2012/07/24 16:01:00  Done.
+  void AssignNonFreeRegister(LiveRange* unallocated, Register reg);
+  bool EvictIntersection(LiveRange* allocated, LiveRange* unallocated);
+  void RemoveEvicted(Register reg, intptr_t first_evicted);
+
+  // Find first intersection between unallocated live range and
+  // live ranges currently allocated to the given register.
+  intptr_t FirstIntersectionWithAllocated(Register reg,
+                                          LiveRange* unallocated);
+
+  bool UpdateFreeUntil(Register reg,
+                       LiveRange* unallocated,
+                       intptr_t* cur_free_until,
+                       intptr_t* cur_blocked_at);
+
+  // Split given live range at an optimal position between given positions.
+  LiveRange* SplitBetween(LiveRange* range, intptr_t from, intptr_t to);
+
+  // Find a spill slot that can be used by the given live range.
+  intptr_t AllocateSpillSlotFor(LiveRange* range);
+
+  // Allocate the given live range to a spill slot.
+  void Spill(LiveRange* range);
+
+  // Spill the given live range from the given position onwards.
+  void SpillAfter(LiveRange* range, intptr_t from);
+
+  // Spill the given live range from the given position until some
+  // position preceding the to position.
+  void SpillBetween(LiveRange* range, intptr_t from, intptr_t to);
+
+  MoveOperands* AddMoveAt(intptr_t pos, Location to, Location from);
+
+  void PrintLiveRanges();
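
SplitBetween leaves the choice of split position open. One common linear scan policy, which may or may not be what this patch implements, is to prefer the last block boundary inside the interval so that the reload lands on a control-flow boundary. A sketch, with block_starts as a hypothetical sorted list of block entry positions:

#include <algorithm>
#include <vector>

static int ChooseSplitPosition(const std::vector<int>& block_starts,
                               int from, int to) {
  auto it = std::upper_bound(block_starts.begin(), block_starts.end(), to);
  if (it != block_starts.begin()) {
    int pos = *(it - 1);           // last block start at or before `to`
    if (pos > from) return pos;    // prefer a block boundary in (from, to]
  }
  return to;                       // otherwise split directly at `to`
}

int main() {
  std::vector<int> block_starts = {0, 8, 16, 24};
  // Splitting between positions 5 and 19 prefers the boundary at 16.
  return ChooseSplitPosition(block_starts, 5, 19) == 16 ? 0 : 1;
}
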

  // TODO(vegorov): this field is used only to call Bailout. Remove when
  // all bailouts are gone.
  FlowGraphBuilder* builder_;

  const GrowableArray<BlockEntryInstr*>& block_order_;
  const GrowableArray<BlockEntryInstr*>& postorder_;

+  GrowableArray<Instruction*> instructions_;
+
  // Live-out sets for each block. They contain indices of SSA values
  // that are live out from this block: that is values that were either
  // defined in this block or live into it and that are used in some
  // successor block.
  GrowableArray<BitVector*> live_out_;

  // Kill sets for each block. They contain indices of SSA values that
  // are defined by this block.
  GrowableArray<BitVector*> kill_;

  // Live-in sets for each block. They contain indices of SSA values
  // that are used by this block or its successors.
  GrowableArray<BitVector*> live_in_;

  // Number of virtual registers. Currently equal to the number of
  // SSA values.
  const intptr_t vreg_count_;

  // LiveRanges corresponding to SSA values.
  GrowableArray<LiveRange*> live_ranges_;

  // Worklist for register allocator. Always maintained sorted according
  // to the ShouldBeAllocatedBefore predicate.
-  GrowableArray<UseInterval*> unallocated_;
+  GrowableArray<LiveRange*> unallocated_;

-  // Per-register lists of allocated UseIntervals, linked through
-  // next_allocated field. Contains only those intervals that
-  // can be affected by future allocation decisions. Those intervals
-  // that end before the start of the current UseInterval are removed
-  // from this list and will not be affected.
-  UseInterval* cpu_regs_[kNumberOfCpuRegisters];
+  // Per-register lists of allocated live ranges. Contain only those
+  // ranges that can be affected by future allocation decisions.
+  // Those live ranges that end before the start of the current live range
+  // are removed from the list and will not be affected.
+  GrowableArray<LiveRange*> cpu_regs_[kNumberOfCpuRegisters];
+
+  // List of used spill slots. Contains positions after which spill slots
+  // become free and can be reused for allocation.
+  GrowableArray<intptr_t> spill_slots_;
+
+  bool blocked_cpu_regs_[kNumberOfCpuRegisters];

  DISALLOW_COPY_AND_ASSIGN(FlowGraphAllocator);
};


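The spill_slots_ representation above admits a compact free-slot search: a slot whose recorded position is at or before the start of the new range can be reused. A sketch of that idea (the exact comparison and growth policy here are assumptions, not the patch's code):

#include <vector>

// Slot i stores the position after which it becomes free again.
static int AllocateSpillSlotFor(std::vector<int>& spill_slots,
                                int range_start, int range_end) {
  for (size_t i = 0; i < spill_slots.size(); ++i) {
    if (spill_slots[i] <= range_start) {  // slot free before the range starts
      spill_slots[i] = range_end;         // occupy it until the range ends
      return static_cast<int>(i);
    }
  }
  spill_slots.push_back(range_end);       // no reusable slot: grow the frame
  return static_cast<int>(spill_slots.size()) - 1;
}

int main() {
  std::vector<int> spill_slots;
  int s0 = AllocateSpillSlotFor(spill_slots, 0, 10);   // new slot 0
  int s1 = AllocateSpillSlotFor(spill_slots, 4, 8);    // slot 0 busy: slot 1
  int s2 = AllocateSpillSlotFor(spill_slots, 12, 20);  // slot 0 free again
  return (s0 == 0 && s1 == 1 && s2 == 0) ? 0 : 1;
}
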
// UsePosition represents a single use of an SSA value by some instruction.
// It points to a location slot which either tells the register allocator
// where the instruction expects the value (if the slot contains a fixed
// location) or asks the register allocator to allocate storage (a register
// or a spill slot) for this use with certain properties (if the slot
// contains an unallocated location).
class UsePosition : public ZoneAllocated {
 public:
-  enum UseFlag {
-    kNoFlag = 0,
-    kFixedUse = 1,
-    kSameAsFirstUse = 2,
-    kOther = 3
-  };
-
-  static const intptr_t kUseFlagMask = 0x3;
-  static const intptr_t kPositionShift = 2;
-
-  static UseFlag FlagForUse(const Location& loc) {
-    if (loc.IsRegister()) return kFixedUse;
-    if (loc.IsUnallocated() && (loc.policy() == Location::kSameAsFirstInput)) {
-      return kSameAsFirstUse;
-    }
-    return kOther;
-  }
-
-  // TODO(vegorov): we encode either position or instruction pointer
-  // into the pos_ field to generate moves when needed to resolve
-  // fixed or same-as-first constraints, but this looks ugly.
-  UsePosition(Instruction* instr,
-              intptr_t pos,
+  UsePosition(intptr_t pos,
              UsePosition* next,
              Location* location_slot)
-      : pos_(pos << kPositionShift),
+      : pos_(pos),
        location_slot_(location_slot),
        next_(next) {
-    // Non-NULL instr is considered unlikely so we preinitialize pos_ field
-    // with an encoded position even if instr is not NULL.
-    if (instr != NULL) {
-      ASSERT(location_slot_ != NULL);
-      pos_ = reinterpret_cast<intptr_t>(instr) | FlagForUse(*location_slot_);
-    }
-    ASSERT(this->pos() == pos);
  }

-  // Tell the use that it should load the value from the given location.
-  // If location slot for the use is flexible (unallocated) it will be updated
-  // with the given location. Otherwise a move will be scheduled from the given
-  // location to the location already stored in the slot.
-  void AssignLocation(Location loc);
-
  Location* location_slot() const { return location_slot_; }
  void set_location_slot(Location* location_slot) {
    location_slot_ = location_slot;
  }

  void set_next(UsePosition* next) { next_ = next; }
  UsePosition* next() const { return next_; }

-  intptr_t pos() const {
-    if ((pos_ & kUseFlagMask) != kNoFlag) {
-      return instr()->lifetime_position();
-    }
-    return pos_ >> kPositionShift;
-  }
-
-  Instruction* instr() const {
-    ASSERT((pos_ & kUseFlagMask) != kNoFlag);
-    return reinterpret_cast<Instruction*>(pos_ & ~kUseFlagMask);
-  }
+  intptr_t pos() const { return pos_; }

  bool HasHint() const {
-    return (pos_ & kUseFlagMask) == kFixedUse;
+    return (location_slot() != NULL) && (location_slot()->IsRegister());
  }

  Location hint() const {
    ASSERT(HasHint());
-    ASSERT(location_slot()->IsRegister());
-    return *location_slot_;
+    return *location_slot();
  }

 private:
-  intptr_t pos_;
+  const intptr_t pos_;
  Location* location_slot_;
  UsePosition* next_;
+
+  DISALLOW_COPY_AND_ASSIGN(UsePosition);
};
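
With the encoding flags gone, a hint is now just the first use whose location slot already holds a fixed register. Scanning a use chain for such a slot, as AllocationFinger::FirstHint presumably does with caching, looks roughly like this sketch with simplified stand-in types:

#include <cstddef>

struct Location { bool is_register; int reg; };
struct UsePosition {
  int pos;
  Location* location_slot;
  UsePosition* next;
};

// First fixed-register location along the use chain, or NULL.
static const Location* FirstHint(const UsePosition* use) {
  for (; use != NULL; use = use->next) {
    if (use->location_slot != NULL && use->location_slot->is_register) {
      return use->location_slot;
    }
  }
  return NULL;
}

int main() {
  Location fixed = {true, 2};
  Location any = {false, -1};
  UsePosition u2 = {9, &fixed, NULL};
  UsePosition u1 = {5, &any, &u2};
  return (FirstHint(&u1) == &fixed) ? 0 : 1;
}
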


// UseInterval represents a holeless half-open interval of liveness for a
// given SSA value: [start, end) in terms of lifetime positions that
// NumberInstructions assigns to instructions. The register allocator has to
// keep a value live in a register or in a spill slot from the start position
// until the end position. The interval can cover zero or more uses.
-// During the register allocation UseIntervals from different live ranges
-// allocated to the same register will be chained together through
-// next_allocated_ field.
// Note: currently all uses of the same SSA value are linked together into a
// single list (and not split between UseIntervals).
class UseInterval : public ZoneAllocated {
 public:
-  UseInterval(intptr_t vreg, intptr_t start, intptr_t end, UseInterval* next)
-      : vreg_(vreg),
-        start_(start),
+  UseInterval(intptr_t start, intptr_t end, UseInterval* next)
+      : start_(start),
        end_(end),
-        uses_((next == NULL) ? NULL : next->uses_),
-        next_(next),
-        next_allocated_(next) { }
+        next_(next) { }

-  void AddUse(Instruction* instr, intptr_t pos, Location* loc);
  void Print();

-  intptr_t vreg() const { return vreg_; }
  intptr_t start() const { return start_; }
  intptr_t end() const { return end_; }
-  UsePosition* first_use() const { return uses_; }
  UseInterval* next() const { return next_; }

  bool Contains(intptr_t pos) const {
    return (start() <= pos) && (pos < end());
  }

  // Return the smallest position that is covered by both UseIntervals or
  // kIllegalPosition if intervals do not intersect.
  intptr_t Intersect(UseInterval* other);

-  UseInterval* Split(intptr_t pos);
-
-  void set_next_allocated(UseInterval* next_allocated) {
-    next_allocated_ = next_allocated;
-  }
-  UseInterval* next_allocated() const { return next_allocated_; }
-
 private:
  friend class LiveRange;
-  const intptr_t vreg_;

  intptr_t start_;
  intptr_t end_;
+  UseInterval* next_;

-  UsePosition* uses_;
-
-  UseInterval* next_;
-  UseInterval* next_allocated_;
+  DISALLOW_COPY_AND_ASSIGN(UseInterval);
};


+// AllocationFinger is used to keep track of the currently active position
+// for the register allocator and to cache lookup results.
+class AllocationFinger : public ValueObject {
+ public:
+  AllocationFinger()
+      : first_pending_use_interval_(NULL),
+        first_register_use_(NULL),
+        first_register_beneficial_use_(NULL),
+        first_hinted_use_(NULL) {
+  }
+
+  void Initialize(LiveRange* range);
+  bool Advance(intptr_t start);
+
+  UseInterval* first_pending_use_interval() const {
+    return first_pending_use_interval_;
+  }
+
+  Location FirstHint();
+  UsePosition* FirstRegisterUse(intptr_t after_pos);
+  UsePosition* FirstRegisterBeneficialUse(intptr_t after_pos);
+
+ private:
+  UseInterval* first_pending_use_interval_;
+  UsePosition* first_register_use_;
+  UsePosition* first_register_beneficial_use_;
+  UsePosition* first_hinted_use_;
+
+  DISALLOW_COPY_AND_ASSIGN(AllocationFinger);
+};
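
UseInterval::Intersect above, and FirstIntersectionWithAllocated built on top of it, reduce to a standard sweep over two chains sorted by start position. A runnable sketch (kIllegalPosition and the chain layout here are simplified stand-ins):

#include <cassert>
#include <cstddef>

static const int kIllegalPosition = -1;

struct Interval { int start, end; Interval* next; };

// Smallest position covered by both intervals, or kIllegalPosition.
static int Intersect(const Interval* a, const Interval* b) {
  int start = (a->start > b->start) ? a->start : b->start;
  int end = (a->end < b->end) ? a->end : b->end;
  return (start < end) ? start : kIllegalPosition;
}

// First intersection between two chains sorted by start position.
static int FirstIntersection(const Interval* a, const Interval* b) {
  while (a != NULL && b != NULL) {
    int pos = Intersect(a, b);
    if (pos != kIllegalPosition) return pos;
    // Advance whichever chain ends first; it cannot intersect any later.
    if (a->end <= b->end) a = a->next; else b = b->next;
  }
  return kIllegalPosition;
}

int main() {
  Interval a2 = {14, 18, NULL}, a1 = {2, 6, &a2};
  Interval b2 = {16, 20, NULL}, b1 = {6, 10, &b2};
  assert(FirstIntersection(&a1, &b1) == 16);  // [2,6) and [6,10) don't touch
  return 0;
}
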


// LiveRange represents a sequence of UseIntervals for a given SSA value.
-// TODO(vegorov): this class is actually redundant currently.
class LiveRange : public ZoneAllocated {
 public:
-  explicit LiveRange(intptr_t vreg) : vreg_(vreg), head_(NULL) { }
+  explicit LiveRange(intptr_t vreg)
+      : vreg_(vreg),
+        uses_(NULL),
+        first_use_interval_(NULL),
+        last_use_interval_(NULL),
+        next_sibling_(NULL),
+        finger_() {
+  }

-  void DefineAt(Instruction* instr, intptr_t pos, Location* loc);
+  static LiveRange* MakeTemp(intptr_t pos, Location* location_slot);

-  void UseAt(Instruction* instr,
-             intptr_t def_pos,
-             intptr_t use_pos,
-             bool use_at_end,
-             Location* loc);
+  intptr_t vreg() const { return vreg_; }
+  LiveRange* next_sibling() const { return next_sibling_; }
+  UsePosition* first_use() const { return uses_; }
+  void set_first_use(UsePosition* use) { uses_ = use; }
+  UseInterval* first_use_interval() const { return first_use_interval_; }
+  UseInterval* last_use_interval() const { return last_use_interval_; }
+  Location assigned_location() const { return assigned_location_; }
+  intptr_t Start() const { return first_use_interval()->start(); }
+  intptr_t End() const { return last_use_interval()->end(); }
+
+  AllocationFinger* finger() { return &finger_; }
+
+  void set_assigned_location(Location location) {
+    assigned_location_ = location;
+  }
+
+  void DefineAt(intptr_t pos);
+
+  void AddUse(intptr_t pos, Location* location_slot);
  void AddUseInterval(intptr_t start, intptr_t end);

  void Print();

-  UseInterval* head() const { return head_; }
+  void AssignLocation(UseInterval* use, Location loc);
+
+  LiveRange* SplitAt(intptr_t pos);
+
+  bool CanCover(intptr_t pos) const {
+    return (Start() <= pos) && (pos < End());
+  }

 private:
+  LiveRange(intptr_t vreg,
+            UsePosition* uses,
+            UseInterval* first_use_interval,
+            UseInterval* last_use_interval,
+            LiveRange* next_sibling)
+      : vreg_(vreg),
+        uses_(uses),
+        first_use_interval_(first_use_interval),
+        last_use_interval_(last_use_interval),
+        next_sibling_(next_sibling),
+        finger_() {
+  }
+
  const intptr_t vreg_;
-  UseInterval* head_;
+  Location assigned_location_;
+
+  UsePosition* uses_;
+  UseInterval* first_use_interval_;
+  UseInterval* last_use_interval_;
+
+  LiveRange* next_sibling_;
+
+  AllocationFinger finger_;
+
+  DISALLOW_COPY_AND_ASSIGN(LiveRange);
};
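
Splitting a live range at a position partitions its interval chain: intervals wholly before the position stay, intervals wholly after move to the sibling, and a straddling interval is cut in two. A sketch of just the chain surgery (the real SplitAt also partitions the use list and links next_sibling_; names simplified):

#include <cstdio>

struct Interval { int start, end; Interval* next; };

// Detach everything at or after `pos` from the chain rooted at *chain and
// return it as the sibling's chain; a straddling interval is cut in two.
static Interval* SplitChainAt(Interval** chain, int pos) {
  Interval** link = chain;
  while (*link != NULL && (*link)->end <= pos) link = &(*link)->next;
  Interval* cur = *link;
  if (cur == NULL) return NULL;            // pos lies past the whole chain
  if (cur->start < pos) {
    // [start, end) straddles pos: keep [start, pos), give away [pos, end).
    Interval* right = new Interval{pos, cur->end, cur->next};
    cur->end = pos;
    cur->next = NULL;
    return right;
  }
  *link = NULL;                            // clean cut between intervals
  return cur;
}

int main() {
  Interval* second = new Interval{10, 14, NULL};
  Interval* chain = new Interval{2, 6, second};
  Interval* sibling = SplitChainAt(&chain, 4);
  std::printf("parent: [%d,%d)\n", chain->start, chain->end);  // [2,4)
  std::printf("sibling: [%d,%d) -> [%d,%d)\n",
              sibling->start, sibling->end,
              sibling->next->start, sibling->next->end);       // [4,6) -> [10,14)
  return 0;
}
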


}  // namespace dart

#endif  // VM_FLOW_GRAPH_ALLOCATOR_H_