Chromium Code Reviews
Diff: runtime/vm/flow_graph_compiler_x64.cc

Issue 10696151: Skeleton of a linear scan register allocator. (Closed) Base URL: https://dart.googlecode.com/svn/branches/bleeding_edge/dart
Patch Set: Created 8 years, 5 months ago
// Copyright (c) 2012, the Dart project authors. Please see the AUTHORS file
// for details. All rights reserved. Use of this source code is governed by a
// BSD-style license that can be found in the LICENSE file.

#include "vm/globals.h"  // Needed here to get TARGET_ARCH_X64.
#if defined(TARGET_ARCH_X64)

#include "vm/flow_graph_compiler.h"

#include "lib/error.h"

(...skipping 10 matching lines...)
DECLARE_FLAG(bool, print_ast);
DECLARE_FLAG(bool, print_scopes);
DECLARE_FLAG(bool, trace_functions);


void DeoptimizationStub::GenerateCode(FlowGraphCompiler* compiler) {
  Assembler* assem = compiler->assembler();
#define __ assem->
  __ Comment("Deopt stub for id %d", deopt_id_);
  __ Bind(entry_label());

  if (deoptimization_env_ == NULL) {
    for (intptr_t i = 0; i < registers_.length(); i++) {
      if (registers_[i] != kNoRegister) {
        __ pushq(registers_[i]);
      }
    }
  } else {
    // We have a deoptimization environment: tear down the optimized frame
    // and recreate the unoptimized one.
    __ leaq(RSP,
            Address(RBP, ParsedFunction::kFirstLocalSlotIndex * kWordSize));
srdjan 2012/07/10 23:27:54 How do you handle incoming parameters and incoming …
Vyacheslav Egorov (Google) 2012/07/11 13:27:58 Positional parameters are in the environment as well.

    const ZoneGrowableArray<Value*>& values = deoptimization_env_->values();
    const GrowableArray<Location>* locations = deoptimization_env_->locations();

    for (intptr_t i = 0; i < values.length(); i++) {
      Location loc = (*locations)[i];
      if (loc.IsInvalid()) {
        ASSERT(values[i]->IsConstant());
        __ PushObject(values[i]->AsConstant()->value());
      } else {
        ASSERT(loc.IsRegister());
        __ pushq(loc.reg());
      }
    }
  }
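  // Illustration (not part of the CL): for an environment whose values and
  // locations are, say,
  //   v0 = constant 42    -> Location invalid
  //   v1 = t0             -> Location register(RBX)
  //   v2 = constant null  -> Location invalid
  // the loop above emits PushObject(42); pushq RBX; PushObject(null);
  // rebuilding the unoptimized frame's slots in environment order.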

  if (compiler->IsLeaf()) {
    Label L;
    __ call(&L);
    const intptr_t offset = assem->CodeSize();
    __ Bind(&L);
    __ popq(RAX);
    __ subq(RAX,
        Immediate(offset - AssemblerMacros::kOffsetOfSavedPCfromEntrypoint));
    __ movq(Address(RBP, -kWordSize), RAX);
  }
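  // Note on the call/pop idiom above (illustration, not part of the CL).
  // popq RAX yields the absolute address of L, which is entry + offset;
  // subtracting (offset - kOffsetOfSavedPCfromEntrypoint) leaves
  //   RAX = entry + kOffsetOfSavedPCfromEntrypoint,
  // presumably the same saved-PC value a normal (non-leaf) frame would
  // hold, which is then stored into the slot at RBP - kWordSize.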
(...skipping 1027 matching lines...)
  __ movsd(result, FieldAddress(reg, Double::value_offset()));
  __ jmp(&done);
  __ Bind(&is_smi);
  __ movq(temp, reg);
  __ SmiUntag(temp);
  __ cvtsi2sd(result, temp);
  __ Bind(&done);
}


ParallelMoveResolver::ParallelMoveResolver(FlowGraphCompiler* compiler)
    : compiler_(compiler), moves_(32) {}
srdjan 2012/07/10 23:27:54 Some methods should be shared, I guess.
Vyacheslav Egorov (Google) 2012/07/11 13:27:58 Yes, this is the intent. Done when porting to ia32.


void ParallelMoveResolver::EmitNativeCode(ParallelMoveInstr* parallel_move) {
  ASSERT(moves_.is_empty());
  // Build up a worklist of moves.
  BuildInitialMoveList(parallel_move);

  for (int i = 0; i < moves_.length(); ++i) {
    MoveOperands move = moves_[i];
    // Skip constants to perform them last.  They don't block other moves
    // and skipping such moves with register destinations keeps those
    // registers free for the whole algorithm.
    if (!move.IsEliminated() && !move.src().IsConstant()) PerformMove(i);
  }

  // Perform the moves with constant sources.
  for (int i = 0; i < moves_.length(); ++i) {
    if (!moves_[i].IsEliminated()) {
      ASSERT(moves_[i].src().IsConstant());
      EmitMove(i);
    }
  }

  moves_.Clear();
}
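Since the resolver's behavior spans several methods, a compilable toy model may help. This is an illustrative sketch only, not the CL's code: Move, Blocks, and Perform are hypothetical stand-ins for MoveOperands, Blocks, and PerformMove, registers are modeled as an int array, and constants as plain ints.

// Toy model of parallel move resolution: depth-first ordering, cycle
// breaking by swap, and constant moves deferred to a final pass.
#include <cassert>
#include <cstdio>
#include <utility>
#include <vector>

struct Move {
  int src, dst;   // Register indices; src == -1 marks a constant source.
  int constant;   // Used only when src == -1.
  bool pending, done;
};

// A move "blocks" a location if it still needs to read from it.
static bool Blocks(const Move& m, int loc) { return !m.done && m.src == loc; }

static void Perform(std::vector<Move>& moves, int regs[], int index) {
  Move& m = moves[index];
  assert(!m.pending && !m.done);
  m.pending = true;  // Detect cycles that lead back to this move.

  // Depth-first: perform every unpending move that reads our destination.
  for (int i = 0; i < static_cast<int>(moves.size()); ++i) {
    if (i != index && Blocks(moves[i], m.dst) && !moves[i].pending) {
      Perform(moves, regs, i);
    }
  }
  m.pending = false;

  // A still-blocked destination means a cycle: break it with a swap,
  // the analogue of EmitSwap's xchgq.
  for (int i = 0; i < static_cast<int>(moves.size()); ++i) {
    if (i != index && Blocks(moves[i], m.dst)) {
      std::swap(regs[m.src], regs[m.dst]);
      m.done = true;
      // Redirect readers of the swapped locations (EmitSwap's fix-up loop).
      for (Move& other : moves) {
        if (Blocks(other, m.src)) other.src = m.dst;
        else if (Blocks(other, m.dst)) other.src = m.src;
      }
      return;
    }
  }
  regs[m.dst] = regs[m.src];  // Plain move (a self-move here is a no-op,
  m.done = true;              // standing in for the Eliminate() check).
}

int main() {
  int regs[3] = {10, 20, 30};
  // A two-register cycle plus one constant move: r0 <-> r1, r2 <- 42.
  std::vector<Move> moves = {{1, 0, 0, false, false},
                             {0, 1, 0, false, false},
                             {-1, 2, 42, false, false}};
  for (int i = 0; i < static_cast<int>(moves.size()); ++i) {
    if (moves[i].src != -1 && !moves[i].done) Perform(moves, regs, i);
  }
  for (Move& m : moves) {  // Constant sources never block; emit them last.
    if (!m.done) { regs[m.dst] = m.constant; m.done = true; }
  }
  printf("%d %d %d\n", regs[0], regs[1], regs[2]);  // Prints: 20 10 42
  return 0;
}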


void ParallelMoveResolver::BuildInitialMoveList(
    ParallelMoveInstr* parallel_move) {
srdjan 2012/07/10 23:27:54 indent 4 spaces
Vyacheslav Egorov (Google) 2012/07/11 13:27:58 Done.
  // Perform a linear sweep of the moves to add them to the initial list of
  // moves to perform, ignoring any move that is redundant (the source is
  // the same as the destination, the destination is ignored and
  // unallocated, or the move was already eliminated).
  const GrowableArray<MoveOperands>& moves = parallel_move->moves();
  for (int i = 0; i < moves.length(); ++i) {
    MoveOperands move = moves[i];
    if (!move.IsRedundant()) moves_.Add(move);
  }
}


void ParallelMoveResolver::PerformMove(int index) {
  // Each call to this function performs a move and deletes it from the move
  // graph.  We first recursively perform any move blocking this one.  We
  // mark a move as "pending" on entry to PerformMove in order to detect
  // cycles in the move graph.  We use operand swaps to resolve cycles,
  // which means that a call to PerformMove could change any source operand
  // in the move graph.

  ASSERT(!moves_[index].IsPending());
  ASSERT(!moves_[index].IsRedundant());

  // Clear this move's destination to indicate a pending move.  The actual
  // destination is saved in a stack-allocated local.  Recursion may allow
  // multiple moves to be pending.
  ASSERT(!moves_[index].src().IsInvalid());
  Location destination = moves_[index].MarkPending();

  // Perform a depth-first traversal of the move graph to resolve
  // dependencies.  Any unperformed, unpending move with a source the same
  // as this one's destination blocks this one, so recursively perform all
  // such moves.
  for (int i = 0; i < moves_.length(); ++i) {
    MoveOperands other_move = moves_[i];
    if (other_move.Blocks(destination) && !other_move.IsPending()) {
      // Though PerformMove can change any source operand in the move graph,
      // this call cannot create a blocking move via a swap (this loop does
      // not miss any).  Assume there is a non-blocking move with source A
      // and this move is blocked on source B, and there is a swap of A and
      // B.  Then A and B must be involved in the same cycle (or they would
      // not be swapped).  Since this move's destination is B and there is
      // only a single incoming edge to an operand, this move must also be
      // involved in the same cycle.  In that case, the blocking move will
      // be created but will be "pending" when we return from PerformMove.
      PerformMove(i);
    }
  }

  // We are about to resolve this move and don't need it marked as
  // pending, so restore its destination.
  moves_[index].ClearPending(destination);

  // This move's source may have changed due to swaps to resolve cycles and
  // so it may now be the last move in the cycle.  If so, remove it.
  if (moves_[index].src().Equals(destination)) {
    moves_[index].Eliminate();
    return;
  }

  // The move may be blocked on at most one pending move, in which case
  // we have a cycle.  Search for such a blocking move and perform a swap to
  // resolve it.
  for (int i = 0; i < moves_.length(); ++i) {
    MoveOperands other_move = moves_[i];
    if (other_move.Blocks(destination)) {
      ASSERT(other_move.IsPending());
      EmitSwap(index);
      return;
    }
  }

  // This move is not blocked.
  EmitMove(index);
}
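A concrete trace of the machinery above may help (registers arbitrary, not from the CL). For the two-move cycle m0: RAX <- RBX, m1: RBX <- RAX, PerformMove(0) unfolds as follows:

// PerformMove(0): MarkPending(m0), saving destination RAX locally.
//   m1 blocks RAX (its source is RAX) and is not pending -> recurse.
//   PerformMove(1): MarkPending(m1), saving destination RBX locally.
//     m0 blocks RBX but is already pending -> cycle detected, no recursion.
//     ClearPending(m1).  m1 is still blocked by the pending m0, so
//     EmitSwap(1):  xchgq RBX, RAX  resolves both moves at once, and the
//     fix-up loop rewrites m0's source from RBX to RAX.
//   ClearPending(m0).  Now m0.src equals its destination RAX, so m0 is
//   eliminated as the last move of the cycle.  No further code is emitted.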

#undef __

#undef __
srdjan 2012/07/10 23:27:54 One undef too many.
Vyacheslav Egorov (Google) 2012/07/11 13:27:58 Done.
#define __ compiler_->assembler()->

void ParallelMoveResolver::EmitMove(int index) {
  Location source = moves_[index].src();
  Location destination = moves_[index].dest();

  ASSERT(destination.IsRegister());
  if (source.IsRegister()) {
    __ movq(destination.reg(), source.reg());
srdjan 2012/07/10 23:27:54 indent
Vyacheslav Egorov (Google) 2012/07/11 13:27:58 Done.
  } else {
    ASSERT(source.IsConstant());
    const Object& value = source.constant();
    if (value.IsSmi()) {
Vyacheslav Egorov (Google) 2012/07/11 13:27:58 This is apparently not needed, the assembler does the smi check itself.
      int64_t imm = reinterpret_cast<int64_t>(value.raw());
      __ movq(destination.reg(), Immediate(imm));
    } else {
      __ LoadObject(destination.reg(), value);
    }
  }
  moves_[index].Eliminate();
}
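The Smi special case works because a Smi is a tagged immediate rather than a heap object, so its raw bits can be moved directly into a register. A sketch of the tagging arithmetic; kSmiTagShift and kSmiTag are assumed here from the VM's usual scheme (Smis carry a zero low bit), not quoted from the CL:

// Why  movq reg, Immediate(value.raw())  is sound for a Smi: the "pointer"
// bits fully encode the integer, so there is no heap object to load.
#include <cassert>
#include <cstdint>

constexpr int kSmiTagShift = 1;
constexpr intptr_t kSmiTag = 0;  // Low bit 0 marks a Smi.

constexpr intptr_t SmiRaw(intptr_t value) {
  return (value << kSmiTagShift) | kSmiTag;  // Models Smi::raw().
}

int main() {
  assert(SmiRaw(21) == 42);             // The raw bits ARE the value, tagged.
  assert((SmiRaw(21) & 1) == kSmiTag);  // Tag bit distinguishes Smi/pointer.
  return 0;
}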


void ParallelMoveResolver::EmitSwap(int index) {
  Location source = moves_[index].src();
  Location destination = moves_[index].dest();

  ASSERT(source.IsRegister() && destination.IsRegister());
  __ xchgq(destination.reg(), source.reg());

  // The swap of source and destination has executed a move from source to
  // destination.
  moves_[index].Eliminate();

  // Any unperformed (including pending) move with a source of either
  // this move's source or destination needs to have its source
  // changed to reflect the state of affairs after the swap.
  for (int i = 0; i < moves_.length(); ++i) {
    MoveOperands other_move = moves_[i];
    if (other_move.Blocks(source)) {
      moves_[i].set_src(destination);
    } else if (other_move.Blocks(destination)) {
      moves_[i].set_src(source);
    }
  }
}
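To see the fix-up loop act on more than two moves, consider the three-cycle RAX <- RBX, RBX <- RCX, RCX <- RAX (arbitrary registers, not from the CL). The recursion bottoms out at RBX <- RCX, which is swapped first:

// EmitSwap for m1: RBX <- RCX   (source RCX, destination RBX)
//   xchgq RBX, RCX              // RBX gets old RCX; RCX gets old RBX.
//   Fix-up: m0: RAX <- RBX reads the swapped destination RBX, so it is
//   rewritten to RAX <- RCX via set_src(source); m2: RCX <- RAX is untouched.
// The survivors RAX <- RCX and RCX <- RAX form a two-cycle; the next
// EmitSwap resolves it, after which the last move's source equals its
// destination and PerformMove eliminates it.  Total cost: two xchgq's.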


#undef __

}  // namespace dart

#endif  // defined TARGET_ARCH_X64