Chromium Code Reviews

Side by Side Diff: gcc/gcc/tree-ssa-loop-ivcanon.c

Issue 3050029: [gcc] GCC 4.5.0=>4.5.1 (Closed) Base URL: ssh://git@gitrw.chromium.org:9222/nacl-toolchain.git
Patch Set: Created 10 years, 4 months ago
1 /* Induction variable canonicalization. 1 /* Induction variable canonicalization.
2 Copyright (C) 2004, 2005, 2007, 2008 Free Software Foundation, Inc. 2 Copyright (C) 2004, 2005, 2007, 2008, 2010
3 3 Free Software Foundation, Inc.
4
4 This file is part of GCC. 5 This file is part of GCC.
5 6
6 GCC is free software; you can redistribute it and/or modify it 7 GCC is free software; you can redistribute it and/or modify it
7 under the terms of the GNU General Public License as published by the 8 under the terms of the GNU General Public License as published by the
8 Free Software Foundation; either version 3, or (at your option) any 9 Free Software Foundation; either version 3, or (at your option) any
9 later version. 10 later version.
10 11
11 GCC is distributed in the hope that it will be useful, but WITHOUT 12 GCC is distributed in the hope that it will be useful, but WITHOUT
12 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 13 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 14 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14 for more details. 15 for more details.
15 16
16 You should have received a copy of the GNU General Public License 17 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3. If not see 18 along with GCC; see the file COPYING3. If not see
18 <http://www.gnu.org/licenses/>. */ 19 <http://www.gnu.org/licenses/>. */
19 20
20 /* This pass detects the loops that iterate a constant number of times, 21 /* This pass detects the loops that iterate a constant number of times,
21 adds a canonical induction variable (step -1, tested against 0) 22 adds a canonical induction variable (step -1, tested against 0)
22 and replaces the exit test. This enables the less powerful rtl 23 and replaces the exit test. This enables the less powerful rtl
23 level analysis to use this information. 24 level analysis to use this information.
24 25
25 This might spoil the code in some cases (by increasing register pressure). 26 This might spoil the code in some cases (by increasing register pressure).
26 Note that in the case the new variable is not needed, ivopts will get rid 27 Note that in the case the new variable is not needed, ivopts will get rid
27 of it, so it might only be a problem when there are no other linear induction 28 of it, so it might only be a problem when there are no other linear induction
28 variables. In that case the created optimization possibilities are likely 29 variables. In that case the created optimization possibilities are likely
29 to pay up. 30 to pay up.
30 31
31 Additionally in case we detect that it is beneficial to unroll the 32 Additionally in case we detect that it is beneficial to unroll the
(...skipping 14 matching lines...)
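Illustration (editor's sketch, not part of the patch): the transformation described in the comment above, written at the C level with made-up names. The pass itself works on GIMPLE; this only shows the shape of the result, a counted loop rewritten around a canonical IV that steps by -1 and is tested against 0.

/* Before: the exit test compares an upward-counting i against 16.  */
void before (int *a)
{
  int i;
  for (i = 0; i < 16; i++)
    a[i] = 2 * a[i];
}

/* After (conceptually): a canonical IV counts down to zero, so the
   less powerful RTL-level analysis can read off the trip count.  */
void after (int *a)
{
  int i = 0;
  unsigned ivtmp = 16;       /* canonical IV, step -1 */
  do
    {
      a[i] = 2 * a[i];
      i++;
      ivtmp--;
    }
  while (ivtmp != 0);        /* tested against 0 */
}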
46 #include "tree-flow.h" 47 #include "tree-flow.h"
47 #include "tree-dump.h" 48 #include "tree-dump.h"
48 #include "cfgloop.h" 49 #include "cfgloop.h"
49 #include "tree-pass.h" 50 #include "tree-pass.h"
50 #include "ggc.h" 51 #include "ggc.h"
51 #include "tree-chrec.h" 52 #include "tree-chrec.h"
52 #include "tree-scalar-evolution.h" 53 #include "tree-scalar-evolution.h"
53 #include "params.h" 54 #include "params.h"
54 #include "flags.h" 55 #include "flags.h"
55 #include "tree-inline.h" 56 #include "tree-inline.h"
57 #include "target.h"
56 58
57 /* Specifies types of loops that may be unrolled. */ 59 /* Specifies types of loops that may be unrolled. */
58 60
59 enum unroll_level 61 enum unroll_level
60 { 62 {
61 UL_SINGLE_ITER, /* Only loops that exit immediately in the first 63 UL_SINGLE_ITER, /* Only loops that exit immediately in the first
62 iteration. */ 64 iteration. */
63 UL_NO_GROWTH, /* Only loops whose unrolling will not cause increase 65 UL_NO_GROWTH, /* Only loops whose unrolling will not cause increase
64 of code size. */ 66 of code size. */
65 UL_ALL /* All suitable loops. */ 67 UL_ALL /* All suitable loops. */
(...skipping 45 matching lines...)
111 update_stmt (cond); 113 update_stmt (cond);
112 } 114 }
113 115
114 /* Computes an estimated number of insns in LOOP, weighted by WEIGHTS. */ 116 /* Computes an estimated number of insns in LOOP, weighted by WEIGHTS. */
115 117
116 unsigned 118 unsigned
117 tree_num_loop_insns (struct loop *loop, eni_weights *weights) 119 tree_num_loop_insns (struct loop *loop, eni_weights *weights)
118 { 120 {
119 basic_block *body = get_loop_body (loop); 121 basic_block *body = get_loop_body (loop);
120 gimple_stmt_iterator gsi; 122 gimple_stmt_iterator gsi;
121 unsigned size = 1, i; 123 unsigned size = 0, i;
122 124
123 for (i = 0; i < loop->num_nodes; i++) 125 for (i = 0; i < loop->num_nodes; i++)
124 for (gsi = gsi_start_bb (body[i]); !gsi_end_p (gsi); gsi_next (&gsi)) 126 for (gsi = gsi_start_bb (body[i]); !gsi_end_p (gsi); gsi_next (&gsi))
125 size += estimate_num_insns (gsi_stmt (gsi), weights); 127 size += estimate_num_insns (gsi_stmt (gsi), weights);
126 free (body); 128 free (body);
127 129
128 return size; 130 return size;
129 } 131 }
130 132
131 /* Estimate number of insns of completely unrolled loop. We assume 133 /* Describe size of loop as detected by tree_estimate_loop_size. */
132 that the size of the unrolled loop is decreased in the 134 struct loop_size
133 following way (the numbers of insns are based on what 135 {
134 estimate_num_insns returns for appropriate statements): 136 /* Number of instructions in the loop. */
135 137 int overall;
136 1) exit condition gets removed (2 insns) 138
137 2) increment of the control variable gets removed (2 insns) 139 /* Number of instructions that will be likely optimized out in
138 3) All remaining statements are likely to get simplified 140 peeled iterations of loop (i.e. computation based on induction
139 due to constant propagation. Hard to estimate; just 141 variable where induction variable starts at known constant.) */
140 as a heuristics we decrease the rest by 1/3. 142 int eliminated_by_peeling;
141 143
142 NINSNS is the number of insns in the loop before unrolling. 144 /* Same statistics for last iteration of loop: it is smaller because
143 NUNROLL is the number of times the loop is unrolled. */ 145 instructions after exit are not executed. */
146 int last_iteration;
147 int last_iteration_eliminated_by_peeling;
148 };
149
150 /* Return true if OP in STMT will be constant after peeling LOOP. */
151
152 static bool
153 constant_after_peeling (tree op, gimple stmt, struct loop *loop)
154 {
155 affine_iv iv;
156
157 if (is_gimple_min_invariant (op))
158 return true;
159
160 /* We can still fold accesses to constant arrays when index is known. */
161 if (TREE_CODE (op) != SSA_NAME)
162 {
163 tree base = op;
164
165       /* First take a quick look to see whether there is a constant array inside. */
166 while (handled_component_p (base))
167         base = TREE_OPERAND (base, 0);
168       if ((DECL_P (base)
169            && TREE_STATIC (base)
170            && TREE_READONLY (base)
171           && (DECL_INITIAL (base)
172                || (!DECL_EXTERNAL (base)
173                    && targetm.binds_local_p (base))))
174           || CONSTANT_CLASS_P (base))
175         {
176           /* If so, see if we understand all the indices. */
177           base = op;
178           while (handled_component_p (base))
179             {
180               if (TREE_CODE (base) == ARRAY_REF
181                   && !constant_after_peeling (TREE_OPERAND (base, 1), stmt, loop))
182                 return false;
183               base = TREE_OPERAND (base, 0);
184             }
185           return true;
186         }
187 return false;
188 }
189
190 /* Induction variables are constants. */
191 if (!simple_iv (loop, loop_containing_stmt (stmt), op, &iv, false))
192 return false;
193 if (!is_gimple_min_invariant (iv.base))
194 return false;
195 if (!is_gimple_min_invariant (iv.step))
196 return false;
197 return true;
198 }
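Editor's note, an assumed example (the names table and sum are invented, not from this patch) of what constant_after_peeling is meant to recognize: once the loop is fully peeled, the IV-based index into a static, read-only array is a known constant in every peeled copy, so the load folds away.

static const int table[4] = { 1, 2, 4, 8 };

int sum (void)
{
  int s = 0, i;
  for (i = 0; i < 4; i++)
    s += table[i];   /* after peeling: s = 1 + 2 + 4 + 8 = 15 */
  return s;
}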
199
200 /* Computes an estimated number of insns in LOOP, weighted by WEIGHTS.
201    Return results in SIZE, estimate benefits for complete unrolling exiting by EXIT. */
202
203 static void
204 tree_estimate_loop_size (struct loop *loop, edge exit, struct loop_size *size)
205 {
206 basic_block *body = get_loop_body (loop);
207 gimple_stmt_iterator gsi;
208 unsigned int i;
209 bool after_exit;
210
211 size->overall = 0;
212 size->eliminated_by_peeling = 0;
213 size->last_iteration = 0;
214 size->last_iteration_eliminated_by_peeling = 0;
215
216 if (dump_file && (dump_flags & TDF_DETAILS))
217 fprintf (dump_file, "Estimating sizes for loop %i\n", loop->num);
218 for (i = 0; i < loop->num_nodes; i++)
219 {
220 if (exit && body[i] != exit->src
221           && dominated_by_p (CDI_DOMINATORS, body[i], exit->src))
222         after_exit = true;
223       else
224         after_exit = false;
225       if (dump_file && (dump_flags & TDF_DETAILS))
226         fprintf (dump_file, " BB: %i, after_exit: %i\n", body[i]->index, after_exit);
227
228 for (gsi = gsi_start_bb (body[i]); !gsi_end_p (gsi); gsi_next (&gsi))
229       {
230         gimple stmt = gsi_stmt (gsi);
231         int num = estimate_num_insns (stmt, &eni_size_weights);
232         bool likely_eliminated = false;
233
234         if (dump_file && (dump_flags & TDF_DETAILS))
235           {
236             fprintf (dump_file, " size: %3i ", num);
237             print_gimple_stmt (dump_file, gsi_stmt (gsi), 0, 0);
238           }
239
240         /* Look for reasons why we might optimize this stmt away. */
241
242         /* Exit conditional. */
243         if (body[i] == exit->src && stmt == last_stmt (exit->src))
244           {
245             if (dump_file && (dump_flags & TDF_DETAILS))
246               fprintf (dump_file, " Exit condition will be eliminated.\n");
247             likely_eliminated = true;
248           }
249         /* Sets of IV variables */
250         else if (gimple_code (stmt) == GIMPLE_ASSIGN
251                  && constant_after_peeling (gimple_assign_lhs (stmt), stmt, loop))
252           {
253             if (dump_file && (dump_flags & TDF_DETAILS))
254               fprintf (dump_file, " Induction variable computation will"
255                        " be folded away.\n");
256             likely_eliminated = true;
257           }
258         /* Assignments of IV variables. */
259         else if (gimple_code (stmt) == GIMPLE_ASSIGN
260                  && TREE_CODE (gimple_assign_lhs (stmt)) == SSA_NAME
261                  && constant_after_peeling (gimple_assign_rhs1 (stmt), stmt, loop)
262                  && (gimple_assign_rhs_class (stmt) != GIMPLE_BINARY_RHS
263                      || constant_after_peeling (gimple_assign_rhs2 (stmt),
264                                                 stmt, loop)))
265           {
266             if (dump_file && (dump_flags & TDF_DETAILS))
267               fprintf (dump_file, " Constant expression will be folded away.\n");
268             likely_eliminated = true;
269           }
270         /* Conditionals. */
271         else if (gimple_code (stmt) == GIMPLE_COND
272                  && constant_after_peeling (gimple_cond_lhs (stmt), stmt, loop)
273                  && constant_after_peeling (gimple_cond_rhs (stmt), stmt, loop))
274           {
275             if (dump_file && (dump_flags & TDF_DETAILS))
276               fprintf (dump_file, " Constant conditional.\n");
277             likely_eliminated = true;
278           }
279
280         size->overall += num;
281         if (likely_eliminated)
282           size->eliminated_by_peeling += num;
283         if (!after_exit)
284           {
285             size->last_iteration += num;
286             if (likely_eliminated)
287               size->last_iteration_eliminated_by_peeling += num;
288           }
289       }
290 }
291 if (dump_file && (dump_flags & TDF_DETAILS))
292 fprintf (dump_file, "size: %i-%i, last_iteration: %i-%i\n", size->overall,
293            size->eliminated_by_peeling, size->last_iteration,
294            size->last_iteration_eliminated_by_peeling);
295
296 free (body);
297 }
298
299 /* Estimate number of insns of completely unrolled loop.
300 It is (NUNROLL + 1) * size of loop body with taking into account
301 the fact that in last copy everything after exit conditional
302 is dead and that some instructions will be eliminated after
303 peeling.
304
305    The loop body is likely going to simplify further; this is difficult
306    to guess, so we just decrease the result by 1/3. */
144 307
145 static unsigned HOST_WIDE_INT 308 static unsigned HOST_WIDE_INT
146 estimated_unrolled_size (unsigned HOST_WIDE_INT ninsns, 309 estimated_unrolled_size (struct loop_size *size,
147 unsigned HOST_WIDE_INT nunroll) 310 unsigned HOST_WIDE_INT nunroll)
148 { 311 {
149 HOST_WIDE_INT unr_insns = 2 * ((HOST_WIDE_INT) ninsns - 4) / 3; 312 HOST_WIDE_INT unr_insns = ((nunroll)
313                              * (HOST_WIDE_INT) (size->overall
314                                                 - size->eliminated_by_peeling));
315 if (!nunroll)
316 unr_insns = 0;
317 unr_insns += size->last_iteration - size->last_iteration_eliminated_by_peeling;
318
319 unr_insns = unr_insns * 2 / 3;
150 if (unr_insns <= 0) 320 if (unr_insns <= 0)
151 unr_insns = 1; 321 unr_insns = 1;
152 unr_insns *= (nunroll + 1);
153 322
154 return unr_insns; 323 return unr_insns;
155 } 324 }
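Editor's worked example of the new size estimate, with assumed numbers that do not come from any real dump: size->overall = 30, size->eliminated_by_peeling = 10, size->last_iteration = 20, size->last_iteration_eliminated_by_peeling = 8, nunroll = 4.

#include <stdio.h>

int main (void)
{
  long unr_insns = 4 * (30 - 10);  /* full copies: 80 insns      */
  unr_insns += 20 - 8;             /* truncated last copy: 92    */
  unr_insns = unr_insns * 2 / 3;   /* further-folding fudge: 61  */
  printf ("%ld\n", unr_insns);     /* prints 61 */
  return 0;
}

Under the old formula the same loop would have been estimated as (nunroll + 1) * (2 * (ninsns - 4) / 3); the new bookkeeping instead credits the statements that peeling is expected to eliminate, separately for the full copies and the truncated last iteration.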
156 325
157 /* Tries to unroll LOOP completely, i.e. NITER times. 326 /* Tries to unroll LOOP completely, i.e. NITER times.
158 UL determines which loops we are allowed to unroll. 327 UL determines which loops we are allowed to unroll.
159 EXIT is the exit of the loop that should be eliminated. */ 328 EXIT is the exit of the loop that should be eliminated. */
160 329
161 static bool 330 static bool
162 try_unroll_loop_completely (struct loop *loop, 331 try_unroll_loop_completely (struct loop *loop,
163 edge exit, tree niter, 332 edge exit, tree niter,
164 enum unroll_level ul) 333 enum unroll_level ul)
165 { 334 {
166 unsigned HOST_WIDE_INT n_unroll, ninsns, max_unroll, unr_insns; 335 unsigned HOST_WIDE_INT n_unroll, ninsns, max_unroll, unr_insns;
167 gimple cond; 336 gimple cond;
337 struct loop_size size;
168 338
169 if (loop->inner) 339 if (loop->inner)
170 return false; 340 return false;
171 341
172 if (!host_integerp (niter, 1)) 342 if (!host_integerp (niter, 1))
173 return false; 343 return false;
174 n_unroll = tree_low_cst (niter, 1); 344 n_unroll = tree_low_cst (niter, 1);
175 345
176 max_unroll = PARAM_VALUE (PARAM_MAX_COMPLETELY_PEEL_TIMES); 346 max_unroll = PARAM_VALUE (PARAM_MAX_COMPLETELY_PEEL_TIMES);
177 if (n_unroll > max_unroll) 347 if (n_unroll > max_unroll)
178 return false; 348 return false;
179 349
180 if (n_unroll) 350 if (n_unroll)
181 { 351 {
182 if (ul == UL_SINGLE_ITER) 352 if (ul == UL_SINGLE_ITER)
183 return false; 353 return false;
184 354
185 ninsns = tree_num_loop_insns (loop, &eni_size_weights); 355 tree_estimate_loop_size (loop, exit, &size);
356 ninsns = size.overall;
186 357
187 unr_insns = estimated_unrolled_size (ninsns, n_unroll); 358 unr_insns = estimated_unrolled_size (&size, n_unroll);
188 if (dump_file && (dump_flags & TDF_DETAILS)) 359 if (dump_file && (dump_flags & TDF_DETAILS))
189 { 360 {
190 fprintf (dump_file, " Loop size: %d\n", (int) ninsns); 361 fprintf (dump_file, " Loop size: %d\n", (int) ninsns);
191 fprintf (dump_file, " Estimated size after unrolling: %d\n", 362 fprintf (dump_file, " Estimated size after unrolling: %d\n",
192 (int) unr_insns); 363 (int) unr_insns);
193 } 364 }
194 365
195 if (unr_insns > ninsns 366 if (unr_insns > ninsns
196 && (unr_insns 367 && (unr_insns
197 > (unsigned) PARAM_VALUE (PARAM_MAX_COMPLETELY_PEELED_INSNS))) 368 > (unsigned) PARAM_VALUE (PARAM_MAX_COMPLETELY_PEELED_INSNS)))
(...skipping 56 matching lines...)
254 update_stmt (cond); 425 update_stmt (cond);
255 update_ssa (TODO_update_ssa); 426 update_ssa (TODO_update_ssa);
256 427
257 if (dump_file && (dump_flags & TDF_DETAILS)) 428 if (dump_file && (dump_flags & TDF_DETAILS))
258 fprintf (dump_file, "Unrolled loop %d completely.\n", loop->num); 429 fprintf (dump_file, "Unrolled loop %d completely.\n", loop->num);
259 430
260 return true; 431 return true;
261 } 432 }
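Editor's sketch (C level, invented function names) of the effect try_unroll_loop_completely is gating above: with a constant trip count within PARAM_MAX_COMPLETELY_PEEL_TIMES and an acceptable estimated size, the loop body is replicated and the exit test disappears.

void before (int *a)
{
  int i;
  for (i = 0; i < 4; i++)   /* niter is the constant 4 */
    a[i] = 0;
}

void after (int *a)         /* no loop, no exit test left */
{
  a[0] = 0;
  a[1] = 0;
  a[2] = 0;
  a[3] = 0;
}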
262 433
263 /* Adds a canonical induction variable to LOOP if suitable. 434 /* Adds a canonical induction variable to LOOP if suitable.
264 CREATE_IV is true if we may create a new iv. UL determines 435 CREATE_IV is true if we may create a new iv. UL determines
265 which loops we are allowed to completely unroll. If TRY_EVAL is true, we try 436 which loops we are allowed to completely unroll. If TRY_EVAL is true, we try
266 to determine the number of iterations of a loop by direct evaluation. 437 to determine the number of iterations of a loop by direct evaluation.
267 Returns true if cfg is changed. */ 438 Returns true if cfg is changed. */
268 439
269 static bool 440 static bool
270 canonicalize_loop_induction_variables (struct loop *loop, 441 canonicalize_loop_induction_variables (struct loop *loop,
271 bool create_iv, enum unroll_level ul, 442 bool create_iv, enum unroll_level ul,
272 bool try_eval) 443 bool try_eval)
273 { 444 {
274 edge exit = NULL; 445 edge exit = NULL;
275 tree niter; 446 tree niter;
276 447
(...skipping 40 matching lines...)
317 488
318 /* The main entry point of the pass. Adds canonical induction variables 489 /* The main entry point of the pass. Adds canonical induction variables
319 to the suitable loops. */ 490 to the suitable loops. */
320 491
321 unsigned int 492 unsigned int
322 canonicalize_induction_variables (void) 493 canonicalize_induction_variables (void)
323 { 494 {
324 loop_iterator li; 495 loop_iterator li;
325 struct loop *loop; 496 struct loop *loop;
326 bool changed = false; 497 bool changed = false;
327 498
328 FOR_EACH_LOOP (li, loop, 0) 499 FOR_EACH_LOOP (li, loop, 0)
329 { 500 {
330 changed |= canonicalize_loop_induction_variables (loop, 501 changed |= canonicalize_loop_induction_variables (loop,
331 true, UL_SINGLE_ITER, 502 true, UL_SINGLE_ITER,
332 true); 503 true);
333 } 504 }
334 505
335 /* Clean up the information about numbers of iterations, since brute force 506 /* Clean up the information about numbers of iterations, since brute force
336 evaluation could reveal new information. */ 507 evaluation could reveal new information. */
337 scev_reset (); 508 scev_reset ();
338 509
339 if (changed) 510 if (changed)
340 return TODO_cleanup_cfg; 511 return TODO_cleanup_cfg;
341 return 0; 512 return 0;
342 } 513 }
343 514
344 /* Unroll LOOPS completely if they iterate just few times. Unless 515 /* Unroll LOOPS completely if they iterate just few times. Unless
345 MAY_INCREASE_SIZE is true, perform the unrolling only if the 516 MAY_INCREASE_SIZE is true, perform the unrolling only if the
346 size of the code does not increase. */ 517 size of the code does not increase. */
347 518
348 unsigned int 519 unsigned int
349 tree_unroll_loops_completely (bool may_increase_size, bool unroll_outer) 520 tree_unroll_loops_completely (bool may_increase_size, bool unroll_outer)
350 { 521 {
351 loop_iterator li; 522 loop_iterator li;
352 struct loop *loop; 523 struct loop *loop;
353 bool changed; 524 bool changed;
354 enum unroll_level ul; 525 enum unroll_level ul;
526 int iteration = 0;
355 527
356 do 528 do
357 { 529 {
358 changed = false; 530 changed = false;
359 531
360 FOR_EACH_LOOP (li, loop, LI_ONLY_INNERMOST) 532 FOR_EACH_LOOP (li, loop, LI_ONLY_INNERMOST)
361 { 533 {
362 if (may_increase_size && optimize_loop_for_speed_p (loop) 534 if (may_increase_size && optimize_loop_for_speed_p (loop)
363 /* Unroll outermost loops only if asked to do so or they do 535 /* Unroll outermost loops only if asked to do so or they do
364 not cause code growth. */ 536 not cause code growth. */
(...skipping 12 matching lines...)
377 from the loop structures so we can continue unrolling now 549 from the loop structures so we can continue unrolling now
378 innermost loops. */ 550 innermost loops. */
379 if (cleanup_tree_cfg ()) 551 if (cleanup_tree_cfg ())
380 update_ssa (TODO_update_ssa_only_virtuals); 552 update_ssa (TODO_update_ssa_only_virtuals);
381 553
382 /* Clean up the information about numbers of iterations, since 554 /* Clean up the information about numbers of iterations, since
383 complete unrolling might have invalidated it. */ 555 complete unrolling might have invalidated it. */
384 scev_reset (); 556 scev_reset ();
385 } 557 }
386 } 558 }
387 while (changed); 559 while (changed
560          && ++iteration <= PARAM_VALUE (PARAM_MAX_UNROLL_ITERATIONS));
388 561
389 return 0; 562 return 0;
390 } 563 }
391
392 /* Checks whether LOOP is empty. */
393
394 static bool
395 empty_loop_p (struct loop *loop)
396 {
397 edge exit;
398 struct tree_niter_desc niter;
399 basic_block *body;
400 gimple_stmt_iterator gsi;
401 unsigned i;
402
403 /* If the loop has multiple exits, it is too hard for us to handle.
404 Similarly, if the exit is not dominating, we cannot determine
405 whether the loop is not infinite. */
406 exit = single_dom_exit (loop);
407 if (!exit)
408 return false;
409
410 /* The loop must be finite. */
411 if (!number_of_iterations_exit (loop, exit, &niter, false))
412 return false;
413
414 /* Values of all loop exit phi nodes must be invariants. */
415 for (gsi = gsi_start(phi_nodes (exit->dest)); !gsi_end_p (gsi); gsi_next (&gsi))
416 {
417 gimple phi = gsi_stmt (gsi);
418 tree def;
419
420 if (!is_gimple_reg (PHI_RESULT (phi)))
421 continue;
422
423 def = PHI_ARG_DEF_FROM_EDGE (phi, exit);
424
425 if (!expr_invariant_in_loop_p (loop, def))
426 return false;
427 }
428
429 /* And there should be no memory modifying or from other reasons
430 unremovable statements. */
431 body = get_loop_body (loop);
432 for (i = 0; i < loop->num_nodes; i++)
433 {
434 /* Irreducible region might be infinite. */
435 if (body[i]->flags & BB_IRREDUCIBLE_LOOP)
436 {
437 free (body);
438 return false;
439 }
440
441 for (gsi = gsi_start_bb (body[i]); !gsi_end_p (gsi); gsi_next (&gsi))
442 {
443 gimple stmt = gsi_stmt (gsi);
444
445 if (!ZERO_SSA_OPERANDS (stmt, SSA_OP_VIRTUAL_DEFS)
446 || gimple_has_volatile_ops (stmt))
447 {
448 free (body);
449 return false;
450 }
451
452 /* Also, asm statements and calls may have side effects and we
453 cannot change the number of times they are executed. */
454 switch (gimple_code (stmt))
455 {
456 case GIMPLE_CALL:
457 if (gimple_has_side_effects (stmt))
458 {
459 free (body);
460 return false;
461 }
462 break;
463
464 case GIMPLE_ASM:
465 /* We cannot remove volatile assembler. */
466 if (gimple_asm_volatile_p (stmt))
467 {
468 free (body);
469 return false;
470 }
471 break;
472
473 default:
474 break;
475 }
476 }
477 }
478 free (body);
479
480 return true;
481 }
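Editor's example (hypothetical code, not from the patch) of the distinction empty_loop_p draws: the first loop has a single dominating exit, a computable iteration count and no statement with observable effects, so it qualifies; the second is rejected because the call may have side effects.

extern int g (int);

void removable (int n)
{
  int i;
  for (i = 0; i < n; i++)
    ;                       /* nothing observable happens */
}

void kept (int n)
{
  int i;
  for (i = 0; i < n; i++)
    g (i);                  /* call may have side effects */
}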
482
483 /* Remove LOOP by making it exit in the first iteration. */
484
485 static void
486 remove_empty_loop (struct loop *loop)
487 {
488 edge exit = single_dom_exit (loop), non_exit;
489 gimple cond_stmt = last_stmt (exit->src);
490 basic_block *body;
491 unsigned n_before, freq_in, freq_h;
492 gcov_type exit_count = exit->count;
493
494 if (dump_file)
495 fprintf (dump_file, "Removing empty loop %d\n", loop->num);
496
497 non_exit = EDGE_SUCC (exit->src, 0);
498 if (non_exit == exit)
499 non_exit = EDGE_SUCC (exit->src, 1);
500
501 if (exit->flags & EDGE_TRUE_VALUE)
502 gimple_cond_make_true (cond_stmt);
503 else
504 gimple_cond_make_false (cond_stmt);
505 update_stmt (cond_stmt);
506
507 /* Let us set the probabilities of the edges coming from the exit block. */
508 exit->probability = REG_BR_PROB_BASE;
509 non_exit->probability = 0;
510 non_exit->count = 0;
511
512 /* Update frequencies and counts. Everything before
513 the exit needs to be scaled FREQ_IN/FREQ_H times,
514 where FREQ_IN is the frequency of the entry edge
515 and FREQ_H is the frequency of the loop header.
516 Everything after the exit has zero frequency. */
517 freq_h = loop->header->frequency;
518 freq_in = EDGE_FREQUENCY (loop_preheader_edge (loop));
519 if (freq_h != 0)
520 {
521 body = get_loop_body_in_dom_order (loop);
522 for (n_before = 1; n_before <= loop->num_nodes; n_before++)
523 if (body[n_before - 1] == exit->src)
524 break;
525 scale_bbs_frequencies_int (body, n_before, freq_in, freq_h);
526 scale_bbs_frequencies_int (body + n_before, loop->num_nodes - n_before,
527 0, 1);
528 free (body);
529 }
530
531 /* Number of executions of exit is not changed, thus we need to restore
532 the original value. */
533 exit->count = exit_count;
534 }
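Editor's source-level analogy (the pass actually rewrites the GIMPLE_COND, it does not edit C): remove_empty_loop forces the exit to be taken on the first iteration, and a later cleanup_tree_cfg pass deletes the now-unreachable body.

void before (int n)
{
  int i;
  for (i = 0; i < n; i++)
    ;
}

void after (int n)
{
  int i;
  for (i = 0; 0 /* continue-condition forced false */; i++)
    ;                       /* unreachable; removed by CFG cleanup */
}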
535
536 /* Removes LOOP if it is empty. Returns true if LOOP is removed. CHANGED
537 is set to true if LOOP or any of its subloops is removed. */
538
539 static bool
540 try_remove_empty_loop (struct loop *loop, bool *changed)
541 {
542 bool nonempty_subloop = false;
543 struct loop *sub;
544
545 /* First, all subloops must be removed. */
546 for (sub = loop->inner; sub; sub = sub->next)
547 nonempty_subloop |= !try_remove_empty_loop (sub, changed);
548
549 if (nonempty_subloop || !empty_loop_p (loop))
550 return false;
551
552 remove_empty_loop (loop);
553 *changed = true;
554 return true;
555 }
556
557 /* Remove the empty loops. */
558
559 unsigned int
560 remove_empty_loops (void)
561 {
562 bool changed = false;
563 struct loop *loop;
564
565 for (loop = current_loops->tree_root->inner; loop; loop = loop->next)
566 try_remove_empty_loop (loop, &changed);
567
568 if (changed)
569 {
570 scev_reset ();
571 return TODO_cleanup_cfg;
572 }
573 return 0;
574 }
575