Index: gcc/gcc/sel-sched-ir.c
diff --git a/gcc/gcc/sel-sched-ir.c b/gcc/gcc/sel-sched-ir.c
index 6f16d751bee9fc1249e652b3d4138839ba7de9fb..3542bde53ab2dcfa88782b2fbf6a2888ec09f1d1 100644
--- a/gcc/gcc/sel-sched-ir.c
+++ b/gcc/gcc/sel-sched-ir.c
@@ -1,5 +1,5 @@
/* Instruction scheduling pass. Selective scheduler and pipeliner.
- Copyright (C) 2006, 2007, 2008 Free Software Foundation, Inc.
+ Copyright (C) 2006, 2007, 2008, 2009, 2010 Free Software Foundation, Inc.
This file is part of GCC.
@@ -122,7 +122,7 @@ static struct
int n;
/* Its size. */
- int s;
+ int s;
} nop_pool = { NULL, 0, 0 };
/* The pool for basic block notes. */
@@ -134,7 +134,7 @@ rtx nop_pattern = NULL_RTX;
EXIT_INSN is successor of the insns that lead to EXIT_BLOCK. */
rtx exit_insn = NULL_RTX;
-/* TRUE if while scheduling current region, which is loop, its preheader
+/* TRUE if while scheduling current region, which is loop, its preheader
was removed. */
bool preheader_removed = false;
@@ -157,6 +157,7 @@ static void sel_remove_loop_preheader (void);
static bool insn_is_the_only_one_in_bb_p (insn_t);
static void create_initial_data_sets (basic_block);
+static void free_av_set (basic_block);
static void invalidate_av_set (basic_block);
static void extend_insn_data (void);
static void sel_init_new_insn (insn_t, int);
@@ -258,10 +259,10 @@ init_fence_for_scheduling (fence_t f)
/* Add new fence consisting of INSN and STATE to the list pointed to by LP. */
static void
-flist_add (flist_t *lp, insn_t insn, state_t state, deps_t dc, void *tc,
- insn_t last_scheduled_insn, VEC(rtx,gc) *executing_insns,
- int *ready_ticks, int ready_ticks_size, insn_t sched_next,
- int cycle, int cycle_issued_insns,
+flist_add (flist_t *lp, insn_t insn, state_t state, deps_t dc, void *tc,
+ insn_t last_scheduled_insn, VEC(rtx,gc) *executing_insns,
+ int *ready_ticks, int ready_ticks_size, insn_t sched_next,
+ int cycle, int cycle_issued_insns, int issue_more,
bool starts_cycle_p, bool after_stall_p)
{
fence_t f;
@@ -286,6 +287,7 @@ flist_add (flist_t *lp, insn_t insn, state_t state, deps_t dc, void *tc,
FENCE_TC (f) = tc;
FENCE_LAST_SCHEDULED_INSN (f) = last_scheduled_insn;
+ FENCE_ISSUE_MORE (f) = issue_more;
FENCE_EXECUTING_INSNS (f) = executing_insns;
FENCE_READY_TICKS (f) = ready_ticks;
FENCE_READY_TICKS_SIZE (f) = ready_ticks_size;
@@ -316,7 +318,7 @@ void
def_list_add (def_list_t *dl, insn_t original_insn, bool crosses_call)
{
def_t d;
-
+
_list_add (dl);
d = DEF_LIST_DEF (*dl);
@@ -327,11 +329,11 @@ def_list_add (def_list_t *dl, insn_t original_insn, bool crosses_call)
/* Functions to work with target contexts. */
-/* Bulk target context. It is convenient for debugging purposes to ensure
+/* Bulk target context. It is convenient for debugging purposes to ensure
that there are no uninitialized (null) target contexts. */
static tc_t bulk_tc = (tc_t) 1;
-/* Target hooks wrappers. In the future we can provide some default
+/* Target hooks wrappers. In the future we can provide some default
implementations for them. */
/* Allocate a store for the target context. */
@@ -423,8 +425,8 @@ reset_target_context (tc_t tc, bool clean_p)
init_target_context (tc, clean_p);
}
-/* Functions to work with dependence contexts.
- Dc (aka deps context, aka deps_t, aka struct deps *) is short for dependence
+/* Functions to work with dependence contexts.
+ Dc (aka deps context, aka deps_t, aka struct deps_desc *) is short for dependence
context. It accumulates information about processed insns to decide if
current insn is dependent on the processed ones. */
@@ -432,7 +434,7 @@ reset_target_context (tc_t tc, bool clean_p)
static void
copy_deps_context (deps_t to, deps_t from)
{
- init_deps (to);
+ init_deps (to, false);
deps_join (to, from);
}
@@ -440,7 +442,7 @@ copy_deps_context (deps_t to, deps_t from)
static deps_t
alloc_deps_context (void)
{
- return XNEW (struct deps);
+ return XNEW (struct deps_desc);
}
/* Allocate and initialize dep context. */
@@ -449,7 +451,7 @@ create_deps_context (void)
{
deps_t dc = alloc_deps_context ();
- init_deps (dc);
+ init_deps (dc, false);
return dc;
}
@@ -483,10 +485,10 @@ static void
reset_deps_context (deps_t dc)
{
clear_deps_context (dc);
- init_deps (dc);
+ init_deps (dc, false);
}
-/* This structure describes the dependence analysis hooks for advancing
+/* This structure describes the dependence analysis hooks for advancing
dependence context. */
static struct sched_deps_info_def advance_deps_context_sched_deps_info =
{
@@ -597,11 +599,11 @@ init_fences (insn_t old_fence)
succ_iterator si;
bool first = true;
int ready_ticks_size = get_max_uid () + 1;
-
- FOR_EACH_SUCC_1 (succ, si, old_fence,
+
+ FOR_EACH_SUCC_1 (succ, si, old_fence,
SUCCS_NORMAL | SUCCS_SKIP_TO_LOOP_EXITS)
{
-
+
if (first)
first = false;
else
@@ -611,53 +613,54 @@ init_fences (insn_t old_fence)
state_create (),
create_deps_context () /* dc */,
create_target_context (true) /* tc */,
- NULL_RTX /* last_scheduled_insn */,
+ NULL_RTX /* last_scheduled_insn */,
NULL, /* executing_insns */
XCNEWVEC (int, ready_ticks_size), /* ready_ticks */
ready_ticks_size,
NULL_RTX /* sched_next */,
- 1 /* cycle */, 0 /* cycle_issued_insns */,
- 1 /* starts_cycle_p */, 0 /* after_stall_p */);
+ 1 /* cycle */, 0 /* cycle_issued_insns */,
+ issue_rate, /* issue_more */
+ 1 /* starts_cycle_p */, 0 /* after_stall_p */);
}
}
/* Merges two fences (filling fields of fence F with resulting values) by
following rules: 1) state, target context and last scheduled insn are
- propagated from fallthrough edge if it is available;
+ propagated from fallthrough edge if it is available;
2) deps context and cycle is propagated from more probable edge;
- 3) all other fields are set to corresponding constant values.
+ 3) all other fields are set to corresponding constant values.
- INSN, STATE, DC, TC, LAST_SCHEDULED_INSN, EXECUTING_INSNS,
- READY_TICKS, READY_TICKS_SIZE, SCHED_NEXT, CYCLE and AFTER_STALL_P
- are the corresponding fields of the second fence. */
+ INSN, STATE, DC, TC, LAST_SCHEDULED_INSN, EXECUTING_INSNS,
+ READY_TICKS, READY_TICKS_SIZE, SCHED_NEXT, CYCLE, ISSUE_MORE
+ and AFTER_STALL_P are the corresponding fields of the second fence. */
static void
merge_fences (fence_t f, insn_t insn,
- state_t state, deps_t dc, void *tc,
+ state_t state, deps_t dc, void *tc,
rtx last_scheduled_insn, VEC(rtx, gc) *executing_insns,
int *ready_ticks, int ready_ticks_size,
- rtx sched_next, int cycle, bool after_stall_p)
+ rtx sched_next, int cycle, int issue_more, bool after_stall_p)
{
insn_t last_scheduled_insn_old = FENCE_LAST_SCHEDULED_INSN (f);
gcc_assert (sel_bb_head_p (FENCE_INSN (f))
&& !sched_next && !FENCE_SCHED_NEXT (f));
- /* Check if we can decide which path fences came.
+ /* Check if we can decide which path fences came.
If we can't (or don't want to) - reset all. */
if (last_scheduled_insn == NULL
|| last_scheduled_insn_old == NULL
- /* This is a case when INSN is reachable on several paths from
- one insn (this can happen when pipelining of outer loops is on and
- there are two edges: one going around of inner loop and the other -
+ /* This is a case when INSN is reachable on several paths from
+ one insn (this can happen when pipelining of outer loops is on and
+ there are two edges: one going around of inner loop and the other -
right through it; in such case just reset everything). */
|| last_scheduled_insn == last_scheduled_insn_old)
{
state_reset (FENCE_STATE (f));
state_free (state);
-
+
reset_deps_context (FENCE_DC (f));
delete_deps_context (dc);
-
+
reset_target_context (FENCE_TC (f), true);
delete_target_context (tc);
@@ -665,10 +668,11 @@ merge_fences (fence_t f, insn_t insn,
FENCE_CYCLE (f) = cycle;
FENCE_LAST_SCHEDULED_INSN (f) = NULL;
+ FENCE_ISSUE_MORE (f) = issue_rate;
VEC_free (rtx, gc, executing_insns);
free (ready_ticks);
if (FENCE_EXECUTING_INSNS (f))
- VEC_block_remove (rtx, FENCE_EXECUTING_INSNS (f), 0,
+ VEC_block_remove (rtx, FENCE_EXECUTING_INSNS (f), 0,
VEC_length (rtx, FENCE_EXECUTING_INSNS (f)));
if (FENCE_READY_TICKS (f))
memset (FENCE_READY_TICKS (f), 0, FENCE_READY_TICKS_SIZE (f));
@@ -679,7 +683,7 @@ merge_fences (fence_t f, insn_t insn,
edge candidate;
succ_iterator si;
insn_t succ;
-
+
/* Find fallthrough edge. */
gcc_assert (BLOCK_FOR_INSN (insn)->prev_bb);
candidate = find_fallthru_edge (BLOCK_FOR_INSN (insn)->prev_bb);
@@ -691,16 +695,17 @@ merge_fences (fence_t f, insn_t insn,
/* No fallthrough edge leading to basic block of INSN. */
state_reset (FENCE_STATE (f));
state_free (state);
-
+
reset_target_context (FENCE_TC (f), true);
delete_target_context (tc);
-
+
FENCE_LAST_SCHEDULED_INSN (f) = NULL;
+ FENCE_ISSUE_MORE (f) = issue_rate;
}
else
if (candidate->src == BLOCK_FOR_INSN (last_scheduled_insn))
{
- /* Would be weird if same insn is successor of several fallthrough
+ /* Would be weird if same insn is successor of several fallthrough
edges. */
gcc_assert (BLOCK_FOR_INSN (insn)->prev_bb
!= BLOCK_FOR_INSN (last_scheduled_insn_old));
@@ -712,6 +717,7 @@ merge_fences (fence_t f, insn_t insn,
FENCE_TC (f) = tc;
FENCE_LAST_SCHEDULED_INSN (f) = last_scheduled_insn;
+ FENCE_ISSUE_MORE (f) = issue_more;
}
else
{
@@ -753,10 +759,10 @@ merge_fences (fence_t f, insn_t insn,
delete_deps_context (dc);
VEC_free (rtx, gc, executing_insns);
free (ready_ticks);
-
+
FENCE_CYCLE (f) = MAX (FENCE_CYCLE (f), cycle);
if (FENCE_EXECUTING_INSNS (f))
- VEC_block_remove (rtx, FENCE_EXECUTING_INSNS (f), 0,
+ VEC_block_remove (rtx, FENCE_EXECUTING_INSNS (f), 0,
VEC_length (rtx, FENCE_EXECUTING_INSNS (f)));
if (FENCE_READY_TICKS (f))
memset (FENCE_READY_TICKS (f), 0, FENCE_READY_TICKS_SIZE (f));
@@ -791,32 +797,33 @@ merge_fences (fence_t f, insn_t insn,
FENCE_SCHED_NEXT (f) = NULL;
}
-/* Add a new fence to NEW_FENCES list, initializing it from all
+/* Add a new fence to NEW_FENCES list, initializing it from all
other parameters. */
static void
add_to_fences (flist_tail_t new_fences, insn_t insn,
- state_t state, deps_t dc, void *tc, rtx last_scheduled_insn,
- VEC(rtx, gc) *executing_insns, int *ready_ticks,
- int ready_ticks_size, rtx sched_next, int cycle,
- int cycle_issued_insns, bool starts_cycle_p, bool after_stall_p)
+ state_t state, deps_t dc, void *tc, rtx last_scheduled_insn,
+ VEC(rtx, gc) *executing_insns, int *ready_ticks,
+ int ready_ticks_size, rtx sched_next, int cycle,
+ int cycle_issued_insns, int issue_rate,
+ bool starts_cycle_p, bool after_stall_p)
{
fence_t f = flist_lookup (FLIST_TAIL_HEAD (new_fences), insn);
if (! f)
{
flist_add (FLIST_TAIL_TAILP (new_fences), insn, state, dc, tc,
- last_scheduled_insn, executing_insns, ready_ticks,
+ last_scheduled_insn, executing_insns, ready_ticks,
ready_ticks_size, sched_next, cycle, cycle_issued_insns,
- starts_cycle_p, after_stall_p);
+ issue_rate, starts_cycle_p, after_stall_p);
FLIST_TAIL_TAILP (new_fences)
= &FLIST_NEXT (*FLIST_TAIL_TAILP (new_fences));
}
else
{
- merge_fences (f, insn, state, dc, tc, last_scheduled_insn,
- executing_insns, ready_ticks, ready_ticks_size,
- sched_next, cycle, after_stall_p);
+ merge_fences (f, insn, state, dc, tc, last_scheduled_insn,
+ executing_insns, ready_ticks, ready_ticks_size,
+ sched_next, cycle, issue_rate, after_stall_p);
}
}
@@ -828,14 +835,14 @@ move_fence_to_fences (flist_t old_fences, flist_tail_t new_fences)
flist_t *tailp = FLIST_TAIL_TAILP (new_fences);
old = FLIST_FENCE (old_fences);
- f = flist_lookup (FLIST_TAIL_HEAD (new_fences),
+ f = flist_lookup (FLIST_TAIL_HEAD (new_fences),
FENCE_INSN (FLIST_FENCE (old_fences)));
if (f)
{
merge_fences (f, old->insn, old->state, old->dc, old->tc,
old->last_scheduled_insn, old->executing_insns,
old->ready_ticks, old->ready_ticks_size,
- old->sched_next, old->cycle,
+ old->sched_next, old->cycle, old->issue_more,
old->after_stall_p);
}
else
@@ -848,43 +855,44 @@ move_fence_to_fences (flist_t old_fences, flist_tail_t new_fences)
FENCE_INSN (old) = NULL;
}
-/* Add a new fence to NEW_FENCES list and initialize most of its data
+/* Add a new fence to NEW_FENCES list and initialize most of its data
as a clean one. */
void
add_clean_fence_to_fences (flist_tail_t new_fences, insn_t succ, fence_t fence)
{
int ready_ticks_size = get_max_uid () + 1;
-
+
add_to_fences (new_fences,
succ, state_create (), create_deps_context (),
create_target_context (true),
- NULL_RTX, NULL,
+ NULL_RTX, NULL,
XCNEWVEC (int, ready_ticks_size), ready_ticks_size,
NULL_RTX, FENCE_CYCLE (fence) + 1,
- 0, 1, FENCE_AFTER_STALL_P (fence));
+ 0, issue_rate, 1, FENCE_AFTER_STALL_P (fence));
}
-/* Add a new fence to NEW_FENCES list and initialize all of its data
+/* Add a new fence to NEW_FENCES list and initialize all of its data
from FENCE and SUCC. */
void
add_dirty_fence_to_fences (flist_tail_t new_fences, insn_t succ, fence_t fence)
{
- int * new_ready_ticks
+ int * new_ready_ticks
= XNEWVEC (int, FENCE_READY_TICKS_SIZE (fence));
-
+
memcpy (new_ready_ticks, FENCE_READY_TICKS (fence),
FENCE_READY_TICKS_SIZE (fence) * sizeof (int));
add_to_fences (new_fences,
succ, state_create_copy (FENCE_STATE (fence)),
create_copy_of_deps_context (FENCE_DC (fence)),
create_copy_of_target_context (FENCE_TC (fence)),
- FENCE_LAST_SCHEDULED_INSN (fence),
+ FENCE_LAST_SCHEDULED_INSN (fence),
VEC_copy (rtx, gc, FENCE_EXECUTING_INSNS (fence)),
new_ready_ticks,
FENCE_READY_TICKS_SIZE (fence),
FENCE_SCHED_NEXT (fence),
FENCE_CYCLE (fence),
FENCE_ISSUED_INSNS (fence),
+ FENCE_ISSUE_MORE (fence),
FENCE_STARTS_CYCLE_P (fence),
FENCE_AFTER_STALL_P (fence));
}
@@ -958,19 +966,19 @@ free_regset_pool (void)
regset *v = regset_pool.v;
int i = 0;
int n = regset_pool.n;
-
+
regset *vv = regset_pool.vv;
int ii = 0;
int nn = regset_pool.nn;
-
+
int diff = 0;
-
+
gcc_assert (n <= nn);
-
+
/* Sort both vectors so it will be possible to compare them. */
qsort (v, n, sizeof (*v), cmp_v_in_regset_pool);
qsort (vv, nn, sizeof (*vv), cmp_v_in_regset_pool);
-
+
while (ii < nn)
{
if (v[i] == vv[ii])
@@ -978,17 +986,17 @@ free_regset_pool (void)
else
/* VV[II] was lost. */
diff++;
-
+
ii++;
}
-
+
gcc_assert (diff == regset_pool.diff);
}
#endif
-
+
/* If not true - we have a memory leak. */
gcc_assert (regset_pool.diff == 0);
-
+
while (regset_pool.n)
{
--regset_pool.n;
@@ -998,7 +1006,7 @@ free_regset_pool (void)
free (regset_pool.v);
regset_pool.v = NULL;
regset_pool.s = 0;
-
+
free (regset_pool.vv);
regset_pool.vv = NULL;
regset_pool.nn = 0;
@@ -1008,8 +1016,8 @@ free_regset_pool (void)
}
-/* Functions to work with nop pools. NOP insns are used as temporary
- placeholders of the insns being scheduled to allow correct update of
+/* Functions to work with nop pools. NOP insns are used as temporary
+ placeholders of the insns being scheduled to allow correct update of
the data sets. When update is finished, NOPs are deleted. */
/* A vinsn that is used to represent a nop. This vinsn is shared among all
@@ -1044,13 +1052,13 @@ get_nop_from_pool (insn_t insn)
/* Remove NOP from the instruction stream and return it to the pool. */
void
-return_nop_to_pool (insn_t nop)
+return_nop_to_pool (insn_t nop, bool full_tidying)
{
gcc_assert (INSN_IN_STREAM_P (nop));
- sel_remove_insn (nop, false, true);
+ sel_remove_insn (nop, false, full_tidying);
if (nop_pool.n == nop_pool.s)
- nop_pool.v = XRESIZEVEC (rtx, nop_pool.v,
+ nop_pool.v = XRESIZEVEC (rtx, nop_pool.v,
(nop_pool.s = 2 * nop_pool.s + 1));
nop_pool.v[nop_pool.n++] = nop;
}
@@ -1066,7 +1074,7 @@ free_nop_pool (void)
}
-/* Skip unspec to support ia64 speculation. Called from rtx_equal_p_cb.
+/* Skip unspec to support ia64 speculation. Called from rtx_equal_p_cb.
The callback is given two rtxes XX and YY and writes the new rtxes
to NX and NY in case some needs to be skipped. */
static int
@@ -1074,7 +1082,7 @@ skip_unspecs_callback (const_rtx *xx, const_rtx *yy, rtx *nx, rtx* ny)
{
const_rtx x = *xx;
const_rtx y = *yy;
-
+
if (GET_CODE (x) == UNSPEC
&& (targetm.sched.skip_rtx_p == NULL
|| targetm.sched.skip_rtx_p (x)))
@@ -1083,7 +1091,7 @@ skip_unspecs_callback (const_rtx *xx, const_rtx *yy, rtx *nx, rtx* ny)
*ny = CONST_CAST_RTX (y);
return 1;
}
-
+
if (GET_CODE (y) == UNSPEC
&& (targetm.sched.skip_rtx_p == NULL
|| targetm.sched.skip_rtx_p (y)))
@@ -1092,26 +1100,26 @@ skip_unspecs_callback (const_rtx *xx, const_rtx *yy, rtx *nx, rtx* ny)
*ny = XVECEXP (y, 0, 0);
return 1;
}
-
+
return 0;
}
-/* Callback, called from hash_rtx_cb. Helps to hash UNSPEC rtx X in a correct way
+/* Callback, called from hash_rtx_cb. Helps to hash UNSPEC rtx X in a correct way
to support ia64 speculation. When changes are needed, new rtx X and new mode
NMODE are written, and the callback returns true. */
static int
hash_with_unspec_callback (const_rtx x, enum machine_mode mode ATTRIBUTE_UNUSED,
rtx *nx, enum machine_mode* nmode)
{
- if (GET_CODE (x) == UNSPEC
+ if (GET_CODE (x) == UNSPEC
&& targetm.sched.skip_rtx_p
&& targetm.sched.skip_rtx_p (x))
{
*nx = XVECEXP (x, 0 ,0);
- *nmode = 0;
+ *nmode = VOIDmode;
return 1;
}
-
+
return 0;
}
@@ -1122,9 +1130,9 @@ lhs_and_rhs_separable_p (rtx lhs, rtx rhs)
if (lhs == NULL || rhs == NULL)
return false;
- /* Do not schedule CONST, CONST_INT and CONST_DOUBLE etc as rhs: no point
- to use reg, if const can be used. Moreover, scheduling const as rhs may
- lead to mode mismatch cause consts don't have modes but they could be
+ /* Do not schedule CONST, CONST_INT and CONST_DOUBLE etc as rhs: no point
+ to use reg, if const can be used. Moreover, scheduling const as rhs may
+ lead to mode mismatch cause consts don't have modes but they could be
merged from branches where the same const used in different modes. */
if (CONSTANT_P (rhs))
return false;
@@ -1137,7 +1145,7 @@ lhs_and_rhs_separable_p (rtx lhs, rtx rhs)
if (REG_P (rhs))
return false;
- /* See comment at find_used_regs_1 (*1) for explanation of this
+ /* See comment at find_used_regs_1 (*1) for explanation of this
restriction. */
/* FIXME: remove this later. */
if (MEM_P (lhs))
@@ -1151,8 +1159,8 @@ lhs_and_rhs_separable_p (rtx lhs, rtx rhs)
return true;
}
-/* Initialize vinsn VI for INSN. Only for use from vinsn_create (). When
- FORCE_UNIQUE_P is true, the resulting vinsn will not be clonable. This is
+/* Initialize vinsn VI for INSN. Only for use from vinsn_create (). When
+ FORCE_UNIQUE_P is true, the resulting vinsn will not be clonable. This is
used e.g. for insns from recovery blocks. */
static void
vinsn_init (vinsn_t vi, insn_t insn, bool force_unique_p)
@@ -1163,12 +1171,12 @@ vinsn_init (vinsn_t vi, insn_t insn, bool force_unique_p)
VINSN_INSN_RTX (vi) = insn;
VINSN_COUNT (vi) = 0;
vi->cost = -1;
-
+
if (DF_INSN_UID_SAFE_GET (INSN_UID (insn)) != NULL)
init_id_from_df (VINSN_ID (vi), insn, force_unique_p);
else
deps_init_id (VINSN_ID (vi), insn, force_unique_p);
-
+
/* Hash vinsn depending on whether it is separable or not. */
hrcf = targetm.sched.skip_rtx_p ? hash_with_unspec_callback : NULL;
if (VINSN_SEPARABLE_P (vi))
@@ -1187,7 +1195,7 @@ vinsn_init (vinsn_t vi, insn_t insn, bool force_unique_p)
NULL, NULL, false, hrcf);
VINSN_HASH_RTX (vi) = VINSN_HASH (vi);
}
-
+
insn_class = haifa_classify_insn (insn);
if (insn_class >= 2
&& (!targetm.sched.get_insn_spec_ds
@@ -1208,7 +1216,7 @@ vinsn_attach (vinsn_t vi)
VINSN_COUNT (vi)++;
}
-/* Create and init VI from the INSN. Use UNIQUE_P for determining the correct
+/* Create and init VI from the INSN. Use UNIQUE_P for determining the correct
VINSN_TYPE (VI). */
static vinsn_t
vinsn_create (insn_t insn, bool force_unique_p)
@@ -1221,13 +1229,13 @@ vinsn_create (insn_t insn, bool force_unique_p)
/* Return a copy of VI. When REATTACH_P is true, detach VI and attach
the copy. */
-vinsn_t
+vinsn_t
vinsn_copy (vinsn_t vi, bool reattach_p)
{
rtx copy;
bool unique = VINSN_UNIQUE_P (vi);
vinsn_t new_vi;
-
+
copy = create_copy_of_insn_rtx (VINSN_INSN_RTX (vi));
new_vi = create_vinsn_from_insn_rtx (copy, unique);
if (reattach_p)
@@ -1252,7 +1260,7 @@ vinsn_delete (vinsn_t vi)
free (vi);
}
-/* Indicate that VI is no longer a part of some rtx object.
+/* Indicate that VI is no longer a part of some rtx object.
Remove VI if it is no longer needed. */
void
vinsn_detach (vinsn_t vi)
@@ -1359,21 +1367,21 @@ sel_gen_recovery_insn_from_rtx_after (rtx pattern, expr_t expr, int seqno,
}
/* Emit new insn after AFTER based on EXPR and SEQNO. If VINSN is not NULL,
- take it as a new vinsn instead of EXPR's vinsn.
- We simplify insns later, after scheduling region in
+ take it as a new vinsn instead of EXPR's vinsn.
+ We simplify insns later, after scheduling region in
simplify_changed_insns. */
insn_t
-sel_gen_insn_from_expr_after (expr_t expr, vinsn_t vinsn, int seqno,
+sel_gen_insn_from_expr_after (expr_t expr, vinsn_t vinsn, int seqno,
insn_t after)
{
expr_t emit_expr;
insn_t insn;
int flags;
-
- emit_expr = set_insn_init (expr, vinsn ? vinsn : EXPR_VINSN (expr),
+
+ emit_expr = set_insn_init (expr, vinsn ? vinsn : EXPR_VINSN (expr),
seqno);
insn = EXPR_INSN_RTX (emit_expr);
- add_insn_after (insn, after, BLOCK_FOR_INSN (insn));
+ add_insn_after (insn, after, BLOCK_FOR_INSN (insn));
flags = INSN_INIT_TODO_SSID;
if (INSN_LUID (insn) == 0)
@@ -1403,7 +1411,7 @@ sel_move_insn (expr_t expr, int seqno, insn_t after)
df_insn_change_bb (insn, bb);
if (BB_END (bb) == after)
BB_END (bb) = insn;
-
+
prepare_insn_expr (insn, seqno);
return insn;
}
@@ -1411,14 +1419,14 @@ sel_move_insn (expr_t expr, int seqno, insn_t after)
/* Functions to work with right-hand sides. */
-/* Search for a hash value determined by UID/NEW_VINSN in a sorted vector
+/* Search for a hash value determined by UID/NEW_VINSN in a sorted vector
VECT and return true when found. Use NEW_VINSN for comparison only when
- COMPARE_VINSNS is true. Write to INDP the index on which
- the search has stopped, such that inserting the new element at INDP will
+ COMPARE_VINSNS is true. Write to INDP the index on which
+ the search has stopped, such that inserting the new element at INDP will
retain VECT's sort order. */
static bool
-find_in_history_vect_1 (VEC(expr_history_def, heap) *vect,
- unsigned uid, vinsn_t new_vinsn,
+find_in_history_vect_1 (VEC(expr_history_def, heap) *vect,
+ unsigned uid, vinsn_t new_vinsn,
bool compare_vinsns, int *indp)
{
expr_history_def *arr;
@@ -1436,14 +1444,14 @@ find_in_history_vect_1 (VEC(expr_history_def, heap) *vect,
while (i <= j)
{
unsigned auid = arr[i].uid;
- vinsn_t avinsn = arr[i].new_expr_vinsn;
+ vinsn_t avinsn = arr[i].new_expr_vinsn;
if (auid == uid
- /* When undoing transformation on a bookkeeping copy, the new vinsn
- may not be exactly equal to the one that is saved in the vector.
+ /* When undoing transformation on a bookkeeping copy, the new vinsn
+ may not be exactly equal to the one that is saved in the vector.
This is because the insn whose copy we're checking was possibly
substituted itself. */
- && (! compare_vinsns
+ && (! compare_vinsns
|| vinsn_equal_p (avinsn, new_vinsn)))
{
*indp = i;
@@ -1458,16 +1466,16 @@ find_in_history_vect_1 (VEC(expr_history_def, heap) *vect,
return false;
}
-/* Search for a uid of INSN and NEW_VINSN in a sorted vector VECT. Return
- the position found or -1, if no such value is in vector.
+/* Search for a uid of INSN and NEW_VINSN in a sorted vector VECT. Return
+ the position found or -1, if no such value is in vector.
Search also for UIDs of insn's originators, if ORIGINATORS_P is true. */
int
-find_in_history_vect (VEC(expr_history_def, heap) *vect, rtx insn,
+find_in_history_vect (VEC(expr_history_def, heap) *vect, rtx insn,
vinsn_t new_vinsn, bool originators_p)
{
int ind;
- if (find_in_history_vect_1 (vect, INSN_UID (insn), new_vinsn,
+ if (find_in_history_vect_1 (vect, INSN_UID (insn), new_vinsn,
false, &ind))
return ind;
@@ -1480,18 +1488,18 @@ find_in_history_vect (VEC(expr_history_def, heap) *vect, rtx insn,
if (find_in_history_vect_1 (vect, uid, new_vinsn, false, &ind))
return ind;
}
-
+
return -1;
}
-/* Insert new element in a sorted history vector pointed to by PVECT,
- if it is not there already. The element is searched using
+/* Insert new element in a sorted history vector pointed to by PVECT,
+ if it is not there already. The element is searched using
UID/NEW_EXPR_VINSN pair. TYPE, OLD_EXPR_VINSN and SPEC_DS save
the history of a transformation. */
void
insert_in_history_vect (VEC (expr_history_def, heap) **pvect,
unsigned uid, enum local_trans_type type,
- vinsn_t old_expr_vinsn, vinsn_t new_expr_vinsn,
+ vinsn_t old_expr_vinsn, vinsn_t new_expr_vinsn,
ds_t spec_ds)
{
VEC(expr_history_def, heap) *vect = *pvect;
@@ -1505,25 +1513,17 @@ insert_in_history_vect (VEC (expr_history_def, heap) **pvect,
{
expr_history_def *phist = VEC_index (expr_history_def, vect, ind);
- /* When merging, either old vinsns are the *same* or, if not, both
- old and new vinsns are different pointers. In the latter case,
- though, new vinsns should be equal. */
- gcc_assert (phist->old_expr_vinsn == old_expr_vinsn
- || (phist->new_expr_vinsn != new_expr_vinsn
- && (vinsn_equal_p
- (phist->old_expr_vinsn, old_expr_vinsn))));
-
- /* It is possible that speculation types of expressions that were
+ /* It is possible that speculation types of expressions that were
propagated through different paths will be different here. In this
case, merge the status to get the correct check later. */
if (phist->spec_ds != spec_ds)
phist->spec_ds = ds_max_merge (phist->spec_ds, spec_ds);
return;
}
-
+
temp.uid = uid;
temp.old_expr_vinsn = old_expr_vinsn;
- temp.new_expr_vinsn = new_expr_vinsn;
+ temp.new_expr_vinsn = new_expr_vinsn;
temp.spec_ds = spec_ds;
temp.type = type;
@@ -1542,15 +1542,15 @@ free_history_vect (VEC (expr_history_def, heap) **pvect)
if (! *pvect)
return;
-
- for (i = 0;
+
+ for (i = 0;
VEC_iterate (expr_history_def, *pvect, i, phist);
i++)
{
vinsn_detach (phist->old_expr_vinsn);
vinsn_detach (phist->new_expr_vinsn);
}
-
+
VEC_free (expr_history_def, heap, *pvect);
*pvect = NULL;
}
@@ -1572,7 +1572,7 @@ vinsn_equal_p (vinsn_t x, vinsn_t y)
return false;
repcf = targetm.sched.skip_rtx_p ? skip_unspecs_callback : NULL;
- if (VINSN_SEPARABLE_P (x))
+ if (VINSN_SEPARABLE_P (x))
{
/* Compare RHSes of VINSNs. */
gcc_assert (VINSN_RHS (x));
@@ -1592,7 +1592,7 @@ static void
init_expr (expr_t expr, vinsn_t vi, int spec, int use, int priority,
int sched_times, int orig_bb_index, ds_t spec_done_ds,
ds_t spec_to_check_ds, int orig_sched_cycle,
- VEC(expr_history_def, heap) *history, bool target_available,
+ VEC(expr_history_def, heap) *history, bool target_available,
bool was_substituted, bool was_renamed, bool needs_spec_check_p,
bool cant_move)
{
@@ -1633,7 +1633,7 @@ copy_expr (expr_t to, expr_t from)
expr_history_def *phist;
temp = VEC_copy (expr_history_def, heap, EXPR_HISTORY_OF_CHANGES (from));
- for (i = 0;
+ for (i = 0;
VEC_iterate (expr_history_def, temp, i, phist);
i++)
{
@@ -1642,17 +1642,17 @@ copy_expr (expr_t to, expr_t from)
}
}
- init_expr (to, EXPR_VINSN (from), EXPR_SPEC (from),
+ init_expr (to, EXPR_VINSN (from), EXPR_SPEC (from),
EXPR_USEFULNESS (from), EXPR_PRIORITY (from),
EXPR_SCHED_TIMES (from), EXPR_ORIG_BB_INDEX (from),
- EXPR_SPEC_DONE_DS (from), EXPR_SPEC_TO_CHECK_DS (from),
+ EXPR_SPEC_DONE_DS (from), EXPR_SPEC_TO_CHECK_DS (from),
EXPR_ORIG_SCHED_CYCLE (from), temp,
- EXPR_TARGET_AVAILABLE (from), EXPR_WAS_SUBSTITUTED (from),
+ EXPR_TARGET_AVAILABLE (from), EXPR_WAS_SUBSTITUTED (from),
EXPR_WAS_RENAMED (from), EXPR_NEEDS_SPEC_CHECK_P (from),
EXPR_CANT_MOVE (from));
}
-/* Same, but the final expr will not ever be in av sets, so don't copy
+/* Same, but the final expr will not ever be in av sets, so don't copy
"uninteresting" data such as bitmap cache. */
void
copy_expr_onside (expr_t to, expr_t from)
@@ -1672,7 +1672,7 @@ prepare_insn_expr (insn_t insn, int seqno)
{
expr_t expr = INSN_EXPR (insn);
ds_t ds;
-
+
INSN_SEQNO (insn) = seqno;
EXPR_ORIG_BB_INDEX (expr) = BLOCK_NUM (insn);
EXPR_SPEC (expr) = 0;
@@ -1695,12 +1695,12 @@ prepare_insn_expr (insn_t insn, int seqno)
}
/* Update target_available bits when merging exprs TO and FROM. SPLIT_POINT
- is non-null when expressions are merged from different successors at
+ is non-null when expressions are merged from different successors at
a split point. */
static void
update_target_availability (expr_t to, expr_t from, insn_t split_point)
{
- if (EXPR_TARGET_AVAILABLE (to) < 0
+ if (EXPR_TARGET_AVAILABLE (to) < 0
|| EXPR_TARGET_AVAILABLE (from) < 0)
EXPR_TARGET_AVAILABLE (to) = -1;
else
@@ -1714,9 +1714,9 @@ update_target_availability (expr_t to, expr_t from, insn_t split_point)
toind = EXPR_ORIG_BB_INDEX (to);
fromind = EXPR_ORIG_BB_INDEX (from);
-
+
if (toind && toind == fromind)
- /* Do nothing -- everything is done in
+ /* Do nothing -- everything is done in
merge_with_other_exprs. */
;
else
@@ -1728,7 +1728,7 @@ update_target_availability (expr_t to, expr_t from, insn_t split_point)
}
/* Update speculation bits when merging exprs TO and FROM. SPLIT_POINT
- is non-null when expressions are merged from different successors at
+ is non-null when expressions are merged from different successors at
a split point. */
static void
update_speculative_bits (expr_t to, expr_t from, insn_t split_point)
@@ -1737,30 +1737,30 @@ update_speculative_bits (expr_t to, expr_t from, insn_t split_point)
old_to_ds = EXPR_SPEC_DONE_DS (to);
old_from_ds = EXPR_SPEC_DONE_DS (from);
-
+
EXPR_SPEC_DONE_DS (to) = ds_max_merge (old_to_ds, old_from_ds);
EXPR_SPEC_TO_CHECK_DS (to) |= EXPR_SPEC_TO_CHECK_DS (from);
EXPR_NEEDS_SPEC_CHECK_P (to) |= EXPR_NEEDS_SPEC_CHECK_P (from);
/* When merging e.g. control & data speculative exprs, or a control
- speculative with a control&data speculative one, we really have
+ speculative with a control&data speculative one, we really have
to change vinsn too. Also, when speculative status is changed,
we also need to record this as a transformation in expr's history. */
if ((old_to_ds & SPECULATIVE) || (old_from_ds & SPECULATIVE))
{
old_to_ds = ds_get_speculation_types (old_to_ds);
old_from_ds = ds_get_speculation_types (old_from_ds);
-
+
if (old_to_ds != old_from_ds)
{
ds_t record_ds;
-
- /* When both expressions are speculative, we need to change
+
+ /* When both expressions are speculative, we need to change
the vinsn first. */
if ((old_to_ds & SPECULATIVE) && (old_from_ds & SPECULATIVE))
{
int res;
-
+
res = speculate_expr (to, EXPR_SPEC_DONE_DS (to));
gcc_assert (res >= 0);
}
@@ -1771,9 +1771,9 @@ update_speculative_bits (expr_t to, expr_t from, insn_t split_point)
record_ds = EXPR_SPEC_DONE_DS (to) & SPECULATIVE;
record_ds &= ~(old_to_ds & SPECULATIVE);
record_ds &= ~(old_from_ds & SPECULATIVE);
-
- insert_in_history_vect (&EXPR_HISTORY_OF_CHANGES (to),
- INSN_UID (split_point), TRANS_SPECULATION,
+
+ insert_in_history_vect (&EXPR_HISTORY_OF_CHANGES (to),
+ INSN_UID (split_point), TRANS_SPECULATION,
EXPR_VINSN (from), EXPR_VINSN (to),
record_ds);
}
@@ -1789,7 +1789,7 @@ merge_expr_data (expr_t to, expr_t from, insn_t split_point)
{
int i;
expr_history_def *phist;
-
+
/* For now, we just set the spec of resulting expr to be minimum of the specs
of merged exprs. */
if (EXPR_SPEC (to) > EXPR_SPEC (from))
@@ -1798,7 +1798,7 @@ merge_expr_data (expr_t to, expr_t from, insn_t split_point)
if (split_point)
EXPR_USEFULNESS (to) += EXPR_USEFULNESS (from);
else
- EXPR_USEFULNESS (to) = MAX (EXPR_USEFULNESS (to),
+ EXPR_USEFULNESS (to) = MAX (EXPR_USEFULNESS (to),
EXPR_USEFULNESS (from));
if (EXPR_PRIORITY (to) < EXPR_PRIORITY (from))
@@ -1810,17 +1810,17 @@ merge_expr_data (expr_t to, expr_t from, insn_t split_point)
if (EXPR_ORIG_BB_INDEX (to) != EXPR_ORIG_BB_INDEX (from))
EXPR_ORIG_BB_INDEX (to) = 0;
- EXPR_ORIG_SCHED_CYCLE (to) = MIN (EXPR_ORIG_SCHED_CYCLE (to),
+ EXPR_ORIG_SCHED_CYCLE (to) = MIN (EXPR_ORIG_SCHED_CYCLE (to),
EXPR_ORIG_SCHED_CYCLE (from));
/* We keep this vector sorted. */
- for (i = 0;
- VEC_iterate (expr_history_def, EXPR_HISTORY_OF_CHANGES (from),
+ for (i = 0;
+ VEC_iterate (expr_history_def, EXPR_HISTORY_OF_CHANGES (from),
i, phist);
i++)
- insert_in_history_vect (&EXPR_HISTORY_OF_CHANGES (to),
- phist->uid, phist->type,
- phist->old_expr_vinsn, phist->new_expr_vinsn,
+ insert_in_history_vect (&EXPR_HISTORY_OF_CHANGES (to),
+ phist->uid, phist->type,
+ phist->old_expr_vinsn, phist->new_expr_vinsn,
phist->spec_ds);
EXPR_WAS_SUBSTITUTED (to) |= EXPR_WAS_SUBSTITUTED (from);
@@ -1832,7 +1832,7 @@ merge_expr_data (expr_t to, expr_t from, insn_t split_point)
}
/* Merge bits of FROM expr to TO expr. Vinsns in the exprs should be equal
- in terms of vinsn_equal_p. SPLIT_POINT is non-null when expressions
+ in terms of vinsn_equal_p. SPLIT_POINT is non-null when expressions
are merged from different successors at a split point. */
void
merge_expr (expr_t to, expr_t from, insn_t split_point)
@@ -1857,7 +1857,7 @@ merge_expr (expr_t to, expr_t from, insn_t split_point)
void
clear_expr (expr_t expr)
{
-
+
vinsn_detach (EXPR_VINSN (expr));
EXPR_VINSN (expr) = NULL;
@@ -1873,17 +1873,17 @@ set_unavailable_target_for_expr (expr_t expr, regset lv_set)
if (REG_P (EXPR_LHS (expr))
&& bitmap_bit_p (lv_set, REGNO (EXPR_LHS (expr))))
{
- /* If it's an insn like r1 = use (r1, ...), and it exists in
- different forms in each of the av_sets being merged, we can't say
- whether original destination register is available or not.
- However, this still works if destination register is not used
+ /* If it's an insn like r1 = use (r1, ...), and it exists in
+ different forms in each of the av_sets being merged, we can't say
+ whether original destination register is available or not.
+ However, this still works if destination register is not used
in the original expression: if the branch at which LV_SET we're
looking here is not actually 'other branch' in sense that same
- expression is available through it (but it can't be determined
+ expression is available through it (but it can't be determined
at computation stage because of transformations on one of the
- branches), it still won't affect the availability.
- Liveness of a register somewhere on a code motion path means
- it's either read somewhere on a codemotion path, live on
+ branches), it still won't affect the availability.
+ Liveness of a register somewhere on a code motion path means
+ it's either read somewhere on a codemotion path, live on
'other' branch, live at the point immediately following
the original operation, or is read by the original operation.
The latter case is filtered out in the condition below.
@@ -1892,7 +1892,7 @@ set_unavailable_target_for_expr (expr_t expr, regset lv_set)
miss a unifying code motion along both branches using a renamed
register, but it won't affect a code correctness since upon
an actual code motion a bookkeeping code would be generated. */
- if (bitmap_bit_p (VINSN_REG_USES (EXPR_VINSN (expr)),
+ if (bitmap_bit_p (VINSN_REG_USES (EXPR_VINSN (expr)),
REGNO (EXPR_LHS (expr))))
EXPR_TARGET_AVAILABLE (expr) = -1;
else
@@ -1903,8 +1903,8 @@ set_unavailable_target_for_expr (expr_t expr, regset lv_set)
{
unsigned regno;
reg_set_iterator rsi;
-
- EXECUTE_IF_SET_IN_REG_SET (VINSN_REG_SETS (EXPR_VINSN (expr)),
+
+ EXECUTE_IF_SET_IN_REG_SET (VINSN_REG_SETS (EXPR_VINSN (expr)),
0, regno, rsi)
if (bitmap_bit_p (lv_set, regno))
{
@@ -1922,7 +1922,7 @@ set_unavailable_target_for_expr (expr_t expr, regset lv_set)
}
}
-/* Try to make EXPR speculative. Return 1 when EXPR's pattern
+/* Try to make EXPR speculative. Return 1 when EXPR's pattern
or dependence status have changed, 2 when also the target register
became unavailable, 0 if nothing had to be changed. */
int
@@ -1947,7 +1947,7 @@ speculate_expr (expr_t expr, ds_t ds)
case 0:
EXPR_SPEC_DONE_DS (expr) = ds;
return current_ds != ds ? 1 : 0;
-
+
case 1:
{
rtx spec_insn_rtx = create_insn_rtx_from_pattern (spec_pat, NULL_RTX);
@@ -1957,9 +1957,9 @@ speculate_expr (expr_t expr, ds_t ds)
EXPR_SPEC_DONE_DS (expr) = ds;
EXPR_NEEDS_SPEC_CHECK_P (expr) = true;
- /* Do not allow clobbering the address register of speculative
+ /* Do not allow clobbering the address register of speculative
insns. */
- if (bitmap_bit_p (VINSN_REG_USES (EXPR_VINSN (expr)),
+ if (bitmap_bit_p (VINSN_REG_USES (EXPR_VINSN (expr)),
expr_dest_regno (expr)))
{
EXPR_TARGET_AVAILABLE (expr) = false;
@@ -2000,7 +2000,7 @@ expr_dest_regno (expr_t expr)
return REGNO (dest);
}
-/* For a given LV_SET, mark all expressions in JOIN_SET, but not present in
+/* For a given LV_SET, mark all expressions in JOIN_SET, but not present in
AV_SET having unavailable target register. */
void
mark_unavailable_targets (av_set_t join_set, av_set_t av_set, regset lv_set)
@@ -2031,7 +2031,7 @@ void
av_set_add (av_set_t *setp, expr_t expr)
{
av_set_t elem;
-
+
gcc_assert (!INSN_NOP_P (EXPR_INSN_RTX (expr)));
elem = av_set_add_element (setp);
copy_expr (_AV_SET_EXPR (elem), expr);
@@ -2121,10 +2121,10 @@ merge_with_other_exprs (av_set_t *avp, av_set_iterator *ip, expr_t expr)
EXPR_USEFULNESS (expr2) = 0;
merge_expr (expr2, expr, NULL);
-
+
/* Fix usefulness as it should be now REG_BR_PROB_BASE. */
EXPR_USEFULNESS (expr2) = REG_BR_PROB_BASE;
-
+
av_set_iter_remove (ip);
return expr2;
}
@@ -2187,7 +2187,7 @@ av_set_union_and_clear (av_set_t *top, av_set_t *fromp, insn_t insn)
join_distinct_sets (i.lp, fromp);
}
-/* Same as above, but also update availability of target register in
+/* Same as above, but also update availability of target register in
TOP judging by TO_LV_SET and FROM_LV_SET. */
void
av_set_union_and_live (av_set_t *top, av_set_t *fromp, regset to_lv_set,
@@ -2204,16 +2204,16 @@ av_set_union_and_live (av_set_t *top, av_set_t *fromp, regset to_lv_set,
if (expr2)
{
- /* It may be that the expressions have different destination
+ /* It may be that the expressions have different destination
registers, in which case we need to check liveness here. */
if (EXPR_SEPARABLE_P (expr1))
{
- int regno1 = (REG_P (EXPR_LHS (expr1))
+ int regno1 = (REG_P (EXPR_LHS (expr1))
? (int) expr_dest_regno (expr1) : -1);
- int regno2 = (REG_P (EXPR_LHS (expr2))
+ int regno2 = (REG_P (EXPR_LHS (expr2))
? (int) expr_dest_regno (expr2) : -1);
-
- /* ??? We don't have a way to check restrictions for
+
+ /* ??? We don't have a way to check restrictions for
*other* register on the current path, we did it only
for the current target register. Give up. */
if (regno1 != regno2)
@@ -2227,7 +2227,7 @@ av_set_union_and_live (av_set_t *top, av_set_t *fromp, regset to_lv_set,
av_set_iter_remove (&i);
}
else
- /* EXPR1 is present in TOP, but not in FROMP. Check it on
+ /* EXPR1 is present in TOP, but not in FROMP. Check it on
FROM_LV_SET. */
set_unavailable_target_for_expr (expr1, from_lv_set);
}
@@ -2263,7 +2263,7 @@ av_set_leave_one_nonspec (av_set_t *setp)
av_set_iterator i;
bool has_one_nonspec = false;
- /* Keep all speculative exprs, and leave one non-speculative
+ /* Keep all speculative exprs, and leave one non-speculative
(the first one). */
FOR_EACH_EXPR_1 (expr, i, setp)
{
@@ -2304,7 +2304,7 @@ av_set_substract_cond_branches (av_set_t *avp)
av_set_iter_remove (&i);
}
-/* Multiplies usefulness attribute of each member of av-set *AVP by
+/* Multiplies usefulness attribute of each member of av-set *AVP by
value PROB / ALL_PROB. */
void
av_set_split_usefulness (av_set_t av, int prob, int all_prob)
@@ -2313,7 +2313,7 @@ av_set_split_usefulness (av_set_t av, int prob, int all_prob)
expr_t expr;
FOR_EACH_EXPR (expr, i, av)
- EXPR_USEFULNESS (expr) = (all_prob
+ EXPR_USEFULNESS (expr) = (all_prob
? (EXPR_USEFULNESS (expr) * prob) / all_prob
: 0);
}
@@ -2353,13 +2353,13 @@ static struct
} deps_init_id_data;
-/* Setup ID for INSN. FORCE_UNIQUE_P is true when INSN should not be
+/* Setup ID for INSN. FORCE_UNIQUE_P is true when INSN should not be
clonable. */
static void
setup_id_for_insn (idata_t id, insn_t insn, bool force_unique_p)
{
int type;
-
+
/* Determine whether INSN could be cloned and return appropriate vinsn type.
That clonable insns which can be separated into lhs and rhs have type SET.
Other clonable insns have type USE. */
@@ -2370,7 +2370,9 @@ setup_id_for_insn (idata_t id, insn_t insn, bool force_unique_p)
type = SET;
else if (type == JUMP_INSN && simplejump_p (insn))
type = PC;
-
+ else if (type == DEBUG_INSN)
+ type = !force_unique_p ? USE : INSN;
+
IDATA_TYPE (id) = type;
IDATA_REG_SETS (id) = get_clear_regset_from_pool ();
IDATA_REG_USES (id) = get_clear_regset_from_pool ();
@@ -2422,7 +2424,7 @@ deps_init_id_note_reg_set (int regno)
SET_REGNO_REG_SET (IDATA_REG_SETS (deps_init_id_data.id), regno);
#ifdef STACK_REGS
- /* Make instructions that set stack registers to be ineligible for
+ /* Make instructions that set stack registers to be ineligible for
renaming to avoid issues with find_used_regs. */
if (IN_RANGE (regno, FIRST_STACK_REG, LAST_STACK_REG))
deps_init_id_data.force_use_p = true;
@@ -2489,9 +2491,9 @@ deps_init_id_finish_insn (void)
if (lhs == NULL || rhs == NULL || !lhs_and_rhs_separable_p (lhs, rhs)
|| deps_init_id_data.force_use_p)
{
- /* This should be a USE, as we don't want to schedule its RHS
+ /* This should be a USE, as we don't want to schedule its RHS
separately. However, we still want to have them recorded
- for the purposes of substitution. That's why we don't
+ for the purposes of substitution. That's why we don't
simply call downgrade_to_use () here. */
gcc_assert (IDATA_TYPE (deps_init_id_data.id) == SET);
gcc_assert (!lhs == !rhs);
@@ -2534,9 +2536,9 @@ static void
setup_id_lhs_rhs (idata_t id, insn_t insn, bool force_unique_p)
{
rtx pat = PATTERN (insn);
-
- if (GET_CODE (insn) == INSN
- && GET_CODE (pat) == SET
+
+ if (NONJUMP_INSN_P (insn)
+ && GET_CODE (pat) == SET
&& !force_unique_p)
{
IDATA_RHS (id) = SET_SRC (pat);
@@ -2555,7 +2557,7 @@ maybe_downgrade_id_to_use (idata_t id, insn_t insn)
df_ref *rec;
rtx lhs = IDATA_LHS (id);
rtx rhs = IDATA_RHS (id);
-
+
/* We downgrade only SETs. */
if (IDATA_TYPE (id) != SET)
return;
@@ -2565,11 +2567,11 @@ maybe_downgrade_id_to_use (idata_t id, insn_t insn)
IDATA_TYPE (id) = USE;
return;
}
-
+
for (rec = DF_INSN_UID_DEFS (uid); *rec; rec++)
{
df_ref def = *rec;
-
+
if (DF_REF_INSN (def)
&& DF_REF_FLAGS_IS_SET (def, DF_REF_PRE_POST_MODIFY)
&& loc_mentioned_in_p (DF_REF_LOC (def), IDATA_RHS (id)))
@@ -2579,7 +2581,7 @@ maybe_downgrade_id_to_use (idata_t id, insn_t insn)
}
#ifdef STACK_REGS
- /* Make instructions that set stack registers to be ineligible for
+ /* Make instructions that set stack registers to be ineligible for
renaming to avoid issues with find_used_regs. */
if (IN_RANGE (DF_REF_REGNO (def), FIRST_STACK_REG, LAST_STACK_REG))
{
@@ -2587,8 +2589,8 @@ maybe_downgrade_id_to_use (idata_t id, insn_t insn)
break;
}
#endif
- }
-
+ }
+
if (must_be_use)
IDATA_TYPE (id) = USE;
}
@@ -2600,12 +2602,12 @@ setup_id_reg_sets (idata_t id, insn_t insn)
unsigned uid = INSN_UID (insn);
df_ref *rec;
regset tmp = get_clear_regset_from_pool ();
-
+
for (rec = DF_INSN_UID_DEFS (uid); *rec; rec++)
{
df_ref def = *rec;
unsigned int regno = DF_REF_REGNO (def);
-
+
/* Post modifies are treated like clobbers by sched-deps.c. */
if (DF_REF_FLAGS_IS_SET (def, (DF_REF_MUST_CLOBBER
| DF_REF_PRE_POST_MODIFY)))
@@ -2615,7 +2617,7 @@ setup_id_reg_sets (idata_t id, insn_t insn)
SET_REGNO_REG_SET (IDATA_REG_SETS (id), regno);
#ifdef STACK_REGS
- /* For stack registers, treat writes to them as writes
+ /* For stack registers, treat writes to them as writes
to the first one to be consistent with sched-deps.c. */
if (IN_RANGE (regno, FIRST_STACK_REG, LAST_STACK_REG))
SET_REGNO_REG_SET (IDATA_REG_SETS (id), FIRST_STACK_REG);
@@ -2626,7 +2628,7 @@ setup_id_reg_sets (idata_t id, insn_t insn)
|| regno == STACK_POINTER_REGNUM)
bitmap_set_bit (tmp, regno);
}
-
+
for (rec = DF_INSN_UID_USES (uid); *rec; rec++)
{
df_ref use = *rec;
@@ -2641,7 +2643,7 @@ setup_id_reg_sets (idata_t id, insn_t insn)
SET_REGNO_REG_SET (IDATA_REG_USES (id), regno);
#ifdef STACK_REGS
- /* For stack registers, treat reads from them as reads from
+ /* For stack registers, treat reads from them as reads from
the first one to be consistent with sched-deps.c. */
if (IN_RANGE (regno, FIRST_STACK_REG, LAST_STACK_REG))
SET_REGNO_REG_SET (IDATA_REG_USES (id), FIRST_STACK_REG);
@@ -2672,14 +2674,14 @@ init_id_from_df (idata_t id, insn_t insn, bool force_unique_p)
static void
deps_init_id (idata_t id, insn_t insn, bool force_unique_p)
{
- struct deps _dc, *dc = &_dc;
+ struct deps_desc _dc, *dc = &_dc;
deps_init_id_data.where = DEPS_IN_NOWHERE;
deps_init_id_data.id = id;
deps_init_id_data.force_unique_p = force_unique_p;
deps_init_id_data.force_use_p = false;
- init_deps (dc);
+ init_deps (dc, false);
memcpy (&deps_init_id_sched_deps_info,
&const_deps_init_id_sched_deps_info,
@@ -2739,7 +2741,7 @@ free_transformed_insns (void *p)
free (pti);
}
-/* Init the s_i_d data for INSN which should be inited just once, when
+/* Init the s_i_d data for INSN which should be inited just once, when
we first see the insn. */
static void
init_first_time_insn_data (insn_t insn)
@@ -2747,39 +2749,58 @@ init_first_time_insn_data (insn_t insn)
/* This should not be set if this is the first time we init data for
insn. */
gcc_assert (first_time_insn_init (insn));
-
+
/* These are needed for nops too. */
INSN_LIVE (insn) = get_regset_from_pool ();
INSN_LIVE_VALID_P (insn) = false;
-
+
if (!INSN_NOP_P (insn))
{
INSN_ANALYZED_DEPS (insn) = BITMAP_ALLOC (NULL);
INSN_FOUND_DEPS (insn) = BITMAP_ALLOC (NULL);
- INSN_TRANSFORMED_INSNS (insn)
+ INSN_TRANSFORMED_INSNS (insn)
= htab_create (16, hash_transformed_insns,
eq_transformed_insns, free_transformed_insns);
- init_deps (&INSN_DEPS_CONTEXT (insn));
+ init_deps (&INSN_DEPS_CONTEXT (insn), true);
}
}
-/* Free the same data as above for INSN. */
-static void
-free_first_time_insn_data (insn_t insn)
+/* Free almost all above data for INSN that is scheduled already.
+ Used for extra-large basic blocks. */
+void
+free_data_for_scheduled_insn (insn_t insn)
{
gcc_assert (! first_time_insn_init (insn));
+ if (! INSN_ANALYZED_DEPS (insn))
+ return;
+
BITMAP_FREE (INSN_ANALYZED_DEPS (insn));
BITMAP_FREE (INSN_FOUND_DEPS (insn));
htab_delete (INSN_TRANSFORMED_INSNS (insn));
- return_regset_to_pool (INSN_LIVE (insn));
- INSN_LIVE (insn) = NULL;
- INSN_LIVE_VALID_P (insn) = false;
/* This is allocated only for bookkeeping insns. */
if (INSN_ORIGINATORS (insn))
BITMAP_FREE (INSN_ORIGINATORS (insn));
free_deps (&INSN_DEPS_CONTEXT (insn));
+
+ INSN_ANALYZED_DEPS (insn) = NULL;
+
+ /* Clear the readonly flag so we would ICE when trying to recalculate
+ the deps context (as we believe that it should not happen). */
+ (&INSN_DEPS_CONTEXT (insn))->readonly = 0;
+}
+
+/* Free the same data as above for INSN. */
+static void
+free_first_time_insn_data (insn_t insn)
+{
+ gcc_assert (! first_time_insn_init (insn));
+
+ free_data_for_scheduled_insn (insn);
+ return_regset_to_pool (INSN_LIVE (insn));
+ INSN_LIVE (insn) = NULL;
+ INSN_LIVE_VALID_P (insn) = false;
}
/* Initialize region-scope data structures for basic blocks. */
@@ -2842,7 +2863,7 @@ init_global_and_expr_for_insn (insn_t insn)
if (CANT_MOVE (insn)
|| INSN_ASM_P (insn)
|| SCHED_GROUP_P (insn)
- || prologue_epilogue_contains (insn)
+ || prologue_epilogue_contains (insn)
/* Exception handling insns are always unique. */
|| (flag_non_call_exceptions && can_throw_internal (insn))
/* TRAP_IF though have an INSN code is control_flow_insn_p (). */
@@ -2862,7 +2883,7 @@ init_global_and_expr_for_insn (insn_t insn)
/* Initialize INSN's expr. */
init_expr (INSN_EXPR (insn), vinsn_create (insn, force_unique_p), 0,
REG_BR_PROB_BASE, INSN_PRIORITY (insn), 0, BLOCK_NUM (insn),
- spec_done_ds, 0, 0, NULL, true, false, false, false,
+ spec_done_ds, 0, 0, NULL, true, false, false, false,
CANT_MOVE (insn));
}
@@ -2881,7 +2902,7 @@ sel_init_global_and_expr (bb_vec_t bbs)
extend_insn_data, /* extend_insn */
init_global_and_expr_for_insn /* init_insn */
};
-
+
sched_scan (&ssi, bbs, NULL, NULL, NULL);
}
@@ -2907,9 +2928,9 @@ finish_global_and_expr_insn (insn_t insn)
free_first_time_insn_data (insn);
INSN_WS_LEVEL (insn) = 0;
CANT_MOVE (insn) = 0;
-
- /* We can no longer assert this, as vinsns of this insn could be
- easily live in other insn's caches. This should be changed to
+
+ /* We can no longer assert this, as vinsns of this insn could be
+ easily live in other insn's caches. This should be changed to
a counter-like approach among all vinsns. */
gcc_assert (true || VINSN_COUNT (INSN_VINSN (insn)) == 1);
clear_expr (INSN_EXPR (insn));
@@ -2949,8 +2970,8 @@ sel_finish_global_and_expr (void)
}
-/* In the below hooks, we merely calculate whether or not a dependence
- exists, and in what part of insn. However, we will need more data
+/* In the below hooks, we merely calculate whether or not a dependence
+ exists, and in what part of insn. However, we will need more data
when we'll start caching dependence requests. */
/* Container to hold information for dependency analysis. */
@@ -3063,7 +3084,7 @@ has_dependence_note_reg_clobber (int regno)
if (reg_last->sets)
*dsp = (*dsp & ~SPECULATIVE) | DEP_OUTPUT;
-
+
if (reg_last->uses)
*dsp = (*dsp & ~SPECULATIVE) | DEP_ANTI;
}
@@ -3208,7 +3229,7 @@ has_dependence_p (expr_t expr, insn_t pred, ds_t **has_dep_pp)
{
int i;
ds_t ds;
- struct deps *dc;
+ struct deps_desc *dc;
if (INSN_SIMPLEJUMP_P (pred))
/* Unconditional jump is just a transfer of control flow.
@@ -3216,6 +3237,11 @@ has_dependence_p (expr_t expr, insn_t pred, ds_t **has_dep_pp)
return false;
dc = &INSN_DEPS_CONTEXT (pred);
+
+ /* We init this field lazily. */
+ if (dc->reg_last == NULL)
+ init_deps_reg_last (dc);
+
if (!dc->readonly)
{
has_dependence_data.pro = NULL;
@@ -3245,13 +3271,13 @@ has_dependence_p (expr_t expr, insn_t pred, ds_t **has_dep_pp)
/* Do not allow stores to memory to move through checks. Currently
we don't move this to sched-deps.c as the check doesn't have
- obvious places to which this dependence can be attached.
+ obvious places to which this dependence can be attached.
FIMXE: this should go to a hook. */
if (EXPR_LHS (expr)
&& MEM_P (EXPR_LHS (expr))
&& sel_insn_is_speculation_check (pred))
has_dependence_data.has_dep_p[DEPS_IN_INSN] = DEP_ANTI;
-
+
*has_dep_pp = has_dependence_data.has_dep_p;
ds = 0;
for (i = 0; i < DEPS_IN_NOWHERE; i++)
@@ -3262,9 +3288,9 @@ has_dependence_p (expr_t expr, insn_t pred, ds_t **has_dep_pp)
}
-/* Dependence hooks implementation that checks dependence latency constraints
- on the insns being scheduled. The entry point for these routines is
- tick_check_p predicate. */
+/* Dependence hooks implementation that checks dependence latency constraints
+ on the insns being scheduled. The entry point for these routines is
+ tick_check_p predicate. */
static struct
{
@@ -3374,7 +3400,7 @@ tick_check_p (expr_t expr, deps_t dc, fence_t fence)
tick_check_data.cycle = 0;
tick_check_data.seen_true_dep_p = false;
sched_deps_info = &tick_check_sched_deps_info;
-
+
gcc_assert (!dc->readonly);
dc->readonly = 1;
deps_analyze_insn (dc, EXPR_INSN_RTX (expr));
@@ -3397,7 +3423,7 @@ lhs_of_insn_equals_to_dest_p (insn_t insn, rtx dest)
if (lhs == NULL || dest == NULL)
return false;
-
+
return rtx_equal_p (lhs, dest);
}
@@ -3417,7 +3443,7 @@ sel_insn_is_speculation_check (rtx insn)
return s_i_d && !! INSN_SPEC_CHECKED_DS (insn);
}
-/* Extracts machine mode MODE and destination location DST_LOC
+/* Extracts machine mode MODE and destination location DST_LOC
for given INSN. */
void
get_dest_and_mode (rtx insn, rtx *dst_loc, enum machine_mode *mode)
@@ -3436,7 +3462,7 @@ get_dest_and_mode (rtx insn, rtx *dst_loc, enum machine_mode *mode)
*mode = GET_MODE (*dst_loc);
}
-/* Returns true when moving through JUMP will result in bookkeeping
+/* Returns true when moving through JUMP will result in bookkeeping
creation. */
bool
bookkeeping_can_be_created_if_moved_through_p (insn_t jump)
@@ -3459,7 +3485,7 @@ insn_is_the_only_one_in_bb_p (insn_t insn)
}
#ifdef ENABLE_CHECKING
-/* Check that the region we're scheduling still has at most one
+/* Check that the region we're scheduling still has at most one
backedge. */
static void
verify_backedges (void)
@@ -3469,13 +3495,13 @@ verify_backedges (void)
int i, n = 0;
edge e;
edge_iterator ei;
-
+
for (i = 0; i < current_nr_blocks; i++)
FOR_EACH_EDGE (e, ei, BASIC_BLOCK (BB_TO_BLOCK (i))->succs)
if (in_current_region_p (e->dest)
&& BLOCK_TO_BB (e->dest->index) < i)
n++;
-
+
gcc_assert (n <= 1);
}
}
@@ -3484,22 +3510,59 @@ verify_backedges (void)
/* Functions to work with control flow. */
+/* Recompute BLOCK_TO_BB and BB_FOR_BLOCK for current region so that blocks
+ are sorted in topological order (it might have been invalidated by
+ redirecting an edge). */
+static void
+sel_recompute_toporder (void)
+{
+ int i, n, rgn;
+ int *postorder, n_blocks;
+
+ postorder = XALLOCAVEC (int, n_basic_blocks);
+ n_blocks = post_order_compute (postorder, false, false);
+
+ rgn = CONTAINING_RGN (BB_TO_BLOCK (0));
+ for (n = 0, i = n_blocks - 1; i >= 0; i--)
+ if (CONTAINING_RGN (postorder[i]) == rgn)
+ {
+ BLOCK_TO_BB (postorder[i]) = n;
+ BB_TO_BLOCK (n) = postorder[i];
+ n++;
+ }
+
+ /* Assert that we updated info for all blocks. We may miss some blocks if
+ this function is called when redirecting an edge made a block
+ unreachable, but that block is not deleted yet. */
+ gcc_assert (n == RGN_NR_BLOCKS (rgn));
+}
+
/* Tidy the possibly empty block BB. */
-bool
-maybe_tidy_empty_bb (basic_block bb)
+static bool
+maybe_tidy_empty_bb (basic_block bb, bool recompute_toporder_p)
{
basic_block succ_bb, pred_bb;
+ edge e;
+ edge_iterator ei;
bool rescan_p;
/* Keep empty bb only if this block immediately precedes EXIT and
- has incoming non-fallthrough edge. Otherwise remove it. */
- if (!sel_bb_empty_p (bb)
- || (single_succ_p (bb)
+ has incoming non-fallthrough edge, or it has no predecessors or
+ successors. Otherwise remove it. */
+ if (!sel_bb_empty_p (bb)
+ || (single_succ_p (bb)
&& single_succ (bb) == EXIT_BLOCK_PTR
- && (!single_pred_p (bb)
- || !(single_pred_edge (bb)->flags & EDGE_FALLTHRU))))
+ && (!single_pred_p (bb)
+ || !(single_pred_edge (bb)->flags & EDGE_FALLTHRU)))
+ || EDGE_COUNT (bb->preds) == 0
+ || EDGE_COUNT (bb->succs) == 0)
return false;
+ /* Do not attempt to redirect complex edges. */
+ FOR_EACH_EDGE (e, ei, bb->preds)
+ if (e->flags & EDGE_COMPLEX)
+ return false;
+
free_data_sets (bb);
/* Do not delete BB if it has more than one successor.
@@ -3518,9 +3581,6 @@ maybe_tidy_empty_bb (basic_block bb) |
/* Redirect all non-fallthru edges to the next bb. */ |
while (rescan_p) |
{ |
- edge e; |
- edge_iterator ei; |
- |
rescan_p = false; |
FOR_EACH_EDGE (e, ei, bb->preds) |
@@ -3529,7 +3589,7 @@ maybe_tidy_empty_bb (basic_block bb) |
if (!(e->flags & EDGE_FALLTHRU)) |
{ |
- sel_redirect_edge_and_branch (e, succ_bb); |
+ recompute_toporder_p |= sel_redirect_edge_and_branch (e, succ_bb); |
rescan_p = true; |
break; |
} |
@@ -3545,10 +3605,14 @@ maybe_tidy_empty_bb (basic_block bb) |
{ |
gcc_assert (pred_bb != NULL); |
- move_bb_info (pred_bb, bb); |
+ if (in_current_region_p (pred_bb)) |
+ move_bb_info (pred_bb, bb); |
remove_empty_bb (bb, true); |
} |
+ if (recompute_toporder_p) |
+ sel_recompute_toporder (); |
+ |
#ifdef ENABLE_CHECKING |
verify_backedges (); |
#endif |
@@ -3556,19 +3620,20 @@ maybe_tidy_empty_bb (basic_block bb) |
return true; |
} |
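
A minimal standalone model of the guard at the top of maybe_tidy_empty_bb above, with invented types (struct block and the flag values are illustration only, not GCC's): an empty block is left alone when it immediately precedes EXIT with a non-fallthrough incoming edge, when it is already detached, or when a complex incoming edge makes redirection unsafe.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

#define EDGE_FALLTHRU 1
#define EDGE_COMPLEX  2

struct block
{
  bool empty_p;
  bool precedes_exit_p;    /* Single successor is the exit block.  */
  size_t n_preds, n_succs;
  unsigned pred_flags[4];  /* Flags of the incoming edges.  */
};

static bool
can_tidy_empty_block_p (const struct block *bb)
{
  size_t i;

  if (!bb->empty_p)
    return false;
  /* Keep an empty block before EXIT entered by a non-fallthrough edge.  */
  if (bb->n_succs == 1 && bb->precedes_exit_p
      && (bb->n_preds != 1 || !(bb->pred_flags[0] & EDGE_FALLTHRU)))
    return false;
  if (bb->n_preds == 0 || bb->n_succs == 0)
    return false;
  /* Do not attempt to redirect complex edges.  */
  for (i = 0; i < bb->n_preds; i++)
    if (bb->pred_flags[i] & EDGE_COMPLEX)
      return false;
  return true;
}

int
main (void)
{
  struct block detached = { true, false, 0, 0, { 0 } };
  struct block inner = { true, false, 1, 1, { EDGE_FALLTHRU } };

  printf ("%d %d\n", can_tidy_empty_block_p (&detached),
          can_tidy_empty_block_p (&inner));   /* Prints: 0 1  */
  return 0;
}
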
-/* Tidy the control flow after we have removed original insn from |
+/* Tidy the control flow after we have removed the original insn from 
XBB. Return true if we have removed some blocks. When FULL_TIDYING |
is true, also try to optimize control flow on non-empty blocks. */ |
bool |
tidy_control_flow (basic_block xbb, bool full_tidying) |
{ |
bool changed = true; |
- |
+ insn_t first, last; |
+ |
/* First check whether XBB is empty. */ |
- changed = maybe_tidy_empty_bb (xbb); |
+ changed = maybe_tidy_empty_bb (xbb, false); |
if (changed || !full_tidying) |
return changed; |
- |
+ |
 /* Check if there is an unnecessary jump left after removing the insn. */ 
if (jump_leads_only_to_bb_p (BB_END (xbb), xbb->next_bb) |
&& INSN_SCHED_TIMES (BB_END (xbb)) == 0 |
@@ -3579,16 +3644,30 @@ tidy_control_flow (basic_block xbb, bool full_tidying) |
tidy_fallthru_edge (EDGE_SUCC (xbb, 0)); |
} |
+ first = sel_bb_head (xbb); |
+ last = sel_bb_end (xbb); |
+ if (MAY_HAVE_DEBUG_INSNS) |
+ { |
+ if (first != last && DEBUG_INSN_P (first)) |
+ do |
+ first = NEXT_INSN (first); |
+ while (first != last && (DEBUG_INSN_P (first) || NOTE_P (first))); |
+ |
+ if (first != last && DEBUG_INSN_P (last)) |
+ do |
+ last = PREV_INSN (last); |
+ while (first != last && (DEBUG_INSN_P (last) || NOTE_P (last))); |
+ } |
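
A standalone sketch of the boundary adjustment just added: with a doubly linked list standing in for the insn stream (struct insn is invented), FIRST moves forward and LAST moves backward past debug entries, so the first == last test below sees only real insns. For brevity the NOTE_P case of the original loop conditions is omitted.

#include <stdbool.h>
#include <stdio.h>

struct insn { bool debug_p; struct insn *prev, *next; };

static void
skip_debug_boundaries (struct insn **first, struct insn **last)
{
  if (*first != *last && (*first)->debug_p)
    do
      *first = (*first)->next;
    while (*first != *last && (*first)->debug_p);

  if (*first != *last && (*last)->debug_p)
    do
      *last = (*last)->prev;
    while (*first != *last && (*last)->debug_p);
}

int
main (void)
{
  struct insn a = { true, NULL, NULL }, b = { false, NULL, NULL },
              c = { true, NULL, NULL };
  struct insn *first = &a, *last = &c;

  a.next = &b; b.prev = &a; b.next = &c; c.prev = &b;
  skip_debug_boundaries (&first, &last);
  printf ("%d\n", first == last);   /* Prints 1: only the real insn is left.  */
  return 0;
}
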
/* Check if there is an unnecessary jump in previous basic block leading |
- to next basic block left after removing INSN from stream. |
- If it is so, remove that jump and redirect edge to current |
- basic block (where there was INSN before deletion). This way |
- when NOP will be deleted several instructions later with its |
- basic block we will not get a jump to next instruction, which |
+ to the next basic block left after removing INSN from the stream. 
+ If so, remove that jump and redirect the edge to the current 
+ basic block (where INSN was before deletion). This way, when the NOP 
+ is deleted several instructions later together with its basic block, 
+ we will not end up with a jump to the next instruction, which 
can be harmful. */ |
- if (sel_bb_head (xbb) == sel_bb_end (xbb) |
+ if (first == last |
&& !sel_bb_empty_p (xbb) |
- && INSN_NOP_P (sel_bb_end (xbb)) |
+ && INSN_NOP_P (last) |
/* Flow goes fallthru from current block to the next. */ |
&& EDGE_COUNT (xbb->succs) == 1 |
&& (EDGE_SUCC (xbb, 0)->flags & EDGE_FALLTHRU) |
@@ -3602,24 +3681,47 @@ tidy_control_flow (basic_block xbb, bool full_tidying) |
/* Also this jump is not at the scheduling boundary. */ |
&& !IN_CURRENT_FENCE_P (BB_END (xbb->prev_bb))) |
{ |
+ bool recompute_toporder_p; |
/* Clear data structures of jump - jump itself will be removed |
by sel_redirect_edge_and_branch. */ |
clear_expr (INSN_EXPR (BB_END (xbb->prev_bb))); |
- sel_redirect_edge_and_branch (EDGE_SUCC (xbb->prev_bb, 0), xbb); |
+ recompute_toporder_p |
+ = sel_redirect_edge_and_branch (EDGE_SUCC (xbb->prev_bb, 0), xbb); |
+ |
gcc_assert (EDGE_SUCC (xbb->prev_bb, 0)->flags & EDGE_FALLTHRU); |
 /* It can turn out that, after removing the unused jump, the basic block 
 that contained that jump becomes empty too. In such a case 
 remove it as well. */ 
if (sel_bb_empty_p (xbb->prev_bb)) |
- changed = maybe_tidy_empty_bb (xbb->prev_bb); |
+ changed = maybe_tidy_empty_bb (xbb->prev_bb, recompute_toporder_p); |
+ else if (recompute_toporder_p) |
+ sel_recompute_toporder (); |
} |
return changed; |
} |
-/* Rip-off INSN from the insn stream. When ONLY_DISCONNECT is true, |
- do not delete insn's data, because it will be later re-emitted. |
+/* Purge meaningless empty blocks in the middle of a region. */ |
+void |
+purge_empty_blocks (void) |
+{ |
+ /* Do not attempt to delete the preheader. */ 
+ int i = sel_is_loop_preheader_p (BASIC_BLOCK (BB_TO_BLOCK (0))) ? 1 : 0; |
+ |
+ while (i < current_nr_blocks) |
+ { |
+ basic_block b = BASIC_BLOCK (BB_TO_BLOCK (i)); |
+ |
+ if (maybe_tidy_empty_bb (b, false)) |
+ continue; |
+ |
+ i++; |
+ } |
+} |
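
The loop above uses a common deletion idiom: on a successful removal it does not advance the index, because the blocks after the deleted one shift into its position. A standalone sketch of the same pattern on a plain array (the data is invented):

#include <stdio.h>
#include <string.h>

int
main (void)
{
  int a[] = { 3, 0, 0, 7, 0, 9 }, n = 6, i = 0;

  while (i < n)
    {
      if (a[i] == 0)   /* "Empty" entry: delete it and retest index I.  */
        {
          memmove (&a[i], &a[i + 1], (n - i - 1) * sizeof (int));
          n--;
          continue;
        }
      i++;
    }

  for (i = 0; i < n; i++)
    printf ("%d ", a[i]);   /* Prints: 3 7 9  */
  return 0;
}
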
+ |
+/* Rip INSN off the insn stream. When ONLY_DISCONNECT is true, 
+ do not delete insn's data, because it will be re-emitted later. 
Return true if we have removed some blocks afterwards. */ |
bool |
sel_remove_insn (insn_t insn, bool only_disconnect, bool full_tidying) |
@@ -3628,6 +3730,21 @@ sel_remove_insn (insn_t insn, bool only_disconnect, bool full_tidying) |
gcc_assert (INSN_IN_STREAM_P (insn)); |
+ if (DEBUG_INSN_P (insn) && BB_AV_SET_VALID_P (bb)) |
+ { |
+ expr_t expr; |
+ av_set_iterator i; |
+ |
+ /* When we remove a debug insn that is the head of a BB, it remains 
+ in the AV_SET of the block, but it shouldn't. */ |
+ FOR_EACH_EXPR_1 (expr, i, &BB_AV_SET (bb)) |
+ if (EXPR_INSN_RTX (expr) == insn) |
+ { |
+ av_set_iter_remove (&i); |
+ break; |
+ } |
+ } |
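
A standalone sketch of the remove-while-iterating pattern used above, on a plain singly linked list instead of an av_set (struct node and remove_id are invented for illustration): find the matching element, unlink it, and stop, just as the loop above breaks after av_set_iter_remove.

#include <stdio.h>
#include <stdlib.h>

struct node { int id; struct node *next; };

static void
remove_id (struct node **head, int id)
{
  struct node **link;

  for (link = head; *link; link = &(*link)->next)
    if ((*link)->id == id)
      {
        struct node *dead = *link;

        *link = dead->next;   /* Unlink, then stop searching.  */
        free (dead);
        break;
      }
}

int
main (void)
{
  struct node *p, *list = NULL;
  struct node **tail = &list;
  int i;

  for (i = 1; i <= 3; i++)
    {
      *tail = calloc (1, sizeof (struct node));
      (*tail)->id = i;
      tail = &(*tail)->next;
    }
  remove_id (&list, 2);
  for (p = list; p; p = p->next)
    printf ("%d ", p->id);   /* Prints: 1 3  */
  return 0;
}
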
+ |
if (only_disconnect) |
{ |
insn_t prev = PREV_INSN (insn); |
@@ -3666,7 +3783,7 @@ sel_estimate_number_of_insns (basic_block bb) |
insn_t insn = NEXT_INSN (BB_HEAD (bb)), next_tail = NEXT_INSN (BB_END (bb)); |
for (; insn != next_tail; insn = NEXT_INSN (insn)) |
- if (INSN_P (insn)) |
+ if (NONDEBUG_INSN_P (insn)) |
res++; |
return res; |
@@ -3699,15 +3816,15 @@ get_seqno_of_a_pred (insn_t insn) |
&& !in_current_region_p (single_pred (bb))) |
{ |
/* We can have preds outside a region when splitting edges |
- for pipelining of an outer loop. Use succ instead. |
+ for pipelining of an outer loop. Use succ instead. |
There should be only one of them. */ |
insn_t succ = NULL; |
succ_iterator si; |
bool first = true; |
- |
+ |
gcc_assert (flag_sel_sched_pipelining_outer_loops |
&& current_loop_nest); |
- FOR_EACH_SUCC_1 (succ, si, insn, |
+ FOR_EACH_SUCC_1 (succ, si, insn, |
SUCCS_NORMAL | SUCCS_SKIP_TO_LOOP_EXITS) |
{ |
gcc_assert (first); |
@@ -3726,7 +3843,7 @@ get_seqno_of_a_pred (insn_t insn) |
gcc_assert (n == 1); |
seqno = INSN_SEQNO (preds[0]); |
- |
+ |
free (preds); |
} |
} |
@@ -3734,7 +3851,8 @@ get_seqno_of_a_pred (insn_t insn) |
return seqno; |
} |
-/* Find the proper seqno for inserting at INSN. */ |
+/* Find the proper seqno for inserting at INSN. Returns -1 if no predecessors |
+ with positive seqno exist. */ |
int |
get_seqno_by_preds (rtx insn) |
{ |
@@ -3748,12 +3866,11 @@ get_seqno_by_preds (rtx insn) |
return INSN_SEQNO (tmp); |
else |
tmp = PREV_INSN (tmp); |
- |
+ |
cfg_preds (bb, &preds, &n); |
for (i = 0, seqno = -1; i < n; i++) |
seqno = MAX (seqno, INSN_SEQNO (preds[i])); |
- gcc_assert (seqno > 0); |
return seqno; |
} |
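
A minimal sketch of the convention the change above introduces: take the maximum seqno over the predecessors, starting from -1, so that -1 (rather than a failed assert) signals "no predecessor with a positive seqno". The data layout is invented.

#include <stdio.h>

static int
seqno_by_preds (const int *pred_seqnos, int n)
{
  int i, seqno = -1;

  for (i = 0; i < n; i++)
    if (pred_seqnos[i] > seqno)
      seqno = pred_seqnos[i];
  return seqno;   /* The caller now has to handle -1.  */
}

int
main (void)
{
  int some[] = { 2, 5, 3 };

  printf ("%d %d\n", seqno_by_preds (NULL, 0), seqno_by_preds (some, 3));
  /* Prints: -1 5  */
  return 0;
}
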
@@ -3809,17 +3926,26 @@ static void |
extend_insn_data (void) |
{ |
int reserve; |
- |
+ |
sched_extend_target (); |
sched_deps_init (false); |
/* Extend data structures for insns from current region. */ |
- reserve = (sched_max_luid + 1 |
+ reserve = (sched_max_luid + 1 |
- VEC_length (sel_insn_data_def, s_i_d)); |
- if (reserve > 0 |
+ if (reserve > 0 |
&& ! VEC_space (sel_insn_data_def, s_i_d, reserve)) |
- VEC_safe_grow_cleared (sel_insn_data_def, heap, s_i_d, |
- 3 * sched_max_luid / 2); |
+ { |
+ int size; |
+ |
+ if (sched_max_luid / 2 > 1024) |
+ size = sched_max_luid + 1024; |
+ else |
+ size = 3 * sched_max_luid / 2; |
+ 
+ VEC_safe_grow_cleared (sel_insn_data_def, heap, s_i_d, size); |
+ } |
} |
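
A standalone sketch of the growth policy above: grow the table geometrically (1.5x) while it is small, but switch to a fixed pad of 1024 entries once half the luid count exceeds 1024, so that huge functions do not over-allocate. The threshold mirrors the code above; the driver is illustration only.

#include <stdio.h>

static int
new_table_size (int max_luid)
{
  if (max_luid / 2 > 1024)
    return max_luid + 1024;   /* Additive growth for very large regions.  */
  return 3 * max_luid / 2;    /* Geometric growth for typical ones.  */
}

int
main (void)
{
  printf ("%d %d\n", new_table_size (100), new_table_size (100000));
  /* Prints: 150 101024  */
  return 0;
}
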
/* Finalize data structures for insns from current region. */ |
@@ -3833,7 +3959,7 @@ finish_insns (void) |
for (i = 0; i < VEC_length (sel_insn_data_def, s_i_d); i++) |
{ |
sel_insn_data_def *sid_entry = VEC_index (sel_insn_data_def, s_i_d, i); |
- |
+ |
if (sid_entry->live) |
return_regset_to_pool (sid_entry->live); |
if (sid_entry->analyzed_deps) |
@@ -3846,13 +3972,13 @@ finish_insns (void) |
if (EXPR_VINSN (&sid_entry->expr)) |
{ |
clear_expr (&sid_entry->expr); |
- |
+ |
/* Also, clear CANT_MOVE bit here, because we really don't want it |
to be passed to the next region. */ |
CANT_MOVE_BY_LUID (i) = 0; |
} |
} |
- |
+ |
VEC_free (sel_insn_data_def, heap, s_i_d); |
} |
@@ -3901,7 +4027,7 @@ init_insn_data (insn_t insn) |
if (insn_init_create_new_vinsn_p) |
change_vinsn_in_expr (expr, vinsn_create (insn, init_insn_force_unique_p)); |
- |
+ |
if (first_time_insn_init (insn)) |
init_first_time_insn_data (insn); |
} |
@@ -3912,13 +4038,13 @@ static void |
init_simplejump_data (insn_t insn) |
{ |
init_expr (INSN_EXPR (insn), vinsn_create (insn, false), 0, |
- REG_BR_PROB_BASE, 0, 0, 0, 0, 0, 0, NULL, true, false, false, |
+ REG_BR_PROB_BASE, 0, 0, 0, 0, 0, 0, NULL, true, false, false, |
false, true); |
INSN_SEQNO (insn) = get_seqno_of_a_pred (insn); |
init_first_time_insn_data (insn); |
} |
-/* Perform deferred initialization of insns. This is used to process |
+/* Perform deferred initialization of insns. This is used to process |
a new jump that may be created by redirect_edge. */ |
void |
sel_init_new_insn (insn_t insn, int flags) |
@@ -3931,7 +4057,7 @@ sel_init_new_insn (insn_t insn, int flags) |
extend_bb_info (); |
create_initial_data_sets (BLOCK_FOR_INSN (insn)); |
} |
- |
+ |
if (flags & INSN_INIT_TODO_LUID) |
sched_init_luids (NULL, NULL, NULL, insn); |
@@ -3947,7 +4073,7 @@ sel_init_new_insn (insn_t insn, int flags) |
extend_insn_data (); |
init_simplejump_data (insn); |
} |
- |
+ |
gcc_assert (CONTAINING_RGN (BLOCK_NUM (insn)) |
== CONTAINING_RGN (BB_TO_BLOCK (0))); |
} |
@@ -3962,7 +4088,7 @@ init_lv_set (basic_block bb) |
gcc_assert (!BB_LV_SET_VALID_P (bb)); |
BB_LV_SET (bb) = get_regset_from_pool (); |
- COPY_REG_SET (BB_LV_SET (bb), DF_LR_IN (bb)); |
+ COPY_REG_SET (BB_LV_SET (bb), DF_LR_IN (bb)); |
BB_LV_SET_VALID_P (bb) = true; |
} |
@@ -3971,10 +4097,10 @@ static void |
copy_lv_set_from (basic_block bb, basic_block from_bb) |
{ |
gcc_assert (!BB_LV_SET_VALID_P (bb)); |
- |
+ |
COPY_REG_SET (BB_LV_SET (bb), BB_LV_SET (from_bb)); |
BB_LV_SET_VALID_P (bb) = true; |
-} |
+} |
/* Initialize lv set of all bb headers. */ |
void |
@@ -4261,6 +4387,11 @@ fallthru_bb_of_jump (rtx jump) |
if (!any_condjump_p (jump)) |
return NULL; |
+ /* A basic block that ends with a conditional jump may still have one 
+ successor (and be followed by a barrier); we are not interested in 
+ such blocks. */ 
+ if (single_succ_p (BLOCK_FOR_INSN (jump))) |
+ return NULL; |
+ |
return FALLTHRU_EDGE (BLOCK_FOR_INSN (jump))->dest; |
} |
@@ -4286,11 +4417,12 @@ sel_init_bbs (bb_vec_t bbs, basic_block bb) |
sched_scan (&ssi, bbs, bb, new_insns, NULL); |
} |
-/* Restore other notes for the whole region. */ |
+/* Restore notes for the whole region. */ |
static void |
-sel_restore_other_notes (void) |
+sel_restore_notes (void) |
{ |
int bb; |
+ insn_t insn; |
for (bb = 0; bb < current_nr_blocks; bb++) |
{ |
@@ -4305,6 +4437,10 @@ sel_restore_other_notes (void) |
restore_other_notes (NULL, first); |
BB_NOTE_LIST (first) = NULL_RTX; |
+ FOR_BB_INSNS (first, insn) |
+ if (NONDEBUG_INSN_P (insn)) |
+ reemit_notes (insn); |
+ |
first = first->next_bb; |
} |
while (first != last); |
@@ -4315,7 +4451,7 @@ sel_restore_other_notes (void) |
void |
sel_finish_bbs (void) |
{ |
- sel_restore_other_notes (); |
+ sel_restore_notes (); |
/* Remove current loop preheader from this loop. */ |
if (current_loop_nest) |
@@ -4350,7 +4486,7 @@ alloc_succs_info (void) |
if (succs_info_pool.top == succs_info_pool.max_top) |
{ |
int i; |
- |
+ |
if (++succs_info_pool.max_top >= succs_info_pool.size) |
gcc_unreachable (); |
@@ -4369,23 +4505,23 @@ alloc_succs_info (void) |
void |
free_succs_info (struct succs_info * sinfo) |
{ |
- gcc_assert (succs_info_pool.top >= 0 |
+ gcc_assert (succs_info_pool.top >= 0 |
&& &succs_info_pool.stack[succs_info_pool.top] == sinfo); |
succs_info_pool.top--; |
/* Clear stale info. */ |
- VEC_block_remove (rtx, sinfo->succs_ok, |
+ VEC_block_remove (rtx, sinfo->succs_ok, |
0, VEC_length (rtx, sinfo->succs_ok)); |
- VEC_block_remove (rtx, sinfo->succs_other, |
+ VEC_block_remove (rtx, sinfo->succs_other, |
0, VEC_length (rtx, sinfo->succs_other)); |
- VEC_block_remove (int, sinfo->probs_ok, |
+ VEC_block_remove (int, sinfo->probs_ok, |
0, VEC_length (int, sinfo->probs_ok)); |
sinfo->all_prob = 0; |
sinfo->succs_ok_n = 0; |
sinfo->all_succs_n = 0; |
} |
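
A standalone sketch of the LIFO discipline that free_succs_info asserts above: entries come from a stack-shaped pool, and a free is only legal for the most recently allocated entry. The pool size and struct info are invented simplifications.

#include <assert.h>
#include <stdio.h>

#define POOL_SIZE 8

struct info { int all_succs_n; };

static struct info pool_stack[POOL_SIZE];
static int pool_top = -1;

static struct info *
alloc_info (void)
{
  assert (pool_top + 1 < POOL_SIZE);
  return &pool_stack[++pool_top];
}

static void
free_info (struct info *i)
{
  /* Frees must happen in reverse allocation order.  */
  assert (pool_top >= 0 && &pool_stack[pool_top] == i);
  i->all_succs_n = 0;   /* Clear stale info.  */
  pool_top--;
}

int
main (void)
{
  struct info *a = alloc_info (), *b = alloc_info ();

  free_info (b);
  free_info (a);
  printf ("%d\n", pool_top == -1);   /* Prints: 1  */
  return 0;
}
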
-/* Compute successor info for INSN. FLAGS are the flags passed |
+/* Compute successor info for INSN. FLAGS are the flags passed |
to the FOR_EACH_SUCC_1 iterator. */ |
struct succs_info * |
compute_succs_info (insn_t insn, short flags) |
@@ -4405,10 +4541,10 @@ compute_succs_info (insn_t insn, short flags) |
{ |
VEC_safe_push (rtx, heap, sinfo->succs_ok, succ); |
VEC_safe_push (int, heap, sinfo->probs_ok, |
- /* FIXME: Improve calculation when skipping |
+ /* FIXME: Improve calculation when skipping |
inner loop to exits. */ |
- (si.bb_end |
- ? si.e1->probability |
+ (si.bb_end |
+ ? si.e1->probability |
: REG_BR_PROB_BASE)); |
sinfo->succs_ok_n++; |
} |
@@ -4427,7 +4563,7 @@ compute_succs_info (insn_t insn, short flags) |
return sinfo; |
} |
-/* Return the predecessors of BB in PREDS and their number in N. |
+/* Return the predecessors of BB in PREDS and their number in N. |
Empty blocks are skipped. SIZE is used to allocate PREDS. */ |
static void |
cfg_preds_1 (basic_block bb, insn_t **preds, int *n, int *size) |
@@ -4450,7 +4586,7 @@ cfg_preds_1 (basic_block bb, insn_t **preds, int *n, int *size) |
else |
{ |
if (*n == *size) |
- *preds = XRESIZEVEC (insn_t, *preds, |
+ *preds = XRESIZEVEC (insn_t, *preds, |
(*size = 2 * *size + 1)); |
(*preds)[(*n)++] = bb_end; |
} |
@@ -4459,8 +4595,8 @@ cfg_preds_1 (basic_block bb, insn_t **preds, int *n, int *size) |
gcc_assert (*n != 0); |
} |
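
A standalone sketch of the XRESIZEVEC growth idiom in cfg_preds_1 above: when the array is full, reallocate it to 2 * size + 1, so that a zero-sized array also grows. Error handling is kept minimal for brevity.

#include <stdio.h>
#include <stdlib.h>

static void
push (int **vec, int *n, int *size, int value)
{
  if (*n == *size)
    {
      *size = 2 * *size + 1;
      *vec = realloc (*vec, *size * sizeof (int));
      if (*vec == NULL)
        abort ();
    }
  (*vec)[(*n)++] = value;
}

int
main (void)
{
  int *vec = NULL, n = 0, size = 0, i;

  for (i = 0; i < 10; i++)
    push (&vec, &n, &size, i);
  printf ("n=%d size=%d\n", n, size);   /* Prints: n=10 size=15  */
  free (vec);
  return 0;
}
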
-/* Find all predecessors of BB and record them in PREDS and their number |
- in N. Empty blocks are skipped, and only normal (forward in-region) |
+/* Find all predecessors of BB and record them in PREDS and their number |
+ in N. Empty blocks are skipped, and only normal (forward in-region) |
edges are processed. */ |
static void |
cfg_preds (basic_block bb, insn_t **preds, int *n) |
@@ -4498,7 +4634,7 @@ sel_num_cfg_preds_gt_1 (insn_t insn) |
return false; |
} |
-/* Returns true when BB should be the end of an ebb. Adapted from the |
+/* Returns true when BB should be the end of an ebb. Adapted from the |
code in sched-ebb.c. */ |
bool |
bb_ends_ebb_p (basic_block bb) |
@@ -4506,7 +4642,7 @@ bb_ends_ebb_p (basic_block bb) |
basic_block next_bb = bb_next_bb (bb); |
edge e; |
edge_iterator ei; |
- |
+ |
if (next_bb == EXIT_BLOCK_PTR |
|| bitmap_bit_p (forced_ebb_heads, next_bb->index) |
|| (LABEL_P (BB_HEAD (next_bb)) |
@@ -4540,7 +4676,7 @@ in_same_ebb_p (insn_t insn, insn_t succ) |
{ |
if (ptr == BLOCK_FOR_INSN (succ)) |
return true; |
- |
+ |
if (bb_ends_ebb_p (ptr)) |
return false; |
@@ -4562,7 +4698,7 @@ recompute_rev_top_order (void) |
if (!rev_top_order_index || rev_top_order_index_len < last_basic_block) |
{ |
- rev_top_order_index_len = last_basic_block; |
+ rev_top_order_index_len = last_basic_block; |
rev_top_order_index = XRESIZEVEC (int, rev_top_order_index, |
rev_top_order_index_len); |
} |
@@ -4598,7 +4734,7 @@ clear_outdated_rtx_info (basic_block bb) |
EXPR_PRIORITY_ADJ (INSN_EXPR (insn)) = 0; |
/* We cannot use the changed caches, as previously we could ignore |
- the LHS dependence due to enabled renaming and transform |
+ the LHS dependence due to enabled renaming and transform |
the expression, and currently we'll be unable to do this. */ |
htab_empty (INSN_TRANSFORMED_INSNS (insn)); |
} |
@@ -4649,12 +4785,12 @@ alloc_sched_pools (void) |
int succs_size; |
succs_size = MAX_WS + 1; |
- succs_info_pool.stack = XCNEWVEC (struct succs_info, succs_size); |
+ succs_info_pool.stack = XCNEWVEC (struct succs_info, succs_size); |
succs_info_pool.size = succs_size; |
succs_info_pool.top = -1; |
succs_info_pool.max_top = -1; |
- sched_lists_pool = create_alloc_pool ("sel-sched-lists", |
+ sched_lists_pool = create_alloc_pool ("sel-sched-lists", |
sizeof (struct _list_node), 500); |
} |
@@ -4663,7 +4799,7 @@ void |
free_sched_pools (void) |
{ |
int i; |
- |
+ |
free_alloc_pool (sched_lists_pool); |
gcc_assert (succs_info_pool.top == -1); |
for (i = 0; i < succs_info_pool.max_top; i++) |
@@ -4676,7 +4812,7 @@ free_sched_pools (void) |
} |
-/* Returns a position in RGN where BB can be inserted retaining |
+/* Returns a position in RGN where BB can be inserted retaining |
topological order. */ |
static int |
find_place_to_insert_bb (basic_block bb, int rgn) |
@@ -4684,7 +4820,7 @@ find_place_to_insert_bb (basic_block bb, int rgn) |
bool has_preds_outside_rgn = false; |
edge e; |
edge_iterator ei; |
- |
+ |
/* Find whether we have preds outside the region. */ |
FOR_EACH_EDGE (e, ei, bb->preds) |
if (!in_current_region_p (e->src)) |
@@ -4692,7 +4828,7 @@ find_place_to_insert_bb (basic_block bb, int rgn) |
has_preds_outside_rgn = true; |
break; |
} |
- |
+ |
/* Recompute the top order -- needed when we have > 1 pred |
and in case we don't have preds outside. */ |
if (flag_sel_sched_pipelining_outer_loops |
@@ -4704,11 +4840,11 @@ find_place_to_insert_bb (basic_block bb, int rgn) |
for (i = RGN_NR_BLOCKS (rgn) - 1; i >= 0; i--) |
{ |
cur_bbi = BB_TO_BLOCK (i); |
- if (rev_top_order_index[bbi] |
+ if (rev_top_order_index[bbi] |
< rev_top_order_index[cur_bbi]) |
break; |
} |
- |
+ |
 /* We skipped the right block, so we increase i. We accommodate 
it for increasing by step later, so we decrease i. */ |
return (i + 1) - 1; |
@@ -4731,9 +4867,9 @@ find_place_to_insert_bb (basic_block bb, int rgn) |
if (EDGE_COUNT (bb->succs) > 0) |
{ |
int pred_bbi; |
- |
+ |
gcc_assert (EDGE_COUNT (bb->preds) == 1); |
- |
+ |
pred_bbi = EDGE_PRED (bb, 0)->src->index; |
return BLOCK_TO_BB (pred_bbi); |
} |
@@ -4753,14 +4889,14 @@ delete_and_free_basic_block (basic_block bb) |
bitmap_clear_bit (blocks_to_reschedule, bb->index); |
- /* Can't assert av_set properties because we use sel_aremove_bb |
- when removing loop preheader from the region. At the point of |
+ /* Can't assert av_set properties because we use sel_remove_bb 
+ when removing loop preheader from the region. At the point of |
removing the preheader we already have deallocated sel_region_bb_info. */ |
gcc_assert (BB_LV_SET (bb) == NULL |
&& !BB_LV_SET_VALID_P (bb) |
&& BB_AV_LEVEL (bb) == 0 |
&& BB_AV_SET (bb) == NULL); |
- |
+ |
delete_basic_block (bb); |
} |
@@ -4777,13 +4913,13 @@ add_block_to_current_region (basic_block bb) |
gcc_assert (RGN_HAS_REAL_EBB (rgn) == 0 |
&& ebb_head[bbi] == pos); |
- |
+ |
/* Make a place for the new block. */ |
extend_regions (); |
for (i = RGN_BLOCKS (rgn + 1) - 1; i >= pos; i--) |
BLOCK_TO_BB (rgn_bb_table[i])++; |
- |
+ |
memmove (rgn_bb_table + pos + 1, |
rgn_bb_table + pos, |
(RGN_BLOCKS (nr_regions) - pos) * sizeof (*rgn_bb_table)); |
@@ -4794,7 +4930,7 @@ add_block_to_current_region (basic_block bb) |
CONTAINING_RGN (bb->index) = rgn; |
RGN_NR_BLOCKS (rgn)++; |
- |
+ |
for (i = rgn + 1; i <= nr_regions; i++) |
RGN_BLOCKS (i)++; |
} |
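
A standalone sketch of the table surgery in add_block_to_current_region above: shift the tail of the block table one slot to the right with memmove, then drop the new block into the vacated position. The table contents are invented.

#include <stdio.h>
#include <string.h>

int
main (void)
{
  int table[8] = { 10, 11, 12, 13 };
  int len = 4, pos = 2, new_block = 99, i;

  memmove (table + pos + 1, table + pos, (len - pos) * sizeof (*table));
  table[pos] = new_block;
  len++;

  for (i = 0; i < len; i++)
    printf ("%d ", table[i]);   /* Prints: 10 11 99 12 13  */
  return 0;
}
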
@@ -4824,7 +4960,7 @@ remove_bb_from_region (basic_block bb) |
RGN_BLOCKS (i)--; |
} |
-/* Add BB to the current region and update all data. If BB is NULL, add all |
+/* Add BB to the current region and update all data. If BB is NULL, add all |
blocks from last_added_blocks vector. */ |
static void |
sel_add_bb (basic_block bb) |
@@ -4834,23 +4970,23 @@ sel_add_bb (basic_block bb) |
sched_init_bbs (); |
sel_init_bbs (last_added_blocks, NULL); |
- /* When bb is passed explicitly, the vector should contain |
+ /* When bb is passed explicitly, the vector should contain 
 a single element equal to bb; otherwise, the vector 
should not be NULL. */ |
gcc_assert (last_added_blocks != NULL); |
- |
+ |
if (bb != NULL) |
{ |
gcc_assert (VEC_length (basic_block, last_added_blocks) == 1 |
- && VEC_index (basic_block, |
- last_added_blocks, 0) == bb); |
+ && VEC_index (basic_block, |
+ last_added_blocks, 0) == bb); |
add_block_to_current_region (bb); |
/* We associate creating/deleting data sets with the first insn |
appearing / disappearing in the bb. */ |
if (!sel_bb_empty_p (bb) && BB_LV_SET (bb) == NULL) |
create_initial_data_sets (bb); |
- |
+ |
VEC_free (basic_block, heap, last_added_blocks); |
} |
else |
@@ -4859,14 +4995,14 @@ sel_add_bb (basic_block bb) |
int i; |
basic_block temp_bb = NULL; |
- for (i = 0; |
+ for (i = 0; |
VEC_iterate (basic_block, last_added_blocks, i, bb); i++) |
{ |
add_block_to_current_region (bb); |
temp_bb = bb; |
} |
- /* We need to fetch at least one bb so we know the region |
+ /* We need to fetch at least one bb so we know the region |
to update. */ |
gcc_assert (temp_bb != NULL); |
bb = temp_bb; |
@@ -4877,17 +5013,17 @@ sel_add_bb (basic_block bb) |
rgn_setup_region (CONTAINING_RGN (bb->index)); |
} |
-/* Remove BB from the current region and update all data. |
+/* Remove BB from the current region and update all data. |
 If REMOVE_FROM_CFG_P is true, also remove the block from the CFG. */ 
static void |
sel_remove_bb (basic_block bb, bool remove_from_cfg_p) |
{ |
gcc_assert (bb != NULL && BB_NOTE_LIST (bb) == NULL_RTX); |
- |
+ |
remove_bb_from_region (bb); |
return_bb_to_pool (bb); |
bitmap_clear_bit (blocks_to_reschedule, bb->index); |
- |
+ |
if (remove_from_cfg_p) |
delete_and_free_basic_block (bb); |
@@ -4900,15 +5036,15 @@ move_bb_info (basic_block merge_bb, basic_block empty_bb) |
{ |
gcc_assert (in_current_region_p (merge_bb)); |
- concat_note_lists (BB_NOTE_LIST (empty_bb), |
+ concat_note_lists (BB_NOTE_LIST (empty_bb), |
&BB_NOTE_LIST (merge_bb)); |
BB_NOTE_LIST (empty_bb) = NULL_RTX; |
} |
-/* Remove an empty basic block EMPTY_BB. When MERGE_UP_P is true, we put |
- EMPTY_BB's note lists into its predecessor instead of putting them |
- into the successor. When REMOVE_FROM_CFG_P is true, also remove |
+/* Remove an empty basic block EMPTY_BB. When MERGE_UP_P is true, we put |
+ EMPTY_BB's note lists into its predecessor instead of putting them |
+ into the successor. When REMOVE_FROM_CFG_P is true, also remove |
the empty block. */ |
void |
sel_remove_empty_bb (basic_block empty_bb, bool merge_up_p, |
@@ -4931,7 +5067,7 @@ sel_remove_empty_bb (basic_block empty_bb, bool merge_up_p, |
merge_bb = bb_next_bb (empty_bb); |
- /* Redirect incoming edges (except fallthrough one) of EMPTY_BB to its |
+ /* Redirect incoming edges (except the fallthrough one) of EMPTY_BB to its 
successor block. */ |
for (ei = ei_start (empty_bb->preds); |
(e = ei_safe_edge (ei)); ) |
@@ -5017,15 +5153,15 @@ remove_empty_bb (basic_block empty_bb, bool remove_from_cfg_p) |
sel_remove_bb (empty_bb, remove_from_cfg_p); |
} |
-/* An implementation of create_basic_block hook, which additionally updates |
+/* An implementation of create_basic_block hook, which additionally updates |
per-bb data structures. */ |
static basic_block |
sel_create_basic_block (void *headp, void *endp, basic_block after) |
{ |
basic_block new_bb; |
insn_t new_bb_note; |
- |
- gcc_assert (flag_sel_sched_pipelining_outer_loops |
+ |
+ gcc_assert (flag_sel_sched_pipelining_outer_loops |
|| last_added_blocks == NULL); |
new_bb_note = get_bb_note_from_pool (); |
@@ -5075,7 +5211,7 @@ change_loops_latches (basic_block from, basic_block to) |
} |
} |
-/* Splits BB on two basic blocks, adding it to the region and extending |
+/* Splits BB into two basic blocks, adding it to the region and extending 
per-bb data structures. Returns the newly created bb. */ |
static basic_block |
sel_split_block (basic_block bb, rtx after) |
@@ -5087,7 +5223,7 @@ sel_split_block (basic_block bb, rtx after) |
sel_add_bb (new_bb); |
/* This should be called after sel_add_bb, because this uses |
- CONTAINING_RGN for the new block, which is not yet initialized. |
+ CONTAINING_RGN for the new block, which is not yet initialized. |
FIXME: this function may be a no-op now. */ |
change_loops_latches (bb, new_bb); |
@@ -5127,7 +5263,7 @@ check_for_new_jump (basic_block bb, int prev_max_uid) |
return NULL; |
} |
-/* Look for a new jump either in FROM_BB block or in newly created JUMP_BB block. |
+/* Look for a new jump either in FROM_BB block or in newly created JUMP_BB block. |
New means having UID at least equal to PREV_MAX_UID. */ |
static rtx |
find_new_jump (basic_block from, basic_block jump_bb, int prev_max_uid) |
@@ -5137,7 +5273,7 @@ find_new_jump (basic_block from, basic_block jump_bb, int prev_max_uid) |
/* Return immediately if no new insns were emitted. */ |
if (get_max_uid () == prev_max_uid) |
return NULL; |
- |
+ |
 /* Now check both blocks for new jumps. There will only ever be one. */ 
if ((jump = check_for_new_jump (from, prev_max_uid))) |
return jump; |
@@ -5161,15 +5297,15 @@ sel_split_edge (edge e) |
prev_max_uid = get_max_uid (); |
new_bb = split_edge (e); |
- if (flag_sel_sched_pipelining_outer_loops |
+ if (flag_sel_sched_pipelining_outer_loops |
&& current_loop_nest) |
{ |
int i; |
basic_block bb; |
- /* Some of the basic blocks might not have been added to the loop. |
+ /* Some of the basic blocks might not have been added to the loop. |
Add them here, until this is fixed in force_fallthru. */ |
- for (i = 0; |
+ for (i = 0; |
VEC_iterate (basic_block, last_added_blocks, i, bb); i++) |
if (!bb->loop_father) |
{ |
@@ -5238,9 +5374,9 @@ sel_create_recovery_block (insn_t orig_insn) |
sched_create_recovery_edges (first_bb, recovery_block, second_bb); |
if (current_loops != NULL) |
add_bb_to_loop (recovery_block, first_bb->loop_father); |
- |
+ |
sel_add_bb (recovery_block); |
- |
+ |
jump = BB_END (recovery_block); |
gcc_assert (sel_bb_head (recovery_block) == jump); |
sel_init_new_insn (jump, INSN_INIT_TODO_LUID | INSN_INIT_TODO_SIMPLEJUMP); |
@@ -5252,8 +5388,6 @@ sel_create_recovery_block (insn_t orig_insn) |
void |
sel_merge_blocks (basic_block a, basic_block b) |
{ |
- gcc_assert (can_merge_blocks_p (a, b)); |
- |
sel_remove_empty_bb (b, true, false); |
merge_blocks (a, b); |
@@ -5269,9 +5403,9 @@ sel_redirect_edge_and_branch_force (edge e, basic_block to) |
basic_block jump_bb, src; |
int prev_max_uid; |
rtx jump; |
- |
+ |
gcc_assert (!sel_bb_empty_p (e->src)); |
- |
+ |
src = e->src; |
prev_max_uid = get_max_uid (); |
jump_bb = redirect_edge_and_branch_force (e, to); |
@@ -5284,20 +5418,23 @@ sel_redirect_edge_and_branch_force (edge e, basic_block to) |
if (current_loop_nest |
&& pipelining_p) |
gcc_assert (loop_latch_edge (current_loop_nest)); |
- |
+ |
jump = find_new_jump (src, jump_bb, prev_max_uid); |
if (jump) |
sel_init_new_insn (jump, INSN_INIT_TODO_LUID | INSN_INIT_TODO_SIMPLEJUMP); |
} |
-/* A wrapper for redirect_edge_and_branch. */ |
-void |
+/* A wrapper for redirect_edge_and_branch. Return TRUE if the blocks 
+ connected by the redirected edge ended up in reverse topological order. */ 
+bool |
sel_redirect_edge_and_branch (edge e, basic_block to) |
{ |
bool latch_edge_p; |
basic_block src; |
int prev_max_uid; |
rtx jump; |
+ edge redirected; |
+ bool recompute_toporder_p = false; |
latch_edge_p = (pipelining_p |
&& current_loop_nest |
@@ -5305,9 +5442,10 @@ sel_redirect_edge_and_branch (edge e, basic_block to) |
src = e->src; |
prev_max_uid = get_max_uid (); |
- |
- redirect_edge_and_branch (e, to); |
- gcc_assert (last_added_blocks == NULL); |
+ |
+ redirected = redirect_edge_and_branch (e, to); |
+ |
+ gcc_assert (redirected && last_added_blocks == NULL); |
/* When we've redirected a latch edge, update the header. */ |
if (latch_edge_p) |
@@ -5316,9 +5454,18 @@ sel_redirect_edge_and_branch (edge e, basic_block to) |
gcc_assert (loop_latch_edge (current_loop_nest)); |
} |
+ /* In rare situations, the topological relation between the blocks connected |
+ by the redirected edge can change (see PR42245 for an example). Update |
+ block_to_bb/bb_to_block. */ |
+ if (CONTAINING_RGN (e->src->index) == CONTAINING_RGN (to->index) |
+ && BLOCK_TO_BB (e->src->index) > BLOCK_TO_BB (to->index)) |
+ recompute_toporder_p = true; |
+ |
jump = find_new_jump (src, NULL, prev_max_uid); |
if (jump) |
sel_init_new_insn (jump, INSN_INIT_TODO_LUID | INSN_INIT_TODO_SIMPLEJUMP); |
+ |
+ return recompute_toporder_p; |
} |
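
A minimal sketch of the staleness test added above: after redirecting an edge from SRC to TO, the topological numbering needs recomputing exactly when both blocks are in the same region and SRC currently sits after TO. The region and position indices below are invented.

#include <stdbool.h>
#include <stdio.h>

static bool
needs_toporder_recompute (int src_rgn, int to_rgn, int src_pos, int to_pos)
{
  return src_rgn == to_rgn && src_pos > to_pos;
}

int
main (void)
{
  /* A backward-pointing edge inside one region forces a recompute.  */
  printf ("%d %d\n",
          needs_toporder_recompute (0, 0, 5, 2),    /* 1 */
          needs_toporder_recompute (0, 1, 5, 2));   /* 0 */
  return 0;
}
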
/* This variable holds the cfg hooks used by the selective scheduler. */ |
@@ -5367,6 +5514,8 @@ create_insn_rtx_from_pattern (rtx pattern, rtx label) |
if (label == NULL_RTX) |
insn_rtx = emit_insn (pattern); |
+ else if (DEBUG_INSN_P (label)) |
+ insn_rtx = emit_debug_insn (pattern); |
else |
{ |
insn_rtx = emit_jump_insn (pattern); |
@@ -5402,6 +5551,10 @@ create_copy_of_insn_rtx (rtx insn_rtx) |
{ |
rtx res; |
+ if (DEBUG_INSN_P (insn_rtx)) |
+ return create_insn_rtx_from_pattern (copy_rtx (PATTERN (insn_rtx)), |
+ insn_rtx); |
+ |
gcc_assert (NONJUMP_INSN_P (insn_rtx)); |
res = create_insn_rtx_from_pattern (copy_rtx (PATTERN (insn_rtx)), |
@@ -5422,7 +5575,7 @@ change_vinsn_in_expr (expr_t expr, vinsn_t new_vinsn) |
/* Helpers for global init. */ |
/* This structure is used to be able to call existing bundling mechanism |
and calculate insn priorities. */ |
-static struct haifa_sched_info sched_sel_haifa_sched_info = |
+static struct haifa_sched_info sched_sel_haifa_sched_info = |
{ |
NULL, /* init_ready_list */ |
NULL, /* can_schedule_ready_p */ |
@@ -5431,6 +5584,7 @@ static struct haifa_sched_info sched_sel_haifa_sched_info = |
NULL, /* rgn_rank */ |
sel_print_insn, /* rgn_print_insn */ |
contributes_to_priority, |
+ NULL, /* insn_finishes_block_p */ |
NULL, NULL, |
NULL, NULL, |
@@ -5443,7 +5597,7 @@ static struct haifa_sched_info sched_sel_haifa_sched_info = |
}; |
/* Setup special insns used in the scheduler. */ |
-void |
+void |
setup_nop_and_exit_insns (void) |
{ |
gcc_assert (nop_pattern == NULL_RTX |
@@ -5487,9 +5641,9 @@ free_nop_vinsn (void) |
void |
sel_set_sched_flags (void) |
{ |
- /* ??? This means that set_sched_flags were called, and we decided to |
+ /* ??? This means that set_sched_flags was called, and we decided to 
support speculation. However, set_sched_flags also modifies flags |
- on current_sched_info, doing this only at global init. And we |
+ on current_sched_info, doing this only at global init. And we |
sometimes change c_s_i later. So put the correct flags again. */ |
if (spec_info && targetm.sched.set_sched_flags) |
targetm.sched.set_sched_flags (spec_info); |
@@ -5514,9 +5668,9 @@ sel_setup_sched_infos (void) |
common_sched_info = &sel_common_sched_info; |
current_sched_info = &sched_sel_haifa_sched_info; |
- current_sched_info->sched_max_insns_priority = |
+ current_sched_info->sched_max_insns_priority = |
get_rgn_sched_max_insns_priority (); |
- |
+ |
sel_set_sched_flags (); |
} |
@@ -5550,14 +5704,14 @@ sel_create_new_region (void) |
/* FIXME: This will work only when EBBs are not created. */ |
if (new_rgn_number != 0) |
- RGN_BLOCKS (new_rgn_number) = RGN_BLOCKS (new_rgn_number - 1) + |
+ RGN_BLOCKS (new_rgn_number) = RGN_BLOCKS (new_rgn_number - 1) + |
RGN_NR_BLOCKS (new_rgn_number - 1); |
else |
RGN_BLOCKS (new_rgn_number) = 0; |
/* Set the blocks of the next region so the other functions may |
calculate the number of blocks in the region. */ |
- RGN_BLOCKS (new_rgn_number + 1) = RGN_BLOCKS (new_rgn_number) + |
+ RGN_BLOCKS (new_rgn_number + 1) = RGN_BLOCKS (new_rgn_number) + |
RGN_NR_BLOCKS (new_rgn_number); |
nr_regions++; |
@@ -5573,8 +5727,8 @@ bb_top_order_comparator (const void *x, const void *y) |
basic_block bb1 = *(const basic_block *) x; |
basic_block bb2 = *(const basic_block *) y; |
- gcc_assert (bb1 == bb2 |
- || rev_top_order_index[bb1->index] |
+ gcc_assert (bb1 == bb2 |
+ || rev_top_order_index[bb1->index] |
!= rev_top_order_index[bb2->index]); |
/* It's a reverse topological order in REV_TOP_ORDER_INDEX, so |
@@ -5585,7 +5739,7 @@ bb_top_order_comparator (const void *x, const void *y) |
return 1; |
} |
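
A standalone sketch of sorting blocks with qsort by a reverse topological index, mirroring bb_top_order_comparator above: a larger reverse index means an earlier position, and the indices are assumed distinct (which the gcc_assert above enforces). The data here is invented.

#include <stdio.h>
#include <stdlib.h>

static const int rev_index[] = { 2, 0, 3, 1 };   /* Per-block reverse order.  */

static int
top_order_cmp (const void *x, const void *y)
{
  int bx = *(const int *) x, by = *(const int *) y;

  /* A bigger reverse-order index comes first.  */
  return rev_index[bx] > rev_index[by] ? -1 : 1;
}

int
main (void)
{
  int blocks[] = { 0, 1, 2, 3 }, i;

  qsort (blocks, 4, sizeof (int), top_order_cmp);
  for (i = 0; i < 4; i++)
    printf ("%d ", blocks[i]);   /* Prints: 2 0 3 1  */
  return 0;
}
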
-/* Create a region for LOOP and return its number. If we don't want |
+/* Create a region for LOOP and return its number. If we don't want |
to pipeline LOOP, return -1. */ |
static int |
make_region_from_loop (struct loop *loop) |
@@ -5599,10 +5753,10 @@ make_region_from_loop (struct loop *loop) |
basic_block *loop_blocks; |
basic_block preheader_block; |
- if (loop->num_nodes |
+ if (loop->num_nodes |
> (unsigned) PARAM_VALUE (PARAM_MAX_PIPELINE_REGION_BLOCKS)) |
return -1; |
- |
+ |
/* Don't pipeline loops whose latch belongs to some of its inner loops. */ |
for (inner = loop->inner; inner; inner = inner->inner) |
if (flow_bb_inside_loop_p (inner, loop->latch)) |
@@ -5634,14 +5788,14 @@ make_region_from_loop (struct loop *loop) |
{ |
/* Add only those blocks that haven't been scheduled in the inner loop. |
The exception is the basic blocks with bookkeeping code - they should |
- be added to the region (and they actually don't belong to the loop |
+ be added to the region (and they actually don't belong to the loop |
body, but to the region containing that loop body). */ |
gcc_assert (new_rgn_number >= 0); |
if (! TEST_BIT (bbs_in_loop_rgns, loop_blocks[i]->index)) |
{ |
- sel_add_block_to_region (loop_blocks[i], &bb_ord_index, |
+ sel_add_block_to_region (loop_blocks[i], &bb_ord_index, |
new_rgn_number); |
SET_BIT (bbs_in_loop_rgns, loop_blocks[i]->index); |
} |
@@ -5679,11 +5833,11 @@ make_region_from_loop_preheader (VEC(basic_block, heap) **loop_blocks) |
/* Create region(s) from loop nest LOOP, such that inner loops will be |
- pipelined before outer loops. Returns true when a region for LOOP |
+ pipelined before outer loops. Returns true when a region for LOOP |
is created. */ |
static bool |
make_regions_from_loop_nest (struct loop *loop) |
-{ |
+{ |
struct loop *cur_loop; |
int rgn_number; |
@@ -5739,24 +5893,24 @@ considered_for_pipelining_p (struct loop *loop) |
if (loop_depth (loop) == 0) |
return false; |
- /* Now, the loop could be too large or irreducible. Check whether its |
- region is in LOOP_NESTS. |
- We determine the region number of LOOP as the region number of its |
- latch. We can't use header here, because this header could be |
+ /* Now, the loop could be too large or irreducible. Check whether its |
+ region is in LOOP_NESTS. |
+ We determine the region number of LOOP as the region number of its |
+ latch. We can't use header here, because this header could be |
just removed preheader and it will give us the wrong region number. |
Latch can't be used because it could be in the inner loop too. */ |
- if (LOOP_MARKED_FOR_PIPELINING_P (loop) && pipelining_p) |
+ if (LOOP_MARKED_FOR_PIPELINING_P (loop)) |
{ |
int rgn = CONTAINING_RGN (loop->latch->index); |
gcc_assert ((unsigned) rgn < VEC_length (loop_p, loop_nests)); |
return true; |
} |
- |
+ |
return false; |
} |
-/* Makes regions from the rest of the blocks, after loops are chosen |
+/* Makes regions from the rest of the blocks, after loops are chosen |
for pipelining. */ |
static void |
make_regions_from_the_rest (void) |
@@ -5769,13 +5923,11 @@ make_regions_from_the_rest (void) |
edge e; |
edge_iterator ei; |
int *degree; |
- int new_regions; |
/* Index in rgn_bb_table where to start allocating new regions. */ |
cur_rgn_blocks = nr_regions ? RGN_BLOCKS (nr_regions) : 0; |
- new_regions = nr_regions; |
- /* Make regions from all the rest basic blocks - those that don't belong to |
+ /* Make regions from all the remaining basic blocks - those that don't belong to 
any loop or belong to irreducible loops. Prepare the data structures |
for extend_rgns. */ |
@@ -5798,10 +5950,10 @@ make_regions_from_the_rest (void) |
loop_hdr[bb->index] = bb->loop_father->num; |
} |
- /* For each basic block degree is calculated as the number of incoming |
+ /* For each basic block, the degree is calculated as the number of incoming 
 edges coming from basic blocks that are not yet scheduled. 
 The scheduled basic blocks have a degree of zero. */ 
- FOR_EACH_BB (bb) |
+ FOR_EACH_BB (bb) |
{ |
degree[bb->index] = 0; |
@@ -5853,9 +6005,9 @@ void sel_finish_pipelining (void) |
rev_top_order_index = NULL; |
} |
-/* This function replaces the find_rgns when |
+/* This function replaces find_rgns when 
FLAG_SEL_SCHED_PIPELINING_OUTER_LOOPS is set. */ |
-void |
+void |
sel_find_rgns (void) |
{ |
sel_init_pipelining (); |
@@ -5873,7 +6025,7 @@ sel_find_rgns (void) |
} |
 /* Make regions from all the remaining basic blocks and schedule them. 
- These blocks include blocks that don't belong to any loop or belong |
+ These blocks include blocks that don't belong to any loop or belong |
to irreducible loops. */ |
make_regions_from_the_rest (); |
@@ -5882,28 +6034,31 @@ sel_find_rgns (void) |
bbs_in_loop_rgns = NULL; |
} |
-/* Adds the preheader blocks from previous loop to current region taking |
- it from LOOP_PREHEADER_BLOCKS (current_loop_nest). |
+/* Adds the preheader blocks from the previous loop to the current region, 
+ taking them from LOOP_PREHEADER_BLOCKS (current_loop_nest). 
This function is only used with -fsel-sched-pipelining-outer-loops. */ |
void |
sel_add_loop_preheaders (void) |
{ |
int i; |
basic_block bb; |
- VEC(basic_block, heap) *preheader_blocks |
+ VEC(basic_block, heap) *preheader_blocks |
= LOOP_PREHEADER_BLOCKS (current_loop_nest); |
for (i = 0; |
VEC_iterate (basic_block, preheader_blocks, i, bb); |
i++) |
+ { |
+ VEC_safe_push (basic_block, heap, last_added_blocks, bb); |
sel_add_bb (bb); |
+ } |
VEC_free (basic_block, heap, preheader_blocks); |
} |
-/* While pipelining outer loops, returns TRUE if BB is a loop preheader. |
- Please note that the function should also work when pipelining_p is |
- false, because it is used when deciding whether we should or should |
+/* While pipelining outer loops, returns TRUE if BB is a loop preheader. |
+ Please note that the function should also work when pipelining_p is |
+ false, because it is used when deciding whether we should or should |
not reschedule pipelined code. */ |
bool |
sel_is_loop_preheader_p (basic_block bb) |
@@ -5923,7 +6078,7 @@ sel_is_loop_preheader_p (basic_block bb) |
Check that the above code is equivalent to what we did before. */ |
if (in_current_region_p (current_loop_nest->header)) |
- gcc_assert (!(BLOCK_TO_BB (bb->index) |
+ gcc_assert (!(BLOCK_TO_BB (bb->index) |
< BLOCK_TO_BB (current_loop_nest->header->index))); |
/* Support the situation when the latch block of outer loop |
@@ -5944,13 +6099,13 @@ jump_leads_only_to_bb_p (insn_t jump, basic_block dest_bb) |
{ |
basic_block jump_bb = BLOCK_FOR_INSN (jump); |
- /* It is not jump, jump with side-effects or jump can lead to several |
+ /* False when JUMP is not a jump insn, has side effects, or can lead to several 
basic blocks. */ |
if (!onlyjump_p (jump) |
|| !any_uncondjump_p (jump)) |
return false; |
- /* Several outgoing edges, abnormal edge or destination of jump is |
+ /* False when there are several outgoing edges, an abnormal edge, or the jump's destination is 
not DEST_BB. */ |
if (EDGE_COUNT (jump_bb->succs) != 1 |
|| EDGE_SUCC (jump_bb, 0)->flags & EDGE_ABNORMAL |
@@ -5962,7 +6117,7 @@ jump_leads_only_to_bb_p (insn_t jump, basic_block dest_bb) |
} |
/* Removes the loop preheader from the current region and saves it in |
- PREHEADER_BLOCKS of the father loop, so they will be added later to |
+ PREHEADER_BLOCKS of the father loop, so that they can be added later to the 
region that represents an outer loop. */ |
static void |
sel_remove_loop_preheader (void) |
@@ -5971,7 +6126,7 @@ sel_remove_loop_preheader (void) |
int cur_rgn = CONTAINING_RGN (BB_TO_BLOCK (0)); |
basic_block bb; |
bool all_empty_p = true; |
- VEC(basic_block, heap) *preheader_blocks |
+ VEC(basic_block, heap) *preheader_blocks |
= LOOP_PREHEADER_BLOCKS (loop_outer (current_loop_nest)); |
gcc_assert (current_loop_nest); |
@@ -5982,7 +6137,7 @@ sel_remove_loop_preheader (void) |
{ |
bb = BASIC_BLOCK (BB_TO_BLOCK (i)); |
- /* If the basic block belongs to region, but doesn't belong to |
+ /* If the basic block belongs to the region, but doesn't belong to the 
corresponding loop, then it should be a preheader. */ |
if (sel_is_loop_preheader_p (bb)) |
{ |
@@ -5991,13 +6146,13 @@ sel_remove_loop_preheader (void) |
all_empty_p = false; |
} |
} |
- |
+ |
/* Remove these blocks only after iterating over the whole region. */ |
for (i = VEC_length (basic_block, preheader_blocks) - 1; |
i >= old_len; |
i--) |
{ |
- bb = VEC_index (basic_block, preheader_blocks, i); |
+ bb = VEC_index (basic_block, preheader_blocks, i); |
sel_remove_bb (bb, false); |
} |
@@ -6027,9 +6182,9 @@ sel_remove_loop_preheader (void) |
gcc_assert (BB_NOTE_LIST (bb) == NULL); |
delete_and_free_basic_block (bb); |
- /* Check if after deleting preheader there is a nonconditional |
- jump in PREV_BB that leads to the next basic block NEXT_BB. |
- If it is so - delete this jump and clear data sets of its |
+ /* Check whether, after deleting the preheader, there is an unconditional 
+ jump in PREV_BB that leads to the next basic block NEXT_BB. 
+ If so, delete this jump and clear the data sets of its 
basic block if it becomes empty. */ |
if (next_bb->prev_bb == prev_bb |
&& prev_bb != ENTRY_BLOCK_PTR |