+2008-08-06 Kaveh R. Ghazi <ghazi@caip.rutgers.edu>
+
+ * builtins.c (expand_builtin_profile_func): Avoid C++ keywords.
+ * calls.c (avoid_likely_spilled_reg): Likewise.
+ * cfgexpand.c (gimple_assign_rhs_to_tree): Likewise.
+ * cgraph.c (cgraph_clone_edge, cgraph_clone_node): Likewise.
+ * config/i386/i386.c (ix86_expand_special_args_builtin,
+ ix86_secondary_reload): Likewise.
+ * except.c (struct eh_region, gen_eh_region_catch,
+ remove_unreachable_regions, duplicate_eh_regions,
+ assign_filter_values, build_post_landing_pads,
+ sjlj_find_directly_reachable_regions, remove_eh_handler,
+ reachable_next_level, foreach_reachable_handler,
+ can_throw_internal_1, can_throw_external_1,
+ collect_one_action_chain): Likewise.
+ * expr.c (expand_expr_real_1, vector_mode_valid_p): Likewise.
+ * fold-const.c (twoval_comparison_p, eval_subst): Likewise.
+ * function.c (update_temp_slot_address, instantiate_new_reg,
+ instantiate_virtual_regs_in_rtx,
+ instantiate_virtual_regs_in_insn): Likewise.
+ * gimple.c (extract_ops_from_tree, gimple_seq_copy): Likewise.
+ * gimplify.c (gimplify_call_expr, gimplify_init_constructor,
+ gimplify_cleanup_point_expr): Likewise.
+ * ipa-cp.c (ipcp_lattice_changed): Likewise.
+ * passes.c (next_pass_1): Likewise.
+ * print-tree.c (print_node_brief, print_node): Likewise.
+ * profile.c (branch_prob): Likewise.
+ * tree-dump.c (dump_register): Likewise.
+ * tree-eh.c (replace_goto_queue_cond_clause, lower_catch):
+ Likewise.
+ * tree-inline.c (remap_ssa_name, remap_type_1, remap_blocks,
+ copy_statement_list, remap_gimple_op_r, copy_tree_body_r,
+ copy_edges_for_bb, copy_cfg_body, copy_tree_r,
+ copy_arguments_for_versioning, copy_static_chain): Likewise.
+ * tree-into-ssa.c (names_replaced_by, add_to_repl_tbl,
+ add_new_name_mapping, register_new_name_mapping): Likewise.
+ * tree-mudflap.c (mf_xform_derefs): Likewise.
+ * tree-predcom.c (struct chain, dump_chain, replace_ref_with,
+ get_init_expr, combine_chains): Likewise.
+ * tree-pretty-print.c (dump_generic_node): Likewise.
+ * tree-ssa-structalias.c (create_variable_info_for): Likewise.
+ * tree-vrp.c (simplify_cond_using_ranges): Likewise.
+ * tree.c (substitute_in_expr, iterative_hash_expr): Likewise.
+ * value-prof.c (gimple_duplicate_stmt_histograms): Likewise.
+
2008-08-06 H.J. Lu <hongjiu.lu@intel.com>
PR middle-end/37010
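
Every hunk below applies the same mechanical transformation: identifiers that are ordinary names in C but reserved words in C++ (new, class, try, catch, throw, this) are renamed so that the sources can also be compiled by a C++ compiler. A minimal sketch of the failure mode, using the rtx case from builtins.c/calls.c (the surrounding context is hypothetical):

  /* Accepted by a C compiler, but a parse error in C++, where "new" is a keyword.  */
  rtx new = gen_reg_rtx (GET_MODE (x));
  emit_move_insn (new, x);

  /* The convention used throughout the patch: respell the name instead.  */
  rtx new_rtx = gen_reg_rtx (GET_MODE (x));
  emit_move_insn (new_rtx, x);
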
static rtx
expand_builtin_profile_func (bool exitp)
{
- rtx this, which;
+ rtx this_rtx, which;
- this = DECL_RTL (current_function_decl);
- gcc_assert (MEM_P (this));
- this = XEXP (this, 0);
+ this_rtx = DECL_RTL (current_function_decl);
+ gcc_assert (MEM_P (this_rtx));
+ this_rtx = XEXP (this_rtx, 0);
if (exitp)
which = profile_function_exit_libfunc;
else
which = profile_function_entry_libfunc;
- emit_library_call (which, LCT_NORMAL, VOIDmode, 2, this, Pmode,
+ emit_library_call (which, LCT_NORMAL, VOIDmode, 2, this_rtx, Pmode,
expand_builtin_return_addr (BUILT_IN_RETURN_ADDRESS,
0),
Pmode);
static rtx
avoid_likely_spilled_reg (rtx x)
{
- rtx new;
+ rtx new_rtx;
if (REG_P (x)
&& HARD_REGISTER_P (x)
and the whole point of this function is to avoid
using the hard register directly in such a situation. */
generating_concat_p = 0;
- new = gen_reg_rtx (GET_MODE (x));
+ new_rtx = gen_reg_rtx (GET_MODE (x));
generating_concat_p = 1;
- emit_move_insn (new, x);
- return new;
+ emit_move_insn (new_rtx, x);
+ return new_rtx;
}
return x;
}
gimple_assign_rhs_to_tree (gimple stmt)
{
tree t;
- enum gimple_rhs_class class;
+ enum gimple_rhs_class grhs_class;
- class = get_gimple_rhs_class (gimple_expr_code (stmt));
+ grhs_class = get_gimple_rhs_class (gimple_expr_code (stmt));
- if (class == GIMPLE_BINARY_RHS)
+ if (grhs_class == GIMPLE_BINARY_RHS)
t = build2 (gimple_assign_rhs_code (stmt),
TREE_TYPE (gimple_assign_lhs (stmt)),
gimple_assign_rhs1 (stmt),
gimple_assign_rhs2 (stmt));
- else if (class == GIMPLE_UNARY_RHS)
+ else if (grhs_class == GIMPLE_UNARY_RHS)
t = build1 (gimple_assign_rhs_code (stmt),
TREE_TYPE (gimple_assign_lhs (stmt)),
gimple_assign_rhs1 (stmt));
- else if (class == GIMPLE_SINGLE_RHS)
+ else if (grhs_class == GIMPLE_SINGLE_RHS)
t = gimple_assign_rhs1 (stmt);
else
gcc_unreachable ();
gimple call_stmt, gcov_type count_scale, int freq_scale,
int loop_nest, bool update_original)
{
- struct cgraph_edge *new;
+ struct cgraph_edge *new_edge;
gcov_type count = e->count * count_scale / REG_BR_PROB_BASE;
gcov_type freq = e->frequency * (gcov_type) freq_scale / CGRAPH_FREQ_BASE;
if (freq > CGRAPH_FREQ_MAX)
freq = CGRAPH_FREQ_MAX;
- new = cgraph_create_edge (n, e->callee, call_stmt, count, freq,
+ new_edge = cgraph_create_edge (n, e->callee, call_stmt, count, freq,
e->loop_nest + loop_nest);
- new->inline_failed = e->inline_failed;
- new->indirect_call = e->indirect_call;
+ new_edge->inline_failed = e->inline_failed;
+ new_edge->indirect_call = e->indirect_call;
if (update_original)
{
- e->count -= new->count;
+ e->count -= new_edge->count;
if (e->count < 0)
e->count = 0;
}
- cgraph_call_edge_duplication_hooks (e, new);
- return new;
+ cgraph_call_edge_duplication_hooks (e, new_edge);
+ return new_edge;
}
/* Create node representing clone of N executed COUNT times. Decrease
cgraph_clone_node (struct cgraph_node *n, gcov_type count, int freq,
int loop_nest, bool update_original)
{
- struct cgraph_node *new = cgraph_create_node ();
+ struct cgraph_node *new_node = cgraph_create_node ();
struct cgraph_edge *e;
gcov_type count_scale;
- new->decl = n->decl;
- new->origin = n->origin;
- if (new->origin)
+ new_node->decl = n->decl;
+ new_node->origin = n->origin;
+ if (new_node->origin)
{
- new->next_nested = new->origin->nested;
- new->origin->nested = new;
+ new_node->next_nested = new_node->origin->nested;
+ new_node->origin->nested = new_node;
}
- new->analyzed = n->analyzed;
- new->local = n->local;
- new->global = n->global;
- new->rtl = n->rtl;
- new->master_clone = n->master_clone;
- new->count = count;
+ new_node->analyzed = n->analyzed;
+ new_node->local = n->local;
+ new_node->global = n->global;
+ new_node->rtl = n->rtl;
+ new_node->master_clone = n->master_clone;
+ new_node->count = count;
if (n->count)
- count_scale = new->count * REG_BR_PROB_BASE / n->count;
+ count_scale = new_node->count * REG_BR_PROB_BASE / n->count;
else
count_scale = 0;
if (update_original)
}
for (e = n->callees;e; e=e->next_callee)
- cgraph_clone_edge (e, new, e->call_stmt, count_scale, freq, loop_nest,
+ cgraph_clone_edge (e, new_node, e->call_stmt, count_scale, freq, loop_nest,
update_original);
- new->next_clone = n->next_clone;
- new->prev_clone = n;
- n->next_clone = new;
- if (new->next_clone)
- new->next_clone->prev_clone = new;
+ new_node->next_clone = n->next_clone;
+ new_node->prev_clone = n;
+ n->next_clone = new_node;
+ if (new_node->next_clone)
+ new_node->next_clone->prev_clone = new_node;
- cgraph_call_node_duplication_hooks (n, new);
- return new;
+ cgraph_call_node_duplication_hooks (n, new_node);
+ return new_node;
}
/* Return true if N is a master_clone (see cgraph_master_clone). */
bool last_arg_constant = false;
const struct insn_data *insn_p = &insn_data[icode];
enum machine_mode tmode = insn_p->operand[0].mode;
- enum { load, store } class;
+ enum { load, store } klass;
switch ((enum ix86_special_builtin_type) d->flag)
{
case V4SF_FTYPE_PCFLOAT:
case V2DF_FTYPE_PCDOUBLE:
nargs = 1;
- class = load;
+ klass = load;
memory = 0;
break;
case VOID_FTYPE_PV2SF_V4SF:
case VOID_FTYPE_PDI_DI:
case VOID_FTYPE_PINT_INT:
nargs = 1;
- class = store;
+ klass = store;
/* Reserve memory operand for target. */
memory = ARRAY_SIZE (args);
break;
case V4SF_FTYPE_V4SF_PCV2SF:
case V2DF_FTYPE_V2DF_PCDOUBLE:
nargs = 2;
- class = load;
+ klass = load;
memory = 1;
break;
default:
gcc_assert (nargs <= ARRAY_SIZE (args));
- if (class == store)
+ if (klass == store)
{
arg = CALL_EXPR_ARG (exp, 0);
op = expand_normal (arg);
if (! pat)
return 0;
emit_insn (pat);
- return class == store ? 0 : target;
+ return klass == store ? 0 : target;
}
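
The class renames are the same fix, but the replacement spelling varies with what the variable holds; the conventions visible in this patch are, roughly (declarations hypothetical, type names taken from the hunks):

  enum { load, store } klass;        /* local builtin-expander state */
  enum reg_class rclass;             /* register class (ix86_secondary_reload) */
  enum tree_code_class tclass;       /* tree code class (fold-const.c, print-tree.c) */
  enum mode_class mclass;            /* machine mode class (vector_mode_valid_p) */
  enum gimple_rhs_class grhs_class;  /* gimple RHS class (cfgexpand.c, gimple.c) */
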
/* Return the integer constant in ARG. Constrain it to be in the range
}
static enum reg_class
-ix86_secondary_reload (bool in_p, rtx x, enum reg_class class,
+ix86_secondary_reload (bool in_p, rtx x, enum reg_class rclass,
enum machine_mode mode,
secondary_reload_info *sri ATTRIBUTE_UNUSED)
{
/* QImode spills from non-QI registers require
intermediate register on 32bit targets. */
if (!in_p && mode == QImode && !TARGET_64BIT
- && (class == GENERAL_REGS
- || class == LEGACY_REGS
- || class == INDEX_REGS))
+ && (rclass == GENERAL_REGS
+ || rclass == LEGACY_REGS
+ || rclass == INDEX_REGS))
{
int regno;
/* A list of catch blocks, a surrounding try block,
and the label for continuing after a catch. */
struct eh_region_u_try {
- struct eh_region *catch;
+ struct eh_region *eh_catch;
struct eh_region *last_catch;
- } GTY ((tag ("ERT_TRY"))) try;
+ } GTY ((tag ("ERT_TRY"))) eh_try;
/* The list through the catch handlers, the list of type objects
matched, and the list of associated filters. */
struct eh_region *prev_catch;
tree type_list;
tree filter_list;
- } GTY ((tag ("ERT_CATCH"))) catch;
+ } GTY ((tag ("ERT_CATCH"))) eh_catch;
/* A tree_list of allowed types. */
struct eh_region_u_allowed {
for a throw. */
struct eh_region_u_throw {
tree type;
- } GTY ((tag ("ERT_THROW"))) throw;
+ } GTY ((tag ("ERT_THROW"))) eh_throw;
/* Retain the cleanup expression even after expansion so that
we can match up fixup regions. */
}
c = gen_eh_region (ERT_CATCH, t->outer);
- c->u.catch.type_list = type_list;
- l = t->u.try.last_catch;
- c->u.catch.prev_catch = l;
+ c->u.eh_catch.type_list = type_list;
+ l = t->u.eh_try.last_catch;
+ c->u.eh_catch.prev_catch = l;
if (l)
- l->u.catch.next_catch = c;
+ l->u.eh_catch.next_catch = c;
else
- t->u.try.catch = c;
- t->u.try.last_catch = c;
+ t->u.eh_try.eh_catch = c;
+ t->u.eh_try.last_catch = c;
return c;
}
/* A TRY region is reachable if any of its CATCH regions
are reachable. */
struct eh_region *c;
- for (c = r->u.try.catch; c ; c = c->u.catch.next_catch)
+ for (c = r->u.eh_try.eh_catch; c ; c = c->u.eh_catch.next_catch)
if (reachable[c->region_number])
{
kill_it = false;
switch (cur->type)
{
case ERT_TRY:
- if (cur->u.try.catch)
- REMAP (cur->u.try.catch);
- if (cur->u.try.last_catch)
- REMAP (cur->u.try.last_catch);
+ if (cur->u.eh_try.eh_catch)
+ REMAP (cur->u.eh_try.eh_catch);
+ if (cur->u.eh_try.last_catch)
+ REMAP (cur->u.eh_try.last_catch);
break;
case ERT_CATCH:
- if (cur->u.catch.next_catch)
- REMAP (cur->u.catch.next_catch);
- if (cur->u.catch.prev_catch)
- REMAP (cur->u.catch.prev_catch);
+ if (cur->u.eh_catch.next_catch)
+ REMAP (cur->u.eh_catch.next_catch);
+ if (cur->u.eh_catch.prev_catch)
+ REMAP (cur->u.eh_catch.prev_catch);
break;
case ERT_CLEANUP:
case ERT_CATCH:
/* Whatever type_list is (NULL or true list), we build a list
of filters for the region. */
- r->u.catch.filter_list = NULL_TREE;
+ r->u.eh_catch.filter_list = NULL_TREE;
- if (r->u.catch.type_list != NULL)
+ if (r->u.eh_catch.type_list != NULL)
{
/* Get a filter value for each of the types caught and store
them in the region's dedicated list. */
- tree tp_node = r->u.catch.type_list;
+ tree tp_node = r->u.eh_catch.type_list;
for (;tp_node; tp_node = TREE_CHAIN (tp_node))
{
int flt = add_ttypes_entry (ttypes, TREE_VALUE (tp_node));
tree flt_node = build_int_cst (NULL_TREE, flt);
- r->u.catch.filter_list
- = tree_cons (NULL_TREE, flt_node, r->u.catch.filter_list);
+ r->u.eh_catch.filter_list
+ = tree_cons (NULL_TREE, flt_node, r->u.eh_catch.filter_list);
}
}
else
int flt = add_ttypes_entry (ttypes, NULL);
tree flt_node = build_int_cst (NULL_TREE, flt);
- r->u.catch.filter_list
- = tree_cons (NULL_TREE, flt_node, r->u.catch.filter_list);
+ r->u.eh_catch.filter_list
+ = tree_cons (NULL_TREE, flt_node, r->u.eh_catch.filter_list);
}
break;
Rapid prototyping sez a sequence of ifs. */
{
struct eh_region *c;
- for (c = region->u.try.catch; c ; c = c->u.catch.next_catch)
+ for (c = region->u.eh_try.eh_catch; c ; c = c->u.eh_catch.next_catch)
{
- if (c->u.catch.type_list == NULL)
+ if (c->u.eh_catch.type_list == NULL)
emit_jump (c->label);
else
{
/* Need for one cmp/jump per type caught. Each type
list entry has a matching entry in the filter list
(see assign_filter_values). */
- tree tp_node = c->u.catch.type_list;
- tree flt_node = c->u.catch.filter_list;
+ tree tp_node = c->u.eh_catch.type_list;
+ tree flt_node = c->u.eh_catch.filter_list;
for (; tp_node; )
{
seq = get_insns ();
end_sequence ();
- emit_to_new_bb_before (seq, region->u.try.catch->label);
+ emit_to_new_bb_before (seq, region->u.eh_try.eh_catch->label);
break;
type_thrown = NULL_TREE;
if (region->type == ERT_THROW)
{
- type_thrown = region->u.throw.type;
+ type_thrown = region->u.eh_throw.type;
region = region->outer;
}
if (region->type == ERT_CATCH)
{
- struct eh_region *try, *next, *prev;
+ struct eh_region *eh_try, *next, *prev;
- for (try = region->next_peer;
- try->type == ERT_CATCH;
- try = try->next_peer)
+ for (eh_try = region->next_peer;
+ eh_try->type == ERT_CATCH;
+ eh_try = eh_try->next_peer)
continue;
- gcc_assert (try->type == ERT_TRY);
+ gcc_assert (eh_try->type == ERT_TRY);
- next = region->u.catch.next_catch;
- prev = region->u.catch.prev_catch;
+ next = region->u.eh_catch.next_catch;
+ prev = region->u.eh_catch.prev_catch;
if (next)
- next->u.catch.prev_catch = prev;
+ next->u.eh_catch.prev_catch = prev;
else
- try->u.try.last_catch = prev;
+ eh_try->u.eh_try.last_catch = prev;
if (prev)
- prev->u.catch.next_catch = next;
+ prev->u.eh_catch.next_catch = next;
else
{
- try->u.try.catch = next;
+ eh_try->u.eh_try.eh_catch = next;
if (! next)
- remove_eh_handler (try);
+ remove_eh_handler (eh_try);
}
}
}
struct eh_region *c;
enum reachable_code ret = RNL_NOT_CAUGHT;
- for (c = region->u.try.catch; c ; c = c->u.catch.next_catch)
+ for (c = region->u.eh_try.eh_catch; c ; c = c->u.eh_catch.next_catch)
{
/* A catch-all handler ends the search. */
- if (c->u.catch.type_list == NULL)
+ if (c->u.eh_catch.type_list == NULL)
{
add_reachable_handler (info, region, c);
return RNL_CAUGHT;
if (type_thrown)
{
/* If we have at least one type match, end the search. */
- tree tp_node = c->u.catch.type_list;
+ tree tp_node = c->u.eh_catch.type_list;
for (; tp_node; tp_node = TREE_CHAIN (tp_node))
{
ret = RNL_MAYBE_CAUGHT;
else
{
- tree tp_node = c->u.catch.type_list;
+ tree tp_node = c->u.eh_catch.type_list;
bool maybe_reachable = false;
/* Compute the potential reachability of this handler and
}
else if (region->type == ERT_THROW)
{
- type_thrown = region->u.throw.type;
+ type_thrown = region->u.eh_throw.type;
region = region->outer;
}
region = region->outer;
else if (region->type == ERT_THROW)
{
- type_thrown = region->u.throw.type;
+ type_thrown = region->u.eh_throw.type;
region = region->outer;
}
region = region->outer;
else if (region->type == ERT_THROW)
{
- type_thrown = region->u.throw.type;
+ type_thrown = region->u.eh_throw.type;
region = region->outer;
}
search outer regions. Use a magic -3 value to record
that we haven't done the outer search. */
next = -3;
- for (c = region->u.try.last_catch; c ; c = c->u.catch.prev_catch)
+ for (c = region->u.eh_try.last_catch; c ; c = c->u.eh_catch.prev_catch)
{
- if (c->u.catch.type_list == NULL)
+ if (c->u.eh_catch.type_list == NULL)
{
/* Retrieve the filter from the head of the filter list
where we have stored it (see assign_filter_values). */
int filter
- = TREE_INT_CST_LOW (TREE_VALUE (c->u.catch.filter_list));
+ = TREE_INT_CST_LOW (TREE_VALUE (c->u.eh_catch.filter_list));
next = add_action_record (ar_hash, filter, 0);
}
next = add_action_record (ar_hash, 0, 0);
}
- flt_node = c->u.catch.filter_list;
+ flt_node = c->u.eh_catch.filter_list;
for (; flt_node; flt_node = TREE_CHAIN (flt_node))
{
int filter = TREE_INT_CST_LOW (TREE_VALUE (flt_node));
if (mode == BLKmode)
{
HOST_WIDE_INT size = GET_MODE_BITSIZE (ext_mode);
- rtx new;
+ rtx new_rtx;
/* If the reference doesn't use the alias set of its type,
we cannot create the temporary using that type. */
if (component_uses_parent_alias_set (exp))
{
- new = assign_stack_local (ext_mode, size, 0);
- set_mem_alias_set (new, get_alias_set (exp));
+ new_rtx = assign_stack_local (ext_mode, size, 0);
+ set_mem_alias_set (new_rtx, get_alias_set (exp));
}
else
- new = assign_stack_temp_for_type (ext_mode, size, 0, type);
+ new_rtx = assign_stack_temp_for_type (ext_mode, size, 0, type);
- emit_move_insn (new, op0);
- op0 = copy_rtx (new);
+ emit_move_insn (new_rtx, op0);
+ op0 = copy_rtx (new_rtx);
PUT_MODE (op0, BLKmode);
set_mem_attributes (op0, exp, 1);
}
HOST_WIDE_INT temp_size
= MAX (int_size_in_bytes (inner_type),
(HOST_WIDE_INT) GET_MODE_SIZE (TYPE_MODE (type)));
- rtx new = assign_stack_temp_for_type (TYPE_MODE (type),
- temp_size, 0, type);
- rtx new_with_op0_mode = adjust_address (new, GET_MODE (op0), 0);
+ rtx new_rtx = assign_stack_temp_for_type (TYPE_MODE (type),
+ temp_size, 0, type);
+ rtx new_with_op0_mode = adjust_address (new_rtx, GET_MODE (op0), 0);
gcc_assert (!TREE_ADDRESSABLE (exp));
else
emit_move_insn (new_with_op0_mode, op0);
- op0 = new;
+ op0 = new_rtx;
}
op0 = adjust_address (op0, TYPE_MODE (type), 0);
int
vector_mode_valid_p (enum machine_mode mode)
{
- enum mode_class class = GET_MODE_CLASS (mode);
+ enum mode_class mclass = GET_MODE_CLASS (mode);
enum machine_mode innermode;
/* Doh! What's going on? */
- if (class != MODE_VECTOR_INT
- && class != MODE_VECTOR_FLOAT
- && class != MODE_VECTOR_FRACT
- && class != MODE_VECTOR_UFRACT
- && class != MODE_VECTOR_ACCUM
- && class != MODE_VECTOR_UACCUM)
+ if (mclass != MODE_VECTOR_INT
+ && mclass != MODE_VECTOR_FLOAT
+ && mclass != MODE_VECTOR_FRACT
+ && mclass != MODE_VECTOR_UFRACT
+ && mclass != MODE_VECTOR_ACCUM
+ && mclass != MODE_VECTOR_UACCUM)
return 0;
/* Hardware support. Woo hoo! */
twoval_comparison_p (tree arg, tree *cval1, tree *cval2, int *save_p)
{
enum tree_code code = TREE_CODE (arg);
- enum tree_code_class class = TREE_CODE_CLASS (code);
+ enum tree_code_class tclass = TREE_CODE_CLASS (code);
/* We can handle some of the tcc_expression cases here. */
- if (class == tcc_expression && code == TRUTH_NOT_EXPR)
- class = tcc_unary;
- else if (class == tcc_expression
+ if (tclass == tcc_expression && code == TRUTH_NOT_EXPR)
+ tclass = tcc_unary;
+ else if (tclass == tcc_expression
&& (code == TRUTH_ANDIF_EXPR || code == TRUTH_ORIF_EXPR
|| code == COMPOUND_EXPR))
- class = tcc_binary;
+ tclass = tcc_binary;
- else if (class == tcc_expression && code == SAVE_EXPR
+ else if (tclass == tcc_expression && code == SAVE_EXPR
&& ! TREE_SIDE_EFFECTS (TREE_OPERAND (arg, 0)))
{
/* If we've already found a CVAL1 or CVAL2, this expression is
if (*cval1 || *cval2)
return 0;
- class = tcc_unary;
+ tclass = tcc_unary;
*save_p = 1;
}
- switch (class)
+ switch (tclass)
{
case tcc_unary:
return twoval_comparison_p (TREE_OPERAND (arg, 0), cval1, cval2, save_p);
{
tree type = TREE_TYPE (arg);
enum tree_code code = TREE_CODE (arg);
- enum tree_code_class class = TREE_CODE_CLASS (code);
+ enum tree_code_class tclass = TREE_CODE_CLASS (code);
/* We can handle some of the tcc_expression cases here. */
- if (class == tcc_expression && code == TRUTH_NOT_EXPR)
- class = tcc_unary;
- else if (class == tcc_expression
+ if (tclass == tcc_expression && code == TRUTH_NOT_EXPR)
+ tclass = tcc_unary;
+ else if (tclass == tcc_expression
&& (code == TRUTH_ANDIF_EXPR || code == TRUTH_ORIF_EXPR))
- class = tcc_binary;
+ tclass = tcc_binary;
- switch (class)
+ switch (tclass)
{
case tcc_unary:
return fold_build1 (code, type,
return 0;
}
-/* Indicate that NEW is an alternate way of referring to the temp slot
- that previously was known by OLD. */
+/* Indicate that NEW_RTX is an alternate way of referring to the temp
+ slot that previously was known by OLD_RTX. */
void
-update_temp_slot_address (rtx old, rtx new)
+update_temp_slot_address (rtx old_rtx, rtx new_rtx)
{
struct temp_slot *p;
- if (rtx_equal_p (old, new))
+ if (rtx_equal_p (old_rtx, new_rtx))
return;
- p = find_temp_slot_from_address (old);
+ p = find_temp_slot_from_address (old_rtx);
- /* If we didn't find one, see if both OLD is a PLUS. If so, and NEW
- is a register, see if one operand of the PLUS is a temporary
- location. If so, NEW points into it. Otherwise, if both OLD and
- NEW are a PLUS and if there is a register in common between them.
- If so, try a recursive call on those values. */
+ /* If we didn't find one, see if OLD_RTX is a PLUS. If so, and
+ NEW_RTX is a register, see if one operand of the PLUS is a
+ temporary location. If so, NEW_RTX points into it. Otherwise,
+ if both OLD_RTX and NEW_RTX are a PLUS and there is a register
+ in common between them, try a recursive call on those
+ values. */
if (p == 0)
{
- if (GET_CODE (old) != PLUS)
+ if (GET_CODE (old_rtx) != PLUS)
return;
- if (REG_P (new))
+ if (REG_P (new_rtx))
{
- update_temp_slot_address (XEXP (old, 0), new);
- update_temp_slot_address (XEXP (old, 1), new);
+ update_temp_slot_address (XEXP (old_rtx, 0), new_rtx);
+ update_temp_slot_address (XEXP (old_rtx, 1), new_rtx);
return;
}
- else if (GET_CODE (new) != PLUS)
+ else if (GET_CODE (new_rtx) != PLUS)
return;
- if (rtx_equal_p (XEXP (old, 0), XEXP (new, 0)))
- update_temp_slot_address (XEXP (old, 1), XEXP (new, 1));
- else if (rtx_equal_p (XEXP (old, 1), XEXP (new, 0)))
- update_temp_slot_address (XEXP (old, 0), XEXP (new, 1));
- else if (rtx_equal_p (XEXP (old, 0), XEXP (new, 1)))
- update_temp_slot_address (XEXP (old, 1), XEXP (new, 0));
- else if (rtx_equal_p (XEXP (old, 1), XEXP (new, 1)))
- update_temp_slot_address (XEXP (old, 0), XEXP (new, 0));
+ if (rtx_equal_p (XEXP (old_rtx, 0), XEXP (new_rtx, 0)))
+ update_temp_slot_address (XEXP (old_rtx, 1), XEXP (new_rtx, 1));
+ else if (rtx_equal_p (XEXP (old_rtx, 1), XEXP (new_rtx, 0)))
+ update_temp_slot_address (XEXP (old_rtx, 0), XEXP (new_rtx, 1));
+ else if (rtx_equal_p (XEXP (old_rtx, 0), XEXP (new_rtx, 1)))
+ update_temp_slot_address (XEXP (old_rtx, 1), XEXP (new_rtx, 0));
+ else if (rtx_equal_p (XEXP (old_rtx, 1), XEXP (new_rtx, 1)))
+ update_temp_slot_address (XEXP (old_rtx, 0), XEXP (new_rtx, 0));
return;
}
/* Otherwise add an alias for the temp's address. */
else if (p->address == 0)
- p->address = new;
+ p->address = new_rtx;
else
{
if (GET_CODE (p->address) != EXPR_LIST)
p->address = gen_rtx_EXPR_LIST (VOIDmode, p->address, NULL_RTX);
- p->address = gen_rtx_EXPR_LIST (VOIDmode, new, p->address);
+ p->address = gen_rtx_EXPR_LIST (VOIDmode, new_rtx, p->address);
}
}
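
The p == 0 path of update_temp_slot_address is easiest to follow on a concrete pair of addresses; a sketch with hypothetical values:

  /* Say a slot's recorded address is A, and we learn that
     old_rtx = (plus A (const_int 8)) is also available in new_rtx = (reg R).
     old_rtx itself is not in the table, but since new_rtx is a register the
     code recurses on both operands: update_temp_slot_address (A, R) finds
     A's slot and records R as an alias for it, while the recursion on
     (const_int 8) finds no slot, is not a PLUS, and simply returns.  */
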
static rtx
instantiate_new_reg (rtx x, HOST_WIDE_INT *poffset)
{
- rtx new;
+ rtx new_rtx;
HOST_WIDE_INT offset;
if (x == virtual_incoming_args_rtx)
{
/* Replace virtual_incoming_args_rtx with internal arg
pointer if DRAP is used to realign stack. */
- new = crtl->args.internal_arg_pointer;
+ new_rtx = crtl->args.internal_arg_pointer;
offset = 0;
}
else
- new = arg_pointer_rtx, offset = in_arg_offset;
+ new_rtx = arg_pointer_rtx, offset = in_arg_offset;
}
else if (x == virtual_stack_vars_rtx)
- new = frame_pointer_rtx, offset = var_offset;
+ new_rtx = frame_pointer_rtx, offset = var_offset;
else if (x == virtual_stack_dynamic_rtx)
- new = stack_pointer_rtx, offset = dynamic_offset;
+ new_rtx = stack_pointer_rtx, offset = dynamic_offset;
else if (x == virtual_outgoing_args_rtx)
- new = stack_pointer_rtx, offset = out_arg_offset;
+ new_rtx = stack_pointer_rtx, offset = out_arg_offset;
else if (x == virtual_cfa_rtx)
{
#ifdef FRAME_POINTER_CFA_OFFSET
- new = frame_pointer_rtx;
+ new_rtx = frame_pointer_rtx;
#else
- new = arg_pointer_rtx;
+ new_rtx = arg_pointer_rtx;
#endif
offset = cfa_offset;
}
return NULL_RTX;
*poffset = offset;
- return new;
+ return new_rtx;
}
/* A subroutine of instantiate_virtual_regs, called via for_each_rtx.
{
HOST_WIDE_INT offset;
bool *changed = (bool *) data;
- rtx x, new;
+ rtx x, new_rtx;
x = *loc;
if (x == 0)
switch (GET_CODE (x))
{
case REG:
- new = instantiate_new_reg (x, &offset);
- if (new)
+ new_rtx = instantiate_new_reg (x, &offset);
+ if (new_rtx)
{
- *loc = plus_constant (new, offset);
+ *loc = plus_constant (new_rtx, offset);
if (changed)
*changed = true;
}
return -1;
case PLUS:
- new = instantiate_new_reg (XEXP (x, 0), &offset);
- if (new)
+ new_rtx = instantiate_new_reg (XEXP (x, 0), &offset);
+ if (new_rtx)
{
- new = plus_constant (new, offset);
- *loc = simplify_gen_binary (PLUS, GET_MODE (x), new, XEXP (x, 1));
+ new_rtx = plus_constant (new_rtx, offset);
+ *loc = simplify_gen_binary (PLUS, GET_MODE (x), new_rtx, XEXP (x, 1));
if (changed)
*changed = true;
return -1;
HOST_WIDE_INT offset;
int insn_code, i;
bool any_change = false;
- rtx set, new, x, seq;
+ rtx set, new_rtx, x, seq;
/* There are some special cases to be handled first. */
set = single_set (insn);
to mean that the underlying register gets assigned the inverse
transformation. This is used, for example, in the handling of
non-local gotos. */
- new = instantiate_new_reg (SET_DEST (set), &offset);
- if (new)
+ new_rtx = instantiate_new_reg (SET_DEST (set), &offset);
+ if (new_rtx)
{
start_sequence ();
for_each_rtx (&SET_SRC (set), instantiate_virtual_regs_in_rtx, NULL);
- x = simplify_gen_binary (PLUS, GET_MODE (new), SET_SRC (set),
+ x = simplify_gen_binary (PLUS, GET_MODE (new_rtx), SET_SRC (set),
GEN_INT (-offset));
- x = force_operand (x, new);
- if (x != new)
- emit_move_insn (new, x);
+ x = force_operand (x, new_rtx);
+ if (x != new_rtx)
+ emit_move_insn (new_rtx, x);
seq = get_insns ();
end_sequence ();
new add insn. The difference between this and falling through
to the generic case is avoiding a new pseudo and eliminating a
move insn in the initial rtl stream. */
- new = instantiate_new_reg (SET_SRC (set), &offset);
- if (new && offset != 0
+ new_rtx = instantiate_new_reg (SET_SRC (set), &offset);
+ if (new_rtx && offset != 0
&& REG_P (SET_DEST (set))
&& REGNO (SET_DEST (set)) > LAST_VIRTUAL_REGISTER)
{
start_sequence ();
x = expand_simple_binop (GET_MODE (SET_DEST (set)), PLUS,
- new, GEN_INT (offset), SET_DEST (set),
+ new_rtx, GEN_INT (offset), SET_DEST (set),
1, OPTAB_LIB_WIDEN);
if (x != SET_DEST (set))
emit_move_insn (SET_DEST (set), x);
&& recog_data.operand_loc[1] == &XEXP (SET_SRC (set), 0)
&& recog_data.operand_loc[2] == &XEXP (SET_SRC (set), 1)
&& GET_CODE (recog_data.operand[2]) == CONST_INT
- && (new = instantiate_new_reg (recog_data.operand[1], &offset)))
+ && (new_rtx = instantiate_new_reg (recog_data.operand[1], &offset)))
{
offset += INTVAL (recog_data.operand[2]);
&& REGNO (SET_DEST (set)) > LAST_VIRTUAL_REGISTER)
{
start_sequence ();
- emit_move_insn (SET_DEST (set), new);
+ emit_move_insn (SET_DEST (set), new_rtx);
seq = get_insns ();
end_sequence ();
/* Using validate_change and apply_change_group here leaves
recog_data in an invalid state. Since we know exactly what
we want to check, do those two by hand. */
- if (safe_insn_predicate (insn_code, 1, new)
+ if (safe_insn_predicate (insn_code, 1, new_rtx)
&& safe_insn_predicate (insn_code, 2, x))
{
- *recog_data.operand_loc[1] = recog_data.operand[1] = new;
+ *recog_data.operand_loc[1] = recog_data.operand[1] = new_rtx;
*recog_data.operand_loc[2] = recog_data.operand[2] = x;
any_change = true;
break;
case REG:
- new = instantiate_new_reg (x, &offset);
- if (new == NULL)
+ new_rtx = instantiate_new_reg (x, &offset);
+ if (new_rtx == NULL)
continue;
if (offset == 0)
- x = new;
+ x = new_rtx;
else
{
start_sequence ();
/* ??? Recognize address_operand and/or "p" constraints
to see if (plus new offset) is a valid address before we put
this through expand_simple_binop. */
- x = expand_simple_binop (GET_MODE (x), PLUS, new,
+ x = expand_simple_binop (GET_MODE (x), PLUS, new_rtx,
GEN_INT (offset), NULL_RTX,
1, OPTAB_LIB_WIDEN);
seq = get_insns ();
break;
case SUBREG:
- new = instantiate_new_reg (SUBREG_REG (x), &offset);
- if (new == NULL)
+ new_rtx = instantiate_new_reg (SUBREG_REG (x), &offset);
+ if (new_rtx == NULL)
continue;
if (offset != 0)
{
start_sequence ();
- new = expand_simple_binop (GET_MODE (new), PLUS, new,
+ new_rtx = expand_simple_binop (GET_MODE (new_rtx), PLUS, new_rtx,
GEN_INT (offset), NULL_RTX,
1, OPTAB_LIB_WIDEN);
seq = get_insns ();
end_sequence ();
emit_insn_before (seq, insn);
}
- x = simplify_gen_subreg (recog_data.operand_mode[i], new,
- GET_MODE (new), SUBREG_BYTE (x));
+ x = simplify_gen_subreg (recog_data.operand_mode[i], new_rtx,
+ GET_MODE (new_rtx), SUBREG_BYTE (x));
break;
default:
extract_ops_from_tree (tree expr, enum tree_code *subcode_p, tree *op1_p,
tree *op2_p)
{
- enum gimple_rhs_class class;
+ enum gimple_rhs_class grhs_class;
*subcode_p = TREE_CODE (expr);
- class = get_gimple_rhs_class (*subcode_p);
+ grhs_class = get_gimple_rhs_class (*subcode_p);
- if (class == GIMPLE_BINARY_RHS)
+ if (grhs_class == GIMPLE_BINARY_RHS)
{
*op1_p = TREE_OPERAND (expr, 0);
*op2_p = TREE_OPERAND (expr, 1);
}
- else if (class == GIMPLE_UNARY_RHS)
+ else if (grhs_class == GIMPLE_UNARY_RHS)
{
*op1_p = TREE_OPERAND (expr, 0);
*op2_p = NULL_TREE;
}
- else if (class == GIMPLE_SINGLE_RHS)
+ else if (grhs_class == GIMPLE_SINGLE_RHS)
{
*op1_p = expr;
*op2_p = NULL_TREE;
gimple_seq_copy (gimple_seq src)
{
gimple_stmt_iterator gsi;
- gimple_seq new = gimple_seq_alloc ();
+ gimple_seq new_seq = gimple_seq_alloc ();
gimple stmt;
for (gsi = gsi_start (src); !gsi_end_p (gsi); gsi_next (&gsi))
{
stmt = gimple_copy (gsi_stmt (gsi));
- gimple_seq_add_stmt (&new, stmt);
+ gimple_seq_add_stmt (&new_seq, stmt);
}
- return new;
+ return new_seq;
}
fndecl = get_callee_fndecl (*expr_p);
if (fndecl && DECL_BUILT_IN (fndecl))
{
- tree new = fold_call_expr (*expr_p, !want_value);
+ tree new_tree = fold_call_expr (*expr_p, !want_value);
- if (new && new != *expr_p)
+ if (new_tree && new_tree != *expr_p)
{
/* There was a transformation of this call which computes the
same value, but in a more efficient way. Return and try
again. */
- *expr_p = new;
+ *expr_p = new_tree;
return GS_OK;
}
/* Try this again in case gimplification exposed something. */
if (ret != GS_ERROR)
{
- tree new = fold_call_expr (*expr_p, !want_value);
+ tree new_tree = fold_call_expr (*expr_p, !want_value);
- if (new && new != *expr_p)
+ if (new_tree && new_tree != *expr_p)
{
/* There was a transformation of this call which computes the
same value, but in a more efficient way. Return and try
again. */
- *expr_p = new;
+ *expr_p = new_tree;
return GS_OK;
}
}
if (size > 0 && !can_move_by_pieces (size, align))
{
- tree new;
+ tree new_tree;
if (notify_temp_creation)
return GS_ERROR;
- new = create_tmp_var_raw (type, "C");
+ new_tree = create_tmp_var_raw (type, "C");
- gimple_add_tmp_var (new);
- TREE_STATIC (new) = 1;
- TREE_READONLY (new) = 1;
- DECL_INITIAL (new) = ctor;
- if (align > DECL_ALIGN (new))
+ gimple_add_tmp_var (new_tree);
+ TREE_STATIC (new_tree) = 1;
+ TREE_READONLY (new_tree) = 1;
+ DECL_INITIAL (new_tree) = ctor;
+ if (align > DECL_ALIGN (new_tree))
{
- DECL_ALIGN (new) = align;
- DECL_USER_ALIGN (new) = 1;
+ DECL_ALIGN (new_tree) = align;
+ DECL_USER_ALIGN (new_tree) = 1;
}
- walk_tree (&DECL_INITIAL (new), force_labels_r, NULL, NULL);
+ walk_tree (&DECL_INITIAL (new_tree), force_labels_r, NULL, NULL);
- TREE_OPERAND (*expr_p, 1) = new;
+ TREE_OPERAND (*expr_p, 1) = new_tree;
/* This is no longer an assignment of a CONSTRUCTOR, but
we still may have processing to do on the LHS. So
}
else
{
- gimple try;
+ gimple gtry;
gimple_seq seq;
enum gimple_try_flags kind;
kind = GIMPLE_TRY_FINALLY;
seq = gsi_split_seq_after (iter);
- try = gimple_build_try (seq, gimple_wce_cleanup (wce), kind);
+ gtry = gimple_build_try (seq, gimple_wce_cleanup (wce), kind);
/* Do not use gsi_replace here, as it may scan operands.
We want to do a simple structural modification only. */
- *gsi_stmt_ptr (&iter) = try;
+ *gsi_stmt_ptr (&iter) = gtry;
iter = gsi_start (seq);
}
}
lat->type = IPA_BOTTOM;
}
-/* True when OLD and NEW values are not the same. */
+/* True when OLD_LAT and NEW_LAT values are not the same. */
+
static bool
-ipcp_lattice_changed (struct ipcp_lattice *old, struct ipcp_lattice *new)
+ipcp_lattice_changed (struct ipcp_lattice *old_lat,
+ struct ipcp_lattice *new_lat)
{
- if (old->type == new->type)
+ if (old_lat->type == new_lat->type)
{
- if (!ipcp_lat_is_const (old))
+ if (!ipcp_lat_is_const (old_lat))
return false;
- if (ipcp_lats_are_equal (old, new))
+ if (ipcp_lats_are_equal (old_lat, new_lat))
return false;
}
return true;
pass is already in the list. */
if (pass->static_pass_number)
{
- struct opt_pass *new;
+ struct opt_pass *new_pass;
- new = XNEW (struct opt_pass);
- memcpy (new, pass, sizeof (*new));
- new->next = NULL;
+ new_pass = XNEW (struct opt_pass);
+ memcpy (new_pass, pass, sizeof (*new_pass));
+ new_pass->next = NULL;
- new->todo_flags_start &= ~TODO_mark_first_instance;
+ new_pass->todo_flags_start &= ~TODO_mark_first_instance;
/* Indicate to register_dump_files that this pass has duplicates,
and so it should rename the dump file. The first instance will
if (pass->name)
{
pass->static_pass_number -= 1;
- new->static_pass_number = -pass->static_pass_number;
+ new_pass->static_pass_number = -pass->static_pass_number;
}
- *list = new;
+ *list = new_pass;
}
else
{
void
print_node_brief (FILE *file, const char *prefix, const_tree node, int indent)
{
- enum tree_code_class class;
+ enum tree_code_class tclass;
if (node == 0)
return;
- class = TREE_CODE_CLASS (TREE_CODE (node));
+ tclass = TREE_CODE_CLASS (TREE_CODE (node));
/* Always print the slot this node is in, and its code, address and
name if any. */
fprintf (file, "%s <%s", prefix, tree_code_name[(int) TREE_CODE (node)]);
dump_addr (file, " ", node);
- if (class == tcc_declaration)
+ if (tclass == tcc_declaration)
{
if (DECL_NAME (node))
fprintf (file, " %s", IDENTIFIER_POINTER (DECL_NAME (node)));
fprintf (file, " %c.%u", TREE_CODE (node) == CONST_DECL ? 'C' : 'D',
DECL_UID (node));
}
- else if (class == tcc_type)
+ else if (tclass == tcc_type)
{
if (TYPE_NAME (node))
{
int hash;
struct bucket *b;
enum machine_mode mode;
- enum tree_code_class class;
+ enum tree_code_class tclass;
int len;
int i;
expanded_location xloc;
return;
code = TREE_CODE (node);
- class = TREE_CODE_CLASS (code);
+ tclass = TREE_CODE_CLASS (code);
/* Don't get too deep in nesting. If the user wants to see deeper,
it is easy to use the address of a lowest-level node
return;
}
- if (indent > 8 && (class == tcc_type || class == tcc_declaration))
+ if (indent > 8 && (tclass == tcc_type || tclass == tcc_declaration))
{
print_node_brief (file, prefix, node, indent);
return;
dump_addr (file, " ", node);
/* Print the name, if any. */
- if (class == tcc_declaration)
+ if (tclass == tcc_declaration)
{
if (DECL_NAME (node))
fprintf (file, " %s", IDENTIFIER_POINTER (DECL_NAME (node)));
fprintf (file, " %c.%u", TREE_CODE (node) == CONST_DECL ? 'C' : 'D',
DECL_UID (node));
}
- else if (class == tcc_type)
+ else if (tclass == tcc_type)
{
if (TYPE_NAME (node))
{
|| (LOCATION_LINE (e->goto_locus)
!= LOCATION_LINE (gimple_location (last)))))
{
- basic_block new = split_edge (e);
- single_succ_edge (new)->goto_locus = e->goto_locus;
+ basic_block new_bb = split_edge (e);
+ single_succ_edge (new_bb)->goto_locus = e->goto_locus;
}
if ((e->flags & (EDGE_ABNORMAL | EDGE_ABNORMAL_CALL))
&& e->dest != EXIT_BLOCK_PTR)
static int next_dump = FIRST_AUTO_NUMBERED_DUMP;
int num = next_dump++;
- size_t this = extra_dump_files_in_use++;
+ size_t count = extra_dump_files_in_use++;
- if (this >= extra_dump_files_alloced)
+ if (count >= extra_dump_files_alloced)
{
if (extra_dump_files_alloced == 0)
extra_dump_files_alloced = 32;
extra_dump_files_alloced);
}
- memset (&extra_dump_files[this], 0, sizeof (struct dump_file_info));
- extra_dump_files[this].suffix = suffix;
- extra_dump_files[this].swtch = swtch;
- extra_dump_files[this].glob = glob;
- extra_dump_files[this].flags = flags;
- extra_dump_files[this].num = num;
+ memset (&extra_dump_files[count], 0, sizeof (struct dump_file_info));
+ extra_dump_files[count].suffix = suffix;
+ extra_dump_files[count].swtch = swtch;
+ extra_dump_files[count].glob = glob;
+ extra_dump_files[count].flags = flags;
+ extra_dump_files[count].num = num;
- return this + TDI_end;
+ return count + TDI_end;
}
gimple_stmt_iterator *gsi)
{
tree label;
- gimple_seq new;
+ gimple_seq new_seq;
treemple temp;
temp.tp = tp;
- new = find_goto_replacement (tf, temp);
- if (!new)
+ new_seq = find_goto_replacement (tf, temp);
+ if (!new_seq)
return;
- if (gimple_seq_singleton_p (new)
- && gimple_code (gimple_seq_first_stmt (new)) == GIMPLE_GOTO)
+ if (gimple_seq_singleton_p (new_seq)
+ && gimple_code (gimple_seq_first_stmt (new_seq)) == GIMPLE_GOTO)
{
- *tp = gimple_goto_dest (gimple_seq_first_stmt (new));
+ *tp = gimple_goto_dest (gimple_seq_first_stmt (new_seq));
return;
}
*tp = label;
gsi_insert_after (gsi, gimple_build_label (label), GSI_CONTINUE_LINKING);
- gsi_insert_seq_after (gsi, gimple_seq_copy (new), GSI_CONTINUE_LINKING);
+ gsi_insert_seq_after (gsi, gimple_seq_copy (new_seq), GSI_CONTINUE_LINKING);
}
/* The real work of replace_goto_queue. Returns with TSI updated to
{
struct eh_region *catch_region;
tree eh_label;
- gimple x, catch;
+ gimple x, gcatch;
- catch = gsi_stmt (gsi);
+ gcatch = gsi_stmt (gsi);
catch_region = gen_eh_region_catch (try_region,
- gimple_catch_types (catch));
+ gimple_catch_types (gcatch));
this_state.cur_region = catch_region;
this_state.prev_try = state->prev_try;
- lower_eh_constructs_1 (&this_state, gimple_catch_handler (catch));
+ lower_eh_constructs_1 (&this_state, gimple_catch_handler (gcatch));
eh_label = create_artificial_label ();
set_eh_region_tree_label (catch_region, eh_label);
x = gimple_build_label (eh_label);
gsi_insert_before (&gsi, x, GSI_SAME_STMT);
- if (gimple_seq_may_fallthru (gimple_catch_handler (catch)))
+ if (gimple_seq_may_fallthru (gimple_catch_handler (gcatch)))
{
if (!out_label)
out_label = create_artificial_label ();
x = gimple_build_goto (out_label);
- gimple_seq_add_stmt (gimple_catch_handler_ptr (catch), x);
+ gimple_seq_add_stmt (gimple_catch_handler_ptr (gcatch), x);
}
- gsi_insert_seq_before (&gsi, gimple_catch_handler (catch),
+ gsi_insert_seq_before (&gsi, gimple_catch_handler (gcatch),
GSI_SAME_STMT);
gsi_remove (&gsi, false);
}
static tree
remap_ssa_name (tree name, copy_body_data *id)
{
- tree new;
+ tree new_tree;
tree *n;
gcc_assert (TREE_CODE (name) == SSA_NAME);
/* Do not set DEF_STMT yet as statement is not copied yet. We do that
in copy_bb. */
- new = remap_decl (SSA_NAME_VAR (name), id);
+ new_tree = remap_decl (SSA_NAME_VAR (name), id);
/* We might've substituted constant or another SSA_NAME for
the variable.
Replace the SSA name representing RESULT_DECL by variable during
inlining: this saves us from need to introduce PHI node in a case
return value is just partly initialized. */
- if ((TREE_CODE (new) == VAR_DECL || TREE_CODE (new) == PARM_DECL)
+ if ((TREE_CODE (new_tree) == VAR_DECL || TREE_CODE (new_tree) == PARM_DECL)
&& (TREE_CODE (SSA_NAME_VAR (name)) != RESULT_DECL
|| !id->transform_return_to_modify))
{
- new = make_ssa_name (new, NULL);
- insert_decl_map (id, name, new);
- SSA_NAME_OCCURS_IN_ABNORMAL_PHI (new)
+ new_tree = make_ssa_name (new_tree, NULL);
+ insert_decl_map (id, name, new_tree);
+ SSA_NAME_OCCURS_IN_ABNORMAL_PHI (new_tree)
= SSA_NAME_OCCURS_IN_ABNORMAL_PHI (name);
- TREE_TYPE (new) = TREE_TYPE (SSA_NAME_VAR (new));
+ TREE_TYPE (new_tree) = TREE_TYPE (SSA_NAME_VAR (new_tree));
if (gimple_nop_p (SSA_NAME_DEF_STMT (name)))
{
/* By inlining function having uninitialized variable, we might
gimple_stmt_iterator gsi = gsi_last_bb (id->entry_bb);
gimple init_stmt;
- init_stmt = gimple_build_assign (new,
- fold_convert (TREE_TYPE (new),
+ init_stmt = gimple_build_assign (new_tree,
+ fold_convert (TREE_TYPE (new_tree),
integer_zero_node));
gsi_insert_after (&gsi, init_stmt, GSI_NEW_STMT);
- SSA_NAME_IS_DEFAULT_DEF (new) = 0;
+ SSA_NAME_IS_DEFAULT_DEF (new_tree) = 0;
}
else
{
- SSA_NAME_DEF_STMT (new) = gimple_build_nop ();
+ SSA_NAME_DEF_STMT (new_tree) = gimple_build_nop ();
if (gimple_default_def (id->src_cfun, SSA_NAME_VAR (name))
== name)
- set_default_def (SSA_NAME_VAR (new), new);
+ set_default_def (SSA_NAME_VAR (new_tree), new_tree);
}
}
}
else
- insert_decl_map (id, name, new);
- return new;
+ insert_decl_map (id, name, new_tree);
+ return new_tree;
}
/* Remap DECL during the copying of the BLOCK tree for the function. */
static tree
remap_type_1 (tree type, copy_body_data *id)
{
- tree new, t;
+ tree new_tree, t;
/* We do need a copy. build and register it now. If this is a pointer or
reference type, remap the designated type and make a new pointer or
reference type. */
if (TREE_CODE (type) == POINTER_TYPE)
{
- new = build_pointer_type_for_mode (remap_type (TREE_TYPE (type), id),
+ new_tree = build_pointer_type_for_mode (remap_type (TREE_TYPE (type), id),
TYPE_MODE (type),
TYPE_REF_CAN_ALIAS_ALL (type));
- insert_decl_map (id, type, new);
- return new;
+ insert_decl_map (id, type, new_tree);
+ return new_tree;
}
else if (TREE_CODE (type) == REFERENCE_TYPE)
{
- new = build_reference_type_for_mode (remap_type (TREE_TYPE (type), id),
+ new_tree = build_reference_type_for_mode (remap_type (TREE_TYPE (type), id),
TYPE_MODE (type),
TYPE_REF_CAN_ALIAS_ALL (type));
- insert_decl_map (id, type, new);
- return new;
+ insert_decl_map (id, type, new_tree);
+ return new_tree;
}
else
- new = copy_node (type);
+ new_tree = copy_node (type);
- insert_decl_map (id, type, new);
+ insert_decl_map (id, type, new_tree);
/* This is a new type, not a copy of an old type. Need to reassociate
variants. We can handle everything except the main variant lazily. */
if (type != t)
{
t = remap_type (t, id);
- TYPE_MAIN_VARIANT (new) = t;
- TYPE_NEXT_VARIANT (new) = TYPE_NEXT_VARIANT (t);
- TYPE_NEXT_VARIANT (t) = new;
+ TYPE_MAIN_VARIANT (new_tree) = t;
+ TYPE_NEXT_VARIANT (new_tree) = TYPE_NEXT_VARIANT (t);
+ TYPE_NEXT_VARIANT (t) = new_tree;
}
else
{
- TYPE_MAIN_VARIANT (new) = new;
- TYPE_NEXT_VARIANT (new) = NULL;
+ TYPE_MAIN_VARIANT (new_tree) = new_tree;
+ TYPE_NEXT_VARIANT (new_tree) = NULL;
}
if (TYPE_STUB_DECL (type))
- TYPE_STUB_DECL (new) = remap_decl (TYPE_STUB_DECL (type), id);
+ TYPE_STUB_DECL (new_tree) = remap_decl (TYPE_STUB_DECL (type), id);
/* Lazily create pointer and reference types. */
- TYPE_POINTER_TO (new) = NULL;
- TYPE_REFERENCE_TO (new) = NULL;
+ TYPE_POINTER_TO (new_tree) = NULL;
+ TYPE_REFERENCE_TO (new_tree) = NULL;
- switch (TREE_CODE (new))
+ switch (TREE_CODE (new_tree))
{
case INTEGER_TYPE:
case REAL_TYPE:
case FIXED_POINT_TYPE:
case ENUMERAL_TYPE:
case BOOLEAN_TYPE:
- t = TYPE_MIN_VALUE (new);
+ t = TYPE_MIN_VALUE (new_tree);
if (t && TREE_CODE (t) != INTEGER_CST)
- walk_tree (&TYPE_MIN_VALUE (new), copy_tree_body_r, id, NULL);
+ walk_tree (&TYPE_MIN_VALUE (new_tree), copy_tree_body_r, id, NULL);
- t = TYPE_MAX_VALUE (new);
+ t = TYPE_MAX_VALUE (new_tree);
if (t && TREE_CODE (t) != INTEGER_CST)
- walk_tree (&TYPE_MAX_VALUE (new), copy_tree_body_r, id, NULL);
- return new;
+ walk_tree (&TYPE_MAX_VALUE (new_tree), copy_tree_body_r, id, NULL);
+ return new_tree;
case FUNCTION_TYPE:
- TREE_TYPE (new) = remap_type (TREE_TYPE (new), id);
- walk_tree (&TYPE_ARG_TYPES (new), copy_tree_body_r, id, NULL);
- return new;
+ TREE_TYPE (new_tree) = remap_type (TREE_TYPE (new_tree), id);
+ walk_tree (&TYPE_ARG_TYPES (new_tree), copy_tree_body_r, id, NULL);
+ return new_tree;
case ARRAY_TYPE:
- TREE_TYPE (new) = remap_type (TREE_TYPE (new), id);
- TYPE_DOMAIN (new) = remap_type (TYPE_DOMAIN (new), id);
+ TREE_TYPE (new_tree) = remap_type (TREE_TYPE (new_tree), id);
+ TYPE_DOMAIN (new_tree) = remap_type (TYPE_DOMAIN (new_tree), id);
break;
case RECORD_TYPE:
{
tree f, nf = NULL;
- for (f = TYPE_FIELDS (new); f ; f = TREE_CHAIN (f))
+ for (f = TYPE_FIELDS (new_tree); f ; f = TREE_CHAIN (f))
{
t = remap_decl (f, id);
- DECL_CONTEXT (t) = new;
+ DECL_CONTEXT (t) = new_tree;
TREE_CHAIN (t) = nf;
nf = t;
}
- TYPE_FIELDS (new) = nreverse (nf);
+ TYPE_FIELDS (new_tree) = nreverse (nf);
}
break;
gcc_unreachable ();
}
- walk_tree (&TYPE_SIZE (new), copy_tree_body_r, id, NULL);
- walk_tree (&TYPE_SIZE_UNIT (new), copy_tree_body_r, id, NULL);
+ walk_tree (&TYPE_SIZE (new_tree), copy_tree_body_r, id, NULL);
+ walk_tree (&TYPE_SIZE_UNIT (new_tree), copy_tree_body_r, id, NULL);
- return new;
+ return new_tree;
}
tree
remap_blocks (tree block, copy_body_data *id)
{
tree t;
- tree new = block;
+ tree new_tree = block;
if (!block)
return NULL;
- remap_block (&new, id);
- gcc_assert (new != block);
+ remap_block (&new_tree, id);
+ gcc_assert (new_tree != block);
for (t = BLOCK_SUBBLOCKS (block); t ; t = BLOCK_CHAIN (t))
- add_lexical_block (new, remap_blocks (t, id));
- return new;
+ add_lexical_block (new_tree, remap_blocks (t, id));
+ return new_tree;
}
static void
copy_statement_list (tree *tp)
{
tree_stmt_iterator oi, ni;
- tree new;
+ tree new_tree;
- new = alloc_stmt_list ();
- ni = tsi_start (new);
+ new_tree = alloc_stmt_list ();
+ ni = tsi_start (new_tree);
oi = tsi_start (*tp);
- *tp = new;
+ *tp = new_tree;
for (; !tsi_end_p (oi); tsi_next (&oi))
tsi_link_after (&ni, tsi_stmt (oi), TSI_NEW_STMT);
n = (tree *) pointer_map_contains (id->decl_map, decl);
if (n)
{
- tree type, new, old;
+ tree type, new_tree, old;
/* If we happen to get an ADDR_EXPR in n->value, strip
it manually here as we'll eventually get ADDR_EXPRs
fold_indirect_ref does other useful transformations,
try that first, though. */
type = TREE_TYPE (TREE_TYPE (*n));
- new = unshare_expr (*n);
+ new_tree = unshare_expr (*n);
old = *tp;
- *tp = gimple_fold_indirect_ref (new);
+ *tp = gimple_fold_indirect_ref (new_tree);
if (!*tp)
{
- if (TREE_CODE (new) == ADDR_EXPR)
+ if (TREE_CODE (new_tree) == ADDR_EXPR)
{
- *tp = fold_indirect_ref_1 (type, new);
+ *tp = fold_indirect_ref_1 (type, new_tree);
/* ??? We should either assert here or build
a VIEW_CONVERT_EXPR instead of blindly leaking
incompatible types to our IL. */
if (! *tp)
- *tp = TREE_OPERAND (new, 0);
+ *tp = TREE_OPERAND (new_tree, 0);
}
else
{
- *tp = build1 (INDIRECT_REF, type, new);
+ *tp = build1 (INDIRECT_REF, type, new_tree);
TREE_THIS_VOLATILE (*tp) = TREE_THIS_VOLATILE (old);
}
}
n = (tree *) pointer_map_contains (id->decl_map, decl);
if (n)
{
- tree new;
+ tree new_tree;
tree old;
/* If we happen to get an ADDR_EXPR in n->value, strip
it manually here as we'll eventually get ADDR_EXPRs
but we absolutely rely on that. As fold_indirect_ref
does other useful transformations, try that first, though. */
tree type = TREE_TYPE (TREE_TYPE (*n));
- new = unshare_expr (*n);
+ new_tree = unshare_expr (*n);
old = *tp;
- *tp = gimple_fold_indirect_ref (new);
+ *tp = gimple_fold_indirect_ref (new_tree);
if (! *tp)
{
- if (TREE_CODE (new) == ADDR_EXPR)
+ if (TREE_CODE (new_tree) == ADDR_EXPR)
{
- *tp = fold_indirect_ref_1 (type, new);
+ *tp = fold_indirect_ref_1 (type, new_tree);
/* ??? We should either assert here or build
a VIEW_CONVERT_EXPR instead of blindly leaking
incompatible types to our IL. */
if (! *tp)
- *tp = TREE_OPERAND (new, 0);
+ *tp = TREE_OPERAND (new_tree, 0);
}
else
{
- *tp = build1 (INDIRECT_REF, type, new);
+ *tp = build1 (INDIRECT_REF, type, new_tree);
TREE_THIS_VOLATILE (*tp) = TREE_THIS_VOLATILE (old);
TREE_SIDE_EFFECTS (*tp) = TREE_SIDE_EFFECTS (old);
}
FOR_EACH_EDGE (old_edge, ei, bb->succs)
if (!(old_edge->flags & EDGE_EH))
{
- edge new;
+ edge new_edge;
flags = old_edge->flags;
if (old_edge->dest->index == EXIT_BLOCK && !old_edge->flags
&& old_edge->dest->aux != EXIT_BLOCK_PTR)
flags |= EDGE_FALLTHRU;
- new = make_edge (new_bb, (basic_block) old_edge->dest->aux, flags);
- new->count = old_edge->count * count_scale / REG_BR_PROB_BASE;
- new->probability = old_edge->probability;
+ new_edge = make_edge (new_bb, (basic_block) old_edge->dest->aux, flags);
+ new_edge->count = old_edge->count * count_scale / REG_BR_PROB_BASE;
+ new_edge->probability = old_edge->probability;
}
if (bb->index == ENTRY_BLOCK || bb->index == EXIT_BLOCK)
/* Use aux pointers to map the original blocks to copy. */
FOR_EACH_BB_FN (bb, cfun_to_copy)
{
- basic_block new = copy_bb (id, bb, frequency_scale, count_scale);
- bb->aux = new;
- new->aux = bb;
+ basic_block new_bb = copy_bb (id, bb, frequency_scale, count_scale);
+ bb->aux = new_bb;
+ new_bb->aux = bb;
}
last = last_basic_block;
{
/* Because the chain gets clobbered when we make a copy, we save it
here. */
- tree chain = NULL_TREE, new;
+ tree chain = NULL_TREE, new_tree;
chain = TREE_CHAIN (*tp);
/* Copy the node. */
- new = copy_node (*tp);
+ new_tree = copy_node (*tp);
/* Propagate mudflap marked-ness. */
if (flag_mudflap && mf_marked_p (*tp))
- mf_mark (new);
+ mf_mark (new_tree);
- *tp = new;
+ *tp = new_tree;
/* Now, restore the chain, if appropriate. That will cause
walk_tree to walk into the chain as well. */
{
/* CONSTRUCTOR nodes need special handling because
we need to duplicate the vector of elements. */
- tree new;
+ tree new_tree;
- new = copy_node (*tp);
+ new_tree = copy_node (*tp);
/* Propagate mudflap marked-ness. */
if (flag_mudflap && mf_marked_p (*tp))
- mf_mark (new);
+ mf_mark (new_tree);
- CONSTRUCTOR_ELTS (new) = VEC_copy (constructor_elt, gc,
+ CONSTRUCTOR_ELTS (new_tree) = VEC_copy (constructor_elt, gc,
CONSTRUCTOR_ELTS (*tp));
- *tp = new;
+ *tp = new_tree;
}
else if (TREE_CODE_CLASS (code) == tcc_type)
*walk_subtrees = 0;
arg_copy = &orig_parm;
for (parg = arg_copy; *parg; parg = &TREE_CHAIN (*parg))
{
- tree new = remap_decl (*parg, id);
- lang_hooks.dup_lang_specific_decl (new);
- TREE_CHAIN (new) = TREE_CHAIN (*parg);
- *parg = new;
+ tree new_tree = remap_decl (*parg, id);
+ lang_hooks.dup_lang_specific_decl (new_tree);
+ TREE_CHAIN (new_tree) = TREE_CHAIN (*parg);
+ *parg = new_tree;
}
return orig_parm;
}
chain_copy = &static_chain;
for (pvar = chain_copy; *pvar; pvar = &TREE_CHAIN (*pvar))
{
- tree new = remap_decl (*pvar, id);
- lang_hooks.dup_lang_specific_decl (new);
- TREE_CHAIN (new) = TREE_CHAIN (*pvar);
- *pvar = new;
+ tree new_tree = remap_decl (*pvar, id);
+ lang_hooks.dup_lang_specific_decl (new_tree);
+ TREE_CHAIN (new_tree) = TREE_CHAIN (*pvar);
+ *pvar = new_tree;
}
return static_chain;
}
}
-/* Return the names replaced by NEW (i.e., REPL_TBL[NEW].SET). */
+/* Return the names replaced by NEW_TREE (i.e., REPL_TBL[NEW_TREE].SET). */
static inline bitmap
-names_replaced_by (tree new)
+names_replaced_by (tree new_tree)
{
struct repl_map_d m;
void **slot;
- m.name = new;
+ m.name = new_tree;
slot = htab_find_slot (repl_tbl, (void *) &m, NO_INSERT);
/* If N was not registered in the replacement table, return NULL. */
}
-/* Add OLD to REPL_TBL[NEW].SET. */
+/* Add OLD to REPL_TBL[NEW_TREE].SET. */
static inline void
-add_to_repl_tbl (tree new, tree old)
+add_to_repl_tbl (tree new_tree, tree old)
{
struct repl_map_d m, *mp;
void **slot;
- m.name = new;
+ m.name = new_tree;
slot = htab_find_slot (repl_tbl, (void *) &m, INSERT);
if (*slot == NULL)
{
mp = XNEW (struct repl_map_d);
- mp->name = new;
+ mp->name = new_tree;
mp->set = BITMAP_ALLOC (NULL);
*slot = (void *) mp;
}
}
-/* Add a new mapping NEW -> OLD REPL_TBL. Every entry N_i in REPL_TBL
+/* Add a new mapping NEW_TREE -> OLD to REPL_TBL. Every entry N_i in REPL_TBL
represents the set of names O_1 ... O_j replaced by N_i. This is
used by update_ssa and its helpers to introduce new SSA names in an
already formed SSA web. */
static void
-add_new_name_mapping (tree new, tree old)
+add_new_name_mapping (tree new_tree, tree old)
{
timevar_push (TV_TREE_SSA_INCREMENTAL);
- /* OLD and NEW must be different SSA names for the same symbol. */
- gcc_assert (new != old && SSA_NAME_VAR (new) == SSA_NAME_VAR (old));
+ /* OLD and NEW_TREE must be different SSA names for the same symbol. */
+ gcc_assert (new_tree != old && SSA_NAME_VAR (new_tree) == SSA_NAME_VAR (old));
/* If this mapping is for virtual names, we will need to update
virtual operands. If this is a mapping for .MEM, then we gather
the symbols associated with each name. */
- if (!is_gimple_reg (new))
+ if (!is_gimple_reg (new_tree))
{
tree sym;
will make more sense to rename the symbols from scratch.
Otherwise, the insertion of PHI nodes for each of the old
names in these mappings will be very slow. */
- sym = SSA_NAME_VAR (new);
+ sym = SSA_NAME_VAR (new_tree);
bitmap_set_bit (update_ssa_stats.virtual_symbols, DECL_UID (sym));
}
}
/* Update the REPL_TBL table. */
- add_to_repl_tbl (new, old);
+ add_to_repl_tbl (new_tree, old);
/* If OLD had already been registered as a new name, then all the
- names that OLD replaces should also be replaced by NEW. */
+ names that OLD replaces should also be replaced by NEW_TREE. */
if (is_new_name (old))
- bitmap_ior_into (names_replaced_by (new), names_replaced_by (old));
+ bitmap_ior_into (names_replaced_by (new_tree), names_replaced_by (old));
- /* Register NEW and OLD in NEW_SSA_NAMES and OLD_SSA_NAMES,
+ /* Register NEW_TREE and OLD in NEW_SSA_NAMES and OLD_SSA_NAMES,
respectively. */
- SET_BIT (new_ssa_names, SSA_NAME_VERSION (new));
+ SET_BIT (new_ssa_names, SSA_NAME_VERSION (new_tree));
SET_BIT (old_ssa_names, SSA_NAME_VERSION (old));
/* Update mapping counter to use in the virtual mapping heuristic. */
update_ssa. */
void
-register_new_name_mapping (tree new ATTRIBUTE_UNUSED, tree old ATTRIBUTE_UNUSED)
+register_new_name_mapping (tree new_tree ATTRIBUTE_UNUSED, tree old ATTRIBUTE_UNUSED)
{
if (need_to_initialize_update_ssa_p)
init_update_ssa ();
- add_new_name_mapping (new, old);
+ add_new_name_mapping (new_tree, old);
}
basic_block bb, next;
gimple_stmt_iterator i;
int saved_last_basic_block = last_basic_block;
- enum gimple_rhs_class class;
+ enum gimple_rhs_class grhs_class;
bb = ENTRY_BLOCK_PTR ->next_bb;
do
gimple_location (s), integer_one_node);
mf_xform_derefs_1 (&i, gimple_assign_rhs1_ptr (s),
gimple_location (s), integer_zero_node);
- class = get_gimple_rhs_class (gimple_assign_rhs_code (s));
- if (class == GIMPLE_BINARY_RHS)
+ grhs_class = get_gimple_rhs_class (gimple_assign_rhs_code (s));
+ if (grhs_class == GIMPLE_BINARY_RHS)
mf_xform_derefs_1 (&i, gimple_assign_rhs2_ptr (s),
gimple_location (s), integer_zero_node);
break;
/* For combination chains, the operator and the two chains that are
combined, and the type of the result. */
- enum tree_code operator;
+ enum tree_code op;
tree rslt_type;
struct chain *ch1, *ch2;
if (chain->type == CT_COMBINATION)
{
fprintf (file, " equal to %p %s %p in type ",
- (void *) chain->ch1, op_symbol_code (chain->operator),
+ (void *) chain->ch1, op_symbol_code (chain->op),
(void *) chain->ch2);
print_generic_expr (file, chain->rslt_type, TDF_SLIM);
fprintf (file, "\n");
}
/* Replace the reference in statement STMT with temporary variable
- NEW. If SET is true, NEW is instead initialized to the value of
+ NEW_TREE. If SET is true, NEW_TREE is instead initialized to the value of
the reference in the statement. IN_LHS is true if the reference
is in the lhs of STMT, false if it is in rhs. */
static void
-replace_ref_with (gimple stmt, tree new, bool set, bool in_lhs)
+replace_ref_with (gimple stmt, tree new_tree, bool set, bool in_lhs)
{
tree val;
gimple new_stmt;
remove_phi_node (&psi, false);
/* Turn the phi node into GIMPLE_ASSIGN. */
- new_stmt = gimple_build_assign (val, new);
+ new_stmt = gimple_build_assign (val, new_tree);
gsi_insert_before (&bsi, new_stmt, GSI_NEW_STMT);
return;
}
bsi = gsi_for_stmt (stmt);
- /* If we do not need to initialize NEW, just replace the use of OLD. */
+ /* If we do not need to initialize NEW_TREE, just replace the use of OLD. */
if (!set)
{
gcc_assert (!in_lhs);
- gimple_assign_set_rhs_from_tree (&bsi, new);
+ gimple_assign_set_rhs_from_tree (&bsi, new_tree);
stmt = gsi_stmt (bsi);
update_stmt (stmt);
return;
val = gimple_assign_lhs (stmt);
}
- new_stmt = gimple_build_assign (new, unshare_expr (val));
+ new_stmt = gimple_build_assign (new_tree, unshare_expr (val));
gsi_insert_after (&bsi, new_stmt, GSI_NEW_STMT);
}
tree e1 = get_init_expr (chain->ch1, index);
tree e2 = get_init_expr (chain->ch2, index);
- return fold_build2 (chain->operator, chain->rslt_type, e1, e2);
+ return fold_build2 (chain->op, chain->rslt_type, e1, e2);
}
else
return VEC_index (tree, chain->inits, index);
new_chain = XCNEW (struct chain);
new_chain->type = CT_COMBINATION;
- new_chain->operator = op;
+ new_chain->op = op;
new_chain->ch1 = ch1;
new_chain->ch2 = ch2;
new_chain->rslt_type = rslt_type;
case BOOLEAN_TYPE:
{
unsigned int quals = TYPE_QUALS (node);
- enum tree_code_class class;
+ enum tree_code_class tclass;
if (quals & TYPE_QUAL_CONST)
pp_string (buffer, "const ");
else if (quals & TYPE_QUAL_RESTRICT)
pp_string (buffer, "restrict ");
- class = TREE_CODE_CLASS (TREE_CODE (node));
+ tclass = TREE_CODE_CLASS (TREE_CODE (node));
- if (class == tcc_declaration)
+ if (tclass == tcc_declaration)
{
if (DECL_NAME (node))
dump_decl_name (buffer, node, flags);
else
pp_string (buffer, "<unnamed type decl>");
}
- else if (class == tcc_type)
+ else if (tclass == tcc_type)
{
if (TYPE_NAME (node))
{
{
unsigned int index = VEC_length (varinfo_t, varmap);
varinfo_t vi;
- tree decltype = TREE_TYPE (decl);
- tree declsize = DECL_P (decl) ? DECL_SIZE (decl) : TYPE_SIZE (decltype);
+ tree decl_type = TREE_TYPE (decl);
+ tree declsize = DECL_P (decl) ? DECL_SIZE (decl) : TYPE_SIZE (decl_type);
bool is_global = DECL_P (decl) ? is_global_var (decl) : false;
VEC (fieldoff_s,heap) *fieldstack = NULL;
return create_function_info_for (decl, name);
if (var_can_have_subvars (decl) && use_field_sensitive)
- push_fields_onto_fieldstack (decltype, &fieldstack, 0);
+ push_fields_onto_fieldstack (decl_type, &fieldstack, 0);
/* If the variable doesn't have subvars, we may end up needing to
sort the field list and create fake variables for all the
able to simplify this conditional. */
if (vr->type == VR_RANGE)
{
- tree new = test_for_singularity (cond_code, op0, op1, vr);
+ tree new_tree = test_for_singularity (cond_code, op0, op1, vr);
- if (new)
+ if (new_tree)
{
if (dump_file)
{
gimple_cond_set_code (stmt, EQ_EXPR);
gimple_cond_set_lhs (stmt, op0);
- gimple_cond_set_rhs (stmt, new);
+ gimple_cond_set_rhs (stmt, new_tree);
update_stmt (stmt);
with integral types here, so no need to worry about
issues with inverting FP comparisons. */
cond_code = invert_tree_comparison (cond_code, false);
- new = test_for_singularity (cond_code, op0, op1, vr);
+ new_tree = test_for_singularity (cond_code, op0, op1, vr);
- if (new)
+ if (new_tree)
{
if (dump_file)
{
gimple_cond_set_code (stmt, NE_EXPR);
gimple_cond_set_lhs (stmt, op0);
- gimple_cond_set_rhs (stmt, new);
+ gimple_cond_set_rhs (stmt, new_tree);
update_stmt (stmt);
{
enum tree_code code = TREE_CODE (exp);
tree op0, op1, op2, op3;
- tree new, inner;
+ tree new_tree, inner;
/* We handle TREE_LIST and COMPONENT_REF separately. */
if (code == TREE_LIST)
if (op0 == TREE_OPERAND (exp, 0))
return exp;
- new = fold_build3 (COMPONENT_REF, TREE_TYPE (exp),
+ new_tree = fold_build3 (COMPONENT_REF, TREE_TYPE (exp),
op0, TREE_OPERAND (exp, 1), NULL_TREE);
}
else
if (op0 == TREE_OPERAND (exp, 0))
return exp;
- new = fold_build1 (code, TREE_TYPE (exp), op0);
+ new_tree = fold_build1 (code, TREE_TYPE (exp), op0);
break;
case 2:
if (op0 == TREE_OPERAND (exp, 0) && op1 == TREE_OPERAND (exp, 1))
return exp;
- new = fold_build2 (code, TREE_TYPE (exp), op0, op1);
+ new_tree = fold_build2 (code, TREE_TYPE (exp), op0, op1);
break;
case 3:
&& op2 == TREE_OPERAND (exp, 2))
return exp;
- new = fold_build3 (code, TREE_TYPE (exp), op0, op1, op2);
+ new_tree = fold_build3 (code, TREE_TYPE (exp), op0, op1, op2);
break;
case 4:
&& op3 == TREE_OPERAND (exp, 3))
return exp;
- new = fold (build4 (code, TREE_TYPE (exp), op0, op1, op2, op3));
+ new_tree = fold (build4 (code, TREE_TYPE (exp), op0, op1, op2, op3));
break;
default:
}
if (copy)
- new = fold (copy);
+ new_tree = fold (copy);
else
return exp;
}
gcc_unreachable ();
}
- TREE_READONLY (new) = TREE_READONLY (exp);
- return new;
+ TREE_READONLY (new_tree) = TREE_READONLY (exp);
+ return new_tree;
}
/* Similar, but look for a PLACEHOLDER_EXPR in EXP and find a replacement
{
int i;
enum tree_code code;
- char class;
+ char tclass;
if (t == NULL_TREE)
return iterative_hash_pointer (t, val);
}
/* else FALL THROUGH */
default:
- class = TREE_CODE_CLASS (code);
+ tclass = TREE_CODE_CLASS (code);
- if (class == tcc_declaration)
+ if (tclass == tcc_declaration)
{
/* DECL's have a unique ID */
val = iterative_hash_host_wide_int (DECL_UID (t), val);
}
else
{
- gcc_assert (IS_EXPR_CODE_CLASS (class));
+ gcc_assert (IS_EXPR_CODE_CLASS (tclass));
val = iterative_hash_object (code, val);
histogram_value val;
for (val = gimple_histogram_value (ofun, ostmt); val != NULL; val = val->hvalue.next)
{
- histogram_value new = gimple_alloc_histogram_value (fun, val->type, NULL, NULL);
- memcpy (new, val, sizeof (*val));
- new->hvalue.stmt = stmt;
- new->hvalue.counters = XNEWVAR (gcov_type, sizeof (*new->hvalue.counters) * new->n_counters);
- memcpy (new->hvalue.counters, val->hvalue.counters, sizeof (*new->hvalue.counters) * new->n_counters);
- gimple_add_histogram_value (fun, stmt, new);
+ histogram_value new_val = gimple_alloc_histogram_value (fun, val->type, NULL, NULL);
+ memcpy (new_val, val, sizeof (*val));
+ new_val->hvalue.stmt = stmt;
+ new_val->hvalue.counters = XNEWVAR (gcov_type, sizeof (*new_val->hvalue.counters) * new_val->n_counters);
+ memcpy (new_val->hvalue.counters, val->hvalue.counters, sizeof (*new_val->hvalue.counters) * new_val->n_counters);
+ gimple_add_histogram_value (fun, stmt, new_val);
}
}