/* The aliasing API provided here solves related but different problems:
- Say there exists (in c)
+ Say there exists (in c)
struct X {
struct Y y1;
this may be too conservative for some C++ types.
The pass ipa-type-escape does this analysis for the types whose
- instances do not escape across the compilation boundary.
+ instances do not escape across the compilation boundary.
Historically in GCC, these two problems were combined and a single
data structure was used to represent the solution to these
`double'. (However, a store to an `int' cannot alias a `double'
and vice versa.) We indicate this via a tree structure that looks
like:
- struct S
- / \
+ struct S
+ / \
/ \
- |/_ _\|
- int double
+ |/_ _\|
+ int double
(The arrows are directed and point downwards.)
In this situation we say the alias set for `struct S' is the
{
int i;
tree binfo, base_binfo;
-
+
for (binfo = TYPE_BINFO (type), i = 0;
BINFO_BASE_ITERATE (binfo, i, base_binfo); i++)
record_alias_subset (superset,
If neither case holds, reject the original base value as invalid.
Note that the following situation is not detected:
- extern int x, y; int *p = &x; p += (&y-&x);
+ extern int x, y; int *p = &x; p += (&y-&x);
ANSI C does not allow computing the difference of addresses
of distinct top level objects. */
/* If a value is known for REGNO, return it. */
-rtx
+rtx
get_reg_known_value (unsigned int regno)
{
if (regno >= FIRST_PSEUDO_REGISTER)
if (offset)
addr = gen_rtx_PLUS (GET_MODE (addr), XEXP (addr, 0),
- GEN_INT (offset));
+ GEN_INT (offset));
else
addr = XEXP (addr, 0);
addr = canon_rtx (addr);
/* Unless both have exprs, we can't tell anything. */
if (exprx == 0 || expry == 0)
return 0;
-
+
/* If both are field references, we may be able to determine something. */
if (TREE_CODE (exprx) == COMPONENT_REF
&& TREE_CODE (expry) == COMPONENT_REF
&& nonoverlapping_component_refs_p (exprx, expry))
return 1;
-
+
/* If the field reference test failed, look at the DECLs involved. */
moffsetx = MEM_OFFSET (x);
if (TREE_CODE (exprx) == COMPONENT_REF)
tree fieldcontext = DECL_FIELD_CONTEXT (field);
if (ipa_type_escape_field_does_not_clobber_p (fieldcontext,
TREE_TYPE (field)))
- return 1;
+ return 1;
}
{
tree t = decl_for_component_ref (exprx);
tree fieldcontext = DECL_FIELD_CONTEXT (field);
if (ipa_type_escape_field_does_not_clobber_p (fieldcontext,
TREE_TYPE (field)))
- return 1;
+ return 1;
}
{
tree t = decl_for_component_ref (expry);
#endif
/* If this insn has a noalias note, process it, Otherwise,
- scan for sets. A simple set will have no side effects
- which could change the base value of any other register. */
+ scan for sets. A simple set will have no side effects
+ which could change the base value of any other register. */
if (GET_CODE (PATTERN (insn)) == SET
&& REG_NOTES (insn) != 0
/* Now propagate values from new_reg_base_value to reg_base_value. */
gcc_assert (maxreg == (unsigned int) max_reg_num());
-
+
for (ui = 0; ui < maxreg; ui++)
{
if (new_reg_base_value[ui]
slot = (struct alloc_pool_descriptor **)
htab_find_slot_with_hash (alloc_pool_hash, name,
- htab_hash_pointer (name),
+ htab_hash_pointer (name),
1);
if (*slot)
return *slot;
/* Mark the element to be free. */
((allocation_object *) block)->id = 0;
#endif
- header = (alloc_pool_list) USER_PTR_FROM_ALLOCATION_OBJECT_PTR (block);
- header->next = pool->free_list;
- pool->free_list = header;
+ header = (alloc_pool_list) USER_PTR_FROM_ALLOCATION_OBJECT_PTR (block);
+ header->next = pool->free_list;
+ pool->free_list = header;
}
/* Also update the number of elements we have free/allocated, and
- increment the allocated block count. */
+ increment the allocated block count. */
pool->elts_allocated += pool->elts_per_block;
pool->elts_free += pool->elts_per_block;
pool->blocks_allocated += 1;
/* The name must not begin and end with __. */
const char *name = attribute_tables[i][j].name;
int len = strlen (name);
-
+
gcc_assert (!(name[0] == '_' && name[1] == '_'
&& name[len - 1] == '_' && name[len - 2] == '_'));
-
+
/* The minimum and maximum lengths must be consistent. */
gcc_assert (attribute_tables[i][j].min_length >= 0);
-
+
gcc_assert (attribute_tables[i][j].max_length == -1
|| (attribute_tables[i][j].max_length
>= attribute_tables[i][j].min_length));
-
+
/* An attribute cannot require both a DECL and a TYPE. */
gcc_assert (!attribute_tables[i][j].decl_required
|| !attribute_tables[i][j].type_required);
-
+
/* If an attribute requires a function type, in particular
it requires a type. */
gcc_assert (!attribute_tables[i][j].function_type_required
pull out the target type now, frob it as appropriate, and
rebuild the pointer type later.
- This would all be simpler if attributes were part of the
- declarator, grumble grumble. */
+ This would all be simpler if attributes were part of the
+ declarator, grumble grumble. */
fn_ptr_tmp = TREE_TYPE (*anode);
anode = &fn_ptr_tmp;
flags &= ~(int) ATTR_FLAG_TYPE_IN_PLACE;
extern void clear_edges (void);
extern rtx first_insn_after_basic_block_note (basic_block);
extern void scale_bbs_frequencies_int (basic_block *, int, int, int);
-extern void scale_bbs_frequencies_gcov_type (basic_block *, int, gcov_type,
+extern void scale_bbs_frequencies_gcov_type (basic_block *, int, gcov_type,
gcov_type);
/* Structure to group all of the information to process IF-THEN and
an element might be removed during the traversal, otherwise
elements will be missed. Instead, use a for-loop like that shown
in the following pseudo-code:
-
+
FOR (ei = ei_start (bb->succs); (e = ei_safe_edge (ei)); )
{
IF (e != taken_edge)
extern void free_dominance_info (enum cdi_direction);
extern basic_block nearest_common_dominator (enum cdi_direction,
basic_block, basic_block);
-extern basic_block nearest_common_dominator_for_set (enum cdi_direction,
+extern basic_block nearest_common_dominator_for_set (enum cdi_direction,
bitmap);
extern void set_immediate_dominator (enum cdi_direction, basic_block,
basic_block);
#define N_ROUNDS 5
/* Stubs in case we don't have a return insn.
- We have to check at runtime too, not only compiletime. */
+ We have to check at runtime too, not only compiletime. */
#ifndef HAVE_return
#define HAVE_return 0
static bool copy_bb_p (basic_block, int);
static int get_uncond_jump_length (void);
static bool push_to_next_round_p (basic_block, int, int, int, gcov_type);
-static void find_rarely_executed_basic_blocks_and_crossing_edges (edge *,
+static void find_rarely_executed_basic_blocks_and_crossing_edges (edge *,
int *,
int *);
static void add_labels_and_missing_jumps (edge *, int);
there_exists_another_round = round < number_of_rounds - 1;
- block_not_hot_enough = (bb->frequency < exec_th
+ block_not_hot_enough = (bb->frequency < exec_th
|| bb->count < count_th
|| probably_never_executed_bb_p (bb));
if (there_exists_another_round
&& block_not_hot_enough)
return true;
- else
+ else
return false;
}
/* Duplicate HEADER if it is a small block containing cond jump
in the end. */
if (any_condjump_p (BB_END (header)) && copy_bb_p (header, 0)
- && !find_reg_note (BB_END (header), REG_CROSSING_JUMP,
+ && !find_reg_note (BB_END (header), REG_CROSSING_JUMP,
NULL_RTX))
copy_bb (header, single_succ_edge (prev_bb), prev_bb, trace_n);
}
fprintf (dump_file, "Getting bb %d\n", bb->index);
/* If the BB's frequency is too low send BB to the next round. When
- partitioning hot/cold blocks into separate sections, make sure all
- the cold blocks (and ONLY the cold blocks) go into the (extra) final
- round. */
+ partitioning hot/cold blocks into separate sections, make sure all
+ the cold blocks (and ONLY the cold blocks) go into the (extra) final
+ round. */
- if (push_to_next_round_p (bb, round, number_of_rounds, exec_th,
+ if (push_to_next_round_p (bb, round, number_of_rounds, exec_th,
count_th))
{
int key = bb_to_key (bb);
fprintf (dump_file, "Basic block %d was visited in trace %d\n",
bb->index, *n_traces - 1);
- ends_in_call = block_ends_with_call_p (bb);
+ ends_in_call = block_ends_with_call_p (bb);
/* Select the successor that will be placed after BB. */
FOR_EACH_EDGE (e, ei, bb->succs)
the cold blocks (and only the cold blocks) all get
pushed to the last round of trace collection. */
- if (push_to_next_round_p (e->dest, round,
+ if (push_to_next_round_p (e->dest, round,
number_of_rounds,
exec_th, count_th))
which_heap = new_heap;
best_edge->dest->index, bb->index);
}
bb->aux = best_edge->dest;
- bbd[best_edge->dest->index].in_trace =
- (*n_traces) - 1;
+ bbd[best_edge->dest->index].in_trace =
+ (*n_traces) - 1;
bb = rotate_loop (best_edge, trace, *n_traces);
}
}
non-crossing edges over crossing edges. */
if (!is_better_edge
- && flag_reorder_blocks_and_partition
- && cur_best_edge
+ && flag_reorder_blocks_and_partition
+ && cur_best_edge
&& (cur_best_edge->flags & EDGE_CROSSING)
&& !(e->flags & EDGE_CROSSING))
is_better_edge = true;
if (flag_reorder_blocks_and_partition)
for (i = 0; i < n_traces && !two_passes; i++)
- if (BB_PARTITION (traces[0].first)
+ if (BB_PARTITION (traces[0].first)
!= BB_PARTITION (traces[i].first))
two_passes = true;
else
current_partition = BB_HOT_PARTITION;
}
-
+
if (connected[t])
continue;
- if (two_passes
+ if (two_passes
&& BB_PARTITION (traces[t].first) != current_partition)
continue;
cache locality). */
static void
-find_rarely_executed_basic_blocks_and_crossing_edges (edge *crossing_edges,
- int *n_crossing_edges,
+find_rarely_executed_basic_blocks_and_crossing_edges (edge *crossing_edges,
+ int *n_crossing_edges,
int *max_idx)
{
basic_block bb;
edge_iterator ei;
/* Mark which partition (hot/cold) each basic block belongs in. */
-
+
FOR_EACH_BB (bb)
{
if (probably_never_executed_bb_p (bb))
Convert any fall-through crossing edges (for blocks that do not contain
a jump) to unconditional jumps. */
-static void
+static void
add_labels_and_missing_jumps (edge *crossing_edges, int n_crossing_edges)
{
int i;
rtx label;
rtx barrier;
rtx new_jump;
-
- for (i=0; i < n_crossing_edges; i++)
+
+ for (i=0; i < n_crossing_edges; i++)
{
- if (crossing_edges[i])
- {
- src = crossing_edges[i]->src;
- dest = crossing_edges[i]->dest;
-
- /* Make sure dest has a label. */
-
- if (dest && (dest != EXIT_BLOCK_PTR))
- {
+ if (crossing_edges[i])
+ {
+ src = crossing_edges[i]->src;
+ dest = crossing_edges[i]->dest;
+
+ /* Make sure dest has a label. */
+
+ if (dest && (dest != EXIT_BLOCK_PTR))
+ {
label = block_label (dest);
-
- /* Make sure source block ends with a jump. */
-
- if (src && (src != ENTRY_BLOCK_PTR))
- {
+
+ /* Make sure source block ends with a jump. */
+
+ if (src && (src != ENTRY_BLOCK_PTR))
+ {
if (!JUMP_P (BB_END (src)))
- /* bb just falls through. */
- {
- /* make sure there's only one successor */
+ /* bb just falls through. */
+ {
+ /* make sure there's only one successor */
gcc_assert (single_succ_p (src));
-
+
/* Find label in dest block. */
label = block_label (dest);
-
- new_jump = emit_jump_insn_after (gen_jump (label),
+
+ new_jump = emit_jump_insn_after (gen_jump (label),
BB_END (src));
barrier = emit_barrier_after (new_jump);
JUMP_LABEL (new_jump) = label;
src->il.rtl->footer = unlink_insn_chain (barrier, barrier);
/* Mark edge as non-fallthru. */
crossing_edges[i]->flags &= ~EDGE_FALLTHRU;
- } /* end: 'if (GET_CODE ... ' */
- } /* end: 'if (src && src->index...' */
- } /* end: 'if (dest && dest->index...' */
- } /* end: 'if (crossing_edges[i]...' */
+ } /* end: 'if (GET_CODE ... ' */
+ } /* end: 'if (src && src->index...' */
+ } /* end: 'if (dest && dest->index...' */
+ } /* end: 'if (crossing_edges[i]...' */
} /* end for loop */
}
unconditional jump (crossing edge) to the original fall through
destination. */
-static void
+static void
fix_up_fall_thru_edges (void)
{
basic_block cur_bb;
rtx old_jump;
rtx fall_thru_label;
rtx barrier;
-
+
FOR_EACH_BB (cur_bb)
{
fall_thru = NULL;
succ1 = NULL;
if (EDGE_COUNT (cur_bb->succs) > 1)
- succ2 = EDGE_SUCC (cur_bb, 1);
+ succ2 = EDGE_SUCC (cur_bb, 1);
else
- succ2 = NULL;
-
+ succ2 = NULL;
+
/* Find the fall-through edge. */
-
- if (succ1
- && (succ1->flags & EDGE_FALLTHRU))
- {
- fall_thru = succ1;
- cond_jump = succ2;
- }
- else if (succ2
- && (succ2->flags & EDGE_FALLTHRU))
- {
- fall_thru = succ2;
- cond_jump = succ1;
- }
-
+
+ if (succ1
+ && (succ1->flags & EDGE_FALLTHRU))
+ {
+ fall_thru = succ1;
+ cond_jump = succ2;
+ }
+ else if (succ2
+ && (succ2->flags & EDGE_FALLTHRU))
+ {
+ fall_thru = succ2;
+ cond_jump = succ1;
+ }
+
if (fall_thru && (fall_thru->dest != EXIT_BLOCK_PTR))
- {
- /* Check to see if the fall-thru edge is a crossing edge. */
-
+ {
+ /* Check to see if the fall-thru edge is a crossing edge. */
+
if (fall_thru->flags & EDGE_CROSSING)
- {
+ {
/* The fall_thru edge crosses; now check the cond jump edge, if
- it exists. */
-
- cond_jump_crosses = true;
- invert_worked = 0;
+ it exists. */
+
+ cond_jump_crosses = true;
+ invert_worked = 0;
old_jump = BB_END (cur_bb);
-
- /* Find the jump instruction, if there is one. */
-
- if (cond_jump)
- {
+
+ /* Find the jump instruction, if there is one. */
+
+ if (cond_jump)
+ {
if (!(cond_jump->flags & EDGE_CROSSING))
- cond_jump_crosses = false;
-
- /* We know the fall-thru edge crosses; if the cond
- jump edge does NOT cross, and its destination is the
+ cond_jump_crosses = false;
+
+ /* We know the fall-thru edge crosses; if the cond
+ jump edge does NOT cross, and its destination is the
next block in the bb order, invert the jump
- (i.e. fix it so the fall thru does not cross and
- the cond jump does). */
-
+ (i.e. fix it so the fall thru does not cross and
+ the cond jump does). */
+
if (!cond_jump_crosses
&& cur_bb->aux == cond_jump->dest)
- {
- /* Find label in fall_thru block. We've already added
- any missing labels, so there must be one. */
-
- fall_thru_label = block_label (fall_thru->dest);
-
- if (old_jump && fall_thru_label)
- invert_worked = invert_jump (old_jump,
- fall_thru_label,0);
- if (invert_worked)
- {
- fall_thru->flags &= ~EDGE_FALLTHRU;
- cond_jump->flags |= EDGE_FALLTHRU;
- update_br_prob_note (cur_bb);
- e = fall_thru;
- fall_thru = cond_jump;
- cond_jump = e;
+ {
+ /* Find label in fall_thru block. We've already added
+ any missing labels, so there must be one. */
+
+ fall_thru_label = block_label (fall_thru->dest);
+
+ if (old_jump && fall_thru_label)
+ invert_worked = invert_jump (old_jump,
+ fall_thru_label,0);
+ if (invert_worked)
+ {
+ fall_thru->flags &= ~EDGE_FALLTHRU;
+ cond_jump->flags |= EDGE_FALLTHRU;
+ update_br_prob_note (cur_bb);
+ e = fall_thru;
+ fall_thru = cond_jump;
+ cond_jump = e;
cond_jump->flags |= EDGE_CROSSING;
fall_thru->flags &= ~EDGE_CROSSING;
- }
- }
- }
-
- if (cond_jump_crosses || !invert_worked)
- {
- /* This is the case where both edges out of the basic
- block are crossing edges. Here we will fix up the
+ }
+ }
+ }
+
+ if (cond_jump_crosses || !invert_worked)
+ {
+ /* This is the case where both edges out of the basic
+ block are crossing edges. Here we will fix up the
fall through edge. The jump edge will be taken care
of later. */
-
- new_bb = force_nonfallthru (fall_thru);
-
- if (new_bb)
- {
- new_bb->aux = cur_bb->aux;
- cur_bb->aux = new_bb;
-
- /* Make sure new fall-through bb is in same
+
+ new_bb = force_nonfallthru (fall_thru);
+
+ if (new_bb)
+ {
+ new_bb->aux = cur_bb->aux;
+ cur_bb->aux = new_bb;
+
+ /* Make sure new fall-through bb is in same
partition as bb it's falling through from. */
BB_COPY_PARTITION (new_bb, cur_bb);
single_succ_edge (new_bb)->flags |= EDGE_CROSSING;
- }
-
- /* Add barrier after new jump */
-
- if (new_bb)
- {
- barrier = emit_barrier_after (BB_END (new_bb));
- new_bb->il.rtl->footer = unlink_insn_chain (barrier,
- barrier);
- }
- else
- {
- barrier = emit_barrier_after (BB_END (cur_bb));
- cur_bb->il.rtl->footer = unlink_insn_chain (barrier,
- barrier);
- }
- }
- }
- }
+ }
+
+ /* Add barrier after new jump */
+
+ if (new_bb)
+ {
+ barrier = emit_barrier_after (BB_END (new_bb));
+ new_bb->il.rtl->footer = unlink_insn_chain (barrier,
+ barrier);
+ }
+ else
+ {
+ barrier = emit_barrier_after (BB_END (cur_bb));
+ cur_bb->il.rtl->footer = unlink_insn_chain (barrier,
+ barrier);
+ }
+ }
+ }
+ }
}
}
contain unconditional jumps to the same destination). */
static basic_block
-find_jump_block (basic_block jump_dest)
-{
- basic_block source_bb = NULL;
+find_jump_block (basic_block jump_dest)
+{
+ basic_block source_bb = NULL;
edge e;
rtx insn;
edge_iterator ei;
if (e->flags & EDGE_CROSSING)
{
basic_block src = e->src;
-
+
/* Check each predecessor to see if it has a label, and contains
only one executable instruction, which is an unconditional jump.
If so, we can use it. */
-
+
if (LABEL_P (BB_HEAD (src)))
- for (insn = BB_HEAD (src);
+ for (insn = BB_HEAD (src);
!INSN_P (insn) && insn != NEXT_INSN (BB_END (src));
insn = NEXT_INSN (insn))
{
break;
}
}
-
+
if (source_bb)
break;
}
rtx barrier;
last_bb = EXIT_BLOCK_PTR->prev_bb;
-
+
FOR_EACH_BB (cur_bb)
{
crossing_edge = NULL;
succ1 = EDGE_SUCC (cur_bb, 0);
else
succ1 = NULL;
-
+
if (EDGE_COUNT (cur_bb->succs) > 1)
succ2 = EDGE_SUCC (cur_bb, 1);
else
succ2 = NULL;
-
+
/* We already took care of fall-through edges, so only one successor
can be a crossing edge. */
-
+
if (succ1 && (succ1->flags & EDGE_CROSSING))
crossing_edge = succ1;
else if (succ2 && (succ2->flags & EDGE_CROSSING))
- crossing_edge = succ2;
-
- if (crossing_edge)
- {
+ crossing_edge = succ2;
+
+ if (crossing_edge)
+ {
old_jump = BB_END (cur_bb);
-
+
/* Check to make sure the jump instruction is a
conditional jump. */
-
+
set_src = NULL_RTX;
if (any_condjump_p (old_jump))
old_label = XEXP (set_src, 2);
else if (GET_CODE (XEXP (set_src, 2)) == PC)
old_label = XEXP (set_src, 1);
-
+
/* Check to see if new bb for jumping to that dest has
already been created; if so, use it; if not, create
a new one. */
new_bb = find_jump_block (crossing_edge->dest);
-
+
if (new_bb)
new_label = block_label (new_bb);
else
{
/* Create new basic block to be dest for
conditional jump. */
-
+
new_bb = create_basic_block (NULL, NULL, last_bb);
new_bb->aux = last_bb->aux;
last_bb->aux = new_bb;
prev_bb = last_bb;
last_bb = new_bb;
-
+
/* Update register liveness information. */
-
+
new_bb->il.rtl->global_live_at_start = ALLOC_REG_SET (®_obstack);
new_bb->il.rtl->global_live_at_end = ALLOC_REG_SET (®_obstack);
COPY_REG_SET (new_bb->il.rtl->global_live_at_end,
prev_bb->il.rtl->global_live_at_end);
COPY_REG_SET (new_bb->il.rtl->global_live_at_start,
prev_bb->il.rtl->global_live_at_end);
-
+
/* Put appropriate instructions in new bb. */
-
+
new_label = gen_label_rtx ();
emit_label_before (new_label, BB_HEAD (new_bb));
BB_HEAD (new_bb) = new_label;
-
+
if (GET_CODE (old_label) == LABEL_REF)
{
old_label = JUMP_LABEL (old_jump);
- new_jump = emit_jump_insn_after (gen_jump
- (old_label),
+ new_jump = emit_jump_insn_after (gen_jump
+ (old_label),
BB_END (new_bb));
}
else
{
gcc_assert (HAVE_return
&& GET_CODE (old_label) == RETURN);
- new_jump = emit_jump_insn_after (gen_return (),
+ new_jump = emit_jump_insn_after (gen_return (),
BB_END (new_bb));
}
-
+
barrier = emit_barrier_after (new_jump);
JUMP_LABEL (new_jump) = old_label;
- new_bb->il.rtl->footer = unlink_insn_chain (barrier,
+ new_bb->il.rtl->footer = unlink_insn_chain (barrier,
barrier);
-
+
/* Make sure new bb is in same partition as source
of conditional branch. */
BB_COPY_PARTITION (new_bb, cur_bb);
}
-
+
/* Make old jump branch to new bb. */
-
+
redirect_jump (old_jump, new_label, 0);
-
+
/* Remove crossing_edge as predecessor of 'dest'. */
-
+
dest = crossing_edge->dest;
-
+
redirect_edge_succ (crossing_edge, new_bb);
-
+
/* Make a new edge from new_bb to old dest; new edge
will be a successor for new_bb and a predecessor
for 'dest'. */
-
+
if (EDGE_COUNT (new_bb->succs) == 0)
new_edge = make_edge (new_bb, dest, 0);
else
new_edge = EDGE_SUCC (new_bb, 0);
-
+
crossing_edge->flags &= ~EDGE_CROSSING;
new_edge->flags |= EDGE_CROSSING;
}
- }
+ }
}
}
succ = EDGE_SUCC (cur_bb, 0);
/* Check to see if bb ends in a crossing (unconditional) jump. At
- this point, no crossing jumps should be conditional. */
+ this point, no crossing jumps should be conditional. */
if (JUMP_P (last_insn)
&& (succ->flags & EDGE_CROSSING))
/* We have found a "crossing" unconditional branch. Now
we must convert it to an indirect jump. First create
reference of label, as target for jump. */
-
+
label = JUMP_LABEL (last_insn);
label_addr = gen_rtx_LABEL_REF (Pmode, label);
LABEL_NUSES (label) += 1;
-
+
/* Get a register to use for the indirect jump. */
-
+
new_reg = gen_reg_rtx (Pmode);
-
+
/* Generate indirect the jump sequence. */
-
+
start_sequence ();
emit_move_insn (new_reg, label_addr);
emit_indirect_jump (new_reg);
indirect_jump_sequence = get_insns ();
end_sequence ();
-
+
/* Make sure every instruction in the new jump sequence has
its basic block set to be cur_bb. */
-
+
for (cur_insn = indirect_jump_sequence; cur_insn;
cur_insn = NEXT_INSN (cur_insn))
{
if (JUMP_P (cur_insn))
jump_insn = cur_insn;
}
-
+
/* Insert the new (indirect) jump sequence immediately before
the unconditional jump, then delete the unconditional jump. */
-
+
emit_insn_before (indirect_jump_sequence, last_insn);
delete_insn (last_insn);
-
+
/* Make BB_END for cur_bb be the jump instruction (NOT the
barrier instruction at the end of the sequence...). */
-
+
BB_END (cur_bb) = jump_insn;
}
}
FOR_EACH_EDGE (e, ei, bb->succs)
if ((e->flags & EDGE_CROSSING)
&& JUMP_P (BB_END (e->src)))
- REG_NOTES (BB_END (e->src)) = gen_rtx_EXPR_LIST (REG_CROSSING_JUMP,
- NULL_RTX,
- REG_NOTES (BB_END
+ REG_NOTES (BB_END (e->src)) = gen_rtx_EXPR_LIST (REG_CROSSING_JUMP,
+ NULL_RTX,
+ REG_NOTES (BB_END
(e->src)));
}
sections are converted to indirect jumps.
The code for fixing up fall_thru edges that cross between hot and
- cold basic blocks does so by creating new basic blocks containing
- unconditional branches to the appropriate label in the "other"
+ cold basic blocks does so by creating new basic blocks containing
+ unconditional branches to the appropriate label in the "other"
section. The new basic block is then put in the same (hot or cold)
section as the original conditional branch, and the fall_thru edge
is modified to fall into the new basic block instead. By adding
this level of indirection we end up with only unconditional branches
- crossing between hot and cold sections.
-
+ crossing between hot and cold sections.
+
Conditional branches are dealt with by adding a level of indirection.
- A new basic block is added in the same (hot/cold) section as the
+ A new basic block is added in the same (hot/cold) section as the
conditional branch, and the conditional branch is retargeted to the
new basic block. The new basic block contains an unconditional branch
to the original target of the conditional branch (in the other section).
Unconditional branches are dealt with by converting them into
indirect jumps. */
-static void
-fix_edges_for_rarely_executed_code (edge *crossing_edges,
+static void
+fix_edges_for_rarely_executed_code (edge *crossing_edges,
int n_crossing_edges)
{
/* Make sure the source of any crossing edge ends in a jump and the
destination of any crossing edge has a label. */
-
+
add_labels_and_missing_jumps (crossing_edges, n_crossing_edges);
-
+
/* Convert all crossing fall_thru edges to non-crossing fall
thrus to unconditional jumps (that jump to the original fall
thru dest). */
-
+
fix_up_fall_thru_edges ();
-
+
/* If the architecture does not have conditional branches that can
span all of memory, convert crossing conditional branches into
crossing unconditional branches. */
-
+
if (!HAS_LONG_COND_BRANCH)
fix_crossing_conditional_branches ();
-
+
/* If the architecture does not have unconditional branches that
can span all of memory, convert crossing unconditional branches
into indirect jumps. Since adding an indirect jump also adds
a new register usage, update the register usage information as
well. */
-
+
if (!HAS_LONG_UNCOND_BRANCH)
{
fix_crossing_unconditional_branches ();
reg_scan (get_insns(), max_reg_num ());
}
-
+
add_reg_crossing_jump_notes ();
}
int err = 0;
bool switched_sections = false;
int current_partition = 0;
-
+
FOR_EACH_BB (bb)
{
if (!current_partition)
}
}
}
-
+
gcc_assert(!err);
}
basic_block bb;
rtx new_note;
int first_partition = 0;
-
+
if (flag_reorder_blocks_and_partition)
FOR_EACH_BB (bb)
{
bb->il.rtl->visited = 1;
/* BB must have one outgoing edge. That edge must not lead to
- the exit block or the next block.
+ the exit block or the next block.
The destination must have more than one predecessor. */
if (!single_succ_p (bb)
|| single_succ (bb) == EXIT_BLOCK_PTR
edge *crossing_edges;
int n_crossing_edges;
int max_edges = 2 * last_basic_block;
-
+
if (n_basic_blocks <= NUM_FIXED_BLOCKS + 1)
return;
-
+
crossing_edges = XCNEWVEC (edge, max_edges);
cfg_layout_initialize (0);
-
+
FOR_EACH_BB (cur_bb)
if (cur_bb->index >= NUM_FIXED_BLOCKS
- && cur_bb->next_bb->index >= NUM_FIXED_BLOCKS)
+ && cur_bb->next_bb->index >= NUM_FIXED_BLOCKS)
cur_bb->aux = cur_bb->next_bb;
-
- find_rarely_executed_basic_blocks_and_crossing_edges (crossing_edges,
- &n_crossing_edges,
+
+ find_rarely_executed_basic_blocks_and_crossing_edges (crossing_edges,
+ &n_crossing_edges,
&max_edges);
if (n_crossing_edges > 0)
fix_edges_for_rarely_executed_code (crossing_edges, n_crossing_edges);
-
+
free (crossing_edges);
cfg_layout_finalize();
but should not be terribly bad. */
if (changed && HAVE_conditional_execution)
update_life_info (NULL, UPDATE_LIFE_GLOBAL_RM_NOTES,
- PROP_DEATH_NOTES);
+ PROP_DEATH_NOTES);
/* Add NOTE_INSN_SWITCH_TEXT_SECTIONS notes. */
insert_section_boundary_note ();
arises. */
return (flag_reorder_blocks_and_partition
- && !DECL_ONE_ONLY (current_function_decl)
- && !user_defined_section_attribute);
+ && !DECL_ONE_ONLY (current_function_decl)
+ && !user_defined_section_attribute);
}
/* Partition hot and cold basic blocks. */
partition_hot_cold_basic_blocks ();
allocate_reg_life_data ();
update_life_info (NULL, UPDATE_LIFE_GLOBAL_RM_NOTES,
- PROP_LOG_LINKS | PROP_REG_INFO | PROP_DEATH_NOTES);
+ PROP_LOG_LINKS | PROP_REG_INFO | PROP_DEATH_NOTES);
no_new_pseudos = 1;
return 0;
}
bitmap_elem_to_freelist (bitmap head, bitmap_element *elt)
{
bitmap_obstack *bit_obstack = head->obstack;
-
+
elt->next = NULL;
if (bit_obstack)
{
head->current = next != 0 ? next : prev;
if (head->current)
head->indx = head->current->indx;
- else
+ else
head->indx = 0;
}
bitmap_elem_to_freelist (head, elt);
{
bitmap_element *element;
bitmap_obstack *bit_obstack = head->obstack;
-
+
if (bit_obstack)
{
element = bit_obstack->elements;
-
+
if (element)
/* Use up the inner list first before looking at the next
element of the outer list. */
head->current = prev;
head->indx = prev->indx;
}
- }
+ }
else
{
head->first = NULL;
head->indx = 0;
}
- /* Put the entire list onto the free list in one operation. */
+ /* Put the entire list onto the free list in one operation. */
if (bit_obstack)
{
- elt->prev = bit_obstack->elements;
+ elt->prev = bit_obstack->elements;
bit_obstack->elements = elt;
}
else
{
if (!bit_obstack)
bit_obstack = &bitmap_default_obstack;
-
+
bit_obstack->elements = NULL;
bit_obstack->heads = NULL;
obstack_free (&bit_obstack->obstack, NULL);
\f
#if GCC_VERSION < 3400
/* Table of number of set bits in a character, indexed by value of char. */
-static unsigned char popcount_table[] =
+static unsigned char popcount_table[] =
{
0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,1,2,2,3,2,3,3,4,2,3,3,4,3,4,4,5,
1,2,2,3,2,3,3,4,2,3,3,4,3,4,4,5,2,3,3,4,3,4,4,5,3,4,4,5,4,5,5,6,
of BITMAP_WORD is not material. */
count += __builtin_popcountl (elt->bits[ix]);
#else
- count += bitmap_popcount (elt->bits[ix]);
+ count += bitmap_popcount (elt->bits[ix]);
#endif
}
}
return count;
}
-
+
/* Return the bit number of the first set bit in the bitmap. The
unsigned bit_no;
BITMAP_WORD word;
unsigned ix;
-
+
gcc_assert (elt);
bit_no = elt->indx * BITMAP_ELEMENT_ALL_BITS;
for (ix = 0; ix != BITMAP_ELEMENT_WORDS; ix++)
word >>= 2, bit_no += 2;
if (!(word & 0x1))
word >>= 1, bit_no += 1;
-
+
gcc_assert (word & 1);
#endif
return bit_no;
if (!dst_elt)
dst_elt = bitmap_elt_insert_after (dst, dst_prev, a_elt->indx);
- else
+ else
dst_elt->indx = a_elt->indx;
for (ix = BITMAP_ELEMENT_WORDS; ix--;)
{
bitmap_element *b_elt = b->first;
bitmap_element *next;
- if (a == b)
+ if (a == b)
return;
while (a_elt && b_elt)
bitmap_element *dst_prev = NULL;
gcc_assert (dst != a && dst != b);
-
+
if (a == b)
{
bitmap_clear (dst);
if (!dst_elt)
dst_elt = bitmap_elt_insert_after (dst, dst_prev, a_elt->indx);
- else
+ else
dst_elt->indx = a_elt->indx;
for (ix = BITMAP_ELEMENT_WORDS; ix--;)
{
to the result. If the current is less than first index, find the
next one. Otherwise, just set elt to be current. */
if (!elt)
- {
+ {
if (head->current)
{
if (head->indx < first_index)
if (!elt)
return;
}
- else
+ else
elt = head->current;
}
else
if (elt_start_bit >= start && elt_end_bit_plus1 <= end_bit_plus1)
/* Get rid of the entire elt and go to the next one. */
bitmap_element_free (head, elt);
- else
+ else
{
/* Going to have to knock out some bits in this elt. */
- unsigned int first_word_to_mod;
- BITMAP_WORD first_mask;
+ unsigned int first_word_to_mod;
+ BITMAP_WORD first_mask;
unsigned int last_word_to_mod;
BITMAP_WORD last_mask;
unsigned int i;
first_word_to_mod = (start - elt_start_bit) / BITMAP_WORD_BITS;
/* This mask should have 1s in all bits >= start position. */
- first_mask =
+ first_mask =
(((BITMAP_WORD) 1) << ((start % BITMAP_WORD_BITS))) - 1;
first_mask = ~first_mask;
}
first_word_to_mod = 0;
first_mask = 0;
first_mask = ~first_mask;
- }
-
+ }
+
if (elt_end_bit_plus1 <= end_bit_plus1)
{
/* The last bit to turn off is beyond this elt. */
else
{
/* The last bit to turn off is inside to this elt. */
- last_word_to_mod =
+ last_word_to_mod =
(end_bit_plus1 - elt_start_bit) / BITMAP_WORD_BITS;
/* The last mask should have 1s below the end bit. */
- last_mask =
+ last_mask =
(((BITMAP_WORD) 1) << (((end_bit_plus1) % BITMAP_WORD_BITS))) - 1;
}
}
elt = next_elt;
}
-
+
if (elt)
{
head->current = elt;
bitmap_element *a_elt = a->first;
bitmap_element *b_elt = b->first;
bitmap_element *dst_prev = NULL;
- bool changed = false;
+ bool changed = false;
gcc_assert (dst != a && dst != b);
{
/* Matching elts, generate A | B. */
unsigned ix;
-
+
if (!changed && dst_elt && dst_elt->indx == a_elt->indx)
{
for (ix = BITMAP_ELEMENT_WORDS; ix--;)
changed = true;
if (!dst_elt)
dst_elt = bitmap_elt_insert_after (dst, dst_prev, a_elt->indx);
- else
+ else
dst_elt->indx = a_elt->indx;
for (ix = BITMAP_ELEMENT_WORDS; ix--;)
{
BITMAP_WORD r = a_elt->bits[ix] | b_elt->bits[ix];
-
+
dst_elt->bits[ix] = r;
}
}
if (!changed && dst_elt && dst_elt->indx == src->indx)
{
unsigned ix;
-
+
for (ix = BITMAP_ELEMENT_WORDS; ix--;)
if (src->bits[ix] != dst_elt->bits[ix])
{
changed = true;
if (!dst_elt)
dst_elt = bitmap_elt_insert_after (dst, dst_prev, src->indx);
- else
+ else
dst_elt->indx = src->indx;
memcpy (dst_elt->bits, src->bits, sizeof (dst_elt->bits));
}
-
+
dst_prev = dst_elt;
dst_elt = dst_elt->next;
}
for (ix = BITMAP_ELEMENT_WORDS; ix--;)
{
BITMAP_WORD r = a_elt->bits[ix] | b_elt->bits[ix];
-
+
a_elt->bits[ix] = r;
}
else
if (!dst_elt)
dst_elt = bitmap_elt_insert_after (dst, dst_prev, src->indx);
- else
+ else
dst_elt->indx = src->indx;
memcpy (dst_elt->bits, src->bits, sizeof (dst_elt->bits));
dst_prev = dst_elt;
bitmap_element *a_elt;
bitmap_element *b_elt;
unsigned ix;
-
+
for (a_elt = a->first, b_elt = b->first;
a_elt && b_elt;
a_elt = a_elt->next, b_elt = b_elt->next)
bitmap_element *a_elt;
bitmap_element *b_elt;
unsigned ix;
-
+
for (a_elt = a->first, b_elt = b->first;
a_elt && b_elt;)
{
{
bitmap_head tmp;
bool changed;
-
+
bitmap_initialize (&tmp, &bitmap_default_obstack);
bitmap_and_compl (&tmp, from1, from2);
changed = bitmap_ior_into (a, &tmp);
/* Bitmap set element. We use a linked list to hold only the bits that
are set. This allows for use to grow the bitset dynamically without
- having to realloc and copy a giant bit array.
+ having to realloc and copy a giant bit array.
The free list is implemented as a list of lists. There is one
outer list connected together by prev fields. Each element of that
/* Do any cleanup needed on a bitmap when it is no longer used. */
#define BITMAP_FREE(BITMAP) \
- ((void)(bitmap_obstack_free (BITMAP), (BITMAP) = NULL))
+ ((void)(bitmap_obstack_free (BITMAP), (BITMAP) = NULL))
/* Iterator for bitmaps. */
{
/* Pointer to the current bitmap element. */
bitmap_element *elt1;
-
+
/* Pointer to 2nd bitmap element when two are involved. */
bitmap_element *elt2;
/* Word within the current element. */
unsigned word_no;
-
+
/* Contents of the actually processed word. When finding next bit
it is shifted right, so that the actual bit is always the least
significant bit of ACTUAL. */
bi->elt1 = &bitmap_zero_bits;
break;
}
-
+
if (bi->elt1->indx >= start_bit / BITMAP_ELEMENT_ALL_BITS)
break;
bi->elt1 = bi->elt1->next;
/* We might have gone past the start bit, so reinitialize it. */
if (bi->elt1->indx != start_bit / BITMAP_ELEMENT_ALL_BITS)
start_bit = bi->elt1->indx * BITMAP_ELEMENT_ALL_BITS;
-
+
/* Initialize for what is now start_bit. */
bi->word_no = start_bit / BITMAP_WORD_BITS % BITMAP_ELEMENT_WORDS;
bi->bits = bi->elt1->bits[bi->word_no];
will fail. It won't matter if this increment moves us into the
next word. */
start_bit += !bi->bits;
-
+
*bit_no = start_bit;
}
bi->elt2 = NULL;
break;
}
-
+
if (bi->elt1->indx >= start_bit / BITMAP_ELEMENT_ALL_BITS)
break;
bi->elt1 = bi->elt1->next;
}
-
+
/* Advance elt2 until it is not before elt1. */
while (1)
{
bi->elt1 = bi->elt2 = &bitmap_zero_bits;
break;
}
-
+
if (bi->elt2->indx >= bi->elt1->indx)
break;
bi->elt2 = bi->elt2->next;
if (bi->elt1->indx == bi->elt2->indx)
{
/* We might have advanced beyond the start_bit, so reinitialize
- for that. */
+ for that. */
if (bi->elt1->indx != start_bit / BITMAP_ELEMENT_ALL_BITS)
start_bit = bi->elt1->indx * BITMAP_ELEMENT_ALL_BITS;
-
+
bi->word_no = start_bit / BITMAP_WORD_BITS % BITMAP_ELEMENT_WORDS;
bi->bits = bi->elt1->bits[bi->word_no] & bi->elt2->bits[bi->word_no];
bi->bits >>= start_bit % BITMAP_WORD_BITS;
bi->word_no = BITMAP_ELEMENT_WORDS - 1;
bi->bits = 0;
}
-
+
/* If this word is zero, we must make sure we're not pointing at the
first bit, otherwise our incrementing to the next word boundary
will fail. It won't matter if this increment moves us into the
next word. */
start_bit += !bi->bits;
-
+
*bit_no = start_bit;
}
bi->elt1 = &bitmap_zero_bits;
break;
}
-
+
if (bi->elt1->indx >= start_bit / BITMAP_ELEMENT_ALL_BITS)
break;
bi->elt1 = bi->elt1->next;
that. */
if (bi->elt1->indx != start_bit / BITMAP_ELEMENT_ALL_BITS)
start_bit = bi->elt1->indx * BITMAP_ELEMENT_ALL_BITS;
-
+
bi->word_no = start_bit / BITMAP_WORD_BITS % BITMAP_ELEMENT_WORDS;
bi->bits = bi->elt1->bits[bi->word_no];
if (bi->elt2 && bi->elt1->indx == bi->elt2->indx)
bi->bits &= ~bi->elt2->bits[bi->word_no];
bi->bits >>= start_bit % BITMAP_WORD_BITS;
-
+
/* If this word is zero, we must make sure we're not pointing at the
first bit, otherwise our incrementing to the next word boundary
will fail. It won't matter if this increment moves us into the
next word. */
start_bit += !bi->bits;
-
+
*bit_no = start_bit;
}
*bit_no += BITMAP_WORD_BITS;
bi->word_no++;
}
-
+
/* Advance to the next element. */
bi->elt1 = bi->elt1->next;
if (!bi->elt1)
*bit_no = ((*bit_no + BITMAP_WORD_BITS - 1)
/ BITMAP_WORD_BITS * BITMAP_WORD_BITS);
bi->word_no++;
-
+
while (1)
{
/* Find the next nonzero word in this elt. */
*bit_no += BITMAP_WORD_BITS;
bi->word_no++;
}
-
+
/* Advance to the next identical element. */
do
{
return false;
}
while (bi->elt1->indx < bi->elt2->indx);
-
+
/* Advance elt2 to be no less than elt1. This might not
advance. */
while (bi->elt2->indx < bi->elt1->indx)
}
}
while (bi->elt1->indx != bi->elt2->indx);
-
+
*bit_no = bi->elt1->indx * BITMAP_ELEMENT_ALL_BITS;
bi->word_no = 0;
}
*bit_no += BITMAP_WORD_BITS;
bi->word_no++;
}
-
+
/* Advance to the next element of elt1. */
bi->elt1 = bi->elt1->next;
if (!bi->elt1)
/* Advance elt2 until it is no less than elt1. */
while (bi->elt2 && bi->elt2->indx < bi->elt1->indx)
bi->elt2 = bi->elt2->next;
-
+
*bit_no = bi->elt1->indx * BITMAP_ELEMENT_ALL_BITS;
bi->word_no = 0;
}
loop state. */
#define EXECUTE_IF_AND_IN_BITMAP(BITMAP1, BITMAP2, MIN, BITNUM, ITER) \
- for (bmp_iter_and_init (&(ITER), (BITMAP1), (BITMAP2), (MIN), \
+ for (bmp_iter_and_init (&(ITER), (BITMAP1), (BITMAP2), (MIN), \
&(BITNUM)); \
bmp_iter_and (&(ITER), &(BITNUM)); \
bmp_iter_next (&(ITER), &(BITNUM)))
#define EXECUTE_IF_AND_COMPL_IN_BITMAP(BITMAP1, BITMAP2, MIN, BITNUM, ITER) \
for (bmp_iter_and_compl_init (&(ITER), (BITMAP1), (BITMAP2), (MIN), \
- &(BITNUM)); \
+ &(BITNUM)); \
bmp_iter_and_compl (&(ITER), &(BITNUM)); \
bmp_iter_next (&(ITER), &(BITNUM)))
&& TEST_HARD_REG_BIT (all_btrs, REGNO (dest)))
{
gcc_assert (!btr_referenced_p (src, NULL));
-
+
if (!check_const || CONSTANT_P (src))
{
if (regno)
int new_block = new_bb->index;
gcc_assert (dominated_by_p (CDI_DOMINATORS, head_bb, new_bb));
-
+
IOR_HARD_REG_SET (*btrs_live_in_range, btrs_live[head_bb->index]);
bitmap_set_bit (live_range, new_block);
/* A previous btr migration could have caused a register to be
- live just at the end of new_block which we need in full, so
- use trs_live_at_end even if full_range is set. */
+ live just at the end of new_block which we need in full, so
+ use trs_live_at_end even if full_range is set. */
IOR_HARD_REG_SET (*btrs_live_in_range, btrs_live_at_end[new_block]);
if (full_range)
IOR_HARD_REG_SET (*btrs_live_in_range, btrs_live[new_block]);
insp = BB_END (b);
for (insp = BB_END (b); ! INSN_P (insp); insp = PREV_INSN (insp))
gcc_assert (insp != BB_HEAD (b));
-
+
if (JUMP_P (insp) || can_throw_internal (insp))
insp = PREV_INSN (insp);
}
&& !warned)
{
warning (0, "branch target register load optimization is not intended "
- "to be run twice");
+ "to be run twice");
warned = 1;
}
NULL, /* sub */
NULL, /* next */
0, /* static_pass_number */
- 0, /* tv_id */
+ 0, /* tv_id */
0, /* properties_required */
0, /* properties_provided */
0, /* properties_destroyed */
DEF_FUNCTION_TYPE_VAR_1 (BT_INT_DOUBLE_VAR, BT_INT, BT_DOUBLE)
describes the type `int ()(double, ...)'.
-
+
DEF_POINTER_TYPE (ENUM, TYPE)
This macro describes a pointer type. ENUM is as above; TYPE is
DEF_FUNCTION_TYPE_1 (BT_FN_INTMAX_INTMAX, BT_INTMAX, BT_INTMAX)
DEF_FUNCTION_TYPE_1 (BT_FN_FLOAT_FLOAT, BT_FLOAT, BT_FLOAT)
DEF_FUNCTION_TYPE_1 (BT_FN_DOUBLE_DOUBLE, BT_DOUBLE, BT_DOUBLE)
-DEF_FUNCTION_TYPE_1 (BT_FN_LONGDOUBLE_LONGDOUBLE,
- BT_LONGDOUBLE, BT_LONGDOUBLE)
-DEF_FUNCTION_TYPE_1 (BT_FN_COMPLEX_FLOAT_COMPLEX_FLOAT,
+DEF_FUNCTION_TYPE_1 (BT_FN_LONGDOUBLE_LONGDOUBLE,
+ BT_LONGDOUBLE, BT_LONGDOUBLE)
+DEF_FUNCTION_TYPE_1 (BT_FN_COMPLEX_FLOAT_COMPLEX_FLOAT,
BT_COMPLEX_FLOAT, BT_COMPLEX_FLOAT)
-DEF_FUNCTION_TYPE_1 (BT_FN_COMPLEX_DOUBLE_COMPLEX_DOUBLE,
+DEF_FUNCTION_TYPE_1 (BT_FN_COMPLEX_DOUBLE_COMPLEX_DOUBLE,
BT_COMPLEX_DOUBLE, BT_COMPLEX_DOUBLE)
-DEF_FUNCTION_TYPE_1 (BT_FN_COMPLEX_LONGDOUBLE_COMPLEX_LONGDOUBLE,
- BT_COMPLEX_LONGDOUBLE, BT_COMPLEX_LONGDOUBLE)
-DEF_FUNCTION_TYPE_1 (BT_FN_FLOAT_COMPLEX_FLOAT,
- BT_FLOAT, BT_COMPLEX_FLOAT)
+DEF_FUNCTION_TYPE_1 (BT_FN_COMPLEX_LONGDOUBLE_COMPLEX_LONGDOUBLE,
+ BT_COMPLEX_LONGDOUBLE, BT_COMPLEX_LONGDOUBLE)
+DEF_FUNCTION_TYPE_1 (BT_FN_FLOAT_COMPLEX_FLOAT,
+ BT_FLOAT, BT_COMPLEX_FLOAT)
DEF_FUNCTION_TYPE_1 (BT_FN_DOUBLE_COMPLEX_DOUBLE,
- BT_DOUBLE, BT_COMPLEX_DOUBLE)
+ BT_DOUBLE, BT_COMPLEX_DOUBLE)
DEF_FUNCTION_TYPE_1 (BT_FN_LONGDOUBLE_COMPLEX_LONGDOUBLE,
- BT_LONGDOUBLE, BT_COMPLEX_LONGDOUBLE)
+ BT_LONGDOUBLE, BT_COMPLEX_LONGDOUBLE)
DEF_FUNCTION_TYPE_1 (BT_FN_PTR_UINT, BT_PTR, BT_UINT)
DEF_FUNCTION_TYPE_1 (BT_FN_PTR_SIZE, BT_PTR, BT_SIZE)
DEF_FUNCTION_TYPE_1 (BT_FN_INT_INT, BT_INT, BT_INT)
DEF_POINTER_TYPE (BT_PTR_FN_VOID_PTR, BT_FN_VOID_PTR)
DEF_FUNCTION_TYPE_2 (BT_FN_VOID_PTR_INT, BT_VOID, BT_PTR, BT_INT)
-DEF_FUNCTION_TYPE_2 (BT_FN_STRING_STRING_CONST_STRING,
- BT_STRING, BT_STRING, BT_CONST_STRING)
+DEF_FUNCTION_TYPE_2 (BT_FN_STRING_STRING_CONST_STRING,
+ BT_STRING, BT_STRING, BT_CONST_STRING)
DEF_FUNCTION_TYPE_2 (BT_FN_INT_CONST_STRING_CONST_STRING,
- BT_INT, BT_CONST_STRING, BT_CONST_STRING)
+ BT_INT, BT_CONST_STRING, BT_CONST_STRING)
DEF_FUNCTION_TYPE_2 (BT_FN_STRING_CONST_STRING_CONST_STRING,
BT_STRING, BT_CONST_STRING, BT_CONST_STRING)
DEF_FUNCTION_TYPE_2 (BT_FN_SIZE_CONST_STRING_CONST_STRING,
- BT_SIZE, BT_CONST_STRING, BT_CONST_STRING)
+ BT_SIZE, BT_CONST_STRING, BT_CONST_STRING)
DEF_FUNCTION_TYPE_2 (BT_FN_STRING_CONST_STRING_INT,
- BT_STRING, BT_CONST_STRING, BT_INT)
+ BT_STRING, BT_CONST_STRING, BT_INT)
DEF_FUNCTION_TYPE_2 (BT_FN_STRING_CONST_STRING_SIZE,
- BT_STRING, BT_CONST_STRING, BT_SIZE)
+ BT_STRING, BT_CONST_STRING, BT_SIZE)
DEF_FUNCTION_TYPE_2 (BT_FN_INT_CONST_STRING_FILEPTR,
BT_INT, BT_CONST_STRING, BT_FILEPTR)
DEF_FUNCTION_TYPE_2 (BT_FN_INT_INT_FILEPTR,
BT_INT, BT_INT, BT_FILEPTR)
DEF_FUNCTION_TYPE_2 (BT_FN_VOID_PTRMODE_PTR,
BT_VOID, BT_PTRMODE, BT_PTR)
-DEF_FUNCTION_TYPE_2 (BT_FN_VOID_VALIST_REF_VALIST_ARG,
+DEF_FUNCTION_TYPE_2 (BT_FN_VOID_VALIST_REF_VALIST_ARG,
BT_VOID, BT_VALIST_REF, BT_VALIST_ARG)
DEF_FUNCTION_TYPE_2 (BT_FN_LONG_LONG_LONG,
BT_LONG, BT_LONG, BT_LONG)
BT_INT, BT_CONST_STRING, BT_VALIST_ARG)
DEF_FUNCTION_TYPE_2 (BT_FN_PTR_SIZE_SIZE,
BT_PTR, BT_SIZE, BT_SIZE)
-DEF_FUNCTION_TYPE_2 (BT_FN_COMPLEX_FLOAT_COMPLEX_FLOAT_COMPLEX_FLOAT,
+DEF_FUNCTION_TYPE_2 (BT_FN_COMPLEX_FLOAT_COMPLEX_FLOAT_COMPLEX_FLOAT,
BT_COMPLEX_FLOAT, BT_COMPLEX_FLOAT, BT_COMPLEX_FLOAT)
-DEF_FUNCTION_TYPE_2 (BT_FN_COMPLEX_DOUBLE_COMPLEX_DOUBLE_COMPLEX_DOUBLE,
+DEF_FUNCTION_TYPE_2 (BT_FN_COMPLEX_DOUBLE_COMPLEX_DOUBLE_COMPLEX_DOUBLE,
BT_COMPLEX_DOUBLE, BT_COMPLEX_DOUBLE, BT_COMPLEX_DOUBLE)
-DEF_FUNCTION_TYPE_2 (BT_FN_COMPLEX_LONGDOUBLE_COMPLEX_LONGDOUBLE_COMPLEX_LONGDOUBLE,
- BT_COMPLEX_LONGDOUBLE, BT_COMPLEX_LONGDOUBLE, BT_COMPLEX_LONGDOUBLE)
+DEF_FUNCTION_TYPE_2 (BT_FN_COMPLEX_LONGDOUBLE_COMPLEX_LONGDOUBLE_COMPLEX_LONGDOUBLE,
+ BT_COMPLEX_LONGDOUBLE, BT_COMPLEX_LONGDOUBLE, BT_COMPLEX_LONGDOUBLE)
DEF_FUNCTION_TYPE_2 (BT_FN_VOID_PTR_PTR, BT_VOID, BT_PTR, BT_PTR)
DEF_FUNCTION_TYPE_2 (BT_FN_INT_CONST_STRING_PTR_CONST_STRING,
BT_INT, BT_CONST_STRING, BT_PTR_CONST_STRING)
DEF_FUNCTION_TYPE_3 (BT_FN_PTR_PTR_CONST_PTR_SIZE,
BT_PTR, BT_PTR, BT_CONST_PTR, BT_SIZE)
DEF_FUNCTION_TYPE_3 (BT_FN_INT_CONST_PTR_CONST_PTR_SIZE,
- BT_INT, BT_CONST_PTR, BT_CONST_PTR, BT_SIZE)
+ BT_INT, BT_CONST_PTR, BT_CONST_PTR, BT_SIZE)
DEF_FUNCTION_TYPE_3 (BT_FN_PTR_PTR_INT_SIZE,
- BT_PTR, BT_PTR, BT_INT, BT_SIZE)
+ BT_PTR, BT_PTR, BT_INT, BT_SIZE)
DEF_FUNCTION_TYPE_3 (BT_FN_VOID_PTR_INT_INT,
BT_VOID, BT_PTR, BT_INT, BT_INT)
DEF_FUNCTION_TYPE_3 (BT_FN_VOID_CONST_PTR_PTR_SIZE,
DEF_FUNCTION_TYPE_VAR_0 (BT_FN_INT_VAR, BT_INT)
DEF_FUNCTION_TYPE_VAR_0 (BT_FN_PTR_VAR, BT_PTR)
-DEF_FUNCTION_TYPE_VAR_1 (BT_FN_VOID_VALIST_REF_VAR,
+DEF_FUNCTION_TYPE_VAR_1 (BT_FN_VOID_VALIST_REF_VAR,
BT_VOID, BT_VALIST_REF)
DEF_FUNCTION_TYPE_VAR_1 (BT_FN_VOID_CONST_PTR_VAR,
BT_VOID, BT_CONST_PTR)
DEF_FUNCTION_TYPE_VAR_1 (BT_FN_INT_CONST_STRING_VAR,
- BT_INT, BT_CONST_STRING)
+ BT_INT, BT_CONST_STRING)
DEF_FUNCTION_TYPE_VAR_2 (BT_FN_INT_FILEPTR_CONST_STRING_VAR,
- BT_INT, BT_FILEPTR, BT_CONST_STRING)
+ BT_INT, BT_FILEPTR, BT_CONST_STRING)
DEF_FUNCTION_TYPE_VAR_2 (BT_FN_INT_STRING_CONST_STRING_VAR,
BT_INT, BT_STRING, BT_CONST_STRING)
DEF_FUNCTION_TYPE_VAR_2 (BT_FN_INT_CONST_STRING_CONST_STRING_VAR,
while (handled_component_p (exp))
{
/* Fields in a structure can be packed, honor DECL_ALIGN
- of the FIELD_DECL. For all other references the conservative
+ of the FIELD_DECL. For all other references the conservative
alignment is the element type alignment. */
if (TREE_CODE (exp) == COMPONENT_REF)
inner = MIN (inner, DECL_ALIGN (TREE_OPERAND (exp, 1)));
we must disable frame pointer elimination. */
if (count == 0)
tem = frame_pointer_rtx;
- else
+ else
{
tem = hard_frame_pointer_rtx;
if (n < POWI_TABLE_SIZE)
{
if (cache[n])
- return cache[n];
+ return cache[n];
target = gen_reg_rtx (mode);
cache[n] = target;
return 0;
/* If either SRC is not a pointer type, don't do this
- operation in-line. */
+ operation in-line. */
if (src_align == 0)
return 0;
if (result)
return expand_expr (result, target, mode, EXPAND_NORMAL);
-
+
/* If either SRC or DEST is not a pointer type, don't do this
- operation in-line. */
+ operation in-line. */
if (dest_align == 0 || src_align == 0)
return 0;
return 0;
/* If either SRC is not a pointer type, don't do this
- operation in-line. */
+ operation in-line. */
if (src_align == 0)
return 0;
/* If src is categorized for a readonly section we can use
normal memcpy. */
if (readonly_data_expr (src))
- {
+ {
tree fn = implicit_built_in_decls[BUILT_IN_MEMCPY];
if (!fn)
return 0;
/* If length is 1 and we can expand memcpy call inline,
it is ok to use memcpy as well. */
if (integer_onep (len))
- {
+ {
rtx ret = expand_builtin_mempcpy (arglist, type, target, mode,
/*endp=*/0);
if (ret)
return ret;
- }
+ }
/* Otherwise, call the normal function. */
return 0;
rtx ret;
/* Ensure we get an actual string whose length can be evaluated at
- compile-time, not an expression containing a string. This is
- because the latter will potentially produce pessimized code
- when used to produce the return value. */
+ compile-time, not an expression containing a string. This is
+ because the latter will potentially produce pessimized code
+ when used to produce the return value. */
src = TREE_VALUE (TREE_CHAIN (arglist));
if (! c_getstr (src) || ! (len = c_strlen (src, 0)))
return expand_movstr (TREE_VALUE (arglist),
if (GET_CODE (len_rtx) == CONST_INT)
{
- ret = expand_builtin_strcpy (get_callee_fndecl (exp),
+ ret = expand_builtin_strcpy (get_callee_fndecl (exp),
arglist, target, mode);
if (ret)
tree slen = c_strlen (TREE_VALUE (TREE_CHAIN (arglist)), 1);
tree len = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist)));
tree result = fold_builtin_strncpy (fndecl, arglist, slen);
-
+
if (result)
return expand_expr (result, target, mode, EXPAND_NORMAL);
slen = size_binop (PLUS_EXPR, slen, ssize_int (1));
/* We're required to pad with trailing zeros if the requested
- len is greater than strlen(s2)+1. In that case try to
+ len is greater than strlen(s2)+1. In that case try to
use store_by_pieces, if it fails, punt. */
if (tree_int_cst_lt (slen, len))
{
&& can_store_by_pieces (tree_low_cst (len, 1),
builtin_memset_read_str, &c, dest_align))
{
- val_rtx = force_reg (TYPE_MODE (unsigned_char_type_node),
+ val_rtx = force_reg (TYPE_MODE (unsigned_char_type_node),
val_rtx);
store_by_pieces (dest_mem, tree_low_cst (len, 1),
builtin_memset_gen_str, val_rtx, dest_align, 0);
}
- else if (!set_storage_via_setmem (dest_mem, len_rtx, val_rtx,
+ else if (!set_storage_via_setmem (dest_mem, len_rtx, val_rtx,
dest_align))
goto do_libcall;
rtx arg1_rtx, arg2_rtx;
rtx result, insn = NULL_RTX;
tree fndecl, fn;
-
+
tree arg1 = TREE_VALUE (arglist);
tree arg2 = TREE_VALUE (TREE_CHAIN (arglist));
int arg1_align
/* Try to call cmpstrsi. */
if (HAVE_cmpstrsi)
{
- enum machine_mode insn_mode
+ enum machine_mode insn_mode
= insn_data[(int) CODE_FOR_cmpstrsi].operand[0].mode;
/* Make a place to write the result of the instruction. */
#endif
#ifdef HAVE_cmpstrnsi
/* Try to determine at least one length and call cmpstrnsi. */
- if (!insn && HAVE_cmpstrnsi)
+ if (!insn && HAVE_cmpstrnsi)
{
tree len;
rtx arg3_rtx;
- enum machine_mode insn_mode
+ enum machine_mode insn_mode
= insn_data[(int) CODE_FOR_cmpstrnsi].operand[0].mode;
tree len1 = c_strlen (arg1, 1);
tree len2 = c_strlen (arg2, 1);
const char *p = c_getstr (src);
/* If the string length is zero, return the dst parameter. */
- if (p && *p == '\0')
+ if (p && *p == '\0')
return expand_expr (dst, target, mode, EXPAND_NORMAL);
-
+
if (!optimize_size)
{
/* See if we can store by pieces into (dst + strlen(dst)). */
newsrc = builtin_save_expr (src);
if (newsrc != src)
arglist = build_tree_list (NULL_TREE, newsrc);
- else
+ else
arglist = TREE_CHAIN (arglist); /* Reusing arglist if safe. */
dst = builtin_save_expr (dst);
end_sequence (); /* Stop sequence. */
return 0;
}
-
+
/* Output the entire sequence. */
insns = get_insns ();
end_sequence ();
emit_insn (insns);
-
+
return expand_expr (dst, target, mode, EXPAND_NORMAL);
}
{
/* If va_list is an array type, the argument may have decayed
to a pointer type, e.g. by being passed to another function.
- In that case, unwrap both types so that we can compare the
+ In that case, unwrap both types so that we can compare the
underlying records. */
if (TREE_CODE (have_va_type) == ARRAY_TYPE
|| POINTER_TYPE_P (have_va_type))
else
{
/* Make it easier for the backends by protecting the valist argument
- from multiple evaluations. */
+ from multiple evaluations. */
if (TREE_CODE (va_list_type_node) == ARRAY_TYPE)
{
/* For this case, the backends will be expecting a pointer to
probabilities. */
if (integer_zerop (arg1))
taken = 1 - taken;
- predict_insn_def (insn, PRED_BUILTIN_EXPECT, taken);
+ predict_insn_def (insn, PRED_BUILTIN_EXPECT, taken);
}
}
if (!init_target_chars())
return 0;
-
+
/* If the format specifier was "%s\n", call __builtin_puts(arg). */
if (strcmp (fmt_str, target_percent_s_newline) == 0)
{
if (! arglist
- || ! POINTER_TYPE_P (TREE_TYPE (TREE_VALUE (arglist)))
+ || ! POINTER_TYPE_P (TREE_TYPE (TREE_VALUE (arglist)))
|| TREE_CHAIN (arglist))
return 0;
fn = fn_puts;
{
/* We can't handle anything else with % args or %% ... yet. */
if (strchr (fmt_str, target_percent))
- return 0;
+ return 0;
if (arglist)
return 0;
call. */
static rtx
expand_builtin_fprintf (tree exp, rtx target, enum machine_mode mode,
- bool unlocked)
+ bool unlocked)
{
tree arglist = TREE_OPERAND (exp, 1);
/* If we're using an unlocked function, assume the other unlocked
if (!init_target_chars())
return 0;
-
+
/* If the format specifier was "%s", call __builtin_fputs(arg,fp). */
if (strcmp (fmt_str, target_percent_s) == 0)
{
if (! arglist
- || ! POINTER_TYPE_P (TREE_TYPE (TREE_VALUE (arglist)))
+ || ! POINTER_TYPE_P (TREE_TYPE (TREE_VALUE (arglist)))
|| TREE_CHAIN (arglist))
return 0;
arg = TREE_VALUE (arglist);
{
/* We can't handle anything else with % args or %% ... yet. */
if (strchr (fmt_str, target_percent))
- return 0;
+ return 0;
if (arglist)
return 0;
imode = word_mode;
/* Handle targets with different FP word orders. */
if (FLOAT_WORDS_BIG_ENDIAN)
- word = (GET_MODE_BITSIZE (fmode) - bitpos) / BITS_PER_WORD;
+ word = (GET_MODE_BITSIZE (fmode) - bitpos) / BITS_PER_WORD;
else
- word = bitpos / BITS_PER_WORD;
+ word = bitpos / BITS_PER_WORD;
temp = operand_subword_force (temp, word, fmode);
bitpos = bitpos % BITS_PER_WORD;
}
else
{
/* Perform a logical right shift to place the signbit in the least
- significant bit, then truncate the result to the desired mode
+ significant bit, then truncate the result to the desired mode
and mask just this bit. */
temp = expand_shift (RSHIFT_EXPR, imode, temp,
build_int_cst (NULL_TREE, bitpos), NULL_RTX, 1);
}
/* Expand the __sync_xxx_and_fetch and __sync_fetch_and_xxx intrinsics.
- ARGLIST is the operands list to the function. CODE is the rtx code
+ ARGLIST is the operands list to the function. CODE is the rtx code
that corresponds to the arithmetic or logical operation from the name;
an exception here is that NOT actually means NAND. TARGET is an optional
place for us to store the results; AFTER is true if this is the
CASE_FLT_FN (BUILT_IN_FABS):
target = expand_builtin_fabs (arglist, target, subtarget);
if (target)
- return target;
+ return target;
break;
CASE_FLT_FN (BUILT_IN_COPYSIGN):
/* Return the address of the first anonymous stack arg. */
case BUILT_IN_NEXT_ARG:
if (fold_builtin_next_arg (arglist))
- return const0_rtx;
+ return const0_rtx;
return expand_builtin_next_arg ();
case BUILT_IN_CLASSIFY_TYPE:
case REAL_CST:
if (! TREE_CONSTANT_OVERFLOW (t))
{
- REAL_VALUE_TYPE c, cint;
+ REAL_VALUE_TYPE c, cint;
c = TREE_REAL_CST (t);
real_trunc (&cint, TYPE_MODE (TREE_TYPE (t)), &c);
{
/* Optimize cbrt(expN(x)) -> expN(x/3). */
if (BUILTIN_EXPONENT_P (fcode))
- {
+ {
tree expfn = TREE_OPERAND (TREE_OPERAND (arg, 0), 0);
const REAL_VALUE_TYPE third_trunc =
real_value_truncate (TYPE_MODE (type), dconstthird);
/* Optimize cbrt(sqrt(x)) -> pow(x,1/6). */
if (BUILTIN_SQRT_P (fcode))
- {
+ {
tree powfn = mathfn_built_in (type, BUILT_IN_POW);
if (powfn)
/* Optimize cbrt(cbrt(x)) -> pow(x,1/9) iff x is nonnegative. */
if (BUILTIN_CBRT_P (fcode))
- {
+ {
tree arg0 = TREE_VALUE (TREE_OPERAND (arg, 1));
if (tree_expr_nonnegative_p (arg0))
{
tree powfn = mathfn_built_in (type, BUILT_IN_POW);
if (powfn)
- {
+ {
tree tree_root;
REAL_VALUE_TYPE dconstroot;
-
+
real_arithmetic (&dconstroot, MULT_EXPR, &dconstthird, &dconstthird);
dconstroot = real_value_truncate (TYPE_MODE (type), dconstroot);
tree_root = build_real (type, dconstroot);
}
}
}
-
+
/* Optimize cbrt(pow(x,y)) -> pow(x,y/3) iff x is nonnegative. */
if (fcode == BUILT_IN_POW || fcode == BUILT_IN_POWF
|| fcode == BUILT_IN_POWL)
- {
+ {
tree arg00 = TREE_VALUE (TREE_OPERAND (arg, 1));
tree arg01 = TREE_VALUE (TREE_CHAIN (TREE_OPERAND (arg, 1)));
if (tree_expr_nonnegative_p (arg00))
STRIP_NOPS (expr);
return ((TREE_CODE (expr) == REAL_CST
- && ! TREE_CONSTANT_OVERFLOW (expr)
- && REAL_VALUES_EQUAL (TREE_REAL_CST (expr), *value))
- || (TREE_CODE (expr) == COMPLEX_CST
- && real_dconstp (TREE_REALPART (expr), value)
- && real_zerop (TREE_IMAGPART (expr))));
+ && ! TREE_CONSTANT_OVERFLOW (expr)
+ && REAL_VALUES_EQUAL (TREE_REAL_CST (expr), *value))
+ || (TREE_CODE (expr) == COMPLEX_CST
+ && real_dconstp (TREE_REALPART (expr), value)
+ && real_zerop (TREE_IMAGPART (expr))));
}
/* A subroutine of fold_builtin to fold the various logarithmic
return build_real (type, dconst0);
/* Optimize logN(N) = 1.0. If N can't be truncated to MODE
- exactly, then only do this if flag_unsafe_math_optimizations. */
+ exactly, then only do this if flag_unsafe_math_optimizations. */
if (exact_real_truncate (TYPE_MODE (type), value)
|| flag_unsafe_math_optimizations)
- {
+ {
const REAL_VALUE_TYPE value_truncate =
real_value_truncate (TYPE_MODE (type), *value);
if (real_dconstp (arg, &value_truncate))
return fold_convert (type, TREE_VALUE (TREE_OPERAND (arg, 1)));
/* Optimize logN(func()) for various exponential functions. We
- want to determine the value "x" and the power "exponent" in
- order to transform logN(x**exponent) into exponent*logN(x). */
+ want to determine the value "x" and the power "exponent" in
+ order to transform logN(x**exponent) into exponent*logN(x). */
if (flag_unsafe_math_optimizations)
- {
+ {
tree exponent = 0, x = 0;
switch (fcode)
/* Optimize pow(expN(x),y) = expN(x*y). */
if (BUILTIN_EXPONENT_P (fcode))
- {
+ {
tree expfn = TREE_OPERAND (TREE_OPERAND (arg0, 0), 0);
tree arg = TREE_VALUE (TREE_OPERAND (arg0, 1));
arg = fold_build2 (MULT_EXPR, type, arg, arg1);
/* Optimize pow(sqrt(x),y) = pow(x,y*0.5). */
if (BUILTIN_SQRT_P (fcode))
- {
+ {
tree narg0 = TREE_VALUE (TREE_OPERAND (arg0, 1));
tree narg1 = fold_build2 (MULT_EXPR, type, arg1,
build_real (type, dconsthalf));
/* Optimize pow(cbrt(x),y) = pow(x,y/3) iff x is nonnegative. */
if (BUILTIN_CBRT_P (fcode))
- {
+ {
tree arg = TREE_VALUE (TREE_OPERAND (arg0, 1));
if (tree_expr_nonnegative_p (arg))
{
return build_function_call_expr (fndecl, arglist);
}
}
-
+
/* Optimize pow(pow(x,y),z) = pow(x,y*z). */
if (fcode == BUILT_IN_POW || fcode == BUILT_IN_POWF
|| fcode == BUILT_IN_POWL)
- {
+ {
tree arg00 = TREE_VALUE (TREE_OPERAND (arg0, 1));
tree arg01 = TREE_VALUE (TREE_CHAIN (TREE_OPERAND (arg0, 1)));
tree narg1 = fold_build2 (MULT_EXPR, type, arg01, arg1);
/* Optimize expN(1.0) = N. */
if (real_onep (arg))
- {
+ {
REAL_VALUE_TYPE cst;
real_convert (&cst, TYPE_MODE (type), value);
if (flag_unsafe_math_optimizations
&& TREE_CODE (arg) == REAL_CST
&& ! TREE_CONSTANT_OVERFLOW (arg))
- {
+ {
REAL_VALUE_TYPE cint;
REAL_VALUE_TYPE c;
HOST_WIDE_INT n;
/* Optimize expN(logN(x)) = x. */
if (flag_unsafe_math_optimizations)
- {
+ {
const enum built_in_function fcode = builtin_mathfn_code (arg);
if ((value == &dconste
/* If SRC and DEST are the same (and not volatile), return DEST+LEN. */
if (operand_equal_p (src, dest, 0))
- {
+ {
if (endp == 0)
return omit_one_operand (type, dest, len);
if (endp == 2)
len = fold_build2 (MINUS_EXPR, TREE_TYPE (len), len,
ssize_int (1));
-
+
len = fold_convert (TREE_TYPE (dest), len);
len = fold_build2 (PLUS_EXPR, TREE_TYPE (dest), dest, len);
return fold_convert (type, len);
arg, integer_zero_node);
if (in_gimple_form && !TREE_CONSTANT (arg))
- return NULL_TREE;
+ return NULL_TREE;
else
- return arg;
+ return arg;
}
}
arg = fold_build2 (LE_EXPR, integer_type_node, arg,
build_int_cst (unsigned_type_node, 9));
if (in_gimple_form && !TREE_CONSTANT (arg))
- return NULL_TREE;
+ return NULL_TREE;
else
- return arg;
+ return arg;
}
}
{
case BUILT_IN_ISINF:
if (!MODE_HAS_INFINITIES (TYPE_MODE (TREE_TYPE (arg))))
- return omit_one_operand (type, integer_zero_node, arg);
+ return omit_one_operand (type, integer_zero_node, arg);
if (TREE_CODE (arg) == REAL_CST)
{
case BUILT_IN_FINITE:
if (!MODE_HAS_NANS (TYPE_MODE (TREE_TYPE (arg)))
- && !MODE_HAS_INFINITIES (TYPE_MODE (TREE_TYPE (arg))))
- return omit_one_operand (type, integer_zero_node, arg);
+ && !MODE_HAS_INFINITIES (TYPE_MODE (TREE_TYPE (arg))))
+ return omit_one_operand (type, integer_zero_node, arg);
if (TREE_CODE (arg) == REAL_CST)
{
case BUILT_IN_ISNAN:
if (!MODE_HAS_NANS (TYPE_MODE (TREE_TYPE (arg))))
- return omit_one_operand (type, integer_zero_node, arg);
+ return omit_one_operand (type, integer_zero_node, arg);
if (TREE_CODE (arg) == REAL_CST)
{
arg0 = TREE_VALUE (arglist);
arg1 = TREE_VALUE (TREE_CHAIN (arglist));
-
+
type0 = TREE_TYPE (arg0);
type1 = TREE_TYPE (arg1);
-
+
code0 = TREE_CODE (type0);
code1 = TREE_CODE (type1);
-
+
if (code0 == REAL_TYPE && code1 == REAL_TYPE)
/* Choose the wider of two real types. */
cmp_type = TYPE_PRECISION (type0) >= TYPE_PRECISION (type1)
IDENTIFIER_POINTER (DECL_NAME (fndecl)));
return error_mark_node;
}
-
+
arg0 = fold_convert (cmp_type, arg0);
arg1 = fold_convert (cmp_type, arg1);
CASE_FLT_FN (BUILT_IN_CREAL):
if (validate_arglist (arglist, COMPLEX_TYPE, VOID_TYPE))
- return non_lvalue (fold_build1 (REALPART_EXPR, type,
+ return non_lvalue (fold_build1 (REALPART_EXPR, type,
TREE_VALUE (arglist)));
break;
CASE_FLT_FN (BUILT_IN_CIMAG):
if (validate_arglist (arglist, COMPLEX_TYPE, VOID_TYPE))
- return non_lvalue (fold_build1 (IMAGPART_EXPR, type,
+ return non_lvalue (fold_build1 (IMAGPART_EXPR, type,
TREE_VALUE (arglist)));
break;
CASE_FLT_FN (BUILT_IN_LLCEIL):
CASE_FLT_FN (BUILT_IN_LFLOOR):
CASE_FLT_FN (BUILT_IN_LLFLOOR):
- CASE_FLT_FN (BUILT_IN_LROUND):
+ CASE_FLT_FN (BUILT_IN_LROUND):
CASE_FLT_FN (BUILT_IN_LLROUND):
return fold_builtin_int_roundingfn (fndecl, arglist);
/* If the requested length is zero, or the src parameter string
length is zero, return the dst parameter. */
if (integer_zerop (len) || (p && *p == '\0'))
- return omit_two_operands (TREE_TYPE (dst), dst, src, len);
+ return omit_two_operands (TREE_TYPE (dst), dst, src, len);
/* If the requested len is greater than or equal to the string
- length, call strcat. */
+ length, call strcat. */
if (TREE_CODE (len) == INTEGER_CST && p
&& compare_tree_int (len, strlen (p)) >= 0)
{
/* We use __builtin_va_start (ap, 0, 0) or __builtin_next_arg (0, 0)
when we checked the arguments and if needed issued a warning. */
else if (!TREE_CHAIN (arglist)
- || !integer_zerop (TREE_VALUE (arglist))
- || !integer_zerop (TREE_VALUE (TREE_CHAIN (arglist)))
- || TREE_CHAIN (TREE_CHAIN (arglist)))
+ || !integer_zerop (TREE_VALUE (arglist))
+ || !integer_zerop (TREE_VALUE (TREE_CHAIN (arglist)))
+ || TREE_CHAIN (TREE_CHAIN (arglist)))
{
tree last_parm = tree_last (DECL_ARGUMENTS (current_function_decl));
tree arg = TREE_VALUE (arglist);
if (TREE_CHAIN (arglist))
- {
- error ("%<va_start%> used with too many arguments");
- return true;
- }
+ {
+ error ("%<va_start%> used with too many arguments");
+ return true;
+ }
/* Strip off all nops for the sake of the comparison. This
is not quite the same as STRIP_NOPS. It does more.
|| TREE_CODE (arg) == INDIRECT_REF)
arg = TREE_OPERAND (arg, 0);
if (arg != last_parm)
- {
+ {
/* FIXME: Sometimes with the tree optimizers we can get the
not the last argument even though the user used the last
argument. We just warn and set the arg to be the last
warning (0, "second parameter of %<va_start%> not last named argument");
}
/* We want to verify the second parameter just once before the tree
- optimizers are run and then avoid keeping it in the tree,
- as otherwise we could warn even for correct code like:
- void foo (int i, ...)
- { va_list ap; i++; va_start (ap, i); va_end (ap); } */
+ optimizers are run and then avoid keeping it in the tree,
+ as otherwise we could warn even for correct code like:
+ void foo (int i, ...)
+ { va_list ap; i++; va_start (ap, i); va_end (ap); } */
TREE_VALUE (arglist) = integer_zero_node;
TREE_CHAIN (arglist) = build_tree_list (NULL, integer_zero_node);
}
/* If SRC and DEST are the same (and not volatile), return DEST. */
if (fcode == BUILT_IN_STRCPY_CHK && operand_equal_p (src, dest, 0))
return fold_convert (TREE_TYPE (TREE_TYPE (fndecl)), dest);
-
+
if (! host_integerp (size, 1))
return 0;
if (!init_target_chars())
return 0;
-
+
if (strcmp (fmt_str, target_percent_s) == 0 || strchr (fmt_str, target_percent) == NULL)
{
const char *str;
if (!init_target_chars())
return 0;
-
+
/* If the format doesn't contain % args or %%, use strcpy. */
if (strchr (fmt_str, target_percent) == NULL)
{
target_percent_s_newline[1] = target_s;
target_percent_s_newline[2] = target_newline;
target_percent_s_newline[3] = '\0';
-
+
init = true;
}
return true;
exist when compiling in ANSI conformant mode.
ATTRs is an attribute list as defined in builtin-attrs.def that
- describes the attributes of this builtin function.
+ describes the attributes of this builtin function.
IMPLICIT specifies condition when the builtin can be produced by
compiler. For instance C90 reserves floorf function, but does not
#undef DEF_GCC_BUILTIN
#define DEF_GCC_BUILTIN(ENUM, NAME, TYPE, ATTRS) \
DEF_BUILTIN (ENUM, "__builtin_" NAME, BUILT_IN_NORMAL, TYPE, BT_LAST, \
- false, false, false, ATTRS, true, true)
+ false, false, false, ATTRS, true, true)
/* Like DEF_GCC_BUILTIN, except we don't prepend "__builtin_". */
#undef DEF_SYNC_BUILTIN
#define DEF_SYNC_BUILTIN(ENUM, NAME, TYPE, ATTRS) \
DEF_BUILTIN (ENUM, NAME, BUILT_IN_NORMAL, TYPE, BT_LAST, \
- false, false, false, ATTRS, true, true)
+ false, false, false, ATTRS, true, true)
/* A library builtin (like __builtin_strchr) is a builtin equivalent
of an ANSI/ISO standard library function. In addition to the
`strchr') as well. If we cannot compute the answer using the
builtin function, we will fall back to the standard library
version. */
-#undef DEF_LIB_BUILTIN
+#undef DEF_LIB_BUILTIN
#define DEF_LIB_BUILTIN(ENUM, NAME, TYPE, ATTRS) \
DEF_BUILTIN (ENUM, "__builtin_" NAME, BUILT_IN_NORMAL, TYPE, TYPE, \
true, true, false, ATTRS, true, true)
specified by ANSI/ISO C. So, when we're being fully conformant we
ignore the version of these builtins that does not begin with
__builtin. */
-#undef DEF_EXT_LIB_BUILTIN
+#undef DEF_EXT_LIB_BUILTIN
#define DEF_EXT_LIB_BUILTIN(ENUM, NAME, TYPE, ATTRS) \
DEF_BUILTIN (ENUM, "__builtin_" NAME, BUILT_IN_NORMAL, TYPE, TYPE, \
- true, true, true, ATTRS, false, true)
+ true, true, true, ATTRS, false, true)
/* Like DEF_LIB_BUILTIN, except that the function is only a part of
the standard in C94 or above. */
-#undef DEF_C94_BUILTIN
+#undef DEF_C94_BUILTIN
#define DEF_C94_BUILTIN(ENUM, NAME, TYPE, ATTRS) \
DEF_BUILTIN (ENUM, "__builtin_" NAME, BUILT_IN_NORMAL, TYPE, TYPE, \
- true, true, !flag_isoc94, ATTRS, TARGET_C99_FUNCTIONS, true)
+ true, true, !flag_isoc94, ATTRS, TARGET_C99_FUNCTIONS, true)
/* Like DEF_LIB_BUILTIN, except that the function is only a part of
the standard in C99 or above. */
-#undef DEF_C99_BUILTIN
+#undef DEF_C99_BUILTIN
#define DEF_C99_BUILTIN(ENUM, NAME, TYPE, ATTRS) \
DEF_BUILTIN (ENUM, "__builtin_" NAME, BUILT_IN_NORMAL, TYPE, TYPE, \
- true, true, !flag_isoc99, ATTRS, TARGET_C99_FUNCTIONS, true)
+ true, true, !flag_isoc99, ATTRS, TARGET_C99_FUNCTIONS, true)
/* Builtin that is specified by C99 and C90 reserve the name for future use.
We can still recognize the builtin in C90 mode but we can't produce it
implicitly. */
-#undef DEF_C99_C90RES_BUILTIN
+#undef DEF_C99_C90RES_BUILTIN
#define DEF_C99_C90RES_BUILTIN(ENUM, NAME, TYPE, ATTRS) \
DEF_BUILTIN (ENUM, "__builtin_" NAME, BUILT_IN_NORMAL, TYPE, TYPE, \
- true, true, !flag_isoc99, ATTRS, TARGET_C99_FUNCTIONS, true)
+ true, true, !flag_isoc99, ATTRS, TARGET_C99_FUNCTIONS, true)
/* Builtin that C99 reserve the name for future use. We can still recognize
the builtin in C99 mode but we can't produce it implicitly. */
#undef DEF_EXT_C99RES_BUILTIN
#define DEF_EXT_C99RES_BUILTIN(ENUM, NAME, TYPE, ATTRS) \
DEF_BUILTIN (ENUM, "__builtin_" NAME, BUILT_IN_NORMAL, TYPE, TYPE, \
- true, true, true, ATTRS, false, true)
+ true, true, true, ATTRS, false, true)
/* Allocate the enum and the name for a builtin, but do not actually
define it here at all. */
for (;;)
{
if (!strncmp (p, "volatile ", 9))
- {
- p += 9;
- continue;
- }
+ {
+ p += 9;
+ continue;
+ }
if (!strncmp (p, "const ", 6))
- {
- p += 6;
- continue;
- }
+ {
+ p += 6;
+ continue;
+ }
break;
}
const char *this_type;
if (*formal_list)
- formal_list = concat (formal_list, ", ", NULL);
+ formal_list = concat (formal_list, ", ", NULL);
this_type = gen_type ("", TREE_VALUE (formal_type), ansi);
formal_list
if (!*formal_list)
{
if (TYPE_ARG_TYPES (fntype))
- /* assert (TREE_VALUE (TYPE_ARG_TYPES (fntype)) == void_type_node); */
- formal_list = "void";
+ /* assert (TREE_VALUE (TYPE_ARG_TYPES (fntype)) == void_type_node); */
+ formal_list = "void";
else
- formal_list = "/* ??? */";
+ formal_list = "/* ??? */";
}
else
{
/* If there were at least some parameters, and if the formals-types-list
- petered out to a NULL (i.e. without being terminated by a
- void_type_node) then we need to tack on an ellipsis. */
+ petered out to a NULL (i.e. without being terminated by a
+ void_type_node) then we need to tack on an ellipsis. */
if (!formal_type)
- formal_list = concat (formal_list, ", ...", NULL);
+ formal_list = concat (formal_list, ", ...", NULL);
}
return concat (" (", formal_list, ")", NULL);
const char *this_formal;
if (*formal_list && ((style == ansi) || (style == k_and_r_names)))
- formal_list = concat (formal_list, ", ", NULL);
+ formal_list = concat (formal_list, ", ", NULL);
this_formal = gen_decl (formal_decl, 0, style);
if (style == k_and_r_decls)
- formal_list = concat (formal_list, this_formal, "; ", NULL);
+ formal_list = concat (formal_list, this_formal, "; ", NULL);
else
- formal_list = concat (formal_list, this_formal, NULL);
+ formal_list = concat (formal_list, this_formal, NULL);
formal_decl = TREE_CHAIN (formal_decl);
}
if (style == ansi)
{
if (!DECL_ARGUMENTS (fndecl))
- formal_list = concat (formal_list, "void", NULL);
+ formal_list = concat (formal_list, "void", NULL);
if (deserves_ellipsis (TREE_TYPE (fndecl)))
- formal_list = concat (formal_list, ", ...", NULL);
+ formal_list = concat (formal_list, ", ...", NULL);
}
if ((style == ansi) || (style == k_and_r_names))
formal_list = concat (" (", formal_list, ")", NULL);
else
{
switch (TREE_CODE (t))
- {
- case POINTER_TYPE:
- if (TYPE_READONLY (t))
- ret_val = concat ("const ", ret_val, NULL);
- if (TYPE_VOLATILE (t))
- ret_val = concat ("volatile ", ret_val, NULL);
+ {
+ case POINTER_TYPE:
+ if (TYPE_READONLY (t))
+ ret_val = concat ("const ", ret_val, NULL);
+ if (TYPE_VOLATILE (t))
+ ret_val = concat ("volatile ", ret_val, NULL);
- ret_val = concat ("*", ret_val, NULL);
+ ret_val = concat ("*", ret_val, NULL);
if (TREE_CODE (TREE_TYPE (t)) == ARRAY_TYPE || TREE_CODE (TREE_TYPE (t)) == FUNCTION_TYPE)
ret_val = concat ("(", ret_val, ")", NULL);
- ret_val = gen_type (ret_val, TREE_TYPE (t), style);
+ ret_val = gen_type (ret_val, TREE_TYPE (t), style);
- return ret_val;
+ return ret_val;
- case ARRAY_TYPE:
+ case ARRAY_TYPE:
if (!COMPLETE_TYPE_P (t) || TREE_CODE (TYPE_SIZE (t)) != INTEGER_CST)
ret_val = gen_type (concat (ret_val, "[]", NULL),
TREE_TYPE (t), style);
ret_val = gen_type (concat (ret_val, buff, NULL),
TREE_TYPE (t), style);
}
- break;
+ break;
- case FUNCTION_TYPE:
- ret_val = gen_type (concat (ret_val,
+ case FUNCTION_TYPE:
+ ret_val = gen_type (concat (ret_val,
gen_formal_list_for_type (t, style),
NULL),
TREE_TYPE (t), style);
- break;
+ break;
- case IDENTIFIER_NODE:
- data_type = IDENTIFIER_POINTER (t);
- break;
+ case IDENTIFIER_NODE:
+ data_type = IDENTIFIER_POINTER (t);
+ break;
/* The following three cases are complicated by the fact that a
- user may do something really stupid, like creating a brand new
- "anonymous" type specification in a formal argument list (or as
- part of a function return type specification). For example:
+ user may do something really stupid, like creating a brand new
+ "anonymous" type specification in a formal argument list (or as
+ part of a function return type specification). For example:
int f (enum { red, green, blue } color);
to represent the (anonymous) type. Thus, we have to generate the
whole darn type specification. Yuck! */
- case RECORD_TYPE:
+ case RECORD_TYPE:
if (TYPE_NAME (t))
data_type = IDENTIFIER_POINTER (TYPE_NAME (t));
else
data_type = concat ("struct ", data_type, NULL);
break;
- case UNION_TYPE:
+ case UNION_TYPE:
if (TYPE_NAME (t))
data_type = IDENTIFIER_POINTER (TYPE_NAME (t));
else
data_type = concat ("union ", data_type, NULL);
break;
- case ENUMERAL_TYPE:
+ case ENUMERAL_TYPE:
if (TYPE_NAME (t))
data_type = IDENTIFIER_POINTER (TYPE_NAME (t));
else
data_type = concat ("enum ", data_type, NULL);
break;
- case TYPE_DECL:
- data_type = IDENTIFIER_POINTER (DECL_NAME (t));
- break;
+ case TYPE_DECL:
+ data_type = IDENTIFIER_POINTER (DECL_NAME (t));
+ break;
- case INTEGER_TYPE:
- data_type = IDENTIFIER_POINTER (DECL_NAME (TYPE_NAME (t)));
- /* Normally, `unsigned' is part of the deal. Not so if it comes
+ case INTEGER_TYPE:
+ data_type = IDENTIFIER_POINTER (DECL_NAME (TYPE_NAME (t)));
+ /* Normally, `unsigned' is part of the deal. Not so if it comes
with a type qualifier. */
- if (TYPE_UNSIGNED (t) && TYPE_QUALS (t))
+ if (TYPE_UNSIGNED (t) && TYPE_QUALS (t))
data_type = concat ("unsigned ", data_type, NULL);
break;
- case REAL_TYPE:
- data_type = IDENTIFIER_POINTER (DECL_NAME (TYPE_NAME (t)));
- break;
+ case REAL_TYPE:
+ data_type = IDENTIFIER_POINTER (DECL_NAME (TYPE_NAME (t)));
+ break;
- case VOID_TYPE:
- data_type = "void";
- break;
+ case VOID_TYPE:
+ data_type = "void";
+ break;
case ERROR_MARK:
data_type = "[ERROR]";
break;
- default:
- gcc_unreachable ();
- }
+ default:
+ gcc_unreachable ();
+ }
}
if (TYPE_READONLY (t))
ret_val = concat ("const ", ret_val, NULL);
NULL);
/* Since we have already added in the formals list stuff, here we don't
- add the whole "type" of the function we are considering (which
- would include its parameter-list info), rather, we only add in
- the "type" of the "type" of the function, which is really just
- the return-type of the function (and does not include the parameter
- list info). */
+ add the whole "type" of the function we are considering (which
+ would include its parameter-list info), rather, we only add in
+ the "type" of the "type" of the function, which is really just
+ the return-type of the function (and does not include the parameter
+ list info). */
ret_val = gen_type (ret_val, TREE_TYPE (TREE_TYPE (decl)), style);
}
{ "always_inline", 0, 0, true, false, false,
handle_always_inline_attribute },
{ "flatten", 0, 0, true, false, false,
- handle_flatten_attribute },
+ handle_flatten_attribute },
{ "used", 0, 0, true, false, false,
handle_used_attribute },
{ "unused", 0, 0, false, false, false,
{
/* Let the back-end know about this variable. */
if (!anon_aggr_type_p (TREE_TYPE (decl)))
- emit_local_var (decl);
+ emit_local_var (decl);
else
- expand_anon_union_decl (decl, NULL_TREE,
- DECL_ANON_UNION_ELEMS (decl));
+ expand_anon_union_decl (decl, NULL_TREE,
+ DECL_ANON_UNION_ELEMS (decl));
}
else
return 0;
{
return targetm.vector_opaque_p (t1)
|| targetm.vector_opaque_p (t2)
- || (tree_int_cst_equal (TYPE_SIZE (t1), TYPE_SIZE (t2))
- && (TREE_CODE (TREE_TYPE (t1)) != REAL_TYPE ||
+ || (tree_int_cst_equal (TYPE_SIZE (t1), TYPE_SIZE (t2))
+ && (TREE_CODE (TREE_TYPE (t1)) != REAL_TYPE ||
TYPE_PRECISION (t1) == TYPE_PRECISION (t2))
&& INTEGRAL_TYPE_P (TREE_TYPE (t1))
== INTEGRAL_TYPE_P (TREE_TYPE (t2)));
default:
/* For other expressions, simply recurse on their operands.
- Manual tail recursion for unary expressions.
+ Manual tail recursion for unary expressions.
Other non-expressions need not be processed. */
if (cl == tcc_unary)
{
But, the standard is wrong. In particular, this code is
legal C++:
- int *ip;
- int **ipp = &ip;
- const int* const* cipp = ipp;
+ int *ip;
+ int **ipp = &ip;
+ const int* const* cipp = ipp;
And, it doesn't make sense for that to be legal unless you
can dereference IPP and CIPP. So, we ignore cv-qualifiers on
NONANSI_P, ATTRS, IMPLICIT, COND) \
if (NAME && COND) \
def_builtin_1 (ENUM, NAME, CLASS, \
- builtin_types[(int) TYPE], \
- builtin_types[(int) LIBTYPE], \
- BOTH_P, FALLBACK_P, NONANSI_P, \
- built_in_attributes[(int) ATTRS], IMPLICIT);
+ builtin_types[(int) TYPE], \
+ builtin_types[(int) LIBTYPE], \
+ BOTH_P, FALLBACK_P, NONANSI_P, \
+ built_in_attributes[(int) ATTRS], IMPLICIT);
#include "builtins.def"
#undef DEF_BUILTIN
if (low_value && high_value)
{
/* If the LOW_VALUE and HIGH_VALUE are the same, then this isn't
- really a case range, even though it was written that way.
- Remove the HIGH_VALUE to simplify later processing. */
+ really a case range, even though it was written that way.
+ Remove the HIGH_VALUE to simplify later processing. */
if (tree_int_cst_equal (low_value, high_value))
high_value = NULL_TREE;
else if (!tree_int_cst_lt (low_value, high_value))
(splay_tree_key) low_value);
high_bound = splay_tree_successor (cases,
(splay_tree_key) low_value);
-
+
/* It is smaller than the LOW_VALUE, so there is no need to check
- unless the LOW_BOUND is in fact itself a case range. */
+ unless the LOW_BOUND is in fact itself a case range. */
if (low_bound
&& CASE_HIGH ((tree) low_bound->value)
&& tree_int_cst_compare (CASE_HIGH ((tree) low_bound->value),
if (TYPE_MAIN_VARIANT (*node) == *node)
{
/* If it is the main variant, then pack the other variants
- too. This happens in,
+ too. This happens in,
struct Foo {
struct Foo const *ptr; // creates a variant w/o packed flag
static tree
handle_flatten_attribute (tree *node, tree name,
- tree args ATTRIBUTE_UNUSED,
- int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
+ tree args ATTRIBUTE_UNUSED,
+ int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
{
if (TREE_CODE (*node) == FUNCTION_DECL)
/* Do nothing else, just set the attribute. We'll get at
return NULL_TREE;
}
- if (TREE_CODE (type) == POINTER_TYPE)
+ if (TREE_CODE (type) == POINTER_TYPE)
fn = build_pointer_type_for_mode;
else
fn = build_reference_type_for_mode;
typefm = fn (TREE_TYPE (type), mode, false);
}
else
- typefm = lang_hooks.types.type_for_mode (mode, TYPE_UNSIGNED (type));
+ typefm = lang_hooks.types.type_for_mode (mode, TYPE_UNSIGNED (type));
if (typefm == NULL_TREE)
{
{
if (TREE_CODE (*node) != RECORD_TYPE && TREE_CODE (*node) != UNION_TYPE)
{
- warning (OPT_Wattributes, "%qE attribute ignored on non-class types",
+ warning (OPT_Wattributes, "%qE attribute ignored on non-class types",
name);
- return NULL_TREE;
+ return NULL_TREE;
}
}
else if (decl_function_context (decl) != 0 || !TREE_PUBLIC (decl))
{
decl = TYPE_NAME (decl);
if (!decl)
- return NULL_TREE;
+ return NULL_TREE;
if (TREE_CODE (decl) == IDENTIFIER_NODE)
{
warning (OPT_Wattributes, "%qE attribute ignored on types",
typelist = TREE_CHAIN (typelist);
params = TREE_CHAIN (params);
}
-
+
if (typelist || !params)
warning (OPT_Wformat,
"not enough variable arguments to fit a sentinel");
else
- {
+ {
tree sentinel, end;
unsigned pos = 0;
-
+
if (TREE_VALUE (attr))
{
tree p = TREE_VALUE (TREE_VALUE (attr));
as wide as a pointer, and we don't want to force
users to cast the NULL they have written there.
We warn with -Wstrict-null-sentinel, though. */
- && (warn_strict_null_sentinel
+ && (warn_strict_null_sentinel
|| null_node != TREE_VALUE (sentinel)))
warning (OPT_Wformat, "missing sentinel in function call");
}
params = TREE_CHAIN (params);
if (VOID_TYPE_P (TREE_VALUE (params)))
- {
+ {
warning (OPT_Wattributes,
"%qE attribute only applies to variadic functions", name);
*no_add_attrs = true;
}
}
-
+
if (args)
{
tree position = TREE_VALUE (args);
if (TREE_CODE (position) != INTEGER_CST)
- {
+ {
warning (0, "requested position is not an integer constant");
*no_add_attrs = true;
}
else
- {
+ {
if (tree_int_cst_lt (position, integer_zero_node))
{
warning (0, "requested position is less than zero");
}
}
}
-
+
return NULL_TREE;
}
\f
unsigned int val = TREE_INT_CST_LOW (value);
const char *const ell = (token == CPP_CHAR) ? "" : "L";
if (val <= UCHAR_MAX && ISGRAPH (val))
- message = catenate_messages (gmsgid, " before %s'%c'");
+ message = catenate_messages (gmsgid, " before %s'%c'");
else
- message = catenate_messages (gmsgid, " before %s'\\x%x'");
+ message = catenate_messages (gmsgid, " before %s'\\x%x'");
error (message, ell, val);
free (message);
error (message);
free (message);
}
-#undef catenate_messages
+#undef catenate_messages
}
/* Walk a gimplified function and warn for functions whose return value is
return 0;
}
-/* A helper function for resolve_overloaded_builtin. Adds casts to
+/* A helper function for resolve_overloaded_builtin. Adds casts to
PARAMS to make arguments match up with those of FUNCTION. Drops
the variadic arguments at the end. Returns false if some error
was encountered; true on success. */
return true;
}
-/* A helper function for resolve_overloaded_builtin. Adds a cast to
+/* A helper function for resolve_overloaded_builtin. Adds a cast to
RESULT to make it match the type of the first pointer argument in
PARAMS. */
break;
case BUILT_IN_MD:
if (targetm.resolve_overloaded_builtin)
- return targetm.resolve_overloaded_builtin (function, params);
+ return targetm.resolve_overloaded_builtin (function, params);
else
- return NULL_TREE;
+ return NULL_TREE;
default:
return NULL_TREE;
}
-
+
/* Handle BUILT_IN_NORMAL here. */
switch (orig_code)
{
RID_PUBLIC, RID_PRIVATE, RID_PROTECTED,
RID_TEMPLATE, RID_NULL, RID_CATCH,
RID_DELETE, RID_FALSE, RID_NAMESPACE,
- RID_NEW, RID_OFFSETOF, RID_OPERATOR,
- RID_THIS, RID_THROW, RID_TRUE,
- RID_TRY, RID_TYPENAME, RID_TYPEID,
+ RID_NEW, RID_OFFSETOF, RID_OPERATOR,
+ RID_THIS, RID_THROW, RID_TRUE,
+ RID_TRY, RID_TYPENAME, RID_TYPEID,
RID_USING,
/* casts */
RID_AT_ENCODE, RID_AT_END,
RID_AT_CLASS, RID_AT_ALIAS, RID_AT_DEFS,
RID_AT_PRIVATE, RID_AT_PROTECTED, RID_AT_PUBLIC,
- RID_AT_PROTOCOL, RID_AT_SELECTOR,
+ RID_AT_PROTOCOL, RID_AT_SELECTOR,
RID_AT_THROW, RID_AT_TRY, RID_AT_CATCH,
RID_AT_FINALLY, RID_AT_SYNCHRONIZED,
RID_AT_INTERFACE,
CTI_PRETTY_FUNCTION_NAME_DECL,
CTI_C99_FUNCTION_NAME_DECL,
CTI_SAVED_FUNCTION_NAME_DECLS,
-
+
CTI_VOID_ZERO,
CTI_NULL,
extern int c_expand_decl (tree);
extern int field_decl_cmp (const void *, const void *);
-extern void resort_sorted_fields (void *, void *, gt_pointer_operator,
- void *);
+extern void resort_sorted_fields (void *, void *, gt_pointer_operator,
+ void *);
extern bool has_c_linkage (tree decl);
\f
/* Switches common to the C front ends. */
extern int flag_signed_bitfields;
-/* Warn about #pragma directives that are not recognized. */
+/* Warn about #pragma directives that are not recognized. */
-extern int warn_unknown_pragmas; /* Tri state variable. */
+extern int warn_unknown_pragmas; /* Tri state variable. */
/* Warn about format/argument anomalies in calls to formatted I/O functions
(*printf, *scanf, strftime, strfmon, etc.). */
/* This is an IBM extended double format, so 1.0 + any double is
representable precisely. */
sprintf (buf, "0x1p%d", (fmt->emin - fmt->p) * fmt->log2_b);
- else
+ else
sprintf (buf, "0x1p%d", (1 - fmt->p) * fmt->log2_b);
builtin_define_with_hex_fp_value (name, type, decimal_dig, buf, fp_suffix, fp_cast);
builtin_define_with_value_n ("__GNUG__", q, v - q);
gcc_assert (*v == '.' && ISDIGIT (v[1]));
-
+
q = ++v;
while (ISDIGIT (*v))
v++;
if (c_dialect_cxx ())
{
- if (flag_weak && SUPPORTS_ONE_ONLY)
+ if (flag_weak && SUPPORTS_ONE_ONLY)
cpp_define (pfile, "__GXX_WEAK__=1");
else
cpp_define (pfile, "__GXX_WEAK__=0");
if (flag_abi_version == 0)
/* Use a very large value so that:
- #if __GXX_ABI_VERSION >= <value for version X>
+ #if __GXX_ABI_VERSION >= <value for version X>
will work whether the user explicitly says "-fabi-version=x" or
"-fabi-version=0". Do not use INT_MAX because that will be
builtin_define_with_int_value ("__GXX_ABI_VERSION", 102);
else
/* Newer versions have values 1002, 1003, .... */
- builtin_define_with_int_value ("__GXX_ABI_VERSION",
+ builtin_define_with_int_value ("__GXX_ABI_VERSION",
1000 + flag_abi_version);
/* libgcc needs to know this. */
TREE_CHAIN (p) = BLOCK_VARS (block);
BLOCK_VARS (block) = p;
gcc_assert (I_LABEL_BINDING (b->id) == b);
- I_LABEL_BINDING (b->id) = b->shadowed;
- break;
+ I_LABEL_BINDING (b->id) = b->shadowed;
+ break;
case ENUMERAL_TYPE:
case UNION_TYPE:
gcc_assert (I_TAG_BINDING (b->id) == b);
I_TAG_BINDING (b->id) = b->shadowed;
}
- break;
+ break;
case FUNCTION_DECL:
/* Propagate TREE_ADDRESSABLE from nested functions to their
&& DECL_NAME (p)
&& !DECL_ARTIFICIAL (p)
&& scope != file_scope
- && scope != external_scope)
+ && scope != external_scope)
warning (OPT_Wunused_variable, "unused variable %q+D", p);
if (b->inner_comp)
/* warnings */
/* All decls must agree on a visibility. */
- if (CODE_CONTAINS_STRUCT (TREE_CODE (newdecl), TS_DECL_WITH_VIS)
+ if (CODE_CONTAINS_STRUCT (TREE_CODE (newdecl), TS_DECL_WITH_VIS)
&& DECL_VISIBILITY_SPECIFIED (newdecl) && DECL_VISIBILITY_SPECIFIED (olddecl)
&& DECL_VISIBILITY (newdecl) != DECL_VISIBILITY (olddecl))
{
are assigned. */
if (DECL_SECTION_NAME (newdecl) == NULL_TREE)
DECL_SECTION_NAME (newdecl) = DECL_SECTION_NAME (olddecl);
-
+
/* Copy the assembler name.
Currently, it can only be defined in the prototype. */
COPY_DECL_ASSEMBLER_NAME (olddecl, newdecl);
DECL_VISIBILITY (newdecl) = DECL_VISIBILITY (olddecl);
DECL_VISIBILITY_SPECIFIED (newdecl) = 1;
}
-
+
if (TREE_CODE (newdecl) == FUNCTION_DECL)
{
DECL_STATIC_CONSTRUCTOR(newdecl) |= DECL_STATIC_CONSTRUCTOR(olddecl);
DECL_IS_PURE (newdecl) |= DECL_IS_PURE (olddecl);
DECL_IS_NOVOPS (newdecl) |= DECL_IS_NOVOPS (olddecl);
}
-
+
/* Merge the storage class information. */
merge_weak (newdecl, olddecl);
TREE_PUBLIC (DECL_NAME (olddecl)) = 0;
}
}
-
+
if (DECL_EXTERNAL (newdecl))
{
TREE_STATIC (newdecl) = TREE_STATIC (olddecl);
TREE_STATIC (olddecl) = TREE_STATIC (newdecl);
TREE_PUBLIC (olddecl) = TREE_PUBLIC (newdecl);
}
-
- if (TREE_CODE (newdecl) == FUNCTION_DECL)
+
+ if (TREE_CODE (newdecl) == FUNCTION_DECL)
{
/* If we're redefining a function previously defined as extern
inline, make sure we emit debug info for the inline before we
DECL_INLINE (newdecl) = 1;
}
}
-
+
/* Copy most of the decl-specific fields of NEWDECL into OLDDECL.
But preserve OLDDECL's DECL_UID and DECL_CONTEXT. */
{
unsigned olddecl_uid = DECL_UID (olddecl);
tree olddecl_context = DECL_CONTEXT (olddecl);
-
+
memcpy ((char *) olddecl + sizeof (struct tree_common),
(char *) newdecl + sizeof (struct tree_common),
sizeof (struct tree_decl_common) - sizeof (struct tree_common));
break;
default:
-
+
memcpy ((char *) olddecl + sizeof (struct tree_decl_common),
(char *) newdecl + sizeof (struct tree_decl_common),
sizeof (struct tree_decl_non_common) - sizeof (struct tree_decl_common));
translation and get back the corresponding typedef name. For
example, given:
- typedef struct S MY_TYPE;
+ typedef struct S MY_TYPE;
MY_TYPE object;
Later parts of the compiler might only know that `object' was of
}
/* If we are parsing old-style parameter decls, current_function_decl
- will be nonnull but current_function_scope will be null. */
+ will be nonnull but current_function_scope will be null. */
scope = current_function_scope ? current_function_scope : current_scope;
}
bind (id, error_mark_node, scope, /*invisible=*/false, /*nested=*/false);
if (!in_system_header && lookup_name (name))
warning (OPT_Wtraditional, "%Htraditional C lacks a separate namespace "
- "for labels, identifier %qE conflicts", &location, name);
+ "for labels, identifier %qE conflicts", &location, name);
nlist_se = XOBNEW (&parser_obstack, struct c_label_list);
nlist_se->next = label_context_stack_se->labels_def;
{
if (pending_invalid_xref != 0)
error ("%H%qE defined as wrong kind of tag",
- &pending_invalid_xref_location, pending_invalid_xref);
+ &pending_invalid_xref_location, pending_invalid_xref);
pending_invalid_xref = 0;
}
maybe_apply_pragma_weak (decl);
/* If this is a variable definition, determine its ELF visibility. */
- if (TREE_CODE (decl) == VAR_DECL
- && TREE_STATIC (decl)
+ if (TREE_CODE (decl) == VAR_DECL
+ && TREE_STATIC (decl)
&& !DECL_EXTERNAL (decl))
c_determine_visibility (decl);
if (c_dialect_objc ())
objc_check_decl (decl);
- if (asmspec)
+ if (asmspec)
{
/* If this is not a static variable, issue a warning.
It doesn't make any sense to give an ASMSPEC for an
else
set_user_assembler_name (decl, asmspec);
}
-
+
if (DECL_FILE_SCOPE_P (decl))
{
if (DECL_INITIAL (decl) == NULL_TREE
add_stmt (build_stmt (DECL_EXPR, decl));
}
}
-
+
if (!DECL_FILE_SCOPE_P (decl))
{
{
struct lang_type *lt = TYPE_LANG_SPECIFIC (*type);
if (!lt
- || w < min_precision (lt->enum_min, TYPE_UNSIGNED (*type))
+ || w < min_precision (lt->enum_min, TYPE_UNSIGNED (*type))
|| w < min_precision (lt->enum_max, TYPE_UNSIGNED (*type)))
warning (0, "%qs is narrower than values of its type", name);
}
declarator = declarator->declarator;
/* Check for some types that there cannot be arrays of. */
-
+
if (VOID_TYPE_P (type))
{
error ("declaration of %qs as array of voids", name);
type = error_mark_node;
}
-
+
if (TREE_CODE (type) == FUNCTION_TYPE)
{
error ("declaration of %qs as array of functions", name);
type = error_mark_node;
}
-
+
if (pedantic && !in_system_header && flexible_array_type_p (type))
pedwarn ("invalid use of structure with flexible array member");
-
+
if (size == error_mark_node)
type = error_mark_node;
-
+
if (type == error_mark_node)
continue;
/* Strip NON_LVALUE_EXPRs since we aren't using as an
lvalue. */
STRIP_TYPE_NOPS (size);
-
+
if (!INTEGRAL_TYPE_P (TREE_TYPE (size)))
{
error ("size of array %qs has non-integer type", name);
size = integer_one_node;
}
-
+
if (pedantic && integer_zerop (size))
pedwarn ("ISO C forbids zero-size array %qs", name);
-
+
if (TREE_CODE (size) == INTEGER_CST)
{
constant_expression_warning (size);
nonconstant even if it is (eg) a const variable
with known value. */
size_varies = 1;
-
+
if (!flag_isoc99 && pedantic)
{
if (TREE_CONSTANT (size))
if (integer_zerop (size))
{
- /* A zero-length array cannot be represented with
- an unsigned index type, which is what we'll
- get with build_index_type. Create an
- open-ended range instead. */
+ /* A zero-length array cannot be represented with
+ an unsigned index type, which is what we'll
+ get with build_index_type. Create an
+ open-ended range instead. */
itype = build_range_type (sizetype, size, NULL_TREE);
}
else
convert (index_type,
size_one_node));
- /* If that overflowed, the array is too big. ???
+ /* If that overflowed, the array is too big. ???
While a size of INT_MAX+1 technically shouldn't
cause an overflow (because we subtract 1), the
overflow is recorded during the conversion to
type = error_mark_node;
continue;
}
-
+
itype = build_index_type (itype);
}
}
if (!COMPLETE_TYPE_P (type))
{
error ("array type has incomplete element type");
- type = error_mark_node;
+ type = error_mark_node;
}
else
type = build_array_type (type, itype);
type for the function to return. */
if (type == error_mark_node)
continue;
-
+
size_varies = 0;
/* Warn about some types functions can't return. */
qualify the return type, not the function type. */
if (type_quals)
{
- /* Type qualifiers on a function return type are
+ /* Type qualifiers on a function return type are
normally permitted by the standard but have no
effect, so give a warning at -Wreturn-type.
Qualifiers on a void return type are banned on
else
warning (OPT_Wreturn_type,
"type qualifiers ignored on function return type");
-
+
type = c_build_qualified_type (type, type_quals);
}
type_quals = TYPE_UNQUALIFIED;
-
+
type = build_function_type (type, arg_types);
declarator = declarator->declarator;
-
+
/* Set the TYPE_CONTEXTs for each tagged type which is local to
the formal parameter list of this FUNCTION_TYPE to point to
the FUNCTION_TYPE node itself. */
{
tree link;
-
+
for (link = arg_info->tags;
link;
link = TREE_CHAIN (link))
size_varies = 0;
type = build_pointer_type (type);
-
+
/* Process type qualifiers (such as const or volatile)
that were given inside the `*'. */
type_quals = declarator->u.pointer_quals;
type = build_pointer_type (type);
}
else if (TREE_CODE (type) != ERROR_MARK
- && !COMPLETE_OR_UNBOUND_ARRAY_TYPE_P (type))
+ && !COMPLETE_OR_UNBOUND_ARRAY_TYPE_P (type))
{
error ("field %qs has incomplete type", name);
type = error_mark_node;
}
else if (storage_class == csc_static)
{
- error ("invalid storage class for function %qs", name);
- if (funcdef_flag)
+ error ("invalid storage class for function %qs", name);
+ if (funcdef_flag)
storage_class = declspecs->storage_class = csc_none;
else
return 0;
(by 'const' or 'volatile'), or has a storage class specifier
('register'), then the behavior is undefined; issue an error.
Typedefs for 'void' are OK (see DR#157). */
- if (b->prev == 0 /* one binding */
+ if (b->prev == 0 /* one binding */
&& TREE_CODE (b->decl) == PARM_DECL /* which is a parameter */
&& !DECL_NAME (b->decl) /* anonymous */
&& VOID_TYPE_P (TREE_TYPE (b->decl))) /* of void type */
if (ref && TREE_CODE (ref) == code)
{
if (TYPE_SIZE (ref))
- {
+ {
if (code == UNION_TYPE)
error ("redefinition of %<union %E%>", name);
- else
+ else
error ("redefinition of %<struct %E%>", name);
}
else if (C_TYPE_BEING_DEFINED (ref))
{
if (code == UNION_TYPE)
error ("nested redefinition of %<union %E%>", name);
- else
+ else
error ("nested redefinition of %<struct %E%>", name);
}
}
for (x = fieldlist; x; x = TREE_CHAIN (x))
{
- if (len > 15 || DECL_NAME (x) == NULL)
- break;
- len += 1;
+ if (len > 15 || DECL_NAME (x) == NULL)
+ break;
+ len += 1;
}
if (len > 15)
{
- tree *field_array;
- struct lang_type *space;
- struct sorted_fields_type *space2;
+ tree *field_array;
+ struct lang_type *space;
+ struct sorted_fields_type *space2;
- len += list_length (x);
+ len += list_length (x);
- /* Use the same allocation policy here that make_node uses, to
- ensure that this lives as long as the rest of the struct decl.
- All decls in an inline function need to be saved. */
+ /* Use the same allocation policy here that make_node uses, to
+ ensure that this lives as long as the rest of the struct decl.
+ All decls in an inline function need to be saved. */
- space = GGC_CNEW (struct lang_type);
- space2 = GGC_NEWVAR (struct sorted_fields_type,
+ space = GGC_CNEW (struct lang_type);
+ space2 = GGC_NEWVAR (struct sorted_fields_type,
sizeof (struct sorted_fields_type) + len * sizeof (tree));
- len = 0;
+ len = 0;
space->s = space2;
field_array = &space2->elts[0];
- for (x = fieldlist; x; x = TREE_CHAIN (x))
- {
- field_array[len++] = x;
-
- /* If there is anonymous struct or union, break out of the loop. */
- if (DECL_NAME (x) == NULL)
- break;
- }
- /* Found no anonymous struct/union. Add the TYPE_LANG_SPECIFIC. */
- if (x == NULL)
- {
- TYPE_LANG_SPECIFIC (t) = space;
- TYPE_LANG_SPECIFIC (t)->s->len = len;
- field_array = TYPE_LANG_SPECIFIC (t)->s->elts;
- qsort (field_array, len, sizeof (tree), field_decl_cmp);
- }
+ for (x = fieldlist; x; x = TREE_CHAIN (x))
+ {
+ field_array[len++] = x;
+
+ /* If there is anonymous struct or union, break out of the loop. */
+ if (DECL_NAME (x) == NULL)
+ break;
+ }
+ /* Found no anonymous struct/union. Add the TYPE_LANG_SPECIFIC. */
+ if (x == NULL)
+ {
+ TYPE_LANG_SPECIFIC (t) = space;
+ TYPE_LANG_SPECIFIC (t)->s->len = len;
+ field_array = TYPE_LANG_SPECIFIC (t)->s->elts;
+ qsort (field_array, len, sizeof (tree), field_decl_cmp);
+ }
}
}
|| (TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (type)))
!= char_type_node))
pedwarn ("second argument of %q+D should be %<char **%>",
- decl1);
+ decl1);
break;
case 3:
|| (TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (type)))
!= char_type_node))
pedwarn ("third argument of %q+D should probably be "
- "%<char **%>", decl1);
+ "%<char **%>", decl1);
break;
}
}
{
tree args = DECL_ARGUMENTS (fndecl);
for (; args; args = TREE_CHAIN (args))
- {
- tree type = TREE_TYPE (args);
- if (INTEGRAL_TYPE_P (type)
- && TYPE_PRECISION (type) < TYPE_PRECISION (integer_type_node))
- DECL_ARG_TYPE (args) = integer_type_node;
- }
+ {
+ tree type = TREE_TYPE (args);
+ if (INTEGRAL_TYPE_P (type)
+ && TYPE_PRECISION (type) < TYPE_PRECISION (integer_type_node))
+ DECL_ARG_TYPE (args) = integer_type_node;
+ }
}
if (DECL_INITIAL (fndecl) && DECL_INITIAL (fndecl) != error_mark_node)
&& !undef_nested_function)
{
if (!decl_function_context (fndecl))
- {
- c_genericize (fndecl);
- c_gimple_diagnostics_recursively (fndecl);
+ {
+ c_genericize (fndecl);
+ c_gimple_diagnostics_recursively (fndecl);
/* ??? Objc emits functions after finalizing the compilation unit.
This should be cleaned up later and this conditional removed. */
}
cgraph_finalize_function (fndecl, false);
- }
+ }
else
- {
- /* Register this function with cgraph just far enough to get it
- added to our parent's nested function list. Handy, since the
- C front end doesn't have such a list. */
- (void) cgraph_node (fndecl);
- }
+ {
+ /* Register this function with cgraph just far enough to get it
+ added to our parent's nested function list. Handy, since the
+ C front end doesn't have such a list. */
+ (void) cgraph_node (fndecl);
+ }
}
if (!decl_function_context (fndecl))
if (DECL_STATIC_CONSTRUCTOR (fndecl)
&& targetm.have_ctors_dtors)
targetm.asm_out.constructor (XEXP (DECL_RTL (fndecl), 0),
- DEFAULT_INIT_PRIORITY);
+ DEFAULT_INIT_PRIORITY);
if (DECL_STATIC_DESTRUCTOR (fndecl)
&& targetm.have_ctors_dtors)
targetm.asm_out.destructor (XEXP (DECL_RTL (fndecl), 0),
- DEFAULT_INIT_PRIORITY);
+ DEFAULT_INIT_PRIORITY);
}
\f
/* Check the declarations given in a for-loop for satisfying the C99
int flags;
FILE * stream = dump_begin (TDI_tu, &flags);
if (stream && tmp)
- {
- dump_node (tmp, flags & ~TDF_SLIM, stream);
- dump_end (TDI_tu, stream);
- }
+ {
+ dump_node (tmp, flags & ~TDF_SLIM, stream);
+ dump_end (TDI_tu, stream);
+ }
}
/* Process all file scopes in this compilation, and the external_scope,
{
diagnostic_info diagnostic;
va_list ap;
-
+
va_start (ap, gmsgid);
diagnostic_set_info (&diagnostic, gmsgid, &ap, input_location,
- flag_isoc99 ? pedantic_error_kind () : DK_WARNING);
+ flag_isoc99 ? pedantic_error_kind () : DK_WARNING);
report_diagnostic (&diagnostic);
va_end (ap);
}
va_start (ap, gmsgid);
diagnostic_set_info (&diagnostic, gmsgid, &ap, input_location,
- flag_isoc99 ? DK_WARNING : pedantic_error_kind ());
+ flag_isoc99 ? DK_WARNING : pedantic_error_kind ());
report_diagnostic (&diagnostic);
va_end (ap);
}
{
/* C89 conversion specifiers. */
{ "ABZab", 0, STD_C89, NOLENGTHS, "^#", "", NULL },
- { "cx", 0, STD_C89, NOLENGTHS, "E", "3", NULL },
+ { "cx", 0, STD_C89, NOLENGTHS, "E", "3", NULL },
{ "HIMSUWdmw", 0, STD_C89, NOLENGTHS, "-_0Ow", "", NULL },
{ "j", 0, STD_C89, NOLENGTHS, "-_0Ow", "o", NULL },
{ "p", 0, STD_C89, NOLENGTHS, "#", "", NULL },
{ "X", 0, STD_C89, NOLENGTHS, "E", "", NULL },
- { "y", 0, STD_C89, NOLENGTHS, "EO-_0w", "4", NULL },
+ { "y", 0, STD_C89, NOLENGTHS, "EO-_0w", "4", NULL },
{ "Y", 0, STD_C89, NOLENGTHS, "-_0EOw", "o", NULL },
{ "%", 0, STD_C89, NOLENGTHS, "", "", NULL },
/* C99 conversion specifiers. */
{ "C", 0, STD_C99, NOLENGTHS, "-_0EOw", "o", NULL },
- { "D", 0, STD_C99, NOLENGTHS, "", "2", NULL },
+ { "D", 0, STD_C99, NOLENGTHS, "", "2", NULL },
{ "eVu", 0, STD_C99, NOLENGTHS, "-_0Ow", "", NULL },
{ "FRTnrt", 0, STD_C99, NOLENGTHS, "", "", NULL },
- { "g", 0, STD_C99, NOLENGTHS, "O-_0w", "2o", NULL },
+ { "g", 0, STD_C99, NOLENGTHS, "O-_0w", "2o", NULL },
{ "G", 0, STD_C99, NOLENGTHS, "-_0Ow", "o", NULL },
{ "h", 0, STD_C99, NOLENGTHS, "^#", "", NULL },
{ "z", 0, STD_C99, NOLENGTHS, "O", "o", NULL },
/* This must be in the same order as enum format_type. */
static const format_kind_info format_types_orig[] =
{
- { "printf", printf_length_specs, print_char_table, " +#0-'I", NULL,
+ { "printf", printf_length_specs, print_char_table, " +#0-'I", NULL,
printf_flag_specs, printf_flag_pairs,
FMT_FLAG_ARG_CONVERT|FMT_FLAG_DOLLAR_MULTIPLE|FMT_FLAG_USE_DOLLAR|FMT_FLAG_EMPTY_PREC_OK,
'w', 0, 'p', 0, 'L',
&integer_type_node, &integer_type_node
},
- { "asm_fprintf", asm_fprintf_length_specs, asm_fprintf_char_table, " +#0-", NULL,
+ { "asm_fprintf", asm_fprintf_length_specs, asm_fprintf_char_table, " +#0-", NULL,
asm_fprintf_flag_specs, asm_fprintf_flag_pairs,
FMT_FLAG_ARG_CONVERT|FMT_FLAG_EMPTY_PREC_OK,
'w', 0, 'p', 0, 'L',
NULL, NULL
},
- { "gcc_diag", gcc_diag_length_specs, gcc_diag_char_table, "q+", NULL,
+ { "gcc_diag", gcc_diag_length_specs, gcc_diag_char_table, "q+", NULL,
gcc_diag_flag_specs, gcc_diag_flag_pairs,
FMT_FLAG_ARG_CONVERT,
0, 0, 'p', 0, 'L',
NULL, &integer_type_node
},
- { "gcc_tdiag", gcc_tdiag_length_specs, gcc_tdiag_char_table, "q+", NULL,
+ { "gcc_tdiag", gcc_tdiag_length_specs, gcc_tdiag_char_table, "q+", NULL,
gcc_tdiag_flag_specs, gcc_tdiag_flag_pairs,
FMT_FLAG_ARG_CONVERT,
0, 0, 'p', 0, 'L',
NULL, &integer_type_node
},
- { "gcc_cdiag", gcc_cdiag_length_specs, gcc_cdiag_char_table, "q+", NULL,
+ { "gcc_cdiag", gcc_cdiag_length_specs, gcc_cdiag_char_table, "q+", NULL,
gcc_cdiag_flag_specs, gcc_cdiag_flag_pairs,
FMT_FLAG_ARG_CONVERT,
0, 0, 'p', 0, 'L',
NULL, &integer_type_node
},
- { "gcc_cxxdiag", gcc_cxxdiag_length_specs, gcc_cxxdiag_char_table, "q+#", NULL,
+ { "gcc_cxxdiag", gcc_cxxdiag_length_specs, gcc_cxxdiag_char_table, "q+#", NULL,
gcc_cxxdiag_flag_specs, gcc_cxxdiag_flag_pairs,
FMT_FLAG_ARG_CONVERT,
0, 0, 'p', 0, 'L',
NULL, &integer_type_node
},
- { "gcc_gfc", NULL, gcc_gfc_char_table, "", NULL,
+ { "gcc_gfc", NULL, gcc_gfc_char_table, "", NULL,
NULL, gcc_gfc_flag_pairs,
FMT_FLAG_ARG_CONVERT,
0, 0, 0, 0, 0,
NULL, NULL
},
- { "scanf", scanf_length_specs, scan_char_table, "*'I", NULL,
+ { "scanf", scanf_length_specs, scan_char_table, "*'I", NULL,
scanf_flag_specs, scanf_flag_pairs,
FMT_FLAG_ARG_CONVERT|FMT_FLAG_SCANF_A_KLUDGE|FMT_FLAG_USE_DOLLAR|FMT_FLAG_ZERO_WIDTH_BAD|FMT_FLAG_DOLLAR_GAP_POINTER_OK,
'w', 0, 0, '*', 'L',
FMT_FLAG_FANCY_PERCENT_OK, 'w', 0, 0, 0, 0,
NULL, NULL
},
- { "strfmon", strfmon_length_specs, monetary_char_table, "=^+(!-", NULL,
+ { "strfmon", strfmon_length_specs, monetary_char_table, "=^+(!-", NULL,
strfmon_flag_specs, strfmon_flag_pairs,
FMT_FLAG_ARG_CONVERT, 'w', '#', 'p', 0, 'L',
NULL, NULL
{
/* Variable length arrays can't be initialized. */
gcc_assert (TREE_CODE (array_size) == INTEGER_CST);
-
+
if (host_integerp (array_size, 0))
{
HOST_WIDE_INT array_size_value = TREE_INT_CST_LOW (array_size);
++fci;
if (fci->format_chars == 0)
{
- if (ISGRAPH (format_char))
+ if (ISGRAPH (format_char))
warning (OPT_Wformat, "unknown conversion type character %qc in format",
format_char);
else
for (i = 0; fci->format_chars; i++, fci++)
if (strchr (fci->format_chars, c))
return i;
-
+
/* We shouldn't be looking for a non-existent specifier. */
gcc_unreachable ();
}
for (i = 0; fli->name; i++, fli++)
if (strchr (fli->name, c))
return i;
-
+
/* We shouldn't be looking for a non-existent modifier. */
gcc_unreachable ();
}
{
format_length_info *new_asm_fprintf_length_specs;
unsigned int i;
-
+
/* Find the underlying type for HOST_WIDE_INT. For the %w
length modifier to work, one must have issued: "typedef
HOST_WIDE_INT __gcc_host_wide_int__;" in one's source code
init_dynamic_gfc_info (void)
{
static tree locus;
-
+
if (!locus)
{
static format_char_info *gfc_fci;
sizeof (gcc_gfc_char_table),
sizeof (gcc_gfc_char_table));
if (locus)
- {
+ {
const unsigned i = find_char_info_specifier_index (gfc_fci, 'L');
gfc_fci[i].types[0].type = &locus;
gfc_fci[i].pointer_count = 1;
t = TREE_TYPE (TREE_TYPE (t));
}
}
-
+
/* Find the underlying type for HOST_WIDE_INT. For the %w
length modifier to work, one must have issued: "typedef
HOST_WIDE_INT __gcc_host_wide_int__;" in one's source code
}
}
}
-
+
/* Assign the new data for use. */
/* All the GCC diag formats use the same length specs. */
diag_ls = (format_length_info *)
xmemdup (gcc_diag_length_specs,
sizeof (gcc_diag_length_specs),
- sizeof (gcc_diag_length_specs));
+ sizeof (gcc_diag_length_specs));
if (hwi)
- {
+ {
/* HOST_WIDE_INT must be one of 'long' or 'long long'. */
i = find_length_info_modifier_index (diag_ls, 'w');
if (hwi == long_integer_type_node)
sizeof (gcc_diag_char_table),
sizeof (gcc_diag_char_table));
if (loc)
- {
+ {
i = find_char_info_specifier_index (diag_fci, 'H');
diag_fci[i].types[0].type = &loc;
diag_fci[i].pointer_count = 1;
}
if (t)
- {
+ {
i = find_char_info_specifier_index (diag_fci, 'J');
diag_fci[i].types[0].type = &t;
diag_fci[i].pointer_count = 1;
sizeof (gcc_tdiag_char_table),
sizeof (gcc_tdiag_char_table));
if (loc)
- {
+ {
i = find_char_info_specifier_index (tdiag_fci, 'H');
tdiag_fci[i].types[0].type = &loc;
tdiag_fci[i].pointer_count = 1;
}
if (t)
- {
+ {
/* All specifiers taking a tree share the same struct. */
i = find_char_info_specifier_index (tdiag_fci, 'D');
tdiag_fci[i].types[0].type = &t;
sizeof (gcc_cdiag_char_table),
sizeof (gcc_cdiag_char_table));
if (loc)
- {
+ {
i = find_char_info_specifier_index (cdiag_fci, 'H');
cdiag_fci[i].types[0].type = &loc;
cdiag_fci[i].pointer_count = 1;
}
if (t)
- {
+ {
/* All specifiers taking a tree share the same struct. */
i = find_char_info_specifier_index (cdiag_fci, 'D');
cdiag_fci[i].types[0].type = &t;
sizeof (gcc_cxxdiag_char_table),
sizeof (gcc_cxxdiag_char_table));
if (loc)
- {
+ {
i = find_char_info_specifier_index (cxxdiag_fci, 'H');
cxxdiag_fci[i].types[0].type = &loc;
cxxdiag_fci[i].pointer_count = 1;
}
if (t)
- {
+ {
/* All specifiers taking a tree share the same struct. */
i = find_char_info_specifier_index (cxxdiag_fci, 'D');
cxxdiag_fci[i].types[0].type = &t;
|| info.format_type == gcc_cxxdiag_format_type)
{
/* Our first time through, we have to make sure that our
- format_type data is allocated dynamically and is modifiable. */
+ format_type data is allocated dynamically and is modifiable. */
if (!dynamic_format_types)
format_types = dynamic_format_types = (format_kind_info *)
xmemdup (format_types_orig, sizeof (format_types_orig),
sizeof (format_types_orig));
/* If this is format __asm_fprintf__, we have to initialize
- GCC's notion of HOST_WIDE_INT for checking %wd. */
+ GCC's notion of HOST_WIDE_INT for checking %wd. */
if (info.format_type == asm_fprintf_format_type)
init_dynamic_asm_fprintf_info ();
/* If this is format __gcc_gfc__, we have to initialize GCC's
else if (info.format_type == gcc_gfc_format_type)
init_dynamic_gfc_info ();
/* If this is one of the diagnostic attributes, then we have to
- initialize 'location_t' and 'tree' at runtime. */
+ initialize 'location_t' and 'tree' at runtime. */
else if (info.format_type == gcc_diag_format_type
|| info.format_type == gcc_tdiag_format_type
|| info.format_type == gcc_cdiag_format_type
&& !warn_init_self)
TREE_NO_WARNING (DECL_EXPR_DECL (*expr_p)) = 1;
return GS_UNHANDLED;
-
+
case COMPOUND_LITERAL_EXPR:
return gimplify_compound_literal_expr (expr_p, pre_p);
/* Remove this one if it is in the system chain. */
reason = REASON_DUP_SYS;
for (tmp = system; tmp; tmp = tmp->next)
- if (INO_T_EQ (tmp->ino, cur->ino) && tmp->dev == cur->dev
- && cur->construct == tmp->construct)
+ if (INO_T_EQ (tmp->ino, cur->ino) && tmp->dev == cur->dev
+ && cur->construct == tmp->construct)
break;
if (!tmp)
/* Duplicate of something earlier in the same chain? */
reason = REASON_DUP;
for (tmp = head; tmp != cur; tmp = tmp->next)
- if (INO_T_EQ (cur->ino, tmp->ino) && cur->dev == tmp->dev
- && cur->construct == tmp->construct)
+ if (INO_T_EQ (cur->ino, tmp->ino) && cur->dev == tmp->dev
+ && cur->construct == tmp->construct)
break;
if (tmp == cur
/* Last in the chain and duplicate of JOIN? */
&& !(cur->next == NULL && join
&& INO_T_EQ (cur->ino, join->ino)
- && cur->dev == join->dev
- && cur->construct == join->construct))
+ && cur->dev == join->dev
+ && cur->construct == join->construct))
{
/* Unique, so keep this directory. */
pcur = &cur->next;
include chain. */
add_env_var_paths ("CPATH", BRACKET);
add_env_var_paths (lang_env_vars[idx], SYSTEM);
-
+
target_c_incpath.extra_pre_includes (sysroot, iprefix, stdinc);
/* Finally chain on the standard directories. */
/* Set the debug callbacks if we can use them. */
if (debug_info_level == DINFO_LEVEL_VERBOSE
&& (write_symbols == DWARF2_DEBUG
- || write_symbols == VMS_AND_DWARF2_DEBUG))
+ || write_symbols == VMS_AND_DWARF2_DEBUG))
{
cb->define = cb_define;
cb->undef = cb_undef;
if (!MAIN_FILE_P (new_map))
{
#ifdef USE_MAPPED_LOCATION
- int included_at = LAST_SOURCE_LINE_LOCATION (new_map - 1);
+ int included_at = LAST_SOURCE_LINE_LOCATION (new_map - 1);
input_location = included_at;
push_srcloc (new_map->start_location);
#else
- int included_at = LAST_SOURCE_LINE (new_map - 1);
+ int included_at = LAST_SOURCE_LINE (new_map - 1);
input_line = included_at;
push_srcloc (new_map->to_file, 1);
retry:
tok = cpp_get_token (parse_in);
type = tok->type;
-
+
retry_after_at:
#ifdef USE_MAPPED_LOCATION
*loc = tok->src_loc;
{
case CPP_PADDING:
goto retry;
-
+
case CPP_NAME:
*value = HT_IDENT_TO_GCC_IDENT (HT_NODE (tok->val.node));
break;
if (c_dialect_objc ())
{
location_t atloc = input_location;
-
+
retry_at:
tok = cpp_get_token (parse_in);
type = tok->type;
{
case CPP_PADDING:
goto retry_at;
-
+
case CPP_STRING:
case CPP_WSTRING:
type = lex_string (tok, value, true);
case CPP_PASTE:
{
unsigned char name[4];
-
+
*cpp_spell_token (parse_in, tok, name, true) = 0;
-
+
error ("stray %qs in program", name);
}
-
+
goto retry;
case CPP_OTHER:
no_more_pch = true;
c_common_no_more_pch ();
}
-
+
timevar_pop (TV_CPP);
-
+
return type;
}
for (; itk < itk_none; itk += 2 /* skip signed types */)
{
tree upper = TYPE_MAX_VALUE (integer_types[itk]);
-
+
if ((unsigned HOST_WIDE_INT) TREE_INT_CST_HIGH (upper) > high
|| ((unsigned HOST_WIDE_INT) TREE_INT_CST_HIGH (upper) == high
&& TREE_INT_CST_LOW (upper) >= low))
goto retry;
}
/* FALLTHROUGH */
-
+
default:
break;
-
+
case CPP_WSTRING:
wide = true;
/* FALLTHROUGH */
-
+
case CPP_STRING:
if (!concats)
{
gcc_obstack_init (&str_ob);
obstack_grow (&str_ob, &str, sizeof (cpp_string));
}
-
+
concats++;
obstack_grow (&str_ob, &tok->val.str, sizeof (cpp_string));
goto retry;
/* Assume that, if we managed to translate the string above,
then the untranslated parsing will always succeed. */
gcc_assert (xlated);
-
+
if (TREE_STRING_LENGTH (value) != (int) istr.len
|| 0 != strncmp (TREE_STRING_POINTER (value), (char *) istr.text,
istr.len))
case 'T':
gcc_assert (TYPE_P (t));
name = TYPE_NAME (t);
-
+
if (name && TREE_CODE (name) == TYPE_DECL)
{
if (DECL_NAME (name))
case OPT_freplace_objc_classes:
flag_replace_objc_classes = value;
break;
-
+
case OPT_frepo:
flag_use_repository = value;
if (value)
case OPT_fuse_cxa_get_exception_ptr:
flag_use_cxa_get_exception_ptr = value;
break;
-
+
case OPT_fvisibility_inlines_hidden:
visibility_options.inlines_hidden = value;
break;
this_input_filename
= cpp_read_main_file (parse_in, in_fnames[i]);
/* If an input file is missing, abandon further compilation.
- cpplib has issued a diagnostic. */
+ cpplib has issued a diagnostic. */
if (!this_input_filename)
break;
}
parameter-type-list[opt] )
direct-abstract-declarator:
- direct-abstract-declarator[opt] ( parameter-forward-declarations
+ direct-abstract-declarator[opt] ( parameter-forward-declarations
parameter-type-list[opt] )
parameter-forward-declarations:
names for the error message. The possible values for *flag_var must
fit in a 'signed char'. */
-static const struct c_pch_matching
+static const struct c_pch_matching
{
int *flag_var;
const char *flag_name;
size_t target_data_length;
};
-struct c_pch_header
+struct c_pch_header
{
unsigned long asm_size;
};
static char result[IDENT_LENGTH];
static const char template[IDENT_LENGTH] = "gpch.013";
static const char c_language_chars[] = "Co+O";
-
+
memcpy (result, template, IDENT_LENGTH);
result[4] = c_language_chars[c_language];
}
/* Prepare to write a PCH file, if one is being written. This is
- called at the start of compilation.
+ called at the start of compilation.
Also, print out the executable checksum if -fverbose-asm is in effect. */
struct c_pch_validity v;
void *target_validity;
static const char partial_pch[IDENT_LENGTH] = "gpcWrite";
-
+
#ifdef ASM_COMMENT_START
if (flag_verbose_asm)
{
fputc ('\n', asm_out_file);
}
#endif
-
+
if (!pch_file)
return;
-
+
f = fopen (pch_file, "w+b");
if (f == NULL)
fatal_error ("can%'t create precompiled header %s: %m", pch_file);
pch_outfile = f;
gcc_assert (memcmp (executable_checksum, no_checksum, 16) != 0);
-
+
v.debug_info_type = write_symbols;
{
size_t i;
}
v.pch_init = &pch_init;
target_validity = targetm.get_pch_validity (&v.target_data_length);
-
+
if (fwrite (partial_pch, IDENT_LENGTH, 1, f) != 1
|| fwrite (executable_checksum, 16, 1, f) != 1
|| fwrite (&v, sizeof (v), 1, f) != 1
if (asm_file_name == NULL
|| strcmp (asm_file_name, "-") == 0)
fatal_error ("%qs is not a valid output file", asm_file_name);
-
+
asm_file_startpos = ftell (asm_out_file);
-
+
/* Let the debugging format deal with the PCHness. */
(*debug_hooks->handle_pch) (0);
-
+
cpp_save_state (parse_in, f);
}
asm_file_end = ftell (asm_out_file);
h.asm_size = asm_file_end - asm_file_startpos;
-
+
if (fwrite (&h, sizeof (h), 1, pch_outfile) != 1)
fatal_error ("can%'t write %s: %m", pch_file);
-
+
buf = XNEWVEC (char, 16384);
if (fseek (asm_out_file, asm_file_startpos, SEEK_SET) != 0)
name);
return 2;
}
-
+
pch_ident = get_ident();
if (memcmp (ident, pch_ident, IDENT_LENGTH) != 0)
{
if (memcmp (ident, pch_ident, 5) == 0)
/* It's a PCH, for the right language, but has the wrong version.
*/
- cpp_error (pfile, CPP_DL_WARNING,
+ cpp_error (pfile, CPP_DL_WARNING,
"%s: not compatible with this GCC version", name);
else if (memcmp (ident, pch_ident, 4) == 0)
/* It's a PCH for the wrong language. */
cpp_error (pfile, CPP_DL_WARNING, "%s: not for %s", name,
lang_hooks.name);
- else
+ else
/* Not any kind of PCH. */
cpp_error (pfile, CPP_DL_WARNING, "%s: not a PCH file", name);
}
&& write_symbols != NO_DEBUG)
{
if (cpp_get_options (pfile)->warn_invalid_pch)
- cpp_error (pfile, CPP_DL_WARNING,
+ cpp_error (pfile, CPP_DL_WARNING,
"%s: created with -g%s, but used with -g%s", name,
debug_type_names[v.debug_info_type],
debug_type_names[write_symbols]);
if (*pch_matching[i].flag_var != v.match[i])
{
if (cpp_get_options (pfile)->warn_invalid_pch)
- cpp_error (pfile, CPP_DL_WARNING,
+ cpp_error (pfile, CPP_DL_WARNING,
"%s: settings for %s do not match", name,
pch_matching[i].flag_name);
return 2;
/* If the text segment was not loaded at the same address as it was
when the PCH file was created, function pointers loaded from the
PCH will not be valid. We could in theory remap all the function
- pointers, but no support for that exists at present.
+ pointers, but no support for that exists at present.
Since we have the same executable, it should only be necessary to
check one function. */
if (v.pch_init != &pch_init)
{
if (cpp_get_options (pfile)->warn_invalid_pch)
- cpp_error (pfile, CPP_DL_WARNING,
+ cpp_error (pfile, CPP_DL_WARNING,
"%s: had text segment at different address", name);
return 2;
}
{
void *this_file_data = xmalloc (v.target_data_length);
const char *msg;
-
+
if ((size_t) read (fd, this_file_data, v.target_data_length)
!= v.target_data_length)
fatal_error ("can%'t read %s: %m", name);
/* Check the preprocessor macros are the same as when the PCH was
generated. */
-
+
result = cpp_valid_state (pfile, name, fd);
if (result == -1)
return 2;
FILE *f;
struct c_pch_header h;
struct save_macro_data *smd;
-
+
f = fdopen (fd, "rb");
if (f == NULL)
{
return;
fclose (f);
-
+
/* Give the front end a chance to take action after a PCH file has
been loaded. */
if (lang_post_pch_load)
fd = open (name, O_RDONLY | O_BINARY, 0666);
if (fd == -1)
fatal_error ("%s: couldn%'t open PCH file: %m", name);
-
+
if (c_common_valid_pch (pfile, name, fd) != 1)
{
if (!cpp_get_options (pfile)->warn_invalid_pch)
inform ("use -Winvalid-pch for more information");
fatal_error ("%s: PCH file was invalid", name);
}
-
+
c_common_read_pch (pfile, name, fd, name);
-
+
close (fd);
}
int fd, const char *orig_name ATTRIBUTE_UNUSED)
{
c_common_read_pch (pfile, name, fd, orig_name);
-
+
fprintf (print.outf, "#pragma GCC pch_preprocess \"%s\"\n", name);
print.src_line++;
}
typedef struct align_stack GTY(())
{
- int alignment;
- tree id;
+ int alignment;
+ tree id;
struct align_stack * prev;
} align_stack;
#ifdef HANDLE_PRAGMA_PACK_PUSH_POP
/* If we have a "global" #pragma pack(<n>) in effect when the first
- #pragma pack(push,<n>) is encountered, this stores the value of
- maximum_field_alignment in effect. When the final pop_alignment()
+ #pragma pack(push,<n>) is encountered, this stores the value of
+ maximum_field_alignment in effect. When the final pop_alignment()
happens, we restore the value to this, not to a value of 0 for
maximum_field_alignment. Value is in bits. */
static int default_alignment;
entry = GGC_NEW (align_stack);
entry->alignment = alignment;
- entry->id = id;
- entry->prev = alignment_stack;
-
- /* The current value of maximum_field_alignment is not necessarily
- 0 since there may be a #pragma pack(<n>) in effect; remember it
+ entry->id = id;
+ entry->prev = alignment_stack;
+
+ /* The current value of maximum_field_alignment is not necessarily
+ 0 since there may be a #pragma pack(<n>) in effect; remember it
so that we can restore it after the final #pragma pop(). */
if (alignment_stack == NULL)
default_alignment = maximum_field_alignment;
-
+
alignment_stack = entry;
maximum_field_alignment = alignment;
pop_alignment (tree id)
{
align_stack * entry;
-
+
if (alignment_stack == NULL)
GCC_BAD ("#pragma pack (pop) encountered without matching #pragma pack (push)");
/* #pragma pack ()
#pragma pack (N)
-
+
#pragma pack (push)
#pragma pack (push, N)
#pragma pack (push, ID)
{
case set: SET_GLOBAL_ALIGNMENT (align); break;
case push: push_alignment (align, id); break;
- case pop: pop_alignment (id); break;
+ case pop: pop_alignment (id); break;
}
}
#endif /* HANDLE_PRAGMA_PACK */
&& !DECL_WEAK (decl) /* Don't complain about a redundant #pragma. */
&& TREE_SYMBOL_REFERENCED (DECL_ASSEMBLER_NAME (decl)))
warning (OPT_Wpragmas, "applying #pragma weak %q+D after first use "
- "results in unspecified behavior", decl);
+ "results in unspecified behavior", decl);
declare_weak (decl);
}
"conflict with previous #pragma redefine_extname");
return;
}
-
+
pending_redefine_extname
= tree_cons (oldname, newname, pending_redefine_extname);
}
*p = TREE_CHAIN (t);
/* If we already have an asmname, #pragma redefine_extname is
- ignored (with a warning if it conflicts). */
+ ignored (with a warning if it conflicts). */
if (asmname)
{
if (strcmp (TREE_STRING_POINTER (asmname),
const char *id = IDENTIFIER_POINTER (DECL_NAME (decl));
size_t ilen = IDENTIFIER_LENGTH (DECL_NAME (decl));
-
+
char *newname = (char *) alloca (plen + ilen + 1);
memcpy (newname, prefix, plen);
else if (!strcmp (str, "internal"))
default_visibility = VISIBILITY_INTERNAL;
else if (!strcmp (str, "hidden"))
- default_visibility = VISIBILITY_HIDDEN;
+ default_visibility = VISIBILITY_HIDDEN;
else if (!strcmp (str, "protected"))
default_visibility = VISIBILITY_PROTECTED;
else
default_visibility = VEC_pop (visibility, visstack);
visibility_options.inpragma
= VEC_length (visibility, visstack) != 0;
-}
+}
/* Sets the default visibility for symbols to something other than that
specified on the command line. */
tree x;
enum cpp_ttype token;
enum { bad, push, pop } action = bad;
-
+
token = pragma_lex (&x);
if (token == CPP_NAME)
{
const char *op = IDENTIFIER_POINTER (x);
if (!strcmp (op, "push"))
- action = push;
+ action = push;
else if (!strcmp (op, "pop"))
- action = pop;
+ action = pop;
}
if (bad == action)
GCC_BAD ("#pragma GCC visibility must be followed by push or pop");
else
{
if (pop == action)
- {
- if (!VEC_length (visibility, visstack))
+ {
+ if (!VEC_length (visibility, visstack))
GCC_BAD ("no matching push for %<#pragma GCC visibility pop%>");
- else
+ else
pop_visibility ();
- }
+ }
else
- {
- if (pragma_lex (&x) != CPP_OPEN_PAREN)
- GCC_BAD ("missing %<(%> after %<#pragma GCC visibility push%> - ignored");
- token = pragma_lex (&x);
- if (token != CPP_NAME)
+ {
+ if (pragma_lex (&x) != CPP_OPEN_PAREN)
+ GCC_BAD ("missing %<(%> after %<#pragma GCC visibility push%> - ignored");
+ token = pragma_lex (&x);
+ if (token != CPP_NAME)
GCC_BAD ("malformed #pragma GCC visibility push");
- else
+ else
push_visibility (IDENTIFIER_POINTER (x));
- if (pragma_lex (&x) != CPP_CLOSE_PAREN)
- GCC_BAD ("missing %<(%> after %<#pragma GCC visibility push%> - ignored");
- }
+ if (pragma_lex (&x) != CPP_CLOSE_PAREN)
+ GCC_BAD ("missing %<(%> after %<#pragma GCC visibility push%> - ignored");
+ }
}
if (pragma_lex (&x) != CPP_EOF)
warning (OPT_Wpragmas, "junk at end of %<#pragma GCC visibility%>");
having enum cpp_ttype declared. */
extern enum cpp_ttype c_lex_with_flags (tree *, location_t *, unsigned char *);
-/* If 1, then lex strings into the execution character set.
+/* If 1, then lex strings into the execution character set.
If 0, lex strings into the host character set.
If -1, lex both, and chain them together, such that the former
is the TREE_CHAIN of the latter. */
{
tree pointee = strip_pointer_operator (TREE_TYPE (t));
if (TREE_CODE (pointee) != ARRAY_TYPE
- && TREE_CODE (pointee) != FUNCTION_TYPE)
- pp_c_whitespace (pp);
+ && TREE_CODE (pointee) != FUNCTION_TYPE)
+ pp_c_whitespace (pp);
}
}
/* It is easier to handle C++ reference types here. */
case REFERENCE_TYPE:
if (TREE_CODE (TREE_TYPE (t)) == POINTER_TYPE)
- pp_c_pointer (pp, TREE_TYPE (t));
+ pp_c_pointer (pp, TREE_TYPE (t));
if (TREE_CODE (t) == POINTER_TYPE)
- pp_c_star (pp);
+ pp_c_star (pp);
else
- pp_c_ampersand (pp);
+ pp_c_ampersand (pp);
pp_c_type_qualifier_list (pp, t);
break;
case REFERENCE_TYPE:
case POINTER_TYPE:
{
- /* Get the types-specifier of this type. */
- tree pointee = strip_pointer_operator (TREE_TYPE (t));
- pp_c_specifier_qualifier_list (pp, pointee);
- if (TREE_CODE (pointee) == ARRAY_TYPE
- || TREE_CODE (pointee) == FUNCTION_TYPE)
- {
- pp_c_whitespace (pp);
- pp_c_left_paren (pp);
- }
+ /* Get the types-specifier of this type. */
+ tree pointee = strip_pointer_operator (TREE_TYPE (t));
+ pp_c_specifier_qualifier_list (pp, pointee);
+ if (TREE_CODE (pointee) == ARRAY_TYPE
+ || TREE_CODE (pointee) == FUNCTION_TYPE)
+ {
+ pp_c_whitespace (pp);
+ pp_c_left_paren (pp);
+ }
else if (!c_dialect_cxx ())
pp_c_whitespace (pp);
- pp_ptr_operator (pp, t);
+ pp_ptr_operator (pp, t);
}
break;
case COMPLEX_TYPE:
pp_c_specifier_qualifier_list (pp, TREE_TYPE (t));
if (code == COMPLEX_TYPE)
- pp_c_identifier (pp, flag_isoc99 ? "_Complex" : "__complex__");
+ pp_c_identifier (pp, flag_isoc99 ? "_Complex" : "__complex__");
else if (code == VECTOR_TYPE)
- pp_c_identifier (pp, "__vector__");
+ pp_c_identifier (pp, "__vector__");
break;
default:
{
bool first = true;
for ( ; parms && parms != void_list_node; parms = TREE_CHAIN (parms))
- {
- if (!first)
- pp_separate_with (pp, ',');
- first = false;
- pp_declaration_specifiers
- (pp, want_parm_decl ? parms : TREE_VALUE (parms));
- if (want_parm_decl)
- pp_declarator (pp, parms);
- else
- pp_abstract_declarator (pp, TREE_VALUE (parms));
- }
+ {
+ if (!first)
+ pp_separate_with (pp, ',');
+ first = false;
+ pp_declaration_specifiers
+ (pp, want_parm_decl ? parms : TREE_VALUE (parms));
+ if (want_parm_decl)
+ pp_declarator (pp, parms);
+ else
+ pp_abstract_declarator (pp, TREE_VALUE (parms));
+ }
}
pp_c_right_paren (pp);
}
if (TREE_CODE (t) == POINTER_TYPE)
{
if (TREE_CODE (TREE_TYPE (t)) == ARRAY_TYPE
- || TREE_CODE (TREE_TYPE (t)) == FUNCTION_TYPE)
- pp_c_right_paren (pp);
+ || TREE_CODE (TREE_TYPE (t)) == FUNCTION_TYPE)
+ pp_c_right_paren (pp);
t = TREE_TYPE (t);
}
else if (DECL_P (t))
{
if (DECL_REGISTER (t))
- pp_c_identifier (pp, "register");
+ pp_c_identifier (pp, "register");
else if (TREE_STATIC (t) && TREE_CODE (t) == VAR_DECL)
- pp_c_identifier (pp, "static");
+ pp_c_identifier (pp, "static");
}
}
pp_c_space_for_pointer_operator (pp, TREE_TYPE (TREE_TYPE (t)));
pp_c_tree_decl_identifier (pp, t);
if (pp_c_base (pp)->flags & pp_c_flag_abstract)
- pp_abstract_declarator (pp, TREE_TYPE (t));
+ pp_abstract_declarator (pp, TREE_TYPE (t));
else
- {
- pp_parameter_list (pp, t);
- pp_abstract_declarator (pp, TREE_TYPE (TREE_TYPE (t)));
- }
+ {
+ pp_parameter_list (pp, t);
+ pp_abstract_declarator (pp, TREE_TYPE (TREE_TYPE (t)));
+ }
break;
case INTEGER_TYPE:
{
pp_tree_identifier (pp, TREE_PURPOSE (attributes));
if (TREE_VALUE (attributes))
- pp_c_call_argument_list (pp, TREE_VALUE (attributes));
+ pp_c_call_argument_list (pp, TREE_VALUE (attributes));
if (TREE_CHAIN (attributes))
pp_separate_with (pp, ',');
else
{
if (tree_int_cst_sgn (i) < 0)
- {
- pp_character (pp, '-');
- i = build_int_cst_wide (NULL_TREE,
+ {
+ pp_character (pp, '-');
+ i = build_int_cst_wide (NULL_TREE,
-TREE_INT_CST_LOW (i),
~TREE_INT_CST_HIGH (i)
+ !TREE_INT_CST_LOW (i));
- }
+ }
sprintf (pp_buffer (pp)->digit_buffer,
- HOST_WIDE_INT_PRINT_DOUBLE_HEX,
- TREE_INT_CST_HIGH (i), TREE_INT_CST_LOW (i));
+ HOST_WIDE_INT_PRINT_DOUBLE_HEX,
+ TREE_INT_CST_HIGH (i), TREE_INT_CST_LOW (i));
pp_string (pp, pp_buffer (pp)->digit_buffer);
}
if (TYPE_UNSIGNED (type))
if (type == long_integer_type_node || type == long_unsigned_type_node)
pp_character (pp, 'l');
else if (type == long_long_integer_type_node
- || type == long_long_unsigned_type_node)
+ || type == long_long_unsigned_type_node)
pp_string (pp, "ll");
}
{
case INTEGER_CST:
{
- tree type = TREE_TYPE (e);
- if (type == boolean_type_node)
- pp_c_bool_constant (pp, e);
- else if (type == char_type_node)
- pp_c_character_constant (pp, e);
- else if (TREE_CODE (type) == ENUMERAL_TYPE
- && pp_c_enumeration_constant (pp, e))
- ;
- else
- pp_c_integer_constant (pp, e);
+ tree type = TREE_TYPE (e);
+ if (type == boolean_type_node)
+ pp_c_bool_constant (pp, e);
+ else if (type == char_type_node)
+ pp_c_character_constant (pp, e);
+ else if (TREE_CODE (type) == ENUMERAL_TYPE
+ && pp_c_enumeration_constant (pp, e))
+ ;
+ else
+ pp_c_integer_constant (pp, e);
}
break;
{
tree init = DECL_INITIAL (t);
/* This C++ bit is handled here because it is easier to do so.
- In templates, the C++ parser builds a TREE_LIST for a
- direct-initialization; the TREE_PURPOSE is the variable to
- initialize and the TREE_VALUE is the initializer. */
+ In templates, the C++ parser builds a TREE_LIST for a
+ direct-initialization; the TREE_PURPOSE is the variable to
+ initialize and the TREE_VALUE is the initializer. */
if (TREE_CODE (init) == TREE_LIST)
- {
- pp_c_left_paren (pp);
- pp_expression (pp, TREE_VALUE (init));
- pp_right_paren (pp);
- }
+ {
+ pp_c_left_paren (pp);
+ pp_expression (pp, TREE_VALUE (init));
+ pp_right_paren (pp);
+ }
else
- {
- pp_space (pp);
- pp_equal (pp);
- pp_space (pp);
- pp_c_initializer (pp, init);
- }
+ {
+ pp_space (pp);
+ pp_equal (pp);
+ pp_space (pp);
+ pp_c_initializer (pp, init);
+ }
}
}
case UNION_TYPE:
case ARRAY_TYPE:
{
- tree init = TREE_OPERAND (e, 0);
- for (; init != NULL_TREE; init = TREE_CHAIN (init))
- {
- if (code == RECORD_TYPE || code == UNION_TYPE)
- {
- pp_c_dot (pp);
- pp_c_primary_expression (pp, TREE_PURPOSE (init));
- }
- else
- {
- pp_c_left_bracket (pp);
- if (TREE_PURPOSE (init))
- pp_c_constant (pp, TREE_PURPOSE (init));
- pp_c_right_bracket (pp);
- }
- pp_c_whitespace (pp);
- pp_equal (pp);
- pp_c_whitespace (pp);
- pp_initializer (pp, TREE_VALUE (init));
- if (TREE_CHAIN (init))
- pp_separate_with (pp, ',');
- }
+ tree init = TREE_OPERAND (e, 0);
+ for (; init != NULL_TREE; init = TREE_CHAIN (init))
+ {
+ if (code == RECORD_TYPE || code == UNION_TYPE)
+ {
+ pp_c_dot (pp);
+ pp_c_primary_expression (pp, TREE_PURPOSE (init));
+ }
+ else
+ {
+ pp_c_left_bracket (pp);
+ if (TREE_PURPOSE (init))
+ pp_c_constant (pp, TREE_PURPOSE (init));
+ pp_c_right_bracket (pp);
+ }
+ pp_c_whitespace (pp);
+ pp_equal (pp);
+ pp_c_whitespace (pp);
+ pp_initializer (pp, TREE_VALUE (init));
+ if (TREE_CHAIN (init))
+ pp_separate_with (pp, ',');
+ }
}
return;
case VECTOR_TYPE:
if (TREE_CODE (e) == VECTOR_CST)
- pp_c_expression_list (pp, TREE_VECTOR_CST_ELTS (e));
+ pp_c_expression_list (pp, TREE_VECTOR_CST_ELTS (e));
else if (TREE_CODE (e) == CONSTRUCTOR)
- pp_c_constructor_elts (pp, CONSTRUCTOR_ELTS (e));
+ pp_c_constructor_elts (pp, CONSTRUCTOR_ELTS (e));
else
- break;
+ break;
return;
case COMPLEX_TYPE:
case ADDR_EXPR:
if (TREE_CODE (TREE_OPERAND (e, 0)) == FUNCTION_DECL)
- {
- pp_c_id_expression (pp, TREE_OPERAND (e, 0));
- break;
- }
+ {
+ pp_c_id_expression (pp, TREE_OPERAND (e, 0));
+ break;
+ }
/* else fall through. */
default:
typedef enum
{
pp_c_flag_abstract = 1 << 1,
- pp_c_flag_last_bit = 2
+ pp_c_flag_last_bit = 2
} pp_c_pretty_print_flags;
int *offset_list;
pp_flags flags;
-
+
/* These must be overridden by each of the C and C++ front-end to
reflect their understanding of syntactic productions when they differ. */
c_pretty_print_fn declaration;
cur_stmt_list = chain;
/* If the statement list is completely empty, just return it. This is
- just as good small as build_empty_stmt, with the advantage that
+ just as good small as build_empty_stmt, with the advantage that
statement lists are merged when they appended to one another. So
using the STATEMENT_LIST avoids pathological buildup of EMPTY_STMT_P
statements. */
{
tree t = va_arg (p, tree);
if (t && !TYPE_P (t))
- side_effects |= TREE_SIDE_EFFECTS (t);
+ side_effects |= TREE_SIDE_EFFECTS (t);
TREE_OPERAND (ret, i) = t;
}
/* Build expressions with type checking for C compiler.
Copyright (C) 1987, 1988, 1991, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
- 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006
+ 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006
Free Software Foundation, Inc.
This file is part of GCC.
{
/* Preserve unsignedness if not really getting any wider. */
if (TYPE_UNSIGNED (type)
- && (TYPE_PRECISION (type) == TYPE_PRECISION (integer_type_node)))
- return unsigned_type_node;
+ && (TYPE_PRECISION (type) == TYPE_PRECISION (integer_type_node)))
+ return unsigned_type_node;
return integer_type_node;
}
/* We should not have any type quals on arrays at all. */
gcc_assert (!TYPE_QUALS (t1) && !TYPE_QUALS (t2));
-
+
d1_zero = d1 == 0 || !TYPE_MAX_VALUE (d1);
d2_zero = d2 == 0 || !TYPE_MAX_VALUE (d2);
if (elt == TREE_TYPE (t2) && TYPE_DOMAIN (t2)
&& (d1_variable || d1_zero || !d2_variable))
return build_type_attribute_variant (t2, attributes);
-
+
if (elt == TREE_TYPE (t1) && !TYPE_DOMAIN (t2) && !TYPE_DOMAIN (t1))
return build_type_attribute_variant (t1, attributes);
if (elt == TREE_TYPE (t2) && !TYPE_DOMAIN (t2) && !TYPE_DOMAIN (t1))
return build_type_attribute_variant (t2, attributes);
-
+
/* Merge the element types, and have a size if either arg has
one. We may have qualifiers on the element types. To set
up TYPE_MAIN_VARIANT correctly, we need to form the
return t1;
gcc_assert (TREE_CODE (t1) == POINTER_TYPE
- && TREE_CODE (t2) == POINTER_TYPE);
+ && TREE_CODE (t2) == POINTER_TYPE);
/* Merge the attributes. */
attributes = targetm.merge_type_attributes (t1, t2);
if (TYPE_UNSIGNED (t1) || TYPE_UNSIGNED (t2))
return long_long_unsigned_type_node;
else
- return long_long_integer_type_node;
+ return long_long_integer_type_node;
}
if (TYPE_MAIN_VARIANT (t1) == long_unsigned_type_node
val = comptypes_internal (type1, type2);
free_all_tagged_tu_seen_up_to (tagged_tu_seen_base1);
-
+
return val;
-}\f
+}
+\f
/* Return 1 if TYPE1 and TYPE2 are compatible types for assignment
or various other operations. Return 2 if they are compatible
but a warning may be needed if you use them together. This
|| !tree_int_cst_equal (TYPE_MAX_VALUE (d1), TYPE_MAX_VALUE (d2)))
val = 0;
- break;
+ break;
}
case ENUMERAL_TYPE:
case RECORD_TYPE:
case UNION_TYPE:
if (val != 1 && !same_translation_unit_p (t1, t2))
- {
+ {
if (attrval != 2)
return tagged_types_tu_compatible_p (t1, t2);
val = tagged_types_tu_compatible_p (t1, t2);
tu->next = tagged_tu_seen_base;
tu->t1 = t1;
tu->t2 = t2;
-
+
tagged_tu_seen_base = tu;
-
+
/* The C standard says that two structures in different translation
units are compatible with each other only if the types of their
fields are compatible (among other things). We assume that they
case ENUMERAL_TYPE:
{
struct tagged_tu_seen_cache *tu = alloc_tagged_tu_seen_cache (t1, t2);
- /* Speed up the case where the type values are in the same order. */
- tree tv1 = TYPE_VALUES (t1);
- tree tv2 = TYPE_VALUES (t2);
+ /* Speed up the case where the type values are in the same order. */
+ tree tv1 = TYPE_VALUES (t1);
+ tree tv2 = TYPE_VALUES (t2);
- if (tv1 == tv2)
+ if (tv1 == tv2)
{
return 1;
}
- for (;tv1 && tv2; tv1 = TREE_CHAIN (tv1), tv2 = TREE_CHAIN (tv2))
- {
- if (TREE_PURPOSE (tv1) != TREE_PURPOSE (tv2))
- break;
- if (simple_cst_equal (TREE_VALUE (tv1), TREE_VALUE (tv2)) != 1)
+ for (;tv1 && tv2; tv1 = TREE_CHAIN (tv1), tv2 = TREE_CHAIN (tv2))
+ {
+ if (TREE_PURPOSE (tv1) != TREE_PURPOSE (tv2))
+ break;
+ if (simple_cst_equal (TREE_VALUE (tv1), TREE_VALUE (tv2)) != 1)
{
- tu->val = 0;
+ tu->val = 0;
return 0;
}
- }
+ }
- if (tv1 == NULL_TREE && tv2 == NULL_TREE)
+ if (tv1 == NULL_TREE && tv2 == NULL_TREE)
{
return 1;
}
- if (tv1 == NULL_TREE || tv2 == NULL_TREE)
+ if (tv1 == NULL_TREE || tv2 == NULL_TREE)
{
tu->val = 0;
return 0;
tu->val = 0;
return 0;
}
-
+
/* Speed up the common case where the fields are in the same order. */
for (s1 = TYPE_FIELDS (t1), s2 = TYPE_FIELDS (t2); s1 && s2;
s1 = TREE_CHAIN (s1), s2 = TREE_CHAIN (s2))
{
int result;
-
-
+
+
if (DECL_NAME (s1) == NULL
- || DECL_NAME (s1) != DECL_NAME (s2))
+ || DECL_NAME (s1) != DECL_NAME (s2))
break;
result = comptypes_internal (TREE_TYPE (s1), TREE_TYPE (s2));
if (result == 0)
case RECORD_TYPE:
{
- struct tagged_tu_seen_cache *tu = alloc_tagged_tu_seen_cache (t1, t2);
+ struct tagged_tu_seen_cache *tu = alloc_tagged_tu_seen_cache (t1, t2);
for (s1 = TYPE_FIELDS (t1), s2 = TYPE_FIELDS (t2);
s1 && s2;
type = TYPE_MAIN_VARIANT (type);
rval = build4 (ARRAY_REF, type, array, index, NULL_TREE, NULL_TREE);
/* Array ref is const/volatile if the array elements are
- or if the array is. */
+ or if the array is. */
TREE_READONLY (rval)
|= (TYPE_READONLY (TREE_TYPE (TREE_TYPE (array)))
| TREE_READONLY (array));
if (require_constant_value)
{
result = fold_build3_initializer (CALL_EXPR, TREE_TYPE (fntype),
- function, coerced_params, NULL_TREE);
+ function, coerced_params, NULL_TREE);
if (TREE_CONSTANT (result)
&& (name == NULL_TREE
}
else
result = fold_build3 (CALL_EXPR, TREE_TYPE (fntype),
- function, coerced_params, NULL_TREE);
+ function, coerced_params, NULL_TREE);
if (VOID_TYPE_P (TREE_TYPE (result)))
return result;
else if (type != TREE_TYPE (val)
&& (type == dfloat32_type_node
|| type == dfloat64_type_node
- || type == dfloat128_type_node
+ || type == dfloat128_type_node
|| TREE_TYPE (val) == dfloat32_type_node
|| TREE_TYPE (val) == dfloat64_type_node
|| TREE_TYPE (val) == dfloat128_type_node)
- && (formal_prec
+ && (formal_prec
<= TYPE_PRECISION (TREE_TYPE (val))
|| (type == dfloat128_type_node
&& (TREE_TYPE (val)
- != dfloat64_type_node
- && (TREE_TYPE (val)
+ != dfloat64_type_node
+ && (TREE_TYPE (val)
!= dfloat32_type_node)))
|| (type == dfloat64_type_node
&& (TREE_TYPE (val)
result = tree_cons (NULL_TREE, parmval, result);
}
else if (TREE_CODE (TREE_TYPE (val)) == REAL_TYPE
- && (TYPE_PRECISION (TREE_TYPE (val))
- < TYPE_PRECISION (double_type_node))
+ && (TYPE_PRECISION (TREE_TYPE (val))
+ < TYPE_PRECISION (double_type_node))
&& !DECIMAL_FLOAT_MODE_P (TYPE_MODE (TREE_TYPE (val))))
/* Convert `float' to `double'. */
result = tree_cons (NULL_TREE, convert (double_type_node, val), result);
- else if ((invalid_func_diag =
- targetm.calls.invalid_arg_for_unprototyped_fn (typelist, fundecl, val)))
+ else if ((invalid_func_diag =
+ targetm.calls.invalid_arg_for_unprototyped_fn (typelist, fundecl, val)))
{
error (invalid_func_diag);
- return error_mark_node;
+ return error_mark_node;
}
else
/* Convert `short' and `char' to full-size `int'. */
&& typecode != INTEGER_TYPE && typecode != REAL_TYPE)
{
if (code == PREINCREMENT_EXPR || code == POSTINCREMENT_EXPR)
- error ("wrong type argument to increment");
- else
- error ("wrong type argument to decrement");
+ error ("wrong type argument to increment");
+ else
+ error ("wrong type argument to decrement");
return error_mark_node;
}
else if ((pedantic || warn_pointer_arith)
&& (TREE_CODE (TREE_TYPE (result_type)) == FUNCTION_TYPE
|| TREE_CODE (TREE_TYPE (result_type)) == VOID_TYPE))
- {
+ {
if (code == PREINCREMENT_EXPR || code == POSTINCREMENT_EXPR)
pedwarn ("wrong type argument to increment");
else
argtype = TREE_TYPE (arg);
/* If the lvalue is const or volatile, merge that into the type
- to which the address will point. Note that you can't get a
+ to which the address will point. Note that you can't get a
restricted pointer by taking the address of something, so we
only have to deal with `const' and `volatile' here. */
if ((DECL_P (arg) || REFERENCE_CLASS_P (arg))
if (argtype == 0)
argtype = TREE_TYPE (arg);
return require_constant_value ? fold_build1_initializer (code, argtype, arg)
- : fold_build1 (code, argtype, arg);
+ : fold_build1 (code, argtype, arg);
}
/* Return nonzero if REF is an lvalue valid for this language.
ensures that all the format strings are checked at compile
time. */
#define READONLY_MSG(A, I, D, AS) (use == lv_assign ? (A) \
- : (use == lv_increment ? (I) \
+ : (use == lv_increment ? (I) \
: (use == lv_decrement ? (D) : (AS))))
if (TREE_CODE (arg) == COMPONENT_REF)
{
result_type = TYPE_MAIN_VARIANT (type1);
}
else if ((code1 == INTEGER_TYPE || code1 == REAL_TYPE
- || code1 == COMPLEX_TYPE)
- && (code2 == INTEGER_TYPE || code2 == REAL_TYPE
- || code2 == COMPLEX_TYPE))
+ || code1 == COMPLEX_TYPE)
+ && (code2 == INTEGER_TYPE || code2 == REAL_TYPE
+ || code2 == COMPLEX_TYPE))
{
result_type = c_common_type (type1, type2);
if (!TREE_SIDE_EFFECTS (expr1))
{
/* The left-hand operand of a comma expression is like an expression
- statement: with -Wextra or -Wunused, we should warn if it doesn't have
+ statement: with -Wextra or -Wunused, we should warn if it doesn't have
any side-effects, unless it was explicitly cast to (void). */
if (warn_unused_value)
{
if (TREE_CODE (type) == INTEGER_TYPE
&& TREE_CODE (otype) == POINTER_TYPE
&& TYPE_PRECISION (type) != TYPE_PRECISION (otype))
- /* Unlike conversion of integers to pointers, where the
- warning is disabled for converting constants because
- of cases such as SIG_*, warn about converting constant
- pointers to integers. In some cases it may cause unwanted
+ /* Unlike conversion of integers to pointers, where the
+ warning is disabled for converting constants because
+ of cases such as SIG_*, warn about converting constant
+ pointers to integers. In some cases it may cause unwanted
sign extension, and a warning is appropriate. */
warning (OPT_Wpointer_to_int_cast,
"cast from pointer to integer of different size");
return build_c_cast (type, expr);
}
-
\f
/* Build an assignment expression of lvalue LHS from value RHS.
MODIFYCODE is the code for a binary operator that we use
}
/* Some types can interconvert without explicit casts. */
else if (codel == VECTOR_TYPE && coder == VECTOR_TYPE
- && vector_types_convertible_p (type, TREE_TYPE (rhs)))
+ && vector_types_convertible_p (type, TREE_TYPE (rhs)))
return convert (type, rhs);
/* Arithmetic types all interconvert, and enum is treated like int. */
else if ((codel == INTEGER_TYPE || codel == REAL_TYPE
mvr = TYPE_MAIN_VARIANT (mvr);
/* Opaque pointers are treated like void pointers. */
is_opaque_pointer = (targetm.vector_opaque_p (type)
- || targetm.vector_opaque_p (rhstype))
- && TREE_CODE (ttl) == VECTOR_TYPE
- && TREE_CODE (ttr) == VECTOR_TYPE;
-
+ || targetm.vector_opaque_p (rhstype))
+ && TREE_CODE (ttl) == VECTOR_TYPE
+ && TREE_CODE (ttr) == VECTOR_TYPE;
+
/* C++ does not allow the implicit conversion void* -> T*. However,
- for the purpose of reducing the number of false positives, we
- tolerate the special case of
+ for the purpose of reducing the number of false positives, we
+ tolerate the special case of
- int *p = NULL;
+ int *p = NULL;
- where NULL is typically defined in C to be '(void *) 0'. */
+ where NULL is typically defined in C to be '(void *) 0'. */
if (VOID_TYPE_P (ttr) && rhs != null_pointer_node && !VOID_TYPE_P (ttl))
- warning (OPT_Wc___compat, "request for implicit conversion from "
- "%qT to %qT not permitted in C++", rhstype, type);
+ warning (OPT_Wc___compat, "request for implicit conversion from "
+ "%qT to %qT not permitted in C++", rhstype, type);
/* Check if the right-hand side has a format attribute but the
left-hand side doesn't. */
if (warn_missing_format_attribute
&& check_missing_format_attribute (type, rhstype))
- {
+ {
switch (errtype)
{
case ic_argpass:
gcc_unreachable ();
}
}
-
+
/* Any non-function converts to a [const][volatile] void *
and vice versa; otherwise, targets must be the same.
Meanwhile, the lhs target must have all the qualifiers of the rhs. */
else
{
gcc_assert (!TYPE_SIZE (constructor_type));
-
+
if (constructor_depth > 2)
error_init ("initialization of flexible array member in a nested context");
else if (pedantic)
else if (bitpos == HOST_BITS_PER_WIDE_INT)
{
if (val[1] < 0)
- val[0] = -1;
+ val[0] = -1;
}
else if (val[0] & (((HOST_WIDE_INT) 1)
<< (bitpos - 1 - HOST_BITS_PER_WIDE_INT)))
&& integer_zerop (constructor_unfilled_index))
{
if (constructor_stack->replacement_value.value)
- error_init ("excess elements in char array initializer");
+ error_init ("excess elements in char array initializer");
constructor_stack->replacement_value = value;
return;
}
{
/* For a record, keep track of end position of last field. */
if (DECL_SIZE (constructor_fields))
- constructor_bit_index
+ constructor_bit_index
= size_binop (PLUS_EXPR,
- bit_position (constructor_fields),
- DECL_SIZE (constructor_fields));
+ bit_position (constructor_fields),
+ DECL_SIZE (constructor_fields));
/* If the current field was the first one not yet written out,
it isn't now, so update. */
{
tree elttype = TYPE_MAIN_VARIANT (TREE_TYPE (constructor_type));
- /* Do a basic check of initializer size. Note that vectors
- always have a fixed size derived from their type. */
+ /* Do a basic check of initializer size. Note that vectors
+ always have a fixed size derived from their type. */
if (tree_int_cst_lt (constructor_max_index, constructor_index))
{
pedwarn_init ("excess elements in vector initializer");
output = error_mark_node;
}
else
- output = error_mark_node;
+ output = error_mark_node;
TREE_VALUE (tail) = output;
}
inner = TREE_OPERAND (inner, 0);
while (REFERENCE_CLASS_P (inner)
- && TREE_CODE (inner) != INDIRECT_REF)
+ && TREE_CODE (inner) != INDIRECT_REF)
inner = TREE_OPERAND (inner, 0);
if (DECL_P (inner)
else
{
tree top = build1 (LABEL_EXPR, void_type_node, NULL_TREE);
-
+
/* If we have an exit condition, then we build an IF with gotos either
- out of the loop, or to the top of it. If there's no exit condition,
- then we just build a jump back to the top. */
+ out of the loop, or to the top of it. If there's no exit condition,
+ then we just build a jump back to the top. */
exit = build_and_jump (&LABEL_EXPR_LABEL (top));
-
+
if (cond && !integer_nonzerop (cond))
- {
- /* Canonicalize the loop condition to the end. This means
- generating a branch to the loop condition. Reuse the
- continue label, if possible. */
- if (cond_is_first)
- {
- if (incr || !clab)
- {
- entry = build1 (LABEL_EXPR, void_type_node, NULL_TREE);
- t = build_and_jump (&LABEL_EXPR_LABEL (entry));
- }
- else
- t = build1 (GOTO_EXPR, void_type_node, clab);
+ {
+ /* Canonicalize the loop condition to the end. This means
+ generating a branch to the loop condition. Reuse the
+ continue label, if possible. */
+ if (cond_is_first)
+ {
+ if (incr || !clab)
+ {
+ entry = build1 (LABEL_EXPR, void_type_node, NULL_TREE);
+ t = build_and_jump (&LABEL_EXPR_LABEL (entry));
+ }
+ else
+ t = build1 (GOTO_EXPR, void_type_node, clab);
SET_EXPR_LOCATION (t, start_locus);
- add_stmt (t);
- }
-
+ add_stmt (t);
+ }
+
t = build_and_jump (&blab);
- exit = fold_build3 (COND_EXPR, void_type_node, cond, exit, t);
+ exit = fold_build3 (COND_EXPR, void_type_node, cond, exit, t);
if (cond_is_first)
- SET_EXPR_LOCATION (exit, start_locus);
+ SET_EXPR_LOCATION (exit, start_locus);
else
- SET_EXPR_LOCATION (exit, input_location);
- }
-
+ SET_EXPR_LOCATION (exit, input_location);
+ }
+
add_stmt (top);
}
-
+
if (body)
add_stmt (body);
if (clab)
if (is_break)
error ("break statement not within loop or switch");
else
- error ("continue statement not within a loop");
+ error ("continue statement not within a loop");
return NULL_TREE;
case 1:
else if (code0 == POINTER_TYPE && null_pointer_constant_p (orig_op1))
{
if (TREE_CODE (op0) == ADDR_EXPR
- && DECL_P (TREE_OPERAND (op0, 0))
+ && DECL_P (TREE_OPERAND (op0, 0))
&& !DECL_WEAK (TREE_OPERAND (op0, 0)))
warning (OPT_Walways_true, "the address of %qD will never be NULL",
TREE_OPERAND (op0, 0));
}
else if (code1 == POINTER_TYPE && null_pointer_constant_p (orig_op0))
{
- if (TREE_CODE (op1) == ADDR_EXPR
+ if (TREE_CODE (op1) == ADDR_EXPR
&& DECL_P (TREE_OPERAND (op1, 0))
&& !DECL_WEAK (TREE_OPERAND (op1, 0)))
warning (OPT_Walways_true, "the address of %qD will never be NULL",
all the values of the unsigned type. */
if (!TYPE_UNSIGNED (result_type))
/* OK */;
- /* Do not warn if both operands are the same signedness. */
- else if (op0_signed == op1_signed)
- /* OK */;
+ /* Do not warn if both operands are the same signedness. */
+ else if (op0_signed == op1_signed)
+ /* OK */;
else
{
tree sop, uop;
else
return expr;
}
-
\f
/* Like c_begin_compound_stmt, except force the retention of the BLOCK. */
; the terms of the GNU General Public License as published by the Free
; Software Foundation; either version 2, or (at your option) any later
; version.
-;
+;
; GCC is distributed in the hope that it will be useful, but WITHOUT ANY
; WARRANTY; without even the implied warranty of MERCHANTABILITY or
; FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
; for more details.
-;
+;
; You should have received a copy of the GNU General Public License
; along with GCC; see the file COPYING. If not, write to the Free
; Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
-ftabstop=<number> Distance between tab stops for column reporting
ftemplate-depth-
-C++ ObjC++ Joined RejectNegative UInteger
+C++ ObjC++ Joined RejectNegative UInteger
-ftemplate-depth-<number> Specify maximum template instantiation depth
fthis-is-variable
for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
for (mode = 0 ; mode < MAX_MACHINE_MODE; mode++)
if (HARD_REGNO_MODE_OK (i, mode))
- {
+ {
int ok;
/* Update the register number and modes of the register
reg_restore_code[i][mode] = recog_memoized (restinsn);
/* Now extract both insns and see if we can meet their
- constraints. */
+ constraints. */
ok = (reg_save_code[i][mode] != -1
&& reg_restore_code[i][mode] != -1);
if (ok)
reg_save_code[i][mode] = -1;
reg_restore_code[i][mode] = -1;
}
- }
+ }
else
{
reg_save_code[i][mode] = -1;
registers from the live sets, and observe REG_UNUSED notes. */
COPY_REG_SET (&new->live_throughout, &chain->live_throughout);
/* Registers that are set in CHAIN->INSN live in the new insn.
- (Unless there is a REG_UNUSED note for them, but we don't
+ (Unless there is a REG_UNUSED note for them, but we don't
look for them here.) */
note_stores (PATTERN (chain->insn), add_stored_regs,
&new->live_throughout);
if (ecf_flags & ECF_RETURNS_TWICE)
{
REG_NOTES (call_insn) = gen_rtx_EXPR_LIST (REG_SETJMP, const0_rtx,
- REG_NOTES (call_insn));
+ REG_NOTES (call_insn));
current_function_calls_setjmp = 1;
}
/* Exclude functions not at the file scope, or not `extern',
since they are not the magic functions we would otherwise
think they are.
- FIXME: this should be handled with attributes, not with this
- hacky imitation of DECL_ASSEMBLER_NAME. It's (also) wrong
- because you can declare fork() inside a function if you
- wish. */
+ FIXME: this should be handled with attributes, not with this
+ hacky imitation of DECL_ASSEMBLER_NAME. It's (also) wrong
+ because you can declare fork() inside a function if you
+ wish. */
&& (DECL_CONTEXT (fndecl) == NULL_TREE
|| TREE_CODE (DECL_CONTEXT (fndecl)) == TRANSLATION_UNIT_DECL)
&& TREE_PUBLIC (fndecl))
into a sibcall. */
|| !targetm.function_ok_for_sibcall (fndecl, exp)
/* Functions that do not return exactly once may not be sibcall
- optimized. */
+ optimized. */
|| (flags & (ECF_RETURNS_TWICE | ECF_NORETURN))
|| TYPE_VOLATILE (TREE_TYPE (TREE_TYPE (addr)))
/* If the called function is nested in the current one, it might access
- some of the caller's arguments, but could clobber them beforehand if
- the argument areas are shared. */
+ some of the caller's arguments, but could clobber them beforehand if
+ the argument areas are shared. */
|| (fndecl && decl_function_context (fndecl) == current_function_decl)
/* If this function requires more stack slots than the current
function, we cannot change it into a sibling call.
old_stack_allocated = stack_pointer_delta - pending_stack_adjust;
/* The argument block when performing a sibling call is the
- incoming argument block. */
+ incoming argument block. */
if (pass == 0)
{
argblock = virtual_incoming_args_rtx;
rtx insn;
bool failed = valreg == 0 || GET_CODE (valreg) == PARALLEL;
- insns = get_insns ();
+ insns = get_insns ();
/* Expansion of block moves possibly introduced a loop that may
not appear inside libcall block. */
int unsignedp = TYPE_UNSIGNED (type);
int offset = 0;
enum machine_mode pmode;
-
+
pmode = promote_mode (type, TYPE_MODE (type), &unsignedp, 1);
/* If we don't promote as expected, something is wrong. */
gcc_assert (GET_MODE (target) == pmode);
-
+
if ((WORDS_BIG_ENDIAN || BYTES_BIG_ENDIAN)
&& (GET_MODE_SIZE (GET_MODE (target))
> GET_MODE_SIZE (TYPE_MODE (type))))
tree type = TREE_TYPE (TREE_VALUE (p));
if (type && TREE_CODE (type) == COMPLEX_TYPE
&& targetm.calls.split_complex_arg (type))
- goto found;
+ goto found;
}
return values;
tree type = TREE_VALUE (p);
if (TREE_CODE (type) == COMPLEX_TYPE
&& targetm.calls.split_complex_arg (type))
- goto found;
+ goto found;
}
return types;
if (mem_value && struct_value == 0 && ! pcc_struct_value)
{
rtx addr = XEXP (mem_value, 0);
-
+
nargs++;
/* Make sure it is a reasonable operand for a move or push insn. */
locate_and_pad_parm (Pmode, NULL_TREE,
#ifdef STACK_PARMS_IN_REG_PARM_AREA
- 1,
+ 1,
#else
argvec[count].reg != 0,
#endif
needed = 0;
/* We must be careful to use virtual regs before they're instantiated,
- and real regs afterwards. Loop optimization, for example, can create
+ and real regs afterwards. Loop optimization, for example, can create
new libcalls after we've instantiated the virtual regs, and if we
use virtuals anyway, they won't match the rtl patterns. */
{
argvec[argnum].save_area
= assign_stack_temp (BLKmode,
- argvec[argnum].locate.size.constant,
+ argvec[argnum].locate.size.constant,
0);
emit_block_move (validize_mem (argvec[argnum].save_area),
- stack_area,
+ stack_area,
GEN_INT (argvec[argnum].locate.size.constant),
BLOCK_OP_CALL_PARM);
}
auto-increment causes confusion. So we merely indicate
that we access something with a known mode somewhere on
the stack. */
- use = gen_rtx_PLUS (Pmode, virtual_outgoing_args_rtx,
+ use = gen_rtx_PLUS (Pmode, virtual_outgoing_args_rtx,
gen_rtx_SCRATCH (Pmode));
use = gen_rtx_MEM (argvec[argnum].mode, use);
use = gen_rtx_USE (VOIDmode, use);
if (save_mode == BLKmode)
emit_block_move (stack_area,
- validize_mem (argvec[count].save_area),
+ validize_mem (argvec[count].save_area),
GEN_INT (argvec[count].locate.size.constant),
BLOCK_OP_CALL_PARM);
else
/* Being passed entirely in a register. We shouldn't be called in
this case. */
gcc_assert (reg == 0 || partial != 0);
-
+
/* If this arg needs special alignment, don't load the registers
here. */
if (arg->n_aligned_regs != 0)
else
/* Check whether AUX data are still allocated. */
gcc_assert (!first_block_aux_obj);
-
+
first_block_aux_obj = obstack_alloc (&block_aux_obstack, 0);
if (size)
{
/* An edge originally destinating BB of FREQUENCY and COUNT has been proved to
leave the block by TAKEN_EDGE. Update profile of BB such that edge E can be
- redirected to destination of TAKEN_EDGE.
+ redirected to destination of TAKEN_EDGE.
This function may leave the profile inconsistent in the case TAKEN_EDGE
frequency or count is believed to be lower than FREQUENCY or COUNT
by NUM/DEN, in gcov_type arithmetic. More accurate than previous
function but considerably slower. */
void
-scale_bbs_frequencies_gcov_type (basic_block *bbs, int nbbs, gcov_type num,
- gcov_type den)
+scale_bbs_frequencies_gcov_type (basic_block *bbs, int nbbs, gcov_type num,
+ gcov_type den)
{
int i;
edge e;
edge_iterator ei;
lbb = st[--sp];
if (reverse)
- {
+ {
FOR_EACH_EDGE (e, ei, lbb->preds)
if (!VISITED_P (e->src) && predicate (e->src, data))
{
- gcc_assert (tv != rslt_max);
- rslt[tv++] = st[sp++] = e->src;
- MARK_VISITED (e->src);
+ gcc_assert (tv != rslt_max);
+ rslt[tv++] = st[sp++] = e->src;
+ MARK_VISITED (e->src);
}
- }
+ }
else
- {
+ {
FOR_EACH_EDGE (e, ei, lbb->succs)
if (!VISITED_P (e->dest) && predicate (e->dest, data))
{
- gcc_assert (tv != rslt_max);
- rslt[tv++] = st[sp++] = e->dest;
- MARK_VISITED (e->dest);
+ gcc_assert (tv != rslt_max);
+ rslt[tv++] = st[sp++] = e->dest;
+ MARK_VISITED (e->dest);
}
}
}
/* Compute dominance frontiers, ala Harvey, Ferrante, et al.
-
+
This algorithm can be found in Timothy Harvey's PhD thesis, at
http://www.cs.rice.edu/~harv/dissertation.pdf in the section on iterative
dominance algorithms.
First, we identify each join point, j (any node with more than one
- incoming edge is a join point).
+ incoming edge is a join point).
We then examine each predecessor, p, of j and walk up the dominator tree
- starting at p.
-
+ starting at p.
+
We stop the walk when we reach j's immediate dominator - j is in the
dominance frontier of each of the nodes in the walk, except for j's
immediate dominator. Intuitively, all of the rest of j's dominators are
shared by j's predecessors as well.
Since they dominate j, they will not have j in their dominance frontiers.
- The number of nodes touched by this algorithm is equal to the size
+ The number of nodes touched by this algorithm is equal to the size
of the dominance frontiers, no more, no less.
*/
basic_block domsb;
if (runner == ENTRY_BLOCK_PTR)
continue;
-
+
domsb = get_immediate_dominator (CDI_DOMINATORS, b);
while (runner != domsb)
{
- bitmap_set_bit (frontiers[runner->index],
+ bitmap_set_bit (frontiers[runner->index],
b->index);
runner = get_immediate_dominator (CDI_DOMINATORS,
runner);
}
}
}
-}
-
+}
+
void
compute_dominance_frontiers (bitmap *frontiers)
Available functionality:
- CFG construction
- find_basic_blocks */
+ find_basic_blocks */
\f
#include "config.h"
#include "system.h"
case BARRIER:
/* It is nonsense to reach barrier when looking for the
- end of basic block, but before dead code is eliminated
- this may happen. */
+ end of basic block, but before dead code is eliminated
+ this may happen. */
return false;
default:
for (insn = f; insn; insn = NEXT_INSN (insn))
{
/* Code labels and barriers causes current basic block to be
- terminated at previous real insn. */
+ terminated at previous real insn. */
if ((LABEL_P (insn) || BARRIER_P (insn))
&& saw_insn)
count++, saw_insn = false;
for (ei = ei_start (bb->succs); (e = ei_safe_edge (ei)); )
{
if (FULL_STATE (e->dest) & BLOCK_USED_BY_TABLEJUMP)
- SET_STATE (e->dest, FULL_STATE (e->dest)
- & ~(size_t) BLOCK_USED_BY_TABLEJUMP);
+ SET_STATE (e->dest, FULL_STATE (e->dest)
+ & ~(size_t) BLOCK_USED_BY_TABLEJUMP);
else if (!(e->flags & (EDGE_ABNORMAL | EDGE_EH)))
- {
- remove_edge (e);
- continue;
- }
+ {
+ remove_edge (e);
+ continue;
+ }
ei_next (&ei);
}
}
#include "expr.h"
#define FORWARDER_BLOCK_P(BB) ((BB)->flags & BB_FORWARDER_BLOCK)
-
+
/* Set to true when we are running first pass of try_optimize_cfg loop. */
static bool first_pass;
static bool try_crossjump_to_edge (int, edge, edge);
/* If we are partitioning hot/cold basic blocks, we don't want to
mess up unconditional or indirect jumps that cross between hot
- and cold sections.
+ and cold sections.
Basic block partitioning may result in some jumps that appear to
- be optimizable (or blocks that appear to be mergeable), but which really
- must be left untouched (they are required to make it safely across
- partition boundaries). See the comments at the top of
+ be optimizable (or blocks that appear to be mergeable), but which really
+ must be left untouched (they are required to make it safely across
+ partition boundaries). See the comments at the top of
bb-reorder.c:partition_hot_cold_basic_blocks for complete details. */
if (BB_PARTITION (jump_block) != BB_PARTITION (jump_dest_block)
switch (GET_CODE (exp))
{
/* In case we do clobber the register, mark it as equal, as we know the
- value is dead so it don't have to match. */
+ value is dead so it don't have to match. */
case CLOBBER:
if (REG_P (XEXP (exp, 0)))
{
/* If we are partitioning hot/cold basic blocks, we don't want to
mess up unconditional or indirect jumps that cross between hot
- and cold sections.
-
+ and cold sections.
+
Basic block partitioning may result in some jumps that appear to
be optimizable (or blocks that appear to be mergeable), but which really m
- ust be left untouched (they are required to make it safely across
- partition boundaries). See the comments at the top of
+ ust be left untouched (they are required to make it safely across
+ partition boundaries). See the comments at the top of
bb-reorder.c:partition_hot_cold_basic_blocks for complete details. */
if (find_reg_note (BB_END (b), REG_CROSSING_JUMP, NULL_RTX))
/* Skip complex edges because we don't know how to update them.
- Still handle fallthru edges, as we can succeed to forward fallthru
- edge to the same place as the branch edge of conditional branch
- and turn conditional branch to an unconditional branch. */
+ Still handle fallthru edges, as we can succeed to forward fallthru
+ edge to the same place as the branch edge of conditional branch
+ and turn conditional branch to an unconditional branch. */
if (e->flags & EDGE_COMPLEX)
{
ei_next (&ei);
up jumps that cross between hot/cold sections.
Basic block partitioning may result in some jumps that appear
- to be optimizable (or blocks that appear to be mergeable), but which
- really must be left untouched (they are required to make it safely
+ to be optimizable (or blocks that appear to be mergeable), but which
+ really must be left untouched (they are required to make it safely
across partition boundaries). See the comments at the top of
bb-reorder.c:partition_hot_cold_basic_blocks for complete
details. */
may_thread |= target->flags & BB_DIRTY;
if (FORWARDER_BLOCK_P (target)
- && !(single_succ_edge (target)->flags & EDGE_CROSSING)
+ && !(single_succ_edge (target)->flags & EDGE_CROSSING)
&& single_succ (target) != EXIT_BLOCK_PTR)
{
/* Bypass trivial infinite loops. */
/* If we are partitioning hot/cold basic blocks, we don't want to
mess up unconditional or indirect jumps that cross between hot
and cold sections.
-
+
Basic block partitioning may result in some jumps that appear to
- be optimizable (or blocks that appear to be mergeable), but which really
- must be left untouched (they are required to make it safely across
- partition boundaries). See the comments at the top of
+ be optimizable (or blocks that appear to be mergeable), but which really
+ must be left untouched (they are required to make it safely across
+ partition boundaries). See the comments at the top of
bb-reorder.c:partition_hot_cold_basic_blocks for complete details. */
if (BB_PARTITION (a) != BB_PARTITION (b))
/* If we are partitioning hot/cold basic blocks, we don't want to
mess up unconditional or indirect jumps that cross between hot
- and cold sections.
-
+ and cold sections.
+
Basic block partitioning may result in some jumps that appear to
- be optimizable (or blocks that appear to be mergeable), but which really
- must be left untouched (they are required to make it safely across
- partition boundaries). See the comments at the top of
+ be optimizable (or blocks that appear to be mergeable), but which really
+ must be left untouched (they are required to make it safely across
+ partition boundaries). See the comments at the top of
bb-reorder.c:partition_hot_cold_basic_blocks for complete details. */
if (BB_PARTITION (a) != BB_PARTITION (b))
necessary. */
only_notes = squeeze_notes (&BB_HEAD (b), &BB_END (b));
gcc_assert (!only_notes);
-
+
/* Scramble the insn chain. */
reorder_insns_nobb (BB_HEAD (b), BB_END (b), BB_END (a));
/* If we are partitioning hot/cold basic blocks, we don't want to
mess up unconditional or indirect jumps that cross between hot
- and cold sections.
-
+ and cold sections.
+
Basic block partitioning may result in some jumps that appear to
- be optimizable (or blocks that appear to be mergeable), but which really
- must be left untouched (they are required to make it safely across
- partition boundaries). See the comments at the top of
+ be optimizable (or blocks that appear to be mergeable), but which really
+ must be left untouched (they are required to make it safely across
+ partition boundaries). See the comments at the top of
bb-reorder.c:partition_hot_cold_basic_blocks for complete details. */
if (BB_PARTITION (b) != BB_PARTITION (c))
return NULL;
-
-
+
+
/* If B has a fallthru edge to C, no need to move anything. */
if (e->flags & EDGE_FALLTHRU)
edge_iterator ei;
/* Avoid overactive code motion, as the forwarder blocks should be
- eliminated by edge redirection instead. One exception might have
+ eliminated by edge redirection instead. One exception might have
been if B is a forwarder block and C has no fallthru edge, but
that should be cleaned up by bb-reorder instead. */
if (FORWARDER_BLOCK_P (b) || FORWARDER_BLOCK_P (c))
if (! c_has_outgoing_fallthru)
{
merge_blocks_move_successor_nojumps (b, c);
- return next == ENTRY_BLOCK_PTR ? next->next_bb : next;
+ return next == ENTRY_BLOCK_PTR ? next->next_bb : next;
}
/* If B does not have an incoming fallthru, then it can be moved
MEM_ATTRS (y) = 0;
else if (! MEM_ATTRS (y))
MEM_ATTRS (x) = 0;
- else
+ else
{
rtx mem_size;
set_mem_alias_set (x, 0);
set_mem_alias_set (y, 0);
}
-
+
if (! mem_expr_equal_p (MEM_EXPR (x), MEM_EXPR (y)))
{
set_mem_expr (x, 0);
set_mem_offset (x, 0);
set_mem_offset (y, 0);
}
-
+
if (!MEM_SIZE (x))
mem_size = NULL_RTX;
else if (!MEM_SIZE (y))
set_mem_align (y, MEM_ALIGN (x));
}
}
-
+
fmt = GET_RTX_FORMAT (code);
for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
{
if (CALL_P (i1)
&& (!rtx_equal_p (CALL_INSN_FUNCTION_USAGE (i1),
- CALL_INSN_FUNCTION_USAGE (i2))
+ CALL_INSN_FUNCTION_USAGE (i2))
|| SIBLING_CALL_P (i1) != SIBLING_CALL_P (i2)))
return false;
if ((mode & CLEANUP_POST_REGSTACK) && stack_regs_mentioned (i1))
{
/* If register stack conversion has already been done, then
- death notes must also be compared before it is certain that
- the two instruction streams match. */
+ death notes must also be compared before it is certain that
+ the two instruction streams match. */
rtx note;
HARD_REG_SET i1_regset, i2_regset;
f2 = FALLTHRU_EDGE (bb2);
/* Get around possible forwarders on fallthru edges. Other cases
- should be optimized out already. */
+ should be optimized out already. */
if (FORWARDER_BLOCK_P (f1->dest))
f1 = single_succ_edge (f1->dest);
FOR_EACH_EDGE (e1, ei, bb1->succs)
{
e2 = EDGE_SUCC (bb2, ei.index);
-
+
if (e1->flags & EDGE_EH)
nehedges1++;
newpos1 = newpos2 = NULL_RTX;
/* If we have partitioned hot/cold basic blocks, it is a bad idea
- to try this optimization.
+ to try this optimization.
Basic block partitioning may result in some jumps that appear to
- be optimizable (or blocks that appear to be mergeable), but which really
- must be left untouched (they are required to make it safely across
- partition boundaries). See the comments at the top of
+ be optimizable (or blocks that appear to be mergeable), but which really
+ must be left untouched (they are required to make it safely across
+ partition boundaries). See the comments at the top of
bb-reorder.c:partition_hot_cold_basic_blocks for complete details. */
if (flag_reorder_blocks_and_partition && no_new_pseudos)
s->count += s2->count;
/* Take care to update possible forwarder blocks. We verified
- that there is no more than one in the chain, so we can't run
- into infinite loop. */
+ that there is no more than one in the chain, so we can't run
+ into infinite loop. */
if (FORWARDER_BLOCK_P (s->dest))
{
single_succ_edge (s->dest)->count += s2->count;
/* If we are partitioning hot/cold basic blocks, we don't want to
mess up unconditional or indirect jumps that cross between hot
- and cold sections.
-
+ and cold sections.
+
Basic block partitioning may result in some jumps that appear to
- be optimizable (or blocks that appear to be mergeable), but which really
- must be left untouched (they are required to make it safely across
- partition boundaries). See the comments at the top of
+ be optimizable (or blocks that appear to be mergeable), but which really
+ must be left untouched (they are required to make it safely across
+ partition boundaries). See the comments at the top of
bb-reorder.c:partition_hot_cold_basic_blocks for complete details. */
- if (BB_PARTITION (EDGE_PRED (bb, 0)->src) !=
- BB_PARTITION (EDGE_PRED (bb, 1)->src)
+ if (BB_PARTITION (EDGE_PRED (bb, 0)->src) !=
+ BB_PARTITION (EDGE_PRED (bb, 1)->src)
|| (EDGE_PRED (bb, 0)->flags & EDGE_CROSSING))
return false;
FOR_EACH_EDGE (e, ei, bb->preds)
{
if (e->flags & EDGE_FALLTHRU)
- fallthru = e;
+ fallthru = e;
}
changed = false;
does not fit merge_blocks interface and is kept here in
hope that it will become useless once more of compiler
is transformed to use cfg_layout mode. */
-
+
if ((mode & CLEANUP_CFGLAYOUT)
&& can_merge_blocks_p (b, c))
{
PROP_DEATH_NOTES
| PROP_SCAN_DEAD_CODE
| PROP_KILL_DEAD_CODE
- | ((mode & CLEANUP_LOG_LINKS)
+ | ((mode & CLEANUP_LOG_LINKS)
? PROP_LOG_LINKS : 0)))
break;
}
struct tree_opt_pass pass_jump =
{
"sibling", /* name */
- NULL, /* gate */
- rest_of_handle_jump, /* execute */
+ NULL, /* gate */
+ rest_of_handle_jump, /* execute */
NULL, /* sub */
NULL, /* next */
0, /* static_pass_number */
if (dump_file)
dump_flow_info (dump_file, dump_flags);
cleanup_cfg ((optimize ? CLEANUP_EXPENSIVE : 0)
- | (flag_thread_jumps ? CLEANUP_THREADING : 0));
+ | (flag_thread_jumps ? CLEANUP_THREADING : 0));
purge_line_number_notes ();
struct tree_opt_pass pass_jump2 =
{
"jump", /* name */
- NULL, /* gate */
- rest_of_handle_jump2, /* execute */
+ NULL, /* gate */
+ rest_of_handle_jump2, /* execute */
NULL, /* sub */
NULL, /* next */
0, /* static_pass_number */
{
HOST_WIDE_INT align;
rtx x;
-
+
/* If this fails, we've overflowed the stack frame. Error nicely? */
gcc_assert (offset == trunc_int_for_mode (offset, Pmode));
if (DECL_RTL (stack_vars[i].decl) != pc_rtx)
continue;
- /* Check the predicate to see whether this variable should be
+ /* Check the predicate to see whether this variable should be
allocated in this pass. */
if (pred && !pred (stack_vars[i].decl))
continue;
SET_DECL_RTL (var, x);
}
-/* A subroutine of expand_one_var. VAR is a variable that will be
+/* A subroutine of expand_one_var. VAR is a variable that will be
allocated to the local stack frame. Return true if we wish to
add VAR to STACK_VARS so that it will be coalesced with other
variables. Return false to allocate VAR immediately.
/* Without optimization, *most* variables are allocated from the
stack, which makes the quadratic problem large exactly when we
- want compilation to proceed as quickly as possible. On the
+ want compilation to proceed as quickly as possible. On the
other hand, we don't want the function's stack frame size to
get completely out of hand. So we avoid adding scalars and
"small" aggregates to the list at all. */
if (stack_vars_num > 0)
{
/* Due to the way alias sets work, no variables with non-conflicting
- alias sets may be assigned the same address. Add conflicts to
+ alias sets may be assigned the same address. Add conflicts to
reflect this. */
add_alias_set_conflicts ();
- /* If stack protection is enabled, we don't share space between
+ /* If stack protection is enabled, we don't share space between
vulnerable data and non-vulnerable data. */
if (flag_stack_protect)
add_stack_protection_conflicts ();
- /* Now that we have collected all stack variables, and have computed a
+ /* Now that we have collected all stack variables, and have computed a
minimal interference graph, attempt to save some stack space. */
partition_stack_vars ();
if (dump_file)
{
/* Reorder decls to be protected by iterating over the variables
array multiple times, and allocating out of each phase in turn. */
- /* ??? We could probably integrate this into the qsort we did
+ /* ??? We could probably integrate this into the qsort we did
earlier, such that we naturally see these variables first,
and thus naturally allocate things in the right order. */
if (has_protected_decls)
update_bb_for_insn (new_bb);
maybe_dump_rtl_for_tree_stmt (stmt, last2);
-
+
if (EXPR_LOCUS (else_exp))
emit_line_note (*(EXPR_LOCUS (else_exp)));
e->dest->count -= e->count;
e->dest->frequency -= EDGE_FREQUENCY (e);
if (e->dest->count < 0)
- e->dest->count = 0;
+ e->dest->count = 0;
if (e->dest->frequency < 0)
- e->dest->frequency = 0;
+ e->dest->frequency = 0;
}
count += e->count;
probability += e->probability;
expand_expr_stmt (stmt);
/* Java emits line number notes in the top of labels.
- ??? Make this go away once line number notes are obsoleted. */
+ ??? Make this go away once line number notes are obsoleted. */
BB_HEAD (bb) = NEXT_INSN (last);
if (NOTE_P (BB_HEAD (bb)))
BB_HEAD (bb) = NEXT_INSN (BB_HEAD (bb));
e->flags &= ~EDGE_EXECUTABLE;
/* At the moment not all abnormal edges match the RTL representation.
- It is safe to remove them here as find_many_sub_basic_blocks will
- rediscover them. In the future we should get this fixed properly. */
+ It is safe to remove them here as find_many_sub_basic_blocks will
+ rediscover them. In the future we should get this fixed properly. */
if (e->flags & EDGE_ABNORMAL)
remove_edge (e);
else
FOR_EACH_EDGE (e2, ei, EXIT_BLOCK_PTR->preds)
if (e2 != e)
{
- e->count -= e2->count;
+ e->count -= e2->count;
exit_block->count -= e2->count;
exit_block->frequency -= EDGE_FREQUENCY (e2);
}
update_bb_for_insn (exit_block);
}
-/* Helper function for discover_nonconstant_array_refs.
+/* Helper function for discover_nonconstant_array_refs.
Look for ARRAY_REF nodes with non-constant indexes and mark them
addressable. */
/* If we're emitting a nested function, make sure its parent gets
emitted as well. Doing otherwise confuses debug info. */
- {
+ {
tree parent;
for (parent = DECL_CONTEXT (current_function_decl);
- parent != NULL_TREE;
- parent = get_containing_scope (parent))
+ parent != NULL_TREE;
+ parent = get_containing_scope (parent))
if (TREE_CODE (parent) == FUNCTION_DECL)
- TREE_SYMBOL_REFERENCED (DECL_ASSEMBLER_NAME (parent)) = 1;
+ TREE_SYMBOL_REFERENCED (DECL_ASSEMBLER_NAME (parent)) = 1;
}
-
+
/* We are now committed to emitting code for this function. Do any
preparation, such as emitting abstract debug info for the inline
before it gets mangled by optimization. */
struct tree_opt_pass pass_expand =
{
- "expand", /* name */
+ "expand", /* name */
NULL, /* gate */
- tree_expand_cfg, /* execute */
+ tree_expand_cfg, /* execute */
NULL, /* sub */
NULL, /* next */
0, /* static_pass_number */
- TV_EXPAND, /* tv_id */
+ TV_EXPAND, /* tv_id */
/* ??? If TER is enabled, we actually receive GENERIC. */
PROP_gimple_leh | PROP_cfg, /* properties_required */
PROP_rtl, /* properties_provided */
if (bb->count < 0)
{
error ("verify_flow_info: Wrong count of block %i %i",
- bb->index, (int)bb->count);
+ bb->index, (int)bb->count);
err = 1;
}
if (bb->frequency < 0)
{
error ("verify_flow_info: Wrong frequency of block %i %i",
- bb->index, bb->frequency);
+ bb->index, bb->frequency);
err = 1;
}
FOR_EACH_EDGE (e, ei, bb->succs)
edge e;
edge_iterator ei;
char *s_indent;
-
+
s_indent = alloca ((size_t) indent + 1);
memset (s_indent, ' ', (size_t) indent);
s_indent[indent] = '\0';
/* Return 1 if BB ends with a call, possibly followed by some
instructions that must stay with the call, 0 otherwise. */
-bool
+bool
block_ends_with_call_p (basic_block bb)
{
if (!cfg_hooks->block_ends_with_call_p)
/* Return 1 if BB ends with a conditional branch, 0 otherwise. */
-bool
+bool
block_ends_with_condjump_p (basic_block bb)
{
if (!cfg_hooks->block_ends_with_condjump_p)
flow_call_edges_add (sbitmap blocks)
{
if (!cfg_hooks->flow_call_edges_add)
- internal_error ("%s does not support flow_call_edges_add",
+ internal_error ("%s does not support flow_call_edges_add",
cfg_hooks->name);
return (cfg_hooks->flow_call_edges_add) (blocks);
cfg_hooks->execute_on_shrinking_pred (e);
}
-/* This is used inside loop versioning when we want to insert
- stmts/insns on the edges, which have a different behavior
+/* This is used inside loop versioning when we want to insert
+ stmts/insns on the edges, which have a different behavior
in tree's and in RTL, so we made a CFG hook. */
void
lv_flush_pending_stmts (edge e)
unsigned int *n_to_remove, int flags)
{
gcc_assert (cfg_hooks->cfg_hook_duplicate_loop_to_header_edge);
- return cfg_hooks->cfg_hook_duplicate_loop_to_header_edge (loop, e, loops,
+ return cfg_hooks->cfg_hook_duplicate_loop_to_header_edge (loop, e, loops,
ndupl, wont_exit,
orig, to_remove,
n_to_remove, flags);
{
gcc_assert (cfg_hooks->lv_add_condition_to_bb);
cfg_hooks->lv_add_condition_to_bb (first, second, new, cond);
-}
+}
/* Add condition to new basic block and update CFG used in loop
versioning. */
void (*lv_add_condition_to_bb) (basic_block, basic_block, basic_block,
- void *);
+ void *);
/* Update the PHI nodes in case of loop versioning. */
void (*lv_adjust_loop_header_phi) (basic_block, basic_block,
basic_block, edge);
-
+
/* Given a condition BB extract the true/false taken/not taken edges
(depending if we are on tree's or RTL). */
void (*extract_cond_bb_edges) (basic_block, edge *, edge *);
-
+
/* Add PHI arguments queued in PENDING_STMT list on edge E to edge
E->dest (only in tree-ssa loop versioning). */
void (*flush_pending_stmts) (edge);
if (NEXT_INSN (insn)
&& JUMP_P (NEXT_INSN (insn))
&& (GET_CODE (PATTERN (NEXT_INSN (insn))) == ADDR_VEC
- || GET_CODE (PATTERN (NEXT_INSN (insn))) == ADDR_DIFF_VEC))
+ || GET_CODE (PATTERN (NEXT_INSN (insn))) == ADDR_DIFF_VEC))
{
insn = NEXT_INSN (insn);
last_insn = insn;
continue;
/* No basic blocks at all? */
gcc_assert (insn);
-
+
if (PREV_INSN (insn))
cfg_layout_function_header =
unlink_insn_chain (get_insns (), PREV_INSN (insn));
for (insn = get_insns (); insn; insn = next)
{
int active = 0;
-
+
next = NEXT_INSN (insn);
if (NOTE_P (insn))
active = (active_insn_p (insn)
&& GET_CODE (PATTERN (insn)) != ADDR_VEC
&& GET_CODE (PATTERN (insn)) != ADDR_DIFF_VEC);
-
+
check_block_change (insn, &block);
if (active
struct tree_opt_pass pass_insn_locators_initialize =
{
"locators", /* name */
- NULL, /* gate */
- insn_locators_initialize, /* execute */
+ NULL, /* gate */
+ insn_locators_initialize, /* execute */
NULL, /* sub */
NULL, /* next */
0, /* static_pass_number */
this_block = insn_scope (insn);
/* For sequences compute scope resulting from merging all scopes
- of instructions nested inside. */
+ of instructions nested inside. */
if (GET_CODE (PATTERN (insn)) == SEQUENCE)
{
int i;
{
/* If the old fallthru is still next, nothing to do. */
if (bb->aux == e_fall->dest
- || e_fall->dest == EXIT_BLOCK_PTR)
+ || e_fall->dest == EXIT_BLOCK_PTR)
continue;
/* The degenerated case of conditional jump jumping to the next
bb->aux = nb;
/* Don't process this new block. */
bb = nb;
-
+
/* Make sure new bb is tagged for correct section (same as
fall-thru source, since you cannot fall-through across
section boundaries). */
switch (NOTE_LINE_NUMBER (insn))
{
/* In case prologue is empty and function contain label
- in first BB, we may want to copy the block. */
+ in first BB, we may want to copy the block. */
case NOTE_INSN_PROLOGUE_END:
case NOTE_INSN_DELETED:
case NOTE_INSN_EPILOGUE_BEG:
case NOTE_INSN_FUNCTION_END:
/* Debug code expects these notes to exist just once.
- Keep them in the master copy.
- ??? It probably makes more sense to duplicate them for each
- epilogue copy. */
+ Keep them in the master copy.
+ ??? It probably makes more sense to duplicate them for each
+ epilogue copy. */
case NOTE_INSN_FUNCTION_BEG:
/* There is always just single entry to function. */
case NOTE_INSN_BASIC_BLOCK:
/* All other notes should have already been eliminated.
*/
gcc_assert (NOTE_LINE_NUMBER (insn) >= 0);
-
+
/* It is possible that no_line_number is set and the note
- won't be emitted. */
+ won't be emitted. */
emit_note_copy (insn);
}
break;
Created copies of N_EDGES edges in array EDGES are stored in array NEW_EDGES,
also in the same order.
-
+
Newly created basic blocks are put after the basic block AFTER in the
instruction stream, and the order of the blocks in BBS array is preserved. */
{
edge e;
edge_iterator ei;
-
+
if (!bitmap_bit_p (visited, bb->index))
- {
- /* This basic block is now visited */
- bitmap_set_bit (visited, bb->index);
- blocks[i++] = bb;
- }
-
+ {
+ /* This basic block is now visited */
+ bitmap_set_bit (visited, bb->index);
+ blocks[i++] = bb;
+ }
+
FOR_EACH_EDGE (e, ei, bb->succs)
- {
- if (flow_bb_inside_loop_p (loop, e->dest))
- {
- if (!bitmap_bit_p (visited, e->dest->index))
- {
- bitmap_set_bit (visited, e->dest->index);
- blocks[i++] = e->dest;
- }
- }
- }
-
+ {
+ if (flow_bb_inside_loop_p (loop, e->dest))
+ {
+ if (!bitmap_bit_p (visited, e->dest->index))
+ {
+ bitmap_set_bit (visited, e->dest->index);
+ blocks[i++] = e->dest;
+ }
+ }
+ }
+
gcc_assert (i >= vc);
-
+
bb = blocks[vc++];
}
-
+
BITMAP_FREE (visited);
return blocks;
}
for (i = 0; i < loops->num; i++)
{
if (!loops->parray[i])
- continue;
+ continue;
if (loops->parray[i]->num_nodes != sizes[i])
{
If first_special is true, the value in the first iteration is
delta + mult * base
-
+
If extend = UNKNOWN, first_special must be false, delta 0, mult 1 and value is
subreg_{mode} (base + i * step)
{
if (qt)
qt[tick] = v;
- g->vertices[v].post = tick++;
+ g->vertices[v].post = tick++;
if (!top)
break;
for parts of cycles that only "pass" through some loop -- i.e. for
each cycle, we want to mark blocks that belong directly to innermost
loop containing the whole cycle.
-
+
LOOPS is the loop tree. */
#define LOOP_REPR(LOOP) ((LOOP)->num + last_basic_block)
FOR_BB_BETWEEN (act, ENTRY_BLOCK_PTR, EXIT_BLOCK_PTR, next_bb)
FOR_EACH_EDGE (e, ei, act->succs)
{
- /* Ignore edges to exit. */
- if (e->dest == EXIT_BLOCK_PTR)
+ /* Ignore edges to exit. */
+ if (e->dest == EXIT_BLOCK_PTR)
continue;
/* And latch edges. */
count_in += e->count;
if (count_in == 0)
- expected = count_latch * 2;
+ expected = count_latch * 2;
else
- expected = (count_latch + count_in - 1) / count_in;
+ expected = (count_latch + count_in - 1) / count_in;
/* Avoid overflows. */
return (expected > REG_BR_PROB_BASE ? REG_BR_PROB_BASE : expected);
target_res_regs = 3;
/* These are really just heuristic values. */
-
+
start_sequence ();
emit_move_insn (reg1, reg2);
seq = get_insns ();
{
basic_block bb;
edge e;
-
+
if (loops->num <= 1)
return;
Returns newly created loop. */
struct loop *
-loopify (struct loops *loops, edge latch_edge, edge header_edge,
+loopify (struct loops *loops, edge latch_edge, edge header_edge,
basic_block switch_bb, edge true_edge, edge false_edge,
bool redirect_all_edges)
{
if (redirect_all_edges)
{
loop_redirect_edge (header_edge, switch_bb);
- loop_redirect_edge (false_edge, loop->header);
-
+ loop_redirect_edge (false_edge, loop->header);
+
/* Update dominators. */
set_immediate_dominator (CDI_DOMINATORS, switch_bb, pred_bb);
set_immediate_dominator (CDI_DOMINATORS, loop->header, switch_bb);
{
outer = loop->outer;
if (!fix_loop_placement (loop))
- break;
+ break;
/* Changing the placement of a loop in the loop tree may alter the
validity of condition 2) of the description of fix_bb_placement
edge snd;
gcc_assert (EDGE_COUNT (src->succs) > 1);
-
+
/* Cannot handle more than two exit edges. */
if (EDGE_COUNT (src->succs) > 2)
return false;
return false;
single_succ_edge (src)->flags &= ~EDGE_IRREDUCIBLE_LOOP;
single_succ_edge (src)->flags |= irr;
-
+
return true;
}
ret = can_copy_bbs_p (bbs, loop->num_nodes);
free (bbs);
-
+
return ret;
}
: prob_pass_thru;
/* Complete peeling is special as the probability of exit in last
- copy becomes 1. */
+ copy becomes 1. */
if (flags & DLTHE_FLAG_COMPLETTE_PEEL)
{
int wanted_freq = EDGE_FREQUENCY (e);
/* Now simulate the duplication adjustments and compute header
frequency of the last copy. */
for (i = 0; i < ndupl; i++)
- wanted_freq = RDIV (wanted_freq * scale_step[i], REG_BR_PROB_BASE);
+ wanted_freq = RDIV (wanted_freq * scale_step[i], REG_BR_PROB_BASE);
scale_main = RDIV (wanted_freq * REG_BR_PROB_BASE, freq_in);
}
else if (is_latch)
}
free (new_bbs);
free (orig_loops);
-
+
/* Update the original loop. */
if (!is_latch)
set_immediate_dominator (CDI_DOMINATORS, e->dest, e->src);
continue;
dom_bb = nearest_common_dominator (
CDI_DOMINATORS, first_active[i], first_active_latch);
- set_immediate_dominator (CDI_DOMINATORS, dominated, dom_bb);
+ set_immediate_dominator (CDI_DOMINATORS, dominated, dom_bb);
}
free (dom_bbs);
}
Split it and insert new conditional expression and adjust edges.
--- edge e ---> [cond expr] ---> [first_head]
- |
- +---------> [second_head]
+ |
+ +---------> [second_head]
*/
static basic_block
}
/* Main entry point for Loop Versioning transformation.
-
+
This transformation given a condition and a loop, creates
-if (condition) { loop_copy1 } else { loop_copy2 },
where loop_copy1 is the loop transformed in one way, and loop_copy2
instruction stream, otherwise it is placed before LOOP. */
struct loop *
-loop_version (struct loops *loops, struct loop * loop,
+loop_version (struct loops *loops, struct loop * loop,
void *cond_expr, basic_block *condition_bb,
bool place_after)
{
entry = loop_preheader_edge (loop);
irred_flag = entry->flags & EDGE_IRREDUCIBLE_LOOP;
entry->flags &= ~EDGE_IRREDUCIBLE_LOOP;
-
+
/* Note down head of loop as first_head. */
first_head = entry->dest;
/* Duplicate loop. */
if (!cfg_hook_duplicate_loop_to_header_edge (loop, entry, loops, 1,
- NULL, NULL, NULL, NULL, 0))
+ NULL, NULL, NULL, NULL, 0))
return NULL;
/* After duplication entry edge now points to new loop head block.
}
latch_edge = single_succ_edge (get_bb_copy (loop->latch));
-
+
extract_cond_bb_edges (cond_bb, &true_edge, &false_edge);
nloop = loopify (loops,
latch_edge,
if (exit)
nloop->single_exit = find_edge (get_bb_copy (exit->src), exit->dest);
- /* loopify redirected latch_edge. Update its PENDING_STMTS. */
+ /* loopify redirected latch_edge. Update its PENDING_STMTS. */
lv_flush_pending_stmts (latch_edge);
- /* loopify redirected condition_bb's succ edge. Update its PENDING_STMTS. */
+ /* loopify redirected condition_bb's succ edge. Update its PENDING_STMTS. */
extract_cond_bb_edges (cond_bb, &true_edge, &false_edge);
lv_flush_pending_stmts (false_edge);
/* Adjust irreducible flag. */
free (bbs);
}
- /* At this point condition_bb is loop predheader with two successors,
- first_head and second_head. Make sure that loop predheader has only
+ /* At this point condition_bb is loop preheader with two successors,
+ first_head and second_head. Make sure that loop preheader has only
one successor. */
loop_split_edge_with (loop_preheader_edge (loop), NULL);
loop_split_edge_with (loop_preheader_edge (nloop), NULL);
to be correct). But still for the remaining loops the header dominates
the latch, and loops did not get new subloops (new loops might possibly
get created, but we are not interested in them). Fix up the mess.
-
+
If CHANGED_BBS is not NULL, basic blocks whose loop has changed are
marked in it. */
}
/* Remove the dead loops from structures. */
- loops->tree_root->num_nodes = n_basic_blocks;
+ loops->tree_root->num_nodes = n_basic_blocks;
for (i = 1; i < loops->num; i++)
{
loop = loops->parray[i];
if (LABEL_P (insn))
{
/* Some labels can't be directly removed from the INSN chain, as they
- might be references via variables, constant pool etc.
- Convert them to the special NOTE_INSN_DELETED_LABEL note. */
+ might be references via variables, constant pool etc.
+ Convert them to the special NOTE_INSN_DELETED_LABEL note. */
if (! can_delete_label_p (insn))
{
const char *name = LABEL_NAME (insn);
rtx
entry_of_function (void)
{
- return (n_basic_blocks > NUM_FIXED_BLOCKS ?
+ return (n_basic_blocks > NUM_FIXED_BLOCKS ?
BB_HEAD (ENTRY_BLOCK_PTR->next_bb) : get_insns ());
}
/* This might have been an EH label that no longer has incoming
EH edges. Update data structures to match. */
maybe_remove_eh_handler (b_head);
-
+
/* Detect basic blocks with nothing but a label. This can happen
in particular at the end of a function. */
if (b_head == b_end)
and cold sections.
Basic block partitioning may result in some jumps that appear to
- be optimizable (or blocks that appear to be mergeable), but which really
- must be left untouched (they are required to make it safely across
- partition boundaries). See the comments at the top of
+ be optimizable (or blocks that appear to be mergeable), but which really
+ must be left untouched (they are required to make it safely across
+ partition boundaries). See the comments at the top of
bb-reorder.c:partition_hot_cold_basic_blocks for complete details. */
if (BB_PARTITION (a) != BB_PARTITION (b))
and cold sections.
Basic block partitioning may result in some jumps that appear to
- be optimizable (or blocks that appear to be mergeable), but which really
- must be left untouched (they are required to make it safely across
- partition boundaries). See the comments at the top of
+ be optimizable (or blocks that appear to be mergeable), but which really
+ must be left untouched (they are required to make it safely across
+ partition boundaries). See the comments at the top of
bb-reorder.c:partition_hot_cold_basic_blocks for complete details. */
-
+
if (find_reg_note (insn, REG_CROSSING_JUMP, NULL_RTX)
|| BB_PARTITION (src) != BB_PARTITION (target))
return NULL;
{
rtx insn = src->il.rtl->footer;
- delete_insn_chain (kill_from, BB_END (src));
+ delete_insn_chain (kill_from, BB_END (src));
/* Remove barriers but keep jumptables. */
while (insn)
}
}
else
- delete_insn_chain (kill_from, PREV_INSN (BB_HEAD (target)));
+ delete_insn_chain (kill_from, PREV_INSN (BB_HEAD (target)));
}
/* If this already is simplejump, redirect it. */
redirected = redirect_jump (BB_END (e->src), block_label (target), 0);
gcc_assert (redirected);
-
+
note = find_reg_note (BB_END (e->src), REG_BR_PROB, NULL_RTX);
if (note)
{
edge tmp;
edge_iterator ei;
bool found = false;
-
+
basic_block bb = create_basic_block (BB_HEAD (e->dest), NULL, ENTRY_BLOCK_PTR);
-
+
/* Change the existing edge's source to be the new block, and add
a new edge from the entry block to the new block. */
e->src = bb;
else
ei_next (&ei);
}
-
+
gcc_assert (found);
-
+
VEC_safe_push (edge, gc, bb->succs, e);
make_single_succ_edge (ENTRY_BLOCK_PTR, bb, EDGE_FALLTHRU);
}
NULL_RTX,
REG_NOTES
(BB_END
- (jump_block)));
-
+ (jump_block)));
+
/* Wire edge in. */
new_edge = make_edge (e->src, jump_block, EDGE_FALLTHRU);
new_edge->probability = e->probability;
if (!before && !after)
{
/* Figure out where to put these things. If the destination has
- one predecessor, insert there. Except for the exit block. */
+ one predecessor, insert there. Except for the exit block. */
if (single_pred_p (e->dest) && e->dest != EXIT_BLOCK_PTR)
{
bb = e->dest;
}
/* If the source has one successor and the edge is not abnormal,
- insert there. Except for the entry block. */
+ insert there. Except for the entry block. */
else if ((e->flags & EDGE_ABNORMAL) == 0
&& single_succ_p (e->src)
&& e->src != ENTRY_BLOCK_PTR)
if (JUMP_P (BB_END (bb))
&& !any_condjump_p (BB_END (bb))
- && (single_succ_edge (bb)->flags & EDGE_CROSSING))
- REG_NOTES (BB_END (bb)) = gen_rtx_EXPR_LIST
+ && (single_succ_edge (bb)->flags & EDGE_CROSSING))
+ REG_NOTES (BB_END (bb)) = gen_rtx_EXPR_LIST
(REG_CROSSING_JUMP, NULL_RTX, REG_NOTES (BB_END (bb)));
}
}
if (returnjump_p (last))
{
/* ??? Remove all outgoing edges from BB and add one for EXIT.
- This is not currently a problem because this only happens
- for the (single) epilogue, which already has a fallthru edge
- to EXIT. */
+ This is not currently a problem because this only happens
+ for the (single) epilogue, which already has a fallthru edge
+ to EXIT. */
e = single_succ_edge (bb);
gcc_assert (e->dest == EXIT_BLOCK_PTR
FOR_EACH_BB (bb)
if (bb->aux)
{
- SET_BIT (blocks, bb->index);
+ SET_BIT (blocks, bb->index);
/* Check for forgotten bb->aux values before commit_edge_insertions
call. */
gcc_assert (bb->aux == &bb->aux);
FOR_EACH_BB (bb)
if (bb->aux)
{
- SET_BIT (blocks, bb->index);
+ SET_BIT (blocks, bb->index);
/* Check for forgotten bb->aux values before commit_edge_insertions
call. */
gcc_assert (bb->aux == &bb->aux);
|| (BB_PARTITION (e->src) != BB_PARTITION (e->dest)
&& e->src != ENTRY_BLOCK_PTR
&& e->dest != EXIT_BLOCK_PTR))
- {
+ {
error ("fallthru edge crosses section boundary (bb %i)",
e->src->index);
err = 1;
}
/* OK pointers are correct. Now check the header of basic
- block. It ought to contain optional CODE_LABEL followed
+ block. It ought to contain optional CODE_LABEL followed
by NOTE_BASIC_BLOCK. */
x = BB_HEAD (bb);
if (LABEL_P (x))
}
else if (e->src != ENTRY_BLOCK_PTR
&& e->dest != EXIT_BLOCK_PTR)
- {
+ {
rtx insn;
if (e->src->next_bb != e->dest)
fatal_insn ("wrong insn in the fallthru edge", insn);
err = 1;
}
- }
+ }
}
num_bb_notes = 0;
BB_END (src)))
{
edge redirected;
-
+
if (dump_file)
fprintf (dump_file, "Fallthru edge unified with branch "
"%i->%i redirected to %i\n",
redirected = redirect_branch_edge (e, dest);
gcc_assert (redirected);
e->flags |= EDGE_FALLTHRU;
- e->src->flags |= BB_DIRTY;
+ e->src->flags |= BB_DIRTY;
return e;
}
/* In case we are redirecting fallthru edge to the branch edge
- of conditional jump, remove it. */
+ of conditional jump, remove it. */
if (EDGE_COUNT (src->succs) == 2)
{
/* Find the edge that is different from E. */
and cold sections.
Basic block partitioning may result in some jumps that appear to
- be optimizable (or blocks that appear to be mergeable), but which really
- must be left untouched (they are required to make it safely across
- partition boundaries). See the comments at the top of
+ be optimizable (or blocks that appear to be mergeable), but which really
+ must be left untouched (they are required to make it safely across
+ partition boundaries). See the comments at the top of
bb-reorder.c:partition_hot_cold_basic_blocks for complete details. */
if (BB_PARTITION (a) != BB_PARTITION (b))
/* This might have been an EH label that no longer has incoming
EH edges. Update data structures to match. */
maybe_remove_eh_handler (BB_HEAD (b));
-
+
delete_insn (BB_HEAD (b));
}
rtx split_at_insn = insn;
/* Don't split the block between a call and an insn that should
- remain in the same block as the call. */
+ remain in the same block as the call. */
if (CALL_P (insn))
while (split_at_insn != BB_END (bb)
&& keep_with_call_p (NEXT_INSN (split_at_insn)))
split_at_insn = NEXT_INSN (split_at_insn);
/* The handling above of the final block before the epilogue
- should be enough to verify that there is no edge to the exit
+ should be enough to verify that there is no edge to the exit
block in CFG already. Calling make_edge in such case would
cause us to mark that edge as fake and remove it later. */
in trees, and this should be of the same type since it is a hook. */
static void
rtl_lv_add_condition_to_bb (basic_block first_head ,
- basic_block second_head ATTRIBUTE_UNUSED,
- basic_block cond_bb, void *comp_rtx)
+ basic_block second_head ATTRIBUTE_UNUSED,
+ basic_block cond_bb, void *comp_rtx)
{
rtx label, seq, jump;
rtx op0 = XEXP ((rtx)comp_rtx, 0);
NULL, /* lv_add_condition_to_bb */
NULL, /* lv_adjust_loop_header_phi*/
NULL, /* extract_cond_bb_edges */
- NULL /* flush_pending_stmts */
+ NULL /* flush_pending_stmts */
};
/* Implementation of CFG manipulation for cfg layout RTL, where
rtl_lv_add_condition_to_bb, /* lv_add_condition_to_bb */
NULL, /* lv_adjust_loop_header_phi*/
rtl_extract_cond_bb_edges, /* extract_cond_bb_edges */
- NULL /* flush_pending_stmts */
+ NULL /* flush_pending_stmts */
};
02110-1301, USA. */
/* This file contains basic routines manipulating call graph and variable pool
-
+
The callgraph:
The call-graph is data structure designed for intra-procedural optimization
not change once the declaration is inserted into the call-graph.
The call-graph nodes are created lazily using cgraph_node function when
called for unknown declaration.
-
+
When built, there is one edge for each direct call. It is possible that
the reference will be later optimized out. The call-graph is built
conservatively in order to make conservative data flow analysis possible.
Each inlined call gets a unique corresponding clone node of the callee
and the data structure is updated while inlining is performed, so
the clones are eliminated and their callee edges redirected to the
- caller.
+ caller.
Each edge has "inline_failed" field. When the field is set to NULL,
the call will be inlined. When it is non-NULL it contains a reason
struct cgraph_node *n;
/* Make the next clone be the master clone */
- for (n = new_node; n; n = n->next_clone)
+ for (n = new_node; n; n = n->next_clone)
n->master_clone = new_node;
-
+
*slot = new_node;
node->next_clone->prev_clone = NULL;
}
else
{
- htab_clear_slot (cgraph_hash, slot);
+ htab_clear_slot (cgraph_hash, slot);
kill_body = true;
}
}
{
node->prev_clone->next_clone = node->next_clone;
if (node->next_clone)
- node->next_clone->prev_clone = node->prev_clone;
+ node->next_clone->prev_clone = node->prev_clone;
}
- /* While all the clones are removed after being proceeded, the function
+ /* While all the clones are removed after being proceeded, the function
itself is kept in the cgraph even after it is compiled. Check whether
we are done with this body and reclaim it proactively if this is the case.
*/
cgraph_local_info (tree decl)
{
struct cgraph_node *node;
-
+
gcc_assert (TREE_CODE (decl) == FUNCTION_DECL);
node = cgraph_node (decl);
return &node->local;
cgraph_global_info (tree decl)
{
struct cgraph_node *node;
-
+
gcc_assert (TREE_CODE (decl) == FUNCTION_DECL && cgraph_global_info_ready);
node = cgraph_node (decl);
return &node->global;
cgraph_rtl_info (tree decl)
{
struct cgraph_node *node;
-
+
gcc_assert (TREE_CODE (decl) == FUNCTION_DECL);
node = cgraph_node (decl);
if (decl != current_function_decl
}
/* Names used to print out the availability enum. */
-static const char * const availability_names[] =
+static const char * const availability_names[] =
{"unset", "not_available", "overwrittable", "available", "local"};
/* Dump given cgraph node. */
cgraph_node_name (node->global.inlined_to),
node->global.inlined_to->uid);
if (cgraph_function_flags_ready)
- fprintf (f, " availability:%s",
+ fprintf (f, " availability:%s",
availability_names [cgraph_function_body_availability (node)]);
if (node->master_clone && node->master_clone->uid != node->uid)
fprintf (f, "(%i)", node->master_clone->uid);
if (!cgraph_varpool_hash)
cgraph_varpool_hash = htab_create_ggc (10, hash_varpool_node,
- eq_varpool_node, NULL);
+ eq_varpool_node, NULL);
key.decl = decl;
slot = (struct cgraph_varpool_node **)
htab_find_slot (cgraph_varpool_hash, &key, INSERT);
cgraph_varpool_finalize_decl (tree decl)
{
struct cgraph_varpool_node *node = cgraph_varpool_node (decl);
-
+
/* The first declaration of a variable that comes through this function
decides whether it is global (in C, has external linkage)
or local (in C, has internal linkage). So do nothing more
struct cgraph_edge *new;
new = cgraph_create_edge (n, e->callee, call_stmt,
- e->count * count_scale / REG_BR_PROB_BASE,
- e->loop_nest + loop_nest);
+ e->count * count_scale / REG_BR_PROB_BASE,
+ e->loop_nest + loop_nest);
new->inline_failed = e->inline_failed;
if (update_original)
}
/* Create node representing clone of N executed COUNT times. Decrease
- the execution counts from original node too.
+ the execution counts from original node too.
When UPDATE_ORIGINAL is true, the counts are subtracted from the original
function's profile to reflect the fact that part of execution is handled
cgraph_master_clone (struct cgraph_node *n)
{
enum availability avail = cgraph_function_body_availability (n);
-
+
if (avail == AVAIL_NOT_AVAILABLE || avail == AVAIL_OVERWRITABLE)
return NULL;
- if (!n->master_clone)
+ if (!n->master_clone)
n->master_clone = cgraph_node (n->decl);
-
+
return n->master_clone;
}
document the requirement of both versions of function (extern
inline and offline) having same side effect characteristics as
good optimization is what this optimization is about. */
-
+
else if (!(*targetm.binds_local_p) (node->decl)
&& !DECL_COMDAT (node->decl) && !DECL_EXTERNAL (node->decl))
avail = AVAIL_OVERWRITABLE;
/* Pointer to a single unique cgraph node for this function. If the
function is to be output, this is the copy that will survive. */
struct cgraph_node *master_clone;
-
+
PTR GTY ((skip)) aux;
struct cgraph_local_info local;
struct cgraph_global_info global;
struct cgraph_rtl_info rtl;
-
+
/* Expected number of executions: calculated in profile.c. */
gcov_type count;
/* Unique id of the node. */
void cgraph_node_remove_callees (struct cgraph_node *node);
struct cgraph_edge *cgraph_create_edge (struct cgraph_node *,
struct cgraph_node *,
- tree, gcov_type, int);
+ tree, gcov_type, int);
struct cgraph_node *cgraph_node (tree);
struct cgraph_node *cgraph_node_for_asm (tree asmname);
struct cgraph_edge *cgraph_edge (struct cgraph_node *, tree);
struct cgraph_rtl_info *cgraph_rtl_info (tree);
const char * cgraph_node_name (struct cgraph_node *);
struct cgraph_edge * cgraph_clone_edge (struct cgraph_edge *,
- struct cgraph_node *,
- tree, gcov_type, int, bool);
+ struct cgraph_node *,
+ tree, gcov_type, int, bool);
struct cgraph_node * cgraph_clone_node (struct cgraph_node *, gcov_type,
int, bool);
void cgraph_reset_static_var_maps (void);
void init_cgraph (void);
struct cgraph_node *cgraph_function_versioning (struct cgraph_node *,
- VEC(cgraph_edge_p,heap)*,
+ VEC(cgraph_edge_p,heap)*,
varray_type);
void cgraph_analyze_function (struct cgraph_node *);
struct cgraph_node *save_inline_function_body (struct cgraph_node *);
return true;
/* Externally visible functions must be output. The exception is
- COMDAT functions that must be output only when they are needed.
+ COMDAT functions that must be output only when they are needed.
When not optimizing, also output the static functions. (see
PR25962), but don't do so for always_inline functions.
|| (!node->local.disregard_inline_limits
/* When declared inline, defer even the uninlinable functions.
This allows them to be eliminated when unused. */
- && !DECL_DECLARED_INLINE_P (decl)
+ && !DECL_DECLARED_INLINE_P (decl)
&& (!node->local.inlinable || !cgraph_default_inline_p (node, NULL))))
return true;
if (DECL_INITIAL (decl))
{
visited_nodes = pointer_set_create ();
- walk_tree (&DECL_INITIAL (decl), record_reference, NULL, visited_nodes);
+ walk_tree (&DECL_INITIAL (decl), record_reference, NULL, visited_nodes);
pointer_set_destroy (visited_nodes);
visited_nodes = NULL;
}
&& TREE_SYMBOL_REFERENCED (DECL_ASSEMBLER_NAME (decl)))
|| node->force_output
|| decide_is_variable_needed (node, decl)
- /* ??? Cgraph does not yet rule the world with an iron hand,
+ /* ??? Cgraph does not yet rule the world with an iron hand,
and does not control the emission of debug information.
After a variable has its DECL_RTL set, we must assume that
it may be referenced by the debug information, and we can
/* If node->output is set, then this is a unit-at-a-time compilation
and we have already begun whole-unit analysis. This is *not*
testing for whether we've already emitted the function. That
- case can be sort-of legitimately seen with real function
+ case can be sort-of legitimately seen with real function
redefinition errors. I would argue that the front end should
never present us with such a case, but don't enforce that for now. */
gcc_assert (!node->output);
{
cgraph_varpool_mark_needed_node (cgraph_varpool_node (t));
if (lang_hooks.callgraph.analyze_expr)
- return lang_hooks.callgraph.analyze_expr (tp, walk_subtrees,
+ return lang_hooks.callgraph.analyze_expr (tp, walk_subtrees,
data);
}
break;
tree step;
visited_nodes = pointer_set_create ();
- /* Reach the trees by walking over the CFG, and note the
+ /* Reach the trees by walking over the CFG, and note the
enclosing basic-blocks in the call edges. */
FOR_EACH_BB_FN (bb, this_cfun)
for (bsi = bsi_start (bb); !bsi_end_p (bsi); bsi_next (&bsi))
walk_tree (&TREE_OPERAND (stmt, 0),
record_reference, node, visited_nodes);
}
- else
+ else
walk_tree (bsi_stmt_ptr (bsi), record_reference, node, visited_nodes);
}
else if (TREE_CODE (decl) == VAR_DECL && DECL_INITIAL (decl))
walk_tree (&DECL_INITIAL (decl), record_reference, node, visited_nodes);
}
-
+
pointer_set_destroy (visited_nodes);
visited_nodes = NULL;
}
error ("node not found in cgraph_hash");
error_found = true;
}
-
+
if (node->analyzed
&& DECL_SAVED_TREE (node->decl) && !TREE_ASM_WRITTEN (node->decl)
&& (!DECL_EXTERNAL (node->decl) || node->global.inlined_to))
assemble_variable (decl, 0, 1, 0);
/* Local static variables are never seen by check_global_declarations
so we need to output debug info by hand. */
- if (DECL_CONTEXT (decl)
+ if (DECL_CONTEXT (decl)
&& (TREE_CODE (DECL_CONTEXT (decl)) == BLOCK
|| TREE_CODE (DECL_CONTEXT (decl)) == FUNCTION_DECL)
&& errorcount == 0 && sorrycount == 0)
if (errorcount || sorrycount)
return false;
-
+
/* EH might mark decls as needed during expansion. This should be safe since
we don't create references to new function, but it should not be used
elsewhere. */
tree decl = node->decl;
if (node->local.finalized && !DECL_SAVED_TREE (decl))
- cgraph_reset_node (node);
+ cgraph_reset_node (node);
if (!node->reachable && DECL_SAVED_TREE (decl))
{
{
tree decl = node->decl;
struct cgraph_edge *e;
-
+
gcc_assert (!node->output);
for (e = node->callers; e; e = e->next_caller)
|| DECL_EXTERNAL (decl));
}
-
+
}
}
}
/* Mark visibility of all functions.
-
+
A local function is one whose calls can occur only in the current
compilation unit and all its calls are explicit, so we can change
its calling convention. We simply mark all static functions whose
}
process_pending_assemble_externals ();
-
+
/* Frontend may output common variables after the unit has been finalized.
It is safe to deal with them here as they are always zero initialized. */
cgraph_varpool_analyze_pending_decls ();
for (node = cgraph_nodes; node; node = node->next)
if (node->analyzed
&& (node->global.inlined_to
- || DECL_SAVED_TREE (node->decl)))
+ || DECL_SAVED_TREE (node->decl)))
{
error_found = true;
dump_cgraph_node (stderr, node);
- }
+ }
if (error_found)
internal_error ("nodes with no released memory found");
}
}
/* Generate and emit a static constructor or destructor. WHICH must be
- one of 'I' or 'D'. BODY should be a STATEMENT_LIST containing
+ one of 'I' or 'D'. BODY should be a STATEMENT_LIST containing
GENERIC statements. */
void
}
else
cgraph_finalize_function (decl, 0);
-
+
if (targetm.have_ctors_dtors)
{
void (*fn) (rtx, int);
cgraph_dump_file = dump_begin (TDI_cgraph, NULL);
}
-/* The edges representing the callers of the NEW_VERSION node were
+/* The edges representing the callers of the NEW_VERSION node were
fixed by cgraph_function_versioning (), now the call_expr in their
respective tree code should be updated to call the NEW_VERSION. */
unsigned i;
gcc_assert (old_version);
-
+
new_version = cgraph_node (new_decl);
new_version->analyzed = true;
next_callee = e->next_callee;
if (e->callee == old_version)
cgraph_redirect_edge_callee (e, new_version);
-
+
if (!next_callee)
break;
}
}
/* Perform function versioning.
- Function versioning includes copying of the tree and
+ Function versioning includes copying of the tree and
a callgraph update (creating a new cgraph node and updating
its callees and callers).
/* Update the call_expr on the edges to call the new version node. */
update_call_expr (new_version_node);
- /* Update the new version's properties.
+ /* Update the new version's properties.
Make The new version visible only within this translation unit.
- ??? We cannot use COMDAT linkage because there is no
+ ??? We cannot use COMDAT linkage because there is no
ABI support for this. */
DECL_EXTERNAL (new_version_node->decl) = 0;
DECL_ONE_ONLY (new_version_node->decl) = 0;
enum pass {
PASS_FIRST, /* without constructors */
PASS_OBJ, /* individual objects */
- PASS_LIB, /* looking for shared libraries */
+ PASS_LIB, /* looking for shared libraries */
PASS_SECOND /* with constructors linked in */
};
int debug; /* true if -debug */
-static int shared_obj; /* true if -shared */
+static int shared_obj; /* true if -shared */
static const char *c_file; /* <xxx>.c for constructor/destructor list. */
static const char *o_file; /* <xxx>.o for constructor/destructor list. */
#ifdef COLLECT_EXPORT_LIST
-static const char *export_file; /* <xxx>.x for AIX export list. */
+static const char *export_file; /* <xxx>.x for AIX export list. */
#endif
const char *ldout; /* File for ld stdout. */
const char *lderrout; /* File for ld stderr. */
static const char *ldd_file_name; /* pathname of ldd (or equivalent) */
#endif
static const char *strip_file_name; /* pathname of strip */
-const char *c_file_name; /* pathname of gcc */
+const char *c_file_name; /* pathname of gcc */
static char *initname, *fininame; /* names of init and fini funcs */
static struct head constructors; /* list of constructors found */
explicitly puts an export list in command line */
case 'b':
if (arg[2] == 'E' || strncmp (&arg[2], "export", 6) == 0)
- export_flag = 1;
+ export_flag = 1;
else if (arg[2] == '6' && arg[3] == '4')
aix64_flag = 1;
else if (arg[2] == 'r' && arg[3] == 't' && arg[4] == 'l')
ld2--;
}
if (!strcmp (arg, "-dynamic-linker") && argv[1])
- {
+ {
++argv;
*ld1++ = *ld2++ = *argv;
}
}
#ifdef COLLECT_EXPORT_LIST
{
- /* Resolving full library name. */
+ /* Resolving full library name. */
const char *s = resolve_lib_name (arg+2);
/* Saving a full library name. */
else
{
/* Saving a full library name. */
- add_to_list (&libs, arg);
- }
+ add_to_list (&libs, arg);
+ }
#endif
}
}
|| id->sequence > (*id_ptr)->sequence
/* Hack: do lexical compare, too.
|| (id->sequence == (*id_ptr)->sequence
- && strcmp (id->name, (*id_ptr)->name) > 0) */
+ && strcmp (id->name, (*id_ptr)->name) > 0) */
)
{
id->next = *id_ptr;
*end = '\0';
if (access (name, R_OK) == 0)
- add_to_list (&libraries, name);
+ add_to_list (&libraries, name);
else
fatal ("unable to open dynamic dependency '%s'", buf);
# if defined (C_WEAKEXT)
# define GCC_OK_SYMBOL(X) \
(((X).n_sclass == C_EXT || (X).n_sclass == C_WEAKEXT) && \
- ((X).n_scnum > N_UNDEF) && \
- (aix64_flag \
- || (((X).n_type & N_TMASK) == (DT_NON << N_BTSHFT) \
- || ((X).n_type & N_TMASK) == (DT_FCN << N_BTSHFT))))
+ ((X).n_scnum > N_UNDEF) && \
+ (aix64_flag \
+ || (((X).n_type & N_TMASK) == (DT_NON << N_BTSHFT) \
+ || ((X).n_type & N_TMASK) == (DT_FCN << N_BTSHFT))))
# define GCC_UNDEF_SYMBOL(X) \
(((X).n_sclass == C_EXT || (X).n_sclass == C_WEAKEXT) && \
- ((X).n_scnum == N_UNDEF))
+ ((X).n_scnum == N_UNDEF))
# else
# define GCC_OK_SYMBOL(X) \
(((X).n_sclass == C_EXT) && \
- ((X).n_scnum > N_UNDEF) && \
- (aix64_flag \
- || (((X).n_type & N_TMASK) == (DT_NON << N_BTSHFT) \
- || ((X).n_type & N_TMASK) == (DT_FCN << N_BTSHFT))))
+ ((X).n_scnum > N_UNDEF) && \
+ (aix64_flag \
+ || (((X).n_type & N_TMASK) == (DT_NON << N_BTSHFT) \
+ || ((X).n_type & N_TMASK) == (DT_FCN << N_BTSHFT))))
# define GCC_UNDEF_SYMBOL(X) \
(((X).n_sclass == C_EXT) && ((X).n_scnum == N_UNDEF))
# endif
{
#endif
/* Some platforms (e.g. OSF4) declare ldopen as taking a
- non-const char * filename parameter, even though it will not
- modify that string. So we must cast away const-ness here,
- which will cause -Wcast-qual to burp. */
+ non-const char * filename parameter, even though it will not
+ modify that string. So we must cast away const-ness here,
+ which will cause -Wcast-qual to burp. */
if ((ldptr = ldopen ((char *)prog_name, ldptr)) != NULL)
{
if (! MY_ISCOFF (HEADER (ldptr).f_magic))
(1) We do not want to reinitialize at each label.
(2) It is useful, but not critical, to know the actual value assigned
- to a register. Often just its form is helpful.
+ to a register. Often just its form is helpful.
Therefore, we maintain the following fields:
truncation if we know that value already contains a truncated
value. */
- ENUM_BITFIELD(machine_mode) truncated_to_mode : 8;
+ ENUM_BITFIELD(machine_mode) truncated_to_mode : 8;
};
static struct reg_stat *reg_stat;
FOR_EACH_BB (this_basic_block)
{
for (insn = BB_HEAD (this_basic_block);
- insn != NEXT_INSN (BB_END (this_basic_block));
+ insn != NEXT_INSN (BB_END (this_basic_block));
insn = next ? next : NEXT_INSN (insn))
{
next = 0;
/* If this register is undefined at the start of the file, we can't
say what its contents were. */
&& ! REGNO_REG_SET_P
- (ENTRY_BLOCK_PTR->next_bb->il.rtl->global_live_at_start, REGNO (x))
+ (ENTRY_BLOCK_PTR->next_bb->il.rtl->global_live_at_start, REGNO (x))
&& GET_MODE_BITSIZE (GET_MODE (x)) <= HOST_BITS_PER_WIDE_INT)
{
if (set == 0 || GET_CODE (set) == CLOBBER)
for (i = XVECLEN (PATTERN (i3), 0) - 1; i >= 0; i--)
if (GET_CODE (XVECEXP (PATTERN (i3), 0, i)) == CLOBBER)
{
- /* Don't substitute for a register intended as a clobberable
+ /* Don't substitute for a register intended as a clobberable
operand. */
rtx reg = XEXP (XVECEXP (PATTERN (i3), 0, i), 0);
if (rtx_equal_p (reg, dest))
{
/* Make sure succ doesn't contain a volatile reference. */
if (succ != 0 && volatile_refs_p (PATTERN (succ)))
- return 0;
+ return 0;
for (p = NEXT_INSN (insn); p != i3; p = NEXT_INSN (p))
- if (INSN_P (p) && p != succ && volatile_refs_p (PATTERN (p)))
+ if (INSN_P (p) && p != succ && volatile_refs_p (PATTERN (p)))
return 0;
}
Consider:
- (set (reg:DI 101) (reg:DI 100))
+ (set (reg:DI 101) (reg:DI 100))
(set (subreg:SI (reg:DI 101) 0) <foo>)
This is NOT equivalent to:
- (parallel [(set (subreg:SI (reg:DI 100) 0) <foo>)
+ (parallel [(set (subreg:SI (reg:DI 100) 0) <foo>)
(set (reg:DI 101) (reg:DI 100))])
Not only does this modify 100 (in which case it might still be valid
make up a dummy I1 that is
(set Y OP)
and change I2 to be
- (set (reg:CC X) (compare:CC Y (const_int 0)))
+ (set (reg:CC X) (compare:CC Y (const_int 0)))
(We can ignore any trailing CLOBBERs.)
rtx ni2dest;
/* I3 now uses what used to be its destination and which is now
- I2's destination. This requires us to do a few adjustments. */
+ I2's destination. This requires us to do a few adjustments. */
PATTERN (i3) = newpat;
adjust_for_new_dest (i3);
/* We need a LOG_LINK from I3 to I2. But we used to have one,
- so we still will.
+ so we still will.
However, some later insn might be using I2's dest and have
a LOG_LINK pointing at I3. We must remove this link.
}
undobuf.undos = 0;
}
-
\f
/* Find the innermost point within the rtx at LOC, possibly LOC itself,
where we have an arithmetic expression and return that point. LOC will
rtx negmask = gen_int_mode (~(mask << pos), mode);
SUBST (SET_SRC (x),
simplify_gen_binary (IOR, mode,
- simplify_gen_binary (AND, mode,
+ simplify_gen_binary (AND, mode,
dest, negmask),
or_mask));
}
&& ! (GET_CODE (XEXP (x, 1)) == SUBREG
&& OBJECT_P (SUBREG_REG (XEXP (x, 1)))))))
|| (UNARY_P (x)
- && (!OBJECT_P (XEXP (x, 0))
+ && (!OBJECT_P (XEXP (x, 0))
&& ! (GET_CODE (XEXP (x, 0)) == SUBREG
&& OBJECT_P (SUBREG_REG (XEXP (x, 0)))))))
{
else if (true_rtx == const0_rtx && false_rtx == const_true_rtx
&& ((reversed = reversed_comparison_code_parts
(cond_code, cond, cop1, NULL))
- != UNKNOWN))
+ != UNKNOWN))
x = simplify_gen_relational (reversed, mode, VOIDmode,
cond, cop1);
&& true_rtx == const0_rtx
&& ((reversed = reversed_comparison_code_parts
(cond_code, cond, cop1, NULL))
- != UNKNOWN))
+ != UNKNOWN))
x = simplify_gen_unary (NEG, mode,
simplify_gen_relational (reversed,
mode, VOIDmode,
return gen_lowpart (mode, SUBREG_REG (x));
if (GET_MODE_CLASS (GET_MODE (SUBREG_REG (x))) == MODE_CC)
- break;
+ break;
{
rtx temp;
temp = simplify_subreg (mode, SUBREG_REG (x), op0_mode,
tmp = simplify_relational_operation (old_code, compare_mode, VOIDmode,
op0, op1);
if (!tmp)
- new_code = old_code;
+ new_code = old_code;
else if (!CONSTANT_P (tmp))
- {
- new_code = GET_CODE (tmp);
- op0 = XEXP (tmp, 0);
- op1 = XEXP (tmp, 1);
- }
+ {
+ new_code = GET_CODE (tmp);
+ op0 = XEXP (tmp, 0);
+ op1 = XEXP (tmp, 1);
+ }
else
{
rtx pat = PATTERN (other_insn);
{
SUBST(SET_SRC (x), op0);
src = SET_SRC (x);
- }
+ }
else
{
/* Otherwise, update the COMPARE if needed. */
+ (UNITS_PER_WORD - 1)) / UNITS_PER_WORD))
#ifndef WORD_REGISTER_OPERATIONS
&& (GET_MODE_SIZE (GET_MODE (src))
- < GET_MODE_SIZE (GET_MODE (SUBREG_REG (src))))
+ < GET_MODE_SIZE (GET_MODE (SUBREG_REG (src))))
#endif
#ifdef CANNOT_CHANGE_MODE_CLASS
&& ! (REG_P (dest) && REGNO (dest) < FIRST_PSEUDO_REGISTER
if (GET_CODE (x) == ZERO_EXTEND)
{
/* (zero_extend:DI (truncate:SI foo:DI)) is just foo:DI if we
- know that the last value didn't have any inappropriate bits
- set. */
+ know that the last value didn't have any inappropriate bits
+ set. */
if (GET_CODE (XEXP (x, 0)) == TRUNCATE
&& GET_MODE (XEXP (XEXP (x, 0), 0)) == GET_MODE (x)
&& GET_MODE_BITSIZE (GET_MODE (x)) <= HOST_BITS_PER_WIDE_INT
return SUBREG_REG (XEXP (x, 0));
/* (zero_extend:DI (truncate:SI foo:DI)) is just foo:DI when foo
- is a comparison and STORE_FLAG_VALUE permits. This is like
- the first case, but it works even when GET_MODE (x) is larger
- than HOST_WIDE_INT. */
+ is a comparison and STORE_FLAG_VALUE permits. This is like
+ the first case, but it works even when GET_MODE (x) is larger
+ than HOST_WIDE_INT. */
if (GET_CODE (XEXP (x, 0)) == TRUNCATE
&& GET_MODE (XEXP (XEXP (x, 0), 0)) == GET_MODE (x)
&& COMPARISON_P (XEXP (XEXP (x, 0), 0))
else
{
/* Be careful not to go beyond the extracted object and maintain the
- natural alignment of the memory. */
+ natural alignment of the memory. */
wanted_inner_mode = smallest_mode_for_size (len, MODE_INT);
while (pos % GET_MODE_BITSIZE (wanted_inner_mode) + len
> GET_MODE_BITSIZE (wanted_inner_mode))
{
rtx newer = force_to_mode (tem, mode, ~(HOST_WIDE_INT) 0,
0);
-
+
/* If we have something other than a SUBREG, we might have
done an expansion, so rerun ourselves. */
if (GET_CODE (newer) != SUBREG)
newer = make_compound_operation (newer, in_code);
-
+
return newer;
}
fmt = GET_RTX_FORMAT (code);
copied = false;
for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
- if (fmt[i] == 'e')
- {
- rtx op = canon_reg_for_combine (XEXP (x, i), reg);
+ if (fmt[i] == 'e')
+ {
+ rtx op = canon_reg_for_combine (XEXP (x, i), reg);
if (op != XEXP (x, i))
{
if (!copied)
x = copy_rtx (x);
}
XEXP (x, i) = op;
- }
- }
- else if (fmt[i] == 'E')
- {
- int j;
- for (j = 0; j < XVECLEN (x, i); j++)
+ }
+ }
+ else if (fmt[i] == 'E')
+ {
+ int j;
+ for (j = 0; j < XVECLEN (x, i); j++)
{
- rtx op = canon_reg_for_combine (XVECEXP (x, i, j), reg);
- if (op != XVECEXP (x, i, j))
+ rtx op = canon_reg_for_combine (XVECEXP (x, i, j), reg);
+ if (op != XVECEXP (x, i, j))
{
if (!copied)
{
x = copy_rtx (x);
}
XVECEXP (x, i, j) = op;
- }
+ }
}
}
if (GET_CODE (x) == CONST_INT)
{
if (SCALAR_INT_MODE_P (mode))
- return gen_int_mode (INTVAL (x) & mask, mode);
+ return gen_int_mode (INTVAL (x) & mask, mode);
else
{
x = GEN_INT (INTVAL (x) & mask);
*ptrue = simplify_gen_relational (code, mode, VOIDmode,
true0, true1);
*pfalse = simplify_gen_relational (code, mode, VOIDmode,
- false0, false1);
+ false0, false1);
}
else
{
if (XEXP (x, 0) != r)
{
/* We must simplify the zero_extend here, before we lose
- track of the original inner_mode. */
+ track of the original inner_mode. */
new = simplify_unary_operation (ZERO_EXTEND, GET_MODE (x),
r, inner_mode);
if (new)
expanding a bit field assignment. When we apply the distributive
law to this, we get (ior (and (A (not B))) (and (B (not B)))),
which then simplifies to (and (A (not B))).
-
+
Note that no checks happen on the validity of applying the inverse
distributive law. This is pointless since we can do it in the
few places where this routine is called.
if (reg_stat[REGNO (x)].last_set_value != 0
&& (reg_stat[REGNO (x)].last_set_mode == mode
- || (GET_MODE_CLASS (reg_stat[REGNO (x)].last_set_mode) == MODE_INT
+ || (GET_MODE_CLASS (reg_stat[REGNO (x)].last_set_mode) == MODE_INT
&& GET_MODE_CLASS (mode) == MODE_INT))
&& (reg_stat[REGNO (x)].last_set_label == label_tick
|| (REGNO (x) >= FIRST_PSEUDO_REGISTER
&& REG_N_SETS (REGNO (x)) == 1
&& ! REGNO_REG_SET_P
- (ENTRY_BLOCK_PTR->next_bb->il.rtl->global_live_at_start,
+ (ENTRY_BLOCK_PTR->next_bb->il.rtl->global_live_at_start,
REGNO (x))))
&& INSN_CUID (reg_stat[REGNO (x)].last_set) < subst_low_cuid)
{
{
#ifdef SHORT_IMMEDIATES_SIGN_EXTEND
/* If X is narrower than MODE and TEM is a non-negative
- constant that would appear negative in the mode of X,
- sign-extend it for use in reg_nonzero_bits because some
- machines (maybe most) will actually do the sign-extension
- and this is the conservative approach.
+ constant that would appear negative in the mode of X,
+ sign-extend it for use in reg_nonzero_bits because some
+ machines (maybe most) will actually do the sign-extension
+ and this is the conservative approach.
- ??? For 2.5, try to tighten up the MD files in this regard
- instead of this kludge. */
+ ??? For 2.5, try to tighten up the MD files in this regard
+ instead of this kludge. */
if (GET_MODE_BITSIZE (GET_MODE (x)) < GET_MODE_BITSIZE (mode)
&& GET_CODE (tem) == CONST_INT
unsigned HOST_WIDE_INT mask = reg_stat[REGNO (x)].nonzero_bits;
if (GET_MODE_BITSIZE (GET_MODE (x)) < GET_MODE_BITSIZE (mode))
- /* We don't know anything about the upper bits. */
- mask |= GET_MODE_MASK (mode) ^ GET_MODE_MASK (GET_MODE (x));
+ /* We don't know anything about the upper bits. */
+ mask |= GET_MODE_MASK (mode) ^ GET_MODE_MASK (GET_MODE (x));
*nonzero &= mask;
}
if (reg_stat[REGNO (x)].last_set_value != 0
&& reg_stat[REGNO (x)].last_set_mode == mode
&& (reg_stat[REGNO (x)].last_set_label == label_tick
- || (REGNO (x) >= FIRST_PSEUDO_REGISTER
+ || (REGNO (x) >= FIRST_PSEUDO_REGISTER
&& REG_N_SETS (REGNO (x)) == 1
&& ! REGNO_REG_SET_P
- (ENTRY_BLOCK_PTR->next_bb->il.rtl->global_live_at_start,
+ (ENTRY_BLOCK_PTR->next_bb->il.rtl->global_live_at_start,
REGNO (x))))
&& INSN_CUID (reg_stat[REGNO (x)].last_set) < subst_low_cuid)
{
if (nonzero_sign_valid && reg_stat[REGNO (x)].sign_bit_copies != 0
&& GET_MODE_BITSIZE (GET_MODE (x)) == GET_MODE_BITSIZE (mode))
*result = reg_stat[REGNO (x)].sign_bit_copies;
-
+
return NULL;
}
\f
&& 0 > trunc_int_for_mode (INTVAL (XEXP (varop, 1)),
shift_mode))
&& (new = simplify_const_binary_operation (code, result_mode,
- XEXP (varop, 1),
- GEN_INT (count))) != 0
+ XEXP (varop, 1),
+ GEN_INT (count))) != 0
&& GET_CODE (new) == CONST_INT
&& merge_outer_ops (&outer_op, &outer_const, GET_CODE (varop),
INTVAL (new), result_mode, &complement_p))
for some (ashiftrt (xor)). */
if (GET_CODE (XEXP (varop, 1)) == CONST_INT
&& !(code == ASHIFTRT && GET_CODE (varop) == XOR
- && 0 > trunc_int_for_mode (INTVAL (XEXP (varop, 1)),
+ && 0 > trunc_int_for_mode (INTVAL (XEXP (varop, 1)),
shift_mode)))
{
rtx lhs = simplify_shift_const (NULL_RTX, code, shift_mode,
varop = apply_distributive_law (varop);
count = 0;
- continue;
+ continue;
}
break;
if (code == ASHIFT
&& GET_CODE (XEXP (varop, 1)) == CONST_INT
&& (new = simplify_const_binary_operation (ASHIFT, result_mode,
- XEXP (varop, 1),
- GEN_INT (count))) != 0
+ XEXP (varop, 1),
+ GEN_INT (count))) != 0
&& GET_CODE (new) == CONST_INT
&& merge_outer_ops (&outer_op, &outer_const, PLUS,
INTVAL (new), result_mode, &complement_p))
&& GET_CODE (XEXP (varop, 1)) == CONST_INT
&& mode_signbit_p (result_mode, XEXP (varop, 1))
&& (new = simplify_const_binary_operation (code, result_mode,
- XEXP (varop, 1),
- GEN_INT (count))) != 0
+ XEXP (varop, 1),
+ GEN_INT (count))) != 0
&& GET_CODE (new) == CONST_INT
&& merge_outer_ops (&outer_op, &outer_const, XOR,
INTVAL (new), result_mode, &complement_p))
&& ! unsigned_comparison_p
&& (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT)
&& ((unsigned HOST_WIDE_INT) const_op
- < (((unsigned HOST_WIDE_INT) 1
+ < (((unsigned HOST_WIDE_INT) 1
<< (GET_MODE_BITSIZE (mode) - 1))))
&& cmp_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
{
HOST_WIDE_INT c1 = -INTVAL (XEXP (SUBREG_REG (op0), 1));
if ((c1 > 0
- && (unsigned HOST_WIDE_INT) c1
+ && (unsigned HOST_WIDE_INT) c1
< (unsigned HOST_WIDE_INT) 1 << (mode_width - 1)
&& (equality_comparison_p || unsigned_comparison_p)
/* (A - C1) zero-extends if it is positive and sign-extends
{
op0 = SUBREG_REG (op0);
continue;
- }
+ }
}
/* If the inner mode is narrower and we are extracting the low part,
case UNEQ: case LTGT:
case LT: case LTU: case UNLT: case LE: case LEU: case UNLE:
case GT: case GTU: case UNGT: case GE: case GEU: case UNGE:
- case UNORDERED: case ORDERED:
+ case UNORDERED: case ORDERED:
/* We can't do anything if OP0 is a condition code value, rather
than an actual data value. */
if (const_op != 0
{
op0 = simplify_and_const_int
(NULL_RTX, mode, gen_rtx_LSHIFTRT (mode,
- XEXP (op0, 1),
- XEXP (XEXP (op0, 0), 1)),
+ XEXP (op0, 1),
+ XEXP (XEXP (op0, 0), 1)),
(HOST_WIDE_INT) 1);
continue;
}
{
/* For paradoxical subregs, allow case 1 as above. Case 3 isn't
implemented. */
- if (REG_P (SUBREG_REG (op0)))
+ if (REG_P (SUBREG_REG (op0)))
{
op0 = SUBREG_REG (op0);
op1 = gen_lowpart (GET_MODE (op0), op1);
record_truncated_value (rtx x)
{
enum machine_mode truncated_mode;
-
+
if (GET_CODE (x) == SUBREG && REG_P (SUBREG_REG (x)))
{
enum machine_mode original_mode = GET_MODE (SUBREG_REG (x));
if (place && JUMP_P (place))
{
rtx label = JUMP_LABEL (place);
-
+
if (!label)
JUMP_LABEL (place) = XEXP (note, 0);
else
if (place2 && JUMP_P (place2))
{
rtx label = JUMP_LABEL (place2);
-
+
if (!label)
JUMP_LABEL (place2) = XEXP (note, 0);
else
; the terms of the GNU General Public License as published by the Free
; Software Foundation; either version 2, or (at your option) any later
; version.
-;
+;
; GCC is distributed in the hope that it will be useful, but WITHOUT ANY
; WARRANTY; without even the implied warranty of MERCHANTABILITY or
; FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
; for more details.
-;
+;
; You should have received a copy of the GNU General Public License
; along with GCC; see the file COPYING. If not, write to the Free
; Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
Allow premature scheduling of queued insns
fsched-stalled-insns=
-Common RejectNegative Joined UInteger
+Common RejectNegative Joined UInteger
-fsched-stalled-insns=<number> Set number of queued insns that can be prematurely scheduled
; sched_stalled_insns_dep controls how many recently scheduled cycles will
Split lifetimes of induction variables when loops are unrolled
fvariable-expansion-in-unroller
-Common Report Var(flag_variable_expansion_in_unroller)
+Common Report Var(flag_variable_expansion_in_unroller)
Apply variable expansion when loops are unrolled
; Emit code to probe the stack, to help detect stack overflow; also