#include "output.h"
#include "flags.h"
#include "timevar.h"
-#include "toplev.h"
#include "diagnostic.h"
#include "gimple-pretty-print.h"
#include "langhooks.h"
#include "target.h"
#include "lto-streamer.h"
+#include "data-streamer.h"
+#include "tree-streamer.h"
#include "cfgloop.h"
#include "tree-scalar-evolution.h"
#include "intl.h"
*looping = MAX (*looping, looping2);
}
-/* Recognize special cases of builtins that are by themself not pure or const
+/* Recognize special cases of builtins that are by themselves not pure or const
   but a function using them is. */
static bool
special_builtin_state (enum pure_const_state_e *state, bool *looping,
case BUILT_IN_RETURN:
case BUILT_IN_UNREACHABLE:
case BUILT_IN_ALLOCA:
+ case BUILT_IN_ALLOCA_WITH_ALIGN:
case BUILT_IN_STACK_SAVE:
case BUILT_IN_STACK_RESTORE:
case BUILT_IN_EH_POINTER:
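/* Editorial example, not part of the patch (hypothetical code, the
   function name is made up): the kind of caller the comment above has
   in mind.  __builtin_alloca touches only the local frame, so by
   itself it does not force the containing function out of the
   const/pure lattice.  */
static int
sum_first_n (int n)
{
  int *buf = (int *) __builtin_alloca (n * sizeof (int));
  int i, s = 0;

  for (i = 0; i < n; i++)
    buf[i] = i;
  for (i = 0; i < n; i++)
    s += buf[i];
  return s;
}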
fprintf (dump_file, " Recursive call can loop.\n");
local->looping = true;
}
- /* Either calle is unknown or we are doing local analysis.
+ /* Either callee is unknown or we are doing local analysis.
Look to see if there are any bits available for the callee (such as by
   declaration or because it is a builtin) and process solely on the basis of
those bits. */
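/* Editorial sketch, not part of the patch (the helper name is
   hypothetical): roughly how such declaration bits (the ECF_* flags
   from flags_from_decl_or_type or gimple_call_flags) map onto the
   pure/const lattice; the real mapping is done by state_from_flags.  */
static void
sketch_state_from_flags (enum pure_const_state_e *state, bool *looping,
                         int flags)
{
  *looping = (flags & ECF_LOOPING_CONST_OR_PURE) != 0;
  if (flags & ECF_CONST)
    *state = IPA_CONST;    /* Neither reads nor writes global memory.  */
  else if (flags & ECF_PURE)
    *state = IPA_PURE;     /* May read but does not write global memory.  */
  else
    *state = IPA_NEITHER;
}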
check_stmt (gimple_stmt_iterator *gsip, funct_state local, bool ipa)
{
gimple stmt = gsi_stmt (*gsip);
- unsigned int i = 0;
if (is_gimple_debug (stmt))
return;
print_gimple_stmt (dump_file, stmt, 0, 0);
}
- if (gimple_has_volatile_ops (stmt))
+ if (gimple_has_volatile_ops (stmt)
+ && !gimple_clobber_p (stmt))
{
local->pure_const_state = IPA_NEITHER;
if (dump_file)
}
break;
case GIMPLE_ASM:
- for (i = 0; i < gimple_asm_nclobbers (stmt); i++)
+ if (gimple_asm_clobbers_memory_p (stmt))
{
- tree op = gimple_asm_clobber_op (stmt, i);
- if (strcmp (TREE_STRING_POINTER (TREE_VALUE (op)), "memory") == 0)
- {
- if (dump_file)
- fprintf (dump_file, " memory asm clobber is not const/pure");
- /* Abandon all hope, ye who enter here. */
- local->pure_const_state = IPA_NEITHER;
- }
+ if (dump_file)
+ fprintf (dump_file, " memory asm clobber is not const/pure");
+ /* Abandon all hope, ye who enter here. */
+ local->pure_const_state = IPA_NEITHER;
}
if (gimple_asm_volatile_p (stmt))
{
l->looping_previously_known = true;
l->looping = false;
l->can_throw = false;
+ state_from_flags (&l->state_previously_known, &l->looping_previously_known,
+ flags_from_decl_or_type (fn->decl),
+ cgraph_node_cannot_return (fn));
+
+ if (fn->thunk.thunk_p || fn->alias)
+ {
+ /* Thunk gets propagated through, so nothing interesting happens. */
+ gcc_assert (ipa);
+ return l;
+ }
if (dump_file)
{
if (mark_dfs_back_edges ())
{
/* Preheaders are needed for SCEV to work.
- Simple lateches and recorded exits improve chances that loop will
+     Simple latches and recorded exits improve chances that the loop will
     prove to be finite in testcases such as loop-15.c and loop-24.c. */
loop_optimizer_init (LOOPS_NORMAL
| LOOPS_HAVE_RECORDED_EXITS);
if (dump_file && (dump_flags & TDF_DETAILS))
fprintf (dump_file, " checking previously known:");
- state_from_flags (&l->state_previously_known, &l->looping_previously_known,
- flags_from_decl_or_type (fn->decl),
- cgraph_node_cannot_return (fn));
better_state (&l->pure_const_state, &l->looping,
l->state_previously_known,
   We process AVAIL_OVERWRITABLE functions. We cannot use the results
by default, but the info can be used at LTO with -fwhole-program or
- when function got clonned and the clone is AVAILABLE. */
+   when a function got cloned and the clone is AVAILABLE. */
for (node = cgraph_nodes; node; node = node->next)
if (cgraph_function_body_availability (node) >= AVAIL_OVERWRITABLE)
count++;
}
- lto_output_uleb128_stream (ob->main_stream, count);
+ streamer_write_uhwi_stream (ob->main_stream, count);
/* Process all of the functions. */
for (csi = csi_start (set); !csi_end_p (csi); csi_next (&csi))
encoder = ob->decl_state->cgraph_node_encoder;
node_ref = lto_cgraph_encoder_encode (encoder, node);
- lto_output_uleb128_stream (ob->main_stream, node_ref);
+ streamer_write_uhwi_stream (ob->main_stream, node_ref);
/* Note that flags will need to be read in the opposite
     order from that in which we are pushing the bitflags into FLAGS. */
bp_pack_value (&bp, fs->looping_previously_known, 1);
bp_pack_value (&bp, fs->looping, 1);
bp_pack_value (&bp, fs->can_throw, 1);
- lto_output_bitpack (&bp);
+ streamer_write_bitpack (&bp);
}
}
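/* Editorial sketch, not part of the patch: how the data-streamer calls
   used above pair up between writer and reader.  All functions are
   from the new data-streamer.h API; the variables are the ones used in
   the surrounding writer/reader code.  */
#if 0
  /* Writer side.  */
  streamer_write_uhwi_stream (ob->main_stream, count);
  bp = bitpack_create (ob->main_stream);
  bp_pack_value (&bp, fs->looping, 1);
  streamer_write_bitpack (&bp);

  /* Reader side unpacks the same fields with the matching calls.  */
  count = streamer_read_uhwi (ib);
  bp = streamer_read_bitpack (ib);
  fs->looping = bp_unpack_value (&bp, 1);
#endif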
if (ib)
{
unsigned int i;
- unsigned int count = lto_input_uleb128 (ib);
+ unsigned int count = streamer_read_uhwi (ib);
for (i = 0; i < count; i++)
{
lto_cgraph_encoder_t encoder;
fs = XCNEW (struct funct_state_d);
- index = lto_input_uleb128 (ib);
+ index = streamer_read_uhwi (ib);
encoder = file_data->cgraph_node_encoder;
node = lto_cgraph_encoder_deref (encoder, index);
set_function_state (node, fs);
/* Note that the flags must be read in the opposite
order in which they were written (the bitflags were
pushed into FLAGS). */
- bp = lto_input_bitpack (ib);
+ bp = streamer_read_bitpack (ib);
fs->pure_const_state
= (enum pure_const_state_e) bp_unpack_value (&bp, 2);
fs->state_previously_known
return (!e->can_throw_external);
}
-/* Return true if NODE is self recursive function. */
+/* Return true if NODE is a self-recursive function.
+   ??? Self-recursive and indirectly recursive functions should
+   be the same, so this function seems unnecessary. */
static bool
self_recursive_p (struct cgraph_node *node)
{
struct cgraph_edge *e;
for (e = node->callees; e; e = e->next_callee)
- if (e->callee == node)
+ if (cgraph_function_node (e->callee, NULL) == node)
return true;
return false;
}
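/* Editorial note, not part of the patch: with aliases represented in
   the callgraph, E->callee may be an alias of NODE rather than NODE
   itself.  cgraph_function_node resolves the edge to the underlying
   function first, so recursion that goes through an alias is still
   detected.  */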
int i;
struct ipa_dfs_info * w_info;
- order_pos = ipa_utils_reduced_inorder (order, true, false, NULL);
+ order_pos = ipa_reduced_postorder (order, true, false, NULL);
if (dump_file)
{
dump_cgraph (dump_file);
- ipa_utils_print_order(dump_file, "reduced", order, order_pos);
+      ipa_print_order (dump_file, "reduced", order, order_pos);
}
  /* Propagate the local information through the call graph to produce
int count = 0;
node = order[i];
+ if (node->alias)
+ continue;
+
if (dump_file && (dump_flags & TDF_DETAILS))
fprintf (dump_file, "Starting cycle\n");
/* Now walk the edges and merge in callee properties. */
for (e = w->callees; e; e = e->next_callee)
{
- struct cgraph_node *y = e->callee;
+ enum availability avail;
+ struct cgraph_node *y = cgraph_function_node (e->callee, &avail);
enum pure_const_state_e edge_state = IPA_CONST;
bool edge_looping = false;
cgraph_node_name (e->callee),
e->callee->uid);
}
- if (cgraph_function_body_availability (y) > AVAIL_OVERWRITABLE)
+ if (avail > AVAIL_OVERWRITABLE)
{
funct_state y_l = get_function_state (y);
if (dump_file && (dump_flags & TDF_DETAILS))
break;
/* Now process the indirect call. */
- for (ie = node->indirect_calls; ie; ie = ie->next_callee)
+ for (ie = w->indirect_calls; ie; ie = ie->next_callee)
{
enum pure_const_state_e edge_state = IPA_CONST;
bool edge_looping = false;
break;
/* And finally all loads and stores. */
- for (i = 0; ipa_ref_list_reference_iterate (&node->ref_list, i, ref); i++)
+ for (i = 0; ipa_ref_list_reference_iterate (&w->ref_list, i, ref); i++)
{
enum pure_const_state_e ref_state = IPA_CONST;
bool ref_looping = false;
this_looping ? "looping " : "",
cgraph_node_name (w));
}
- cgraph_set_readonly_flag (w, true);
- cgraph_set_looping_const_or_pure_flag (w, this_looping);
+ cgraph_set_const_flag (w, true, this_looping);
break;
case IPA_PURE:
this_looping ? "looping " : "",
cgraph_node_name (w));
}
- cgraph_set_pure_flag (w, true);
- cgraph_set_looping_const_or_pure_flag (w, this_looping);
+ cgraph_set_pure_flag (w, true, this_looping);
break;
default:
}
}
- /* Cleanup. */
- for (node = cgraph_nodes; node; node = node->next)
- {
- /* Get rid of the aux information. */
- if (node->aux)
- {
- w_info = (struct ipa_dfs_info *) node->aux;
- free (node->aux);
- node->aux = NULL;
- }
- }
-
+ ipa_free_postorder_info ();
free (order);
}
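/* Editorial sketch, not part of the patch: the ipa-utils helpers used
   above own the per-node aux data, so a propagation pass now follows
   this pattern instead of hand-freeing node->aux at the end.  */
#if 0
  order_pos = ipa_reduced_postorder (order, true, false, NULL);
  /* ... walk ORDER, merging properties over each SCC ...  */
  ipa_free_postorder_info ();  /* Frees the DFS info stored in node->aux.  */
  free (order);
#endif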
int i;
struct ipa_dfs_info * w_info;
- order_pos = ipa_utils_reduced_inorder (order, true, false, ignore_edge);
+ order_pos = ipa_reduced_postorder (order, true, false, ignore_edge);
if (dump_file)
{
dump_cgraph (dump_file);
- ipa_utils_print_order(dump_file, "reduced for nothrow", order, order_pos);
+ ipa_print_order (dump_file, "reduced for nothrow", order, order_pos);
}
  /* Propagate the local information through the call graph to produce
bool can_throw = false;
node = order[i];
+ if (node->alias)
+ continue;
+
/* Find the worst state for any node in the cycle. */
w = node;
while (w)
for (e = w->callees; e; e = e->next_callee)
{
- struct cgraph_node *y = e->callee;
+ enum availability avail;
+ struct cgraph_node *y = cgraph_function_node (e->callee, &avail);
- if (cgraph_function_body_availability (y) > AVAIL_OVERWRITABLE)
+ if (avail > AVAIL_OVERWRITABLE)
{
funct_state y_l = get_function_state (y);
funct_state w_l = get_function_state (w);
if (!can_throw && !TREE_NOTHROW (w->decl))
{
- struct cgraph_edge *e;
cgraph_set_nothrow_flag (w, true);
- for (e = w->callers; e; e = e->next_caller)
- e->can_throw_external = false;
if (dump_file)
fprintf (dump_file, "Function found to be nothrow: %s\n",
cgraph_node_name (w));
}
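/* Editorial note, not part of the patch: the explicit loop clearing
   can_throw_external on every caller edge is dropped here and in the
   local pass below; the assumption is that cgraph_set_nothrow_flag now
   performs that update itself.  */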
}
- /* Cleanup. */
- for (node = cgraph_nodes; node; node = node->next)
- {
- /* Get rid of the aux information. */
- if (node->aux)
- {
- w_info = (struct ipa_dfs_info *) node->aux;
- free (node->aux);
- node->aux = NULL;
- }
- }
-
+ ipa_free_postorder_info ();
free (order);
}
if (cgraph_function_body_availability (node) <= AVAIL_OVERWRITABLE)
{
if (dump_file)
- fprintf (dump_file, "Function is not available or overwrittable; not analyzing.\n");
+ fprintf (dump_file, "Function is not available or overwritable; not analyzing.\n");
return true;
}
return false;
bool skip;
struct cgraph_node *node;
- node = cgraph_node (current_function_decl);
+ node = cgraph_get_node (current_function_decl);
skip = skip_function_for_local_pure_const (node);
if (!warn_suggest_attribute_const
&& !warn_suggest_attribute_pure
&& skip)
return 0;
- /* First do NORETURN discovery. */
+ l = analyze_function (node, false);
+
+ /* Do NORETURN discovery. */
if (!skip && !TREE_THIS_VOLATILE (current_function_decl)
&& EDGE_COUNT (EXIT_BLOCK_PTR->preds) == 0)
{
changed = true;
}
- l = analyze_function (node, false);
switch (l->pure_const_state)
{
warn_function_const (current_function_decl, !l->looping);
if (!skip)
{
- cgraph_set_readonly_flag (node, true);
- cgraph_set_looping_const_or_pure_flag (node, l->looping);
+ cgraph_set_const_flag (node, true, l->looping);
changed = true;
}
if (dump_file)
{
if (!skip)
{
- cgraph_set_looping_const_or_pure_flag (node, false);
+ cgraph_set_const_flag (node, true, false);
changed = true;
}
if (dump_file)
{
if (!skip)
{
- cgraph_set_pure_flag (node, true);
- cgraph_set_looping_const_or_pure_flag (node, l->looping);
+ cgraph_set_pure_flag (node, true, l->looping);
changed = true;
}
warn_function_pure (current_function_decl, !l->looping);
{
if (!skip)
{
- cgraph_set_looping_const_or_pure_flag (node, false);
+ cgraph_set_pure_flag (node, true, false);
changed = true;
}
if (dump_file)
}
if (!l->can_throw && !TREE_NOTHROW (current_function_decl))
{
- struct cgraph_edge *e;
-
cgraph_set_nothrow_flag (node, true);
- for (e = node->callers; e; e = e->next_caller)
- e->can_throw_external = false;
changed = true;
if (dump_file)
fprintf (dump_file, "Function found to be nothrow: %s\n",
lang_hooks.decl_printable_name (current_function_decl,
2));
}
- if (l)
- free (l);
+ free (l);
if (changed)
return execute_fixup_cfg ();
else