/* Nonzero if we found a computed goto while building basic blocks. */
static bool found_computed_goto;
+/* Hash table to store last discriminator assigned for each locus.
+ The discriminator distinguishes basic blocks that share a single
+ source location, for more accurate sample-based profiling. */
+struct locus_discrim_map
+{
+ location_t locus; /* Key: the source location. */
+ int discriminator; /* Last discriminator value handed out for LOCUS. */
+};
+static htab_t discriminator_per_locus;
+
/* Basic blocks and flowgraphs. */
static void make_blocks (gimple_seq);
static void factor_computed_gotos (void);
static void make_cond_expr_edges (basic_block);
static void make_gimple_switch_edges (basic_block);
static void make_goto_expr_edges (basic_block);
+static unsigned int locus_map_hash (const void *);
+static int locus_map_eq (const void *, const void *);
+static void assign_discriminator (location_t, basic_block);
static edge gimple_redirect_edge_and_branch (edge, basic_block);
static edge gimple_try_redirect_by_replacing_jump (edge, basic_block);
static unsigned int split_critical_edges (void);
static int gimple_verify_flow_info (void);
static void gimple_make_forwarder_block (edge);
static void gimple_cfg2vcg (FILE *);
+static gimple first_non_label_stmt (basic_block);
/* Flowgraph optimization and cleanup. */
static void gimple_merge_blocks (basic_block, basic_block);
group_case_labels ();
/* Create the edges of the flowgraph. */
+ discriminator_per_locus = htab_create (13, locus_map_hash, locus_map_eq,
+ free);
make_edges ();
cleanup_dead_labels ();
+ htab_delete (discriminator_per_locus);
/* Debugging dumps. */
/* Build a label for the new block which will contain the
factored computed goto. */
- factored_label_decl = create_artificial_label ();
+ factored_label_decl = create_artificial_label (UNKNOWN_LOCATION);
factored_computed_goto_label
= gimple_build_label (factored_label_decl);
gsi_insert_after (&new_gsi, factored_computed_goto_label,
if (stmt && gimple_code (stmt) == GIMPLE_COND)
{
+ location_t loc = gimple_location (stmt);
tree cond;
bool zerop, onep;
fold_defer_overflow_warnings ();
- cond = fold_binary (gimple_cond_code (stmt), boolean_type_node,
+ cond = fold_binary_loc (loc, gimple_cond_code (stmt), boolean_type_node,
gimple_cond_lhs (stmt), gimple_cond_rhs (stmt));
if (cond)
{
fallthru = true;
if (fallthru)
- make_edge (bb, bb->next_bb, EDGE_FALLTHRU);
+ {
+ make_edge (bb, bb->next_bb, EDGE_FALLTHRU);
+ if (last)
+ assign_discriminator (gimple_location (last), bb->next_bb);
+ }
}
if (root_omp_region)
fold_cond_expr_cond ();
}
+/* Hash a locus_discrim_map entry.  ITEM points to the hash table
+   entry; the locus value itself serves as the hash.  */
+
+static unsigned int
+locus_map_hash (const void *item)
+{
+  const struct locus_discrim_map *entry
+    = (const struct locus_discrim_map *) item;
+  return entry->locus;
+}
+
+/* Equality function for locus_discrim_map entries.  VA and VB point
+   to the two hash table entries being compared; they are equal when
+   their loci match.  */
+
+static int
+locus_map_eq (const void *va, const void *vb)
+{
+  return (((const struct locus_discrim_map *) va)->locus
+	  == ((const struct locus_discrim_map *) vb)->locus);
+}
+
+/* Find the next available discriminator value for LOCUS. The
+ discriminator distinguishes among several basic blocks that
+ share a common locus, allowing for more accurate sample-based
+ profiling. Returns a value >= 1; successive calls for the same
+ LOCUS return successively larger values. */
+
+static int
+next_discriminator_for_locus (location_t locus)
+{
+ struct locus_discrim_map item;
+ struct locus_discrim_map **slot;
+
+ /* Build a stack-allocated key; only the locus field matters for
+ lookup (see locus_map_eq). */
+ item.locus = locus;
+ item.discriminator = 0;
+ /* The locus value doubles as the hash, matching locus_map_hash. */
+ slot = (struct locus_discrim_map **)
+ htab_find_slot_with_hash (discriminator_per_locus, (void *) &item,
+ (hashval_t) locus, INSERT);
+ gcc_assert (slot);
+ if (*slot == HTAB_EMPTY_ENTRY)
+ {
+ /* First time we see LOCUS: allocate a persistent entry starting
+ at discriminator 0 so the increment below yields 1. */
+ *slot = XNEW (struct locus_discrim_map);
+ gcc_assert (*slot);
+ (*slot)->locus = locus;
+ (*slot)->discriminator = 0;
+ }
+ (*slot)->discriminator++;
+ return (*slot)->discriminator;
+}
+
+/* Return TRUE if LOCUS1 and LOCUS2 refer to the same source line. */
+
+static bool
+same_line_p (location_t locus1, location_t locus2)
+{
+  expanded_location xloc1, xloc2;
+
+  /* Identical loci trivially name the same line.  */
+  if (locus1 == locus2)
+    return true;
+
+  xloc1 = expand_location (locus1);
+  xloc2 = expand_location (locus2);
+
+  if (xloc1.line != xloc2.line)
+    return false;
+
+  /* Same file: either the very same file pointer (which also covers
+     both being NULL), or two non-NULL names that compare equal.  */
+  return (xloc1.file == xloc2.file
+	  || (xloc1.file != NULL
+	      && xloc2.file != NULL
+	      && strcmp (xloc1.file, xloc2.file) == 0));
+}
+
+/* Assign a unique discriminator value to block BB if it begins at the same
+ LOCUS as its predecessor block. */
+
+static void
+assign_discriminator (location_t locus, basic_block bb)
+{
+ gimple to_stmt;
+
+ /* Nothing to do for a zero locus (presumably UNKNOWN_LOCATION --
+ TODO confirm) or when BB already has a discriminator. */
+ if (locus == 0 || bb->discriminator != 0)
+ return;
+
+ /* Only assign a discriminator when BB's first real (non-label)
+ statement is on the same source line as LOCUS; otherwise the line
+ numbers alone already distinguish the blocks. */
+ to_stmt = first_non_label_stmt (bb);
+ if (to_stmt && same_line_p (locus, gimple_location (to_stmt)))
+ bb->discriminator = next_discriminator_for_locus (locus);
+}
/* Create the edges for a GIMPLE_COND starting at block BB. */
basic_block then_bb, else_bb;
tree then_label, else_label;
edge e;
+ location_t entry_locus;
gcc_assert (entry);
gcc_assert (gimple_code (entry) == GIMPLE_COND);
+ entry_locus = gimple_location (entry);
+
/* Entry basic blocks for each component. */
then_label = gimple_cond_true_label (entry);
else_label = gimple_cond_false_label (entry);
else_stmt = first_stmt (else_bb);
e = make_edge (bb, then_bb, EDGE_TRUE_VALUE);
+ assign_discriminator (entry_locus, then_bb);
e->goto_locus = gimple_location (then_stmt);
if (e->goto_locus)
e->goto_block = gimple_block (then_stmt);
e = make_edge (bb, else_bb, EDGE_FALSE_VALUE);
if (e)
{
+ assign_discriminator (entry_locus, else_bb);
e->goto_locus = gimple_location (else_stmt);
if (e->goto_locus)
e->goto_block = gimple_block (else_stmt);
make_gimple_switch_edges (basic_block bb)
{
gimple entry = last_stmt (bb);
+ location_t entry_locus;
size_t i, n;
+ entry_locus = gimple_location (entry);
+
n = gimple_switch_num_labels (entry);
for (i = 0; i < n; ++i)
tree lab = CASE_LABEL (gimple_switch_label (entry, i));
basic_block label_bb = label_to_block (lab);
make_edge (bb, label_bb, 0);
+ assign_discriminator (entry_locus, label_bb);
}
}
if (simple_goto_p (goto_t))
{
tree dest = gimple_goto_dest (goto_t);
- edge e = make_edge (bb, label_to_block (dest), EDGE_FALLTHRU);
+ basic_block label_bb = label_to_block (dest);
+ edge e = make_edge (bb, label_bb, EDGE_FALLTHRU);
e->goto_locus = gimple_location (goto_t);
+ assign_discriminator (e->goto_locus, label_bb);
if (e->goto_locus)
e->goto_block = gimple_block (goto_t);
gsi_remove (&last, true);
/* Callback for for_each_eh_region. Helper for cleanup_dead_labels. */
static void
-update_eh_label (struct eh_region *region)
+update_eh_label (struct eh_region_d *region)
{
tree old_label = get_eh_region_tree_label (region);
if (old_label)
FOR_EACH_IMM_USE_STMT (stmt, imm_iter, name)
{
- if (gimple_code (stmt) != GIMPLE_PHI)
- push_stmt_changes (&stmt);
-
FOR_EACH_IMM_USE_ON_STMT (use, imm_iter)
{
replace_exp (use, val);
if (cfgcleanup_altered_bbs)
bitmap_set_bit (cfgcleanup_altered_bbs, gimple_bb (stmt)->index);
- /* FIXME. This should go in pop_stmt_changes. */
+ /* FIXME. This should go in update_stmt. */
for (i = 0; i < gimple_num_ops (stmt); i++)
{
tree op = gimple_op (stmt, i);
}
maybe_clean_or_replace_eh_stmt (stmt, stmt);
-
- pop_stmt_changes (&stmt);
+ update_stmt (stmt);
}
}
/* Return the one of two successors of BB that is not reachable by a
- reached by a complex edge, if there is one. Else, return BB. We use
+ complex edge, if there is one. Else, return BB. We use
this in optimizations that use post-dominators for their heuristics,
to catch the cases in C++ where function calls are involved. */
{
gimple stmt = gsi_stmt (gsi);
+ if (gimple_no_warning_p (stmt)) return false;
+
if (gimple_has_location (stmt))
{
location_t loc = gimple_location (stmt);
if (LOCATION_LINE (loc) > 0)
{
- warning (OPT_Wunreachable_code, "%Hwill never be executed", &loc);
+ warning_at (loc, OPT_Wunreachable_code, "will never be executed");
return true;
}
}
}
break;
- case GIMPLE_CHANGE_DYNAMIC_TYPE:
- /* If we do not optimize remove GIMPLE_CHANGE_DYNAMIC_TYPE as
- expansion is confused about them and we only remove them
- during alias computation otherwise. */
- if (!optimize)
- {
- data->last_was_goto = false;
- gsi_remove (gsi, false);
- break;
- }
- /* Fallthru. */
-
default:
data->last_was_goto = false;
gsi_next (gsi);
loop above, so the last statement we process is the first statement
in the block. */
if (loc > BUILTINS_LOCATION && LOCATION_LINE (loc) > 0)
- warning (OPT_Wunreachable_code, "%Hwill never be executed", &loc);
+ warning_at (loc, OPT_Wunreachable_code, "will never be executed");
remove_phi_nodes_and_edges_for_unreachable_block (bb);
bb->il.gimple = NULL;
return !gsi_end_p (i) ? gsi_stmt (i) : NULL;
}
+/* Return the first non-label statement in basic block BB, or NULL if
+   BB contains only labels (or no statements at all).  */
+
+static gimple
+first_non_label_stmt (basic_block bb)
+{
+  gimple_stmt_iterator gsi;
+
+  /* Skip over the leading GIMPLE_LABELs.  */
+  for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
+    if (gimple_code (gsi_stmt (gsi)) != GIMPLE_LABEL)
+      return gsi_stmt (gsi);
+  return NULL;
+}
+
/* Return the last statement in basic block BB. */
gimple
return false;
}
-/* Verify if EXPR is a valid GIMPLE reference expression. Returns true
+/* Verify if EXPR is a valid GIMPLE reference expression. If
+ REQUIRE_LVALUE is true verifies it is an lvalue. Returns true
if there is an error, otherwise false. */
static bool
-verify_types_in_gimple_reference (tree expr)
+verify_types_in_gimple_reference (tree expr, bool require_lvalue)
{
while (handled_component_p (expr))
{
expr = op;
}
- return verify_types_in_gimple_min_lval (expr);
+ return ((require_lvalue || !is_gimple_min_invariant (expr))
+ && verify_types_in_gimple_min_lval (expr));
}
/* Returns true if there is one pointer type in TYPE_POINTER_TO (SRC_OBJ)
{
if (TREE_CODE (rhs1_type) != VECTOR_TYPE
|| !(INTEGRAL_TYPE_P (TREE_TYPE (rhs1_type))
- || FIXED_POINT_TYPE_P (TREE_TYPE (rhs1_type)))
+ || FIXED_POINT_TYPE_P (TREE_TYPE (rhs1_type))
+ || SCALAR_FLOAT_TYPE_P (TREE_TYPE (rhs1_type)))
|| (!INTEGRAL_TYPE_P (rhs2_type)
&& (TREE_CODE (rhs2_type) != VECTOR_TYPE
|| !INTEGRAL_TYPE_P (TREE_TYPE (rhs2_type))))
debug_generic_expr (rhs2_type);
return true;
}
+ /* For shifting a vector of floating point components we
+ only allow shifting by a constant multiple of the element size. */
+ if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (rhs1_type))
+ && (TREE_CODE (rhs2) != INTEGER_CST
+ || !div_if_zero_remainder (EXACT_DIV_EXPR, rhs2,
+ TYPE_SIZE (TREE_TYPE (rhs1_type)))))
+ {
+ error ("non-element sized vector shift of floating point vector");
+ return true;
+ }
return false;
}
}
if (handled_component_p (lhs))
- res |= verify_types_in_gimple_reference (lhs);
+ res |= verify_types_in_gimple_reference (lhs, true);
/* Special codes we cannot handle via their class. */
switch (rhs_code)
return true;
}
- return verify_types_in_gimple_reference (op);
+ return verify_types_in_gimple_reference (op, true);
}
/* tcc_reference */
debug_generic_stmt (rhs1);
return true;
}
- return res || verify_types_in_gimple_reference (rhs1);
+ return res || verify_types_in_gimple_reference (rhs1, false);
/* tcc_constant */
case SSA_NAME:
case GIMPLE_ASM:
return false;
- case GIMPLE_CHANGE_DYNAMIC_TYPE:
- return (!is_gimple_val (gimple_cdt_location (stmt))
- || !POINTER_TYPE_P (TREE_TYPE (gimple_cdt_location (stmt))));
-
case GIMPLE_PHI:
return verify_gimple_phi (stmt);
if (addr)
{
debug_generic_expr (addr);
- inform (input_location, "in statement");
+ inform (gimple_location (gsi_stmt (*gsi)), "in statement");
debug_gimple_stmt (stmt);
return true;
}
if (gimple_bb (stmt) != bb)
{
error ("gimple_bb (stmt) is set to a wrong basic block");
+ debug_gimple_stmt (stmt);
err |= true;
}
}
}
- label = create_artificial_label ();
+ label = create_artificial_label (UNKNOWN_LOCATION);
stmt = gimple_build_label (label);
gsi_insert_before (&s, stmt, GSI_NEW_STMT);
return label;
gsi = gsi_last_bb (bb);
stmt = gsi_end_p (gsi) ? NULL : gsi_stmt (gsi);
- switch (stmt ? gimple_code (stmt) : ERROR_MARK)
+ switch (stmt ? gimple_code (stmt) : GIMPLE_ERROR_MARK)
{
case GIMPLE_COND:
/* For COND_EXPR, we only need to redirect the edge. */
m = XNEW (struct tree_map);
m->hash = DECL_UID (decl);
m->base.from = decl;
- m->to = create_artificial_label ();
+ m->to = create_artificial_label (UNKNOWN_LOCATION);
LABEL_DECL_UID (m->to) = LABEL_DECL_UID (decl);
if (LABEL_DECL_UID (m->to) >= cfun->cfg->last_label_uid)
cfun->cfg->last_label_uid = LABEL_DECL_UID (m->to) + 1;
PROP_no_crit_edges, /* properties_provided */
0, /* properties_destroyed */
0, /* todo_flags_start */
- TODO_dump_func /* todo_flags_finish */
+ TODO_dump_func | TODO_verify_flow /* todo_flags_finish */
}
};
tree type, tree a, tree b, tree c)
{
tree ret;
+ location_t loc = gimple_location (gsi_stmt (*gsi));
- ret = fold_build3 (code, type, a, b, c);
+ ret = fold_build3_loc (loc, code, type, a, b, c);
STRIP_NOPS (ret);
return force_gimple_operand_gsi (gsi, ret, true, NULL, true,
{
tree ret;
- ret = fold_build2 (code, type, a, b);
+ ret = fold_build2_loc (gimple_location (gsi_stmt (*gsi)), code, type, a, b);
STRIP_NOPS (ret);
return force_gimple_operand_gsi (gsi, ret, true, NULL, true,
{
tree ret;
- ret = fold_build1 (code, type, a);
+ ret = fold_build1_loc (gimple_location (gsi_stmt (*gsi)), code, type, a);
STRIP_NOPS (ret);
return force_gimple_operand_gsi (gsi, ret, true, NULL, true,
}
if (location == UNKNOWN_LOCATION)
location = cfun->function_end_locus;
- warning (0, "%H%<noreturn%> function does return", &location);
+ warning_at (location, 0, "%<noreturn%> function does return");
}
/* If we see "return;" in some basic block, then we do reach the end
&& !TREE_THIS_VOLATILE (cfun->decl)
&& EDGE_COUNT (EXIT_BLOCK_PTR->preds) == 0
&& !lang_hooks.missing_noreturn_ok_p (cfun->decl))
- warning (OPT_Wmissing_noreturn, "%Jfunction might be possible candidate "
- "for attribute %<noreturn%>",
- cfun->decl);
+ warning_at (DECL_SOURCE_LOCATION (cfun->decl), OPT_Wmissing_noreturn,
+ "function might be possible candidate "
+ "for attribute %<noreturn%>");
return 0;
}
0 /* todo_flags_finish */
}
};
+
+
+/* Walk a gimplified function and warn for functions whose return value is
+   ignored and attribute((warn_unused_result)) is set. This is done before
+   inlining, so we don't have to worry about that. */
+
+static void
+do_warn_unused_result (gimple_seq seq)
+{
+  gimple_stmt_iterator gsi;
+
+  for (gsi = gsi_start (seq); !gsi_end_p (gsi); gsi_next (&gsi))
+    {
+      gimple stmt = gsi_stmt (gsi);
+
+      switch (gimple_code (stmt))
+	{
+	/* Recurse into every kind of statement container.  */
+	case GIMPLE_BIND:
+	  do_warn_unused_result (gimple_bind_body (stmt));
+	  break;
+	case GIMPLE_TRY:
+	  do_warn_unused_result (gimple_try_eval (stmt));
+	  do_warn_unused_result (gimple_try_cleanup (stmt));
+	  break;
+	case GIMPLE_CATCH:
+	  do_warn_unused_result (gimple_catch_handler (stmt));
+	  break;
+	case GIMPLE_EH_FILTER:
+	  do_warn_unused_result (gimple_eh_filter_failure (stmt));
+	  break;
+
+	case GIMPLE_CALL:
+	  /* This is a naked call, as opposed to a GIMPLE_CALL with an
+	     LHS. All calls whose value is ignored should be
+	     represented like this. Look for the attribute. */
+	  if (!gimple_call_lhs (stmt))
+	    {
+	      tree ftype = TREE_TYPE (TREE_TYPE (gimple_call_fn (stmt)));
+
+	      if (lookup_attribute ("warn_unused_result",
+				    TYPE_ATTRIBUTES (ftype)))
+		{
+		  tree fdecl = gimple_call_fndecl (stmt);
+		  location_t loc = gimple_location (stmt);
+
+		  if (fdecl)
+		    warning_at (loc, OPT_Wunused_result,
+				"ignoring return value of %qD, "
+				"declared with attribute warn_unused_result",
+				fdecl);
+		  else
+		    warning_at (loc, OPT_Wunused_result,
+				"ignoring return value of function "
+				"declared with attribute warn_unused_result");
+		}
+	    }
+	  break;
+
+	default:
+	  /* Not a container, not a call, or a call whose value is used. */
+	  break;
+	}
+    }
+}
+
+/* Execute hook: scan the current function's body for calls whose
+   warn_unused_result value is ignored.  Always returns 0 (no extra
+   TODO flags).  */
+
+static unsigned int
+run_warn_unused_result (void)
+{
+  gimple_seq body = gimple_body (current_function_decl);
+
+  do_warn_unused_result (body);
+  return 0;
+}
+
+/* Gate hook: run the pass only when -Wunused-result is enabled.  */
+
+static bool
+gate_warn_unused_result (void)
+{
+  return flag_warn_unused_result != 0;
+}
+
+/* Pass descriptor for the warn_unused_result warning walk; requires
+ only PROP_gimple_any, so it runs on the gimplified body before
+ inlining (see do_warn_unused_result). */
+struct gimple_opt_pass pass_warn_unused_result =
+{
+ {
+ GIMPLE_PASS,
+ "warn_unused_result", /* name */
+ gate_warn_unused_result, /* gate */
+ run_warn_unused_result, /* execute */
+ NULL, /* sub */
+ NULL, /* next */
+ 0, /* static_pass_number */
+ TV_NONE, /* tv_id */
+ PROP_gimple_any, /* properties_required */
+ 0, /* properties_provided */
+ 0, /* properties_destroyed */
+ 0, /* todo_flags_start */
+ 0, /* todo_flags_finish */
+ }
+};
+