/* Mudflap: narrow-pointer bounds-checking by tree rewriting.
- Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007, 2008
+ Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009
Free Software Foundation, Inc.
Contributed by Frank Ch. Eigler <fche@redhat.com>
and Graydon Hoare <graydon@redhat.com>
/* Indirection-related instrumentation. */
static void mf_decl_cache_locals (void);
static void mf_decl_clear_locals (void);
-static void mf_xform_derefs (void);
+static void mf_xform_statements (void);
static unsigned int execute_mudflap_function_ops (void);
/* Addressable variables instrumentation. */
static inline tree
mf_make_builtin (enum tree_code category, const char *name, tree type)
{
- tree decl = mf_mark (build_decl (category, get_identifier (name), type));
+ tree decl = mf_mark (build_decl (UNKNOWN_LOCATION,
+ category, get_identifier (name), type));
TREE_PUBLIC (decl) = 1;
DECL_EXTERNAL (decl) = 1;
lang_hooks.decls.pushdecl (decl);
/* There is, abominably, no language-independent way to construct a
RECORD_TYPE. So we have to call the basic type construction
primitives by hand. */
- tree fieldlo = build_decl (FIELD_DECL, get_identifier ("low"), field_type);
- tree fieldhi = build_decl (FIELD_DECL, get_identifier ("high"), field_type);
+ tree fieldlo = build_decl (UNKNOWN_LOCATION,
+ FIELD_DECL, get_identifier ("low"), field_type);
+ tree fieldhi = build_decl (UNKNOWN_LOCATION,
+ FIELD_DECL, get_identifier ("high"), field_type);
tree struct_type = make_node (RECORD_TYPE);
DECL_CONTEXT (fieldlo) = struct_type;
/* ------------------------------------------------------------------------ */
-/* Memory reference transforms. Perform the mudflap indirection-related
- tree transforms on the current function.
-
- This is the second part of the mudflap instrumentation. It works on
+/* This is the second part of the mudflap instrumentation. It works on
low-level GIMPLE using the CFG, because we want to run this pass after
tree optimizations have been performed, but we have to preserve the CFG
- for expansion from trees to RTL. */
+ for expansion from trees to RTL.
+ Below is the list of transformations performed on statements in the
+ current function.
+
+ 1) Memory reference transforms: Perform the mudflap indirection-related
+ tree transforms on memory references.
+
+ 2) Mark BUILT_IN_ALLOCA calls not inlineable.
+
+ */
static unsigned int
execute_mudflap_function_ops (void)
if (! flag_mudflap_threads)
mf_decl_cache_locals ();
- mf_xform_derefs ();
+ mf_xform_statements ();
if (! flag_mudflap_threads)
mf_decl_clear_locals ();
return 0;
}
+/* Insert a gimple_seq SEQ on all the outgoing edges out of BB.  Note that
+   if BB has more than one edge, SEQ will be replicated for each edge.
+   Also, abnormal edges will be ignored.  */
+
+static void
+insert_edge_copies_seq (gimple_seq seq, basic_block bb)
+{
+ edge e;
+ edge_iterator ei;
+ /* Number of copies of SEQ needed, i.e. the count of non-abnormal
+    successor edges minus one.  Starting at -1 (which wraps to UINT_MAX;
+    well defined for unsigned) means that after the counting loop below
+    N_COPIES is exactly the number of edges that must receive a copy. */
+ unsigned n_copies = -1;
+
+ FOR_EACH_EDGE (e, ei, bb->succs)
+ if (!(e->flags & EDGE_ABNORMAL))
+ n_copies++;
+
+ /* Every non-abnormal edge but the last gets an unshared copy of SEQ;
+    the final one receives SEQ itself, so no copy is wasted.  If all
+    edges are abnormal, nothing is inserted.  */
+ FOR_EACH_EDGE (e, ei, bb->succs)
+ if (!(e->flags & EDGE_ABNORMAL))
+ gsi_insert_seq_on_edge (e, n_copies-- > 0 ? gimple_seq_copy (seq) : seq);
+}
+
/* Create and initialize local shadow variables for the lookup cache
globals. Put their decls in the *_l globals for use by
mf_build_check_statement_for. */
/* Build the cache vars. */
mf_cache_shift_decl_l
- = mf_mark (create_tmp_var (TREE_TYPE (mf_cache_shift_decl),
+ = mf_mark (make_rename_temp (TREE_TYPE (mf_cache_shift_decl),
"__mf_lookup_shift_l"));
mf_cache_mask_decl_l
- = mf_mark (create_tmp_var (TREE_TYPE (mf_cache_mask_decl),
+ = mf_mark (make_rename_temp (TREE_TYPE (mf_cache_mask_decl),
"__mf_lookup_mask_l"));
/* Build initialization nodes for the cache vars. We just load the
}
/* Build our local variables. */
- mf_elem = create_tmp_var (mf_cache_structptr_type, "__mf_elem");
- mf_base = create_tmp_var (mf_uintptr_type, "__mf_base");
- mf_limit = create_tmp_var (mf_uintptr_type, "__mf_limit");
+ mf_elem = make_rename_temp (mf_cache_structptr_type, "__mf_elem");
+ mf_base = make_rename_temp (mf_uintptr_type, "__mf_base");
+ mf_limit = make_rename_temp (mf_uintptr_type, "__mf_limit");
/* Build: __mf_base = (uintptr_t) <base address expression>. */
seq = gimple_seq_alloc ();
- t = fold_convert (mf_uintptr_type, unshare_expr (base));
+ t = fold_convert_loc (location, mf_uintptr_type,
+ unshare_expr (base));
t = force_gimple_operand (t, &stmts, false, NULL_TREE);
gimple_seq_add_seq (&seq, stmts);
g = gimple_build_assign (mf_base, t);
gimple_seq_add_stmt (&seq, g);
/* Build: __mf_limit = (uintptr_t) <limit address expression>. */
- t = fold_convert (mf_uintptr_type, unshare_expr (limit));
+ t = fold_convert_loc (location, mf_uintptr_type,
+ unshare_expr (limit));
t = force_gimple_operand (t, &stmts, false, NULL_TREE);
gimple_seq_add_seq (&seq, stmts);
g = gimple_build_assign (mf_limit, t);
t = build2 (TRUTH_OR_EXPR, boolean_type_node, t, u);
t = force_gimple_operand (t, &stmts, false, NULL_TREE);
gimple_seq_add_seq (&seq, stmts);
- cond = create_tmp_var (boolean_type_node, "__mf_unlikely_cond");
+ cond = make_rename_temp (boolean_type_node, "__mf_unlikely_cond");
g = gimple_build_assign (cond, t);
gimple_set_location (g, location);
gimple_seq_add_stmt (&seq, g);
/* u is a string, so it is already a gimple value. */
u = mf_file_function_line_tree (location);
/* NB: we pass the overall [base..limit] range to mf_check. */
- v = fold_build2 (PLUS_EXPR, mf_uintptr_type,
- fold_build2 (MINUS_EXPR, mf_uintptr_type, mf_limit, mf_base),
+ v = fold_build2_loc (location, PLUS_EXPR, mf_uintptr_type,
+ fold_build2_loc (location,
+ MINUS_EXPR, mf_uintptr_type, mf_limit, mf_base),
build_int_cst (mf_uintptr_type, 1));
v = force_gimple_operand (v, &stmts, true, NULL_TREE);
gimple_seq_add_seq (&seq, stmts);
is necessary. Or we may have an innocent "a.b.c"
expression that must not be instrumented. We need to
recurse all the way down the nesting structure to figure it
- out: looking just at the outer node is not enough. */
+ out: looking just at the outer node is not enough. */
tree var;
int component_ref_only = (TREE_CODE (t) == COMPONENT_REF);
/* If we have a bitfield component reference, we must note the
&& (TREE_CODE (var) == ARRAY_REF
|| TREE_CODE (var) == COMPONENT_REF))
elt = var;
-
+
if (TREE_CODE (var) == ARRAY_REF)
{
component_ref_only = 0;
&& TREE_CODE (var) != STRING_CST)
return;
}
- else
+ else
{
- gcc_assert (TREE_CODE (var) == VAR_DECL
+ gcc_assert (TREE_CODE (var) == VAR_DECL
|| TREE_CODE (var) == PARM_DECL
|| TREE_CODE (var) == RESULT_DECL
|| TREE_CODE (var) == STRING_CST);
if (TREE_CODE (DECL_SIZE_UNIT (field)) == INTEGER_CST)
size = DECL_SIZE_UNIT (field);
-
+
if (elt)
elt = build1 (ADDR_EXPR, build_pointer_type (TREE_TYPE (elt)),
elt);
- addr = fold_convert (ptr_type_node, elt ? elt : base);
- addr = fold_build2 (POINTER_PLUS_EXPR, ptr_type_node,
- addr, fold_convert (sizetype,
- byte_position (field)));
+ addr = fold_convert_loc (location, ptr_type_node, elt ? elt : base);
+ addr = fold_build2_loc (location, POINTER_PLUS_EXPR, ptr_type_node,
+ addr, fold_convert_loc (location, sizetype,
+ byte_position (field)));
}
else
addr = build1 (ADDR_EXPR, build_pointer_type (type), t);
- limit = fold_build2 (MINUS_EXPR, mf_uintptr_type,
- fold_build2 (PLUS_EXPR, mf_uintptr_type,
+ limit = fold_build2_loc (location, MINUS_EXPR, mf_uintptr_type,
+ fold_build2_loc (location, PLUS_EXPR, mf_uintptr_type,
convert (mf_uintptr_type, addr),
size),
integer_one_node);
case INDIRECT_REF:
addr = TREE_OPERAND (t, 0);
base = addr;
- limit = fold_build2 (POINTER_PLUS_EXPR, ptr_type_node,
- fold_build2 (POINTER_PLUS_EXPR, ptr_type_node, base,
+ limit = fold_build2_loc (location, POINTER_PLUS_EXPR, ptr_type_node,
+ fold_build2_loc (location,
+ POINTER_PLUS_EXPR, ptr_type_node, base,
size),
size_int (-1));
break;
case TARGET_MEM_REF:
addr = tree_mem_ref_addr (ptr_type_node, t);
base = addr;
- limit = fold_build2 (POINTER_PLUS_EXPR, ptr_type_node,
- fold_build2 (POINTER_PLUS_EXPR, ptr_type_node, base,
+ limit = fold_build2_loc (location, POINTER_PLUS_EXPR, ptr_type_node,
+ fold_build2_loc (location,
+ POINTER_PLUS_EXPR, ptr_type_node, base,
size),
size_int (-1));
break;
bpu = bitsize_int (BITS_PER_UNIT);
ofs = convert (bitsizetype, TREE_OPERAND (t, 2));
- rem = size_binop (TRUNC_MOD_EXPR, ofs, bpu);
- ofs = fold_convert (sizetype, size_binop (TRUNC_DIV_EXPR, ofs, bpu));
+ rem = size_binop_loc (location, TRUNC_MOD_EXPR, ofs, bpu);
+ ofs = fold_convert_loc (location,
+ sizetype,
+ size_binop_loc (location,
+ TRUNC_DIV_EXPR, ofs, bpu));
size = convert (bitsizetype, TREE_OPERAND (t, 1));
- size = size_binop (PLUS_EXPR, size, rem);
- size = size_binop (CEIL_DIV_EXPR, size, bpu);
+ size = size_binop_loc (location, PLUS_EXPR, size, rem);
+ size = size_binop_loc (location, CEIL_DIV_EXPR, size, bpu);
size = convert (sizetype, size);
addr = TREE_OPERAND (TREE_OPERAND (t, 0), 0);
addr = convert (ptr_type_node, addr);
- addr = fold_build2 (POINTER_PLUS_EXPR, ptr_type_node, addr, ofs);
+ addr = fold_build2_loc (location, POINTER_PLUS_EXPR,
+ ptr_type_node, addr, ofs);
base = addr;
- limit = fold_build2 (POINTER_PLUS_EXPR, ptr_type_node,
- fold_build2 (POINTER_PLUS_EXPR, ptr_type_node,
+ limit = fold_build2_loc (location, POINTER_PLUS_EXPR, ptr_type_node,
+ fold_build2_loc (location,
+ POINTER_PLUS_EXPR, ptr_type_node,
base, size),
size_int (-1));
}
mf_build_check_statement_for (base, limit, iter, location, dirflag);
}
-
+/* Transform the statements of the current function:
+ 1) Memory references.
+ 2) BUILT_IN_ALLOCA calls (marked not inlineable).
+*/
static void
-mf_xform_derefs (void)
+mf_xform_statements (void)
{
basic_block bb, next;
gimple_stmt_iterator i;
}
break;
+ case GIMPLE_CALL:
+ {
+ tree fndecl = gimple_call_fndecl (s);
+ if (fndecl && (DECL_FUNCTION_CODE (fndecl) == BUILT_IN_ALLOCA))
+ gimple_call_set_cannot_inline (s, true);
+ }
+ break;
+
default:
;
}
while (decl != NULL_TREE)
{
- if (mf_decl_eligible_p (decl)
+ if (mf_decl_eligible_p (decl)
/* Not already processed. */
&& ! mf_marked_p (decl)
/* Automatic variable. */
gimplified when we got here. */
size = convert (size_type_node, TYPE_SIZE_UNIT (TREE_TYPE (decl)));
gcc_assert (is_gimple_val (size));
-
+
unregister_fncall_param =
mf_mark (build1 (ADDR_EXPR,
size,
build_int_cst (NULL_TREE, 3),
variable_name);
-
+
/* Accumulate the two calls. */
gimple_set_location (register_fncall, location);
{
if (!DECL_ARTIFICIAL (decl))
warning (OPT_Wmudflap,
- "mudflap cannot track %qs in stub function",
- IDENTIFIER_POINTER (DECL_NAME (decl)));
+ "mudflap cannot track %qE in stub function",
+ DECL_NAME (decl));
}
else
{
arg,
convert (size_type_node, object_size),
/* __MF_TYPE_STATIC */
- build_int_cst (NULL_TREE, 4),
+ build_int_cst (NULL_TREE, 4),
varname);
append_to_statement_list (call_stmt, &enqueued_call_stmt_chain);
tree call2_stmt = build_call_expr (mf_init_fndecl, 0);
append_to_statement_list (call2_stmt, &ctor_statements);
}
-
+
/* If appropriate, call __mf_set_options to pass along read-ignore mode. */
if (flag_mudflap_ignore_reads)
{
if (! COMPLETE_TYPE_P (TREE_TYPE (obj)))
{
warning (OPT_Wmudflap,
- "mudflap cannot track unknown size extern %qs",
- IDENTIFIER_POINTER (DECL_NAME (obj)));
+ "mudflap cannot track unknown size extern %qE",
+ DECL_NAME (obj));
continue;
}
-
- mudflap_register_call (obj,
+
+ mudflap_register_call (obj,
size_in_bytes (TREE_TYPE (obj)),
mf_varname_tree (obj));
}
enqueued_call_stmt_chain = NULL_TREE;
}
- cgraph_build_static_cdtor ('I', ctor_statements,
+ cgraph_build_static_cdtor ('I', ctor_statements,
MAX_RESERVED_INIT_PRIORITY-1);
}
return flag_mudflap != 0;
}
-struct gimple_opt_pass pass_mudflap_1 =
+struct gimple_opt_pass pass_mudflap_1 =
{
{
GIMPLE_PASS,
NULL, /* sub */
NULL, /* next */
0, /* static_pass_number */
- 0, /* tv_id */
+ TV_NONE, /* tv_id */
PROP_gimple_any, /* properties_required */
0, /* properties_provided */
0, /* properties_destroyed */
}
};
-struct gimple_opt_pass pass_mudflap_2 =
+struct gimple_opt_pass pass_mudflap_2 =
{
{
GIMPLE_PASS,
NULL, /* sub */
NULL, /* next */
0, /* static_pass_number */
- 0, /* tv_id */
- PROP_gimple_leh, /* properties_required */
+ TV_NONE, /* tv_id */
+ PROP_ssa | PROP_cfg | PROP_gimple_leh,/* properties_required */
0, /* properties_provided */
0, /* properties_destroyed */
0, /* todo_flags_start */
TODO_verify_flow | TODO_verify_stmts
- | TODO_dump_func /* todo_flags_finish */
+ | TODO_dump_func | TODO_update_ssa /* todo_flags_finish */
}
};