/* Indirection-related instrumentation. */
static void mf_decl_cache_locals (void);
static void mf_decl_clear_locals (void);
-static void mf_xform_derefs (void);
+static void mf_xform_statements (void);
static unsigned int execute_mudflap_function_ops (void);
/* Addressable variables instrumentation. */
/* ------------------------------------------------------------------------ */
-/* Memory reference transforms. Perform the mudflap indirection-related
- tree transforms on the current function.
-
- This is the second part of the mudflap instrumentation. It works on
+/* This is the second part of the mudflap instrumentation. It works on
low-level GIMPLE using the CFG, because we want to run this pass after
tree optimizations have been performed, but we have to preserve the CFG
- for expansion from trees to RTL. */
+ for expansion from trees to RTL.
+   Below is the list of transformations performed on statements in the
+   current function:
+
+   1) Memory reference transforms: perform the mudflap indirection-related
+      tree transforms on each memory reference.
+
+   2) Mark BUILT_IN_ALLOCA calls as not inlinable.  */
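+
+/* For illustration only (a rough sketch, not the exact GIMPLE sequence
+   that mf_build_check_statement_for emits): a checked access such as
+   *p, touching sizeof (*p) bytes, is conceptually guarded by
+
+     __mf_base = (uintptr_t) p;
+     __mf_limit = __mf_base + sizeof (*p) - 1;
+     e = &__mf_lookup_cache[(__mf_base >> __mf_lc_shift) & __mf_lc_mask];
+     if (e->low > __mf_base || e->high < __mf_limit)
+       __mf_check ((void *) __mf_base, __mf_limit - __mf_base + 1,
+                   dirflag, location_string);
+
+   placed immediately before the original access, where dirflag and
+   location_string stand for the access direction and the
+   "file:line (function)" string handed to the libmudflap runtime.  */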
static unsigned int
execute_mudflap_function_ops (void)
if (! flag_mudflap_threads)
mf_decl_cache_locals ();
- mf_xform_derefs ();
+ mf_xform_statements ();
if (! flag_mudflap_threads)
mf_decl_clear_locals ();
/* Build: __mf_base = (uintptr_t) <base address expression>. */
seq = gimple_seq_alloc ();
- t = fold_convert (mf_uintptr_type, unshare_expr (base));
+ t = fold_convert_loc (location, mf_uintptr_type,
+ unshare_expr (base));
t = force_gimple_operand (t, &stmts, false, NULL_TREE);
gimple_seq_add_seq (&seq, stmts);
g = gimple_build_assign (mf_base, t);
gimple_seq_add_stmt (&seq, g);
/* Build: __mf_limit = (uintptr_t) <limit address expression>. */
- t = fold_convert (mf_uintptr_type, unshare_expr (limit));
+ t = fold_convert_loc (location, mf_uintptr_type,
+ unshare_expr (limit));
t = force_gimple_operand (t, &stmts, false, NULL_TREE);
gimple_seq_add_seq (&seq, stmts);
g = gimple_build_assign (mf_limit, t);
/* u is a string, so it is already a gimple value. */
u = mf_file_function_line_tree (location);
/* NB: we pass the overall [base..limit] range to mf_check. */
- v = fold_build2 (PLUS_EXPR, mf_uintptr_type,
- fold_build2 (MINUS_EXPR, mf_uintptr_type, mf_limit, mf_base),
+ v = fold_build2_loc (location, PLUS_EXPR, mf_uintptr_type,
+ fold_build2_loc (location,
+ MINUS_EXPR, mf_uintptr_type, mf_limit, mf_base),
build_int_cst (mf_uintptr_type, 1));
v = force_gimple_operand (v, &stmts, true, NULL_TREE);
gimple_seq_add_seq (&seq, stmts);
is necessary. Or we may have an innocent "a.b.c"
expression that must not be instrumented. We need to
recurse all the way down the nesting structure to figure it
- out: looking just at the outer node is not enough. */
+ out: looking just at the outer node is not enough. */
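+
+        /* For instance (illustration): both "p->a.b" and "s.a.b" have a
+           COMPONENT_REF as their outermost node; only by walking down to
+           the base do we learn that the former dereferences a pointer
+           (and so needs a runtime check) while the latter stays within a
+           declared object and needs none.  */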
tree var;
int component_ref_only = (TREE_CODE (t) == COMPONENT_REF);
/* If we have a bitfield component reference, we must note the
&& (TREE_CODE (var) == ARRAY_REF
|| TREE_CODE (var) == COMPONENT_REF))
elt = var;
-
+
if (TREE_CODE (var) == ARRAY_REF)
{
component_ref_only = 0;
&& TREE_CODE (var) != STRING_CST)
return;
}
- else
+ else
{
- gcc_assert (TREE_CODE (var) == VAR_DECL
+ gcc_assert (TREE_CODE (var) == VAR_DECL
|| TREE_CODE (var) == PARM_DECL
|| TREE_CODE (var) == RESULT_DECL
|| TREE_CODE (var) == STRING_CST);
if (TREE_CODE (DECL_SIZE_UNIT (field)) == INTEGER_CST)
size = DECL_SIZE_UNIT (field);
-
+
if (elt)
elt = build1 (ADDR_EXPR, build_pointer_type (TREE_TYPE (elt)),
elt);
- addr = fold_convert (ptr_type_node, elt ? elt : base);
- addr = fold_build2 (POINTER_PLUS_EXPR, ptr_type_node,
- addr, fold_convert (sizetype,
- byte_position (field)));
+ addr = fold_convert_loc (location, ptr_type_node, elt ? elt : base);
+ addr = fold_build2_loc (location, POINTER_PLUS_EXPR, ptr_type_node,
+ addr, fold_convert_loc (location, sizetype,
+ byte_position (field)));
}
else
addr = build1 (ADDR_EXPR, build_pointer_type (type), t);
- limit = fold_build2 (MINUS_EXPR, mf_uintptr_type,
- fold_build2 (PLUS_EXPR, mf_uintptr_type,
+ limit = fold_build2_loc (location, MINUS_EXPR, mf_uintptr_type,
+ fold_build2_loc (location, PLUS_EXPR, mf_uintptr_type,
convert (mf_uintptr_type, addr),
size),
integer_one_node);
case INDIRECT_REF:
addr = TREE_OPERAND (t, 0);
base = addr;
- limit = fold_build2 (POINTER_PLUS_EXPR, ptr_type_node,
- fold_build2 (POINTER_PLUS_EXPR, ptr_type_node, base,
+ limit = fold_build2_loc (location, POINTER_PLUS_EXPR, ptr_type_node,
+ fold_build2_loc (location,
+ POINTER_PLUS_EXPR, ptr_type_node, base,
size),
size_int (-1));
break;
case TARGET_MEM_REF:
addr = tree_mem_ref_addr (ptr_type_node, t);
base = addr;
- limit = fold_build2 (POINTER_PLUS_EXPR, ptr_type_node,
- fold_build2 (POINTER_PLUS_EXPR, ptr_type_node, base,
+ limit = fold_build2_loc (location, POINTER_PLUS_EXPR, ptr_type_node,
+ fold_build2_loc (location,
+ POINTER_PLUS_EXPR, ptr_type_node, base,
size),
size_int (-1));
break;
bpu = bitsize_int (BITS_PER_UNIT);
ofs = convert (bitsizetype, TREE_OPERAND (t, 2));
- rem = size_binop (TRUNC_MOD_EXPR, ofs, bpu);
- ofs = fold_convert (sizetype, size_binop (TRUNC_DIV_EXPR, ofs, bpu));
+ rem = size_binop_loc (location, TRUNC_MOD_EXPR, ofs, bpu);
+ ofs = fold_convert_loc (location,
+ sizetype,
+ size_binop_loc (location,
+ TRUNC_DIV_EXPR, ofs, bpu));
size = convert (bitsizetype, TREE_OPERAND (t, 1));
- size = size_binop (PLUS_EXPR, size, rem);
- size = size_binop (CEIL_DIV_EXPR, size, bpu);
+ size = size_binop_loc (location, PLUS_EXPR, size, rem);
+ size = size_binop_loc (location, CEIL_DIV_EXPR, size, bpu);
size = convert (sizetype, size);
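+
+	/* For example (illustrative arithmetic, assuming 8-bit units): a
+	   7-bit field at bit position 13 gives rem = 13 % 8 = 5,
+	   ofs = 13 / 8 = 1 and size = CEIL (7 + 5, 8) = 2, so the check
+	   below covers the two bytes starting one byte past the
+	   underlying address.  */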
addr = TREE_OPERAND (TREE_OPERAND (t, 0), 0);
addr = convert (ptr_type_node, addr);
- addr = fold_build2 (POINTER_PLUS_EXPR, ptr_type_node, addr, ofs);
+ addr = fold_build2_loc (location, POINTER_PLUS_EXPR,
+ ptr_type_node, addr, ofs);
base = addr;
- limit = fold_build2 (POINTER_PLUS_EXPR, ptr_type_node,
- fold_build2 (POINTER_PLUS_EXPR, ptr_type_node,
+ limit = fold_build2_loc (location, POINTER_PLUS_EXPR, ptr_type_node,
+ fold_build2_loc (location,
+ POINTER_PLUS_EXPR, ptr_type_node,
base, size),
size_int (-1));
}
mf_build_check_statement_for (base, limit, iter, location, dirflag);
}
-
+/* Transform
+   1) Memory references.
+   2) BUILT_IN_ALLOCA calls.  */
static void
-mf_xform_derefs (void)
+mf_xform_statements (void)
{
basic_block bb, next;
gimple_stmt_iterator i;
}
break;
+ case GIMPLE_CALL:
+ {
+ tree fndecl = gimple_call_fndecl (s);
+ if (fndecl && (DECL_FUNCTION_CODE (fndecl) == BUILT_IN_ALLOCA))
+ gimple_call_set_cannot_inline (s, true);
+ }
+ break;
+
default:
;
}
while (decl != NULL_TREE)
{
- if (mf_decl_eligible_p (decl)
+ if (mf_decl_eligible_p (decl)
/* Not already processed. */
&& ! mf_marked_p (decl)
/* Automatic variable. */
gimplified when we got here. */
size = convert (size_type_node, TYPE_SIZE_UNIT (TREE_TYPE (decl)));
gcc_assert (is_gimple_val (size));
-
+
unregister_fncall_param =
mf_mark (build1 (ADDR_EXPR,
size,
                                            /* __MF_TYPE_STACK */
                                            build_int_cst (NULL_TREE, 3),
variable_name);
-
+
/* Accumulate the two calls. */
gimple_set_location (register_fncall, location);
arg,
convert (size_type_node, object_size),
/* __MF_TYPE_STATIC */
- build_int_cst (NULL_TREE, 4),
+ build_int_cst (NULL_TREE, 4),
varname);
append_to_statement_list (call_stmt, &enqueued_call_stmt_chain);
tree call2_stmt = build_call_expr (mf_init_fndecl, 0);
append_to_statement_list (call2_stmt, &ctor_statements);
}
-
+
/* If appropriate, call __mf_set_options to pass along read-ignore mode. */
if (flag_mudflap_ignore_reads)
{
DECL_NAME (obj));
continue;
}
-
- mudflap_register_call (obj,
+
+ mudflap_register_call (obj,
size_in_bytes (TREE_TYPE (obj)),
mf_varname_tree (obj));
}
enqueued_call_stmt_chain = NULL_TREE;
}
- cgraph_build_static_cdtor ('I', ctor_statements,
+ cgraph_build_static_cdtor ('I', ctor_statements,
MAX_RESERVED_INIT_PRIORITY-1);
}
return flag_mudflap != 0;
}
-struct gimple_opt_pass pass_mudflap_1 =
+struct gimple_opt_pass pass_mudflap_1 =
{
{
GIMPLE_PASS,
}
};
-struct gimple_opt_pass pass_mudflap_2 =
+struct gimple_opt_pass pass_mudflap_2 =
{
{
GIMPLE_PASS,