if (optimize_bb_for_speed_p (bb)
&& occ->recip_def && use_stmt != occ->recip_def_stmt)
{
+ gimple_stmt_iterator gsi = gsi_for_stmt (use_stmt);
gimple_assign_set_rhs_code (use_stmt, MULT_EXPR);
SET_USE (use_p, occ->recip_def);
- fold_stmt_inplace (use_stmt);
+ fold_stmt_inplace (&gsi);
update_stmt (use_stmt);
}
}
FOR_EACH_IMM_USE_STMT (stmt, ui, arg1)
{
+ gimple_stmt_iterator gsi = gsi_for_stmt (stmt);
gimple_assign_set_rhs_code (stmt, MULT_EXPR);
- fold_stmt_inplace (stmt);
+ fold_stmt_inplace (&gsi);
update_stmt (stmt);
}
}
if (sizeof (HOST_WIDEST_INT) < 8)
return 0;
- bswap32_p = (built_in_decls[BUILT_IN_BSWAP32]
+ bswap32_p = (builtin_decl_explicit_p (BUILT_IN_BSWAP32)
&& optab_handler (bswap_optab, SImode) != CODE_FOR_nothing);
- bswap64_p = (built_in_decls[BUILT_IN_BSWAP64]
+ bswap64_p = (builtin_decl_explicit_p (BUILT_IN_BSWAP64)
&& (optab_handler (bswap_optab, DImode) != CODE_FOR_nothing
|| (bswap32_p && word_mode == SImode)));
assumes that the return and argument type are the same. */
if (bswap32_p)
{
- tree fndecl = built_in_decls[BUILT_IN_BSWAP32];
+ tree fndecl = builtin_decl_explicit (BUILT_IN_BSWAP32);
bswap32_type = TREE_VALUE (TYPE_ARG_TYPES (TREE_TYPE (fndecl)));
}
if (bswap64_p)
{
- tree fndecl = built_in_decls[BUILT_IN_BSWAP64];
+ tree fndecl = builtin_decl_explicit (BUILT_IN_BSWAP64);
bswap64_type = TREE_VALUE (TYPE_ARG_TYPES (TREE_TYPE (fndecl)));
}
case 32:
if (bswap32_p)
{
- fndecl = built_in_decls[BUILT_IN_BSWAP32];
+ fndecl = builtin_decl_explicit (BUILT_IN_BSWAP32);
bswap_type = bswap32_type;
}
break;
case 64:
if (bswap64_p)
{
- fndecl = built_in_decls[BUILT_IN_BSWAP64];
+ fndecl = builtin_decl_explicit (BUILT_IN_BSWAP64);
bswap_type = bswap64_type;
}
break;
}
};
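As an illustration of what the bswap detection above is looking for — a sketch of mine, not part of the patch — an open-coded byte swap like the following can be rewritten as a call to the built-in once bswap32_p is true and the target has a bswap instruction:

#include <stdint.h>

/* Illustrative only: a hand-written 32-bit byte swap of the shape the pass
   recognizes and replaces with __builtin_bswap32.  */
uint32_t
swap32 (uint32_t x)
{
  return (x >> 24)
         | ((x >> 8) & 0x0000ff00)
         | ((x << 8) & 0x00ff0000)
         | (x << 24);
}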
-/* Return true if RHS is a suitable operand for a widening multiplication.
+/* Return true if RHS is a suitable operand for a widening multiplication,
+ assuming a target type of TYPE.
There are two cases:
- RHS makes some value at least twice as wide. Store that value
in *NEW_RHS_OUT if so, and store its type in *TYPE_OUT.

- RHS is an integer constant. Store that value in *NEW_RHS_OUT if so,
but leave *TYPE_OUT untouched. */
static bool
-is_widening_mult_rhs_p (tree rhs, tree *type_out, tree *new_rhs_out)
+is_widening_mult_rhs_p (tree type, tree rhs, tree *type_out,
+ tree *new_rhs_out)
{
gimple stmt;
- tree type, type1, rhs1;
+ tree type1, rhs1;
enum tree_code rhs_code;
if (TREE_CODE (rhs) == SSA_NAME)
{
- type = TREE_TYPE (rhs);
stmt = SSA_NAME_DEF_STMT (rhs);
- if (!is_gimple_assign (stmt))
- return false;
+ if (is_gimple_assign (stmt))
+ {
+ rhs_code = gimple_assign_rhs_code (stmt);
+ if (TREE_CODE (type) == INTEGER_TYPE
+ ? !CONVERT_EXPR_CODE_P (rhs_code)
+ : rhs_code != FIXED_CONVERT_EXPR)
+ rhs1 = rhs;
+ else
+ {
+ rhs1 = gimple_assign_rhs1 (stmt);
- rhs_code = gimple_assign_rhs_code (stmt);
- if (TREE_CODE (type) == INTEGER_TYPE
- ? !CONVERT_EXPR_CODE_P (rhs_code)
- : rhs_code != FIXED_CONVERT_EXPR)
- return false;
+ if (TREE_CODE (rhs1) == INTEGER_CST)
+ {
+ *new_rhs_out = rhs1;
+ *type_out = NULL;
+ return true;
+ }
+ }
+ }
+ else
+ rhs1 = rhs;
- rhs1 = gimple_assign_rhs1 (stmt);
type1 = TREE_TYPE (rhs1);
+
if (TREE_CODE (type1) != TREE_CODE (type)
|| TYPE_PRECISION (type1) * 2 > TYPE_PRECISION (type))
return false;
return false;
}
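To illustrate the integer-constant handling added above — a sketch of mine, not from the patch — a multiply where one operand is a plain constant can now still qualify as widening; the constant is handed back through *new_rhs_out with *type_out left NULL, and its type is settled from the other operand later:

/* Illustrative C shape only: 10 is not itself widened from anything, but it
   fits the narrower operand type, so the multiply can still become a
   widening multiply.  */
long long
widen_by_const (int a)
{
  return (long long) a * 10;
}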
-/* Return true if STMT performs a widening multiplication. If so,
- store the unwidened types of the operands in *TYPE1_OUT and *TYPE2_OUT
- respectively. Also fill *RHS1_OUT and *RHS2_OUT such that converting
- those operands to types *TYPE1_OUT and *TYPE2_OUT would give the
- operands of the multiplication. */
+/* Return true if STMT performs a widening multiplication, assuming the
+ output type is TYPE. If so, store the unwidened types of the operands
+ in *TYPE1_OUT and *TYPE2_OUT respectively. Also fill *RHS1_OUT and
+ *RHS2_OUT such that converting those operands to types *TYPE1_OUT
+ and *TYPE2_OUT would give the operands of the multiplication. */
static bool
is_widening_mult_p (gimple stmt,
tree *type1_out, tree *rhs1_out,
tree *type2_out, tree *rhs2_out)
{
- tree type;
+ tree type = TREE_TYPE (gimple_assign_lhs (stmt));
- type = TREE_TYPE (gimple_assign_lhs (stmt));
if (TREE_CODE (type) != INTEGER_TYPE
&& TREE_CODE (type) != FIXED_POINT_TYPE)
return false;
- if (!is_widening_mult_rhs_p (gimple_assign_rhs1 (stmt), type1_out, rhs1_out))
+ if (!is_widening_mult_rhs_p (type, gimple_assign_rhs1 (stmt), type1_out,
+ rhs1_out))
return false;
- if (!is_widening_mult_rhs_p (gimple_assign_rhs2 (stmt), type2_out, rhs2_out))
+ if (!is_widening_mult_rhs_p (type, gimple_assign_rhs2 (stmt), type2_out,
+ rhs2_out))
return false;
if (*type1_out == NULL)
*type2_out = *type1_out;
}
- /* FIXME: remove this restriction. */
- if (TYPE_PRECISION (*type1_out) != TYPE_PRECISION (*type2_out))
- return false;
+ /* Ensure that the larger of the two operands comes first. */
+ if (TYPE_PRECISION (*type1_out) < TYPE_PRECISION (*type2_out))
+ {
+ tree tmp;
+ tmp = *type1_out;
+ *type1_out = *type2_out;
+ *type2_out = tmp;
+ tmp = *rhs1_out;
+ *rhs1_out = *rhs2_out;
+ *rhs2_out = tmp;
+ }
return true;
}
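With the precision-equality FIXME above removed, the two multiplicands no longer need the same width; a minimal sketch (mine, not from the patch) of such a case, where the swap keeps the wider type in *type1_out:

/* Illustrative only: an int-by-short widening multiply.  The operand types
   have different precisions; the wider one is ordered first and the
   narrower one is cast up by convert_mult_to_widen as needed.  */
long long
mixed_width (int a, short b)
{
  return (long long) a * b;
}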
static bool
convert_mult_to_widen (gimple stmt, gimple_stmt_iterator *gsi)
{
- tree lhs, rhs1, rhs2, type, type1, type2, tmp;
+ tree lhs, rhs1, rhs2, type, type1, type2, tmp = NULL;
enum insn_code handler;
enum machine_mode to_mode, from_mode, actual_mode;
optab op;
int actual_precision;
location_t loc = gimple_location (stmt);
+ bool from_unsigned1, from_unsigned2;
lhs = gimple_assign_lhs (stmt);
type = TREE_TYPE (lhs);
to_mode = TYPE_MODE (type);
from_mode = TYPE_MODE (type1);
+ from_unsigned1 = TYPE_UNSIGNED (type1);
+ from_unsigned2 = TYPE_UNSIGNED (type2);
- if (TYPE_UNSIGNED (type1) && TYPE_UNSIGNED (type2))
+ if (from_unsigned1 && from_unsigned2)
op = umul_widen_optab;
- else if (!TYPE_UNSIGNED (type1) && !TYPE_UNSIGNED (type2))
+ else if (!from_unsigned1 && !from_unsigned2)
op = smul_widen_optab;
else
op = usmul_widen_optab;
0, &actual_mode);
if (handler == CODE_FOR_nothing)
- return false;
+ {
+ if (op != smul_widen_optab)
+ {
+ /* We can use a signed multiply with unsigned types as long as
+ there is a wider mode to use, or it is the smaller of the two
+ types that is unsigned. Note that type1 >= type2, always. */
+ if ((TYPE_UNSIGNED (type1)
+ && TYPE_PRECISION (type1) == GET_MODE_PRECISION (from_mode))
+ || (TYPE_UNSIGNED (type2)
+ && TYPE_PRECISION (type2) == GET_MODE_PRECISION (from_mode)))
+ {
+ from_mode = GET_MODE_WIDER_MODE (from_mode);
+ if (GET_MODE_SIZE (to_mode) <= GET_MODE_SIZE (from_mode))
+ return false;
+ }
+
+ op = smul_widen_optab;
+ handler = find_widening_optab_handler_and_mode (op, to_mode,
+ from_mode, 0,
+ &actual_mode);
+
+ if (handler == CODE_FOR_nothing)
+ return false;
+
+ from_unsigned1 = from_unsigned2 = false;
+ }
+ else
+ return false;
+ }
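A sketch (mine, not part of the patch) of the case this fallback is aimed at: a mixed-sign widening multiply on a target without usmul_widen_optab. Because the unsigned operand is the narrower of the two, it can be zero-extended into a signed operand without changing its value, and an ordinary signed widening multiply does the job:

/* Illustrative only: 'b' is unsigned but only 16 bits wide, so it is
   representable exactly in a signed 32-bit operand and a signed
   32x32->64 widening multiply computes the correct result.  */
long long
mixed_sign (int a, unsigned short b)
{
  return (long long) a * b;
}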
/* Ensure that the inputs to the handler are in the correct precision
for the opcode. This will be the full mode size. */
actual_precision = GET_MODE_PRECISION (actual_mode);
- if (actual_precision != TYPE_PRECISION (type1))
+ if (actual_precision != TYPE_PRECISION (type1)
+ || from_unsigned1 != TYPE_UNSIGNED (type1))
{
tmp = create_tmp_var (build_nonstandard_integer_type
- (actual_precision, TYPE_UNSIGNED (type1)),
+ (actual_precision, from_unsigned1),
NULL);
rhs1 = build_and_insert_cast (gsi, loc, tmp, rhs1);
-
+ }
+ if (actual_precision != TYPE_PRECISION (type2)
+ || from_unsigned2 != TYPE_UNSIGNED (type2))
+ {
/* Reuse the same type info, if possible. */
- if (TYPE_UNSIGNED (type1) != TYPE_UNSIGNED (type2))
+ if (!tmp || from_unsigned1 != from_unsigned2)
tmp = create_tmp_var (build_nonstandard_integer_type
- (actual_precision, TYPE_UNSIGNED (type2)),
+ (actual_precision, from_unsigned2),
NULL);
rhs2 = build_and_insert_cast (gsi, loc, tmp, rhs2);
}
+ /* Handle constants. */
+ if (TREE_CODE (rhs1) == INTEGER_CST)
+ rhs1 = fold_convert (type1, rhs1);
+ if (TREE_CODE (rhs2) == INTEGER_CST)
+ rhs2 = fold_convert (type2, rhs2);
+
gimple_assign_set_rhs1 (stmt, rhs1);
gimple_assign_set_rhs2 (stmt, rhs2);
gimple_assign_set_rhs_code (stmt, WIDEN_MULT_EXPR);
enum tree_code code)
{
gimple rhs1_stmt = NULL, rhs2_stmt = NULL;
- tree type, type1, type2, tmp;
+ gimple conv1_stmt = NULL, conv2_stmt = NULL, conv_stmt;
+ tree type, type1, type2, optype, tmp = NULL;
tree lhs, rhs1, rhs2, mult_rhs1, mult_rhs2, add_rhs;
enum tree_code rhs1_code = ERROR_MARK, rhs2_code = ERROR_MARK;
optab this_optab;
enum machine_mode to_mode, from_mode, actual_mode;
location_t loc = gimple_location (stmt);
int actual_precision;
+ bool from_unsigned1, from_unsigned2;
lhs = gimple_assign_lhs (stmt);
type = TREE_TYPE (lhs);
if (is_gimple_assign (rhs1_stmt))
rhs1_code = gimple_assign_rhs_code (rhs1_stmt);
}
- else
- return false;
if (TREE_CODE (rhs2) == SSA_NAME)
{
if (is_gimple_assign (rhs2_stmt))
rhs2_code = gimple_assign_rhs_code (rhs2_stmt);
}
- else
- return false;
+
+ /* Allow for one conversion statement between the multiply
+ and addition/subtraction statement. If there is more than
+ one conversion then we assume it would invalidate this
+ transformation. If that's not the case then it should have
+ been folded before now. */
+ if (CONVERT_EXPR_CODE_P (rhs1_code))
+ {
+ conv1_stmt = rhs1_stmt;
+ rhs1 = gimple_assign_rhs1 (rhs1_stmt);
+ if (TREE_CODE (rhs1) == SSA_NAME)
+ {
+ rhs1_stmt = SSA_NAME_DEF_STMT (rhs1);
+ if (is_gimple_assign (rhs1_stmt))
+ rhs1_code = gimple_assign_rhs_code (rhs1_stmt);
+ }
+ else
+ return false;
+ }
+ if (CONVERT_EXPR_CODE_P (rhs2_code))
+ {
+ conv2_stmt = rhs2_stmt;
+ rhs2 = gimple_assign_rhs1 (rhs2_stmt);
+ if (TREE_CODE (rhs2) == SSA_NAME)
+ {
+ rhs2_stmt = SSA_NAME_DEF_STMT (rhs2);
+ if (is_gimple_assign (rhs2_stmt))
+ rhs2_code = gimple_assign_rhs_code (rhs2_stmt);
+ }
+ else
+ return false;
+ }
/* If code is WIDEN_MULT_EXPR then it would seem unnecessary to call
is_widening_mult_p, but we still need the rhs values it returns.
&type2, &mult_rhs2))
return false;
add_rhs = rhs2;
+ conv_stmt = conv1_stmt;
}
else if (rhs2_code == MULT_EXPR || rhs2_code == WIDEN_MULT_EXPR)
{
&type2, &mult_rhs2))
return false;
add_rhs = rhs1;
+ conv_stmt = conv2_stmt;
}
else
return false;
to_mode = TYPE_MODE (type);
from_mode = TYPE_MODE (type1);
+ from_unsigned1 = TYPE_UNSIGNED (type1);
+ from_unsigned2 = TYPE_UNSIGNED (type2);
+ optype = type1;
- if (TYPE_UNSIGNED (type1) != TYPE_UNSIGNED (type2))
- return false;
+ /* There's no such thing as a mixed sign madd yet, so use a wider mode. */
+ if (from_unsigned1 != from_unsigned2)
+ {
+ if (!INTEGRAL_TYPE_P (type))
+ return false;
+ /* We can use a signed multiply with unsigned types as long as
+ there is a wider mode to use, or it is the smaller of the two
+ types that is unsigned. Note that type1 >= type2, always. */
+ if ((from_unsigned1
+ && TYPE_PRECISION (type1) == GET_MODE_PRECISION (from_mode))
+ || (from_unsigned2
+ && TYPE_PRECISION (type2) == GET_MODE_PRECISION (from_mode)))
+ {
+ from_mode = GET_MODE_WIDER_MODE (from_mode);
+ if (GET_MODE_SIZE (from_mode) >= GET_MODE_SIZE (to_mode))
+ return false;
+ }
+
+ from_unsigned1 = from_unsigned2 = false;
+ optype = build_nonstandard_integer_type (GET_MODE_PRECISION (from_mode),
+ false);
+ }
+
+ /* If there was a conversion between the multiply and addition
+ then we need to make sure it fits a multiply-and-accumulate.
+ This should be a single mode change which does not change the
+ value. */
+ if (conv_stmt)
+ {
+ /* We use the original, unmodified data types for this. */
+ tree from_type = TREE_TYPE (gimple_assign_rhs1 (conv_stmt));
+ tree to_type = TREE_TYPE (gimple_assign_lhs (conv_stmt));
+ int data_size = TYPE_PRECISION (type1) + TYPE_PRECISION (type2);
+ bool is_unsigned = TYPE_UNSIGNED (type1) && TYPE_UNSIGNED (type2);
+
+ if (TYPE_PRECISION (from_type) > TYPE_PRECISION (to_type))
+ {
+ /* Conversion is a truncate. */
+ if (TYPE_PRECISION (to_type) < data_size)
+ return false;
+ }
+ else if (TYPE_PRECISION (from_type) < TYPE_PRECISION (to_type))
+ {
+ /* Conversion is an extend. Check it's the right sort. */
+ if (TYPE_UNSIGNED (from_type) != is_unsigned
+ && !(is_unsigned && TYPE_PRECISION (from_type) > data_size))
+ return false;
+ }
+ /* else convert is a no-op for our purposes. */
+ }
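A sketch (mine, not from the patch) of the single-conversion shape the check above is meant to accept: the multiply is done in a narrower type and its result is widened exactly once before being accumulated:

/* Illustrative only: the 16x16 multiply happens in int, and the result is
   sign-extended to 64 bits before the add.  That one extension does not
   change the value, so the whole expression can become a widening
   multiply-accumulate where the target provides one.  */
long long
mul_acc (short a, short b, long long c)
{
  return c + a * b;
}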
/* Verify that the machine can perform a widening multiply
accumulate in this mode/signedness combination, otherwise
this transformation is likely to pessimize code. */
- this_optab = optab_for_tree_code (wmult_code, type1, optab_default);
+ this_optab = optab_for_tree_code (wmult_code, optype, optab_default);
handler = find_widening_optab_handler_and_mode (this_optab, to_mode,
from_mode, 0, &actual_mode);
/* Ensure that the inputs to the handler are in the correct precision
for the opcode. This will be the full mode size. */
actual_precision = GET_MODE_PRECISION (actual_mode);
- if (actual_precision != TYPE_PRECISION (type1))
+ if (actual_precision != TYPE_PRECISION (type1)
+ || from_unsigned1 != TYPE_UNSIGNED (type1))
{
tmp = create_tmp_var (build_nonstandard_integer_type
- (actual_precision, TYPE_UNSIGNED (type1)),
+ (actual_precision, from_unsigned1),
NULL);
-
mult_rhs1 = build_and_insert_cast (gsi, loc, tmp, mult_rhs1);
+ }
+ if (actual_precision != TYPE_PRECISION (type2)
+ || from_unsigned2 != TYPE_UNSIGNED (type2))
+ {
+ if (!tmp || from_unsigned1 != from_unsigned2)
+ tmp = create_tmp_var (build_nonstandard_integer_type
+ (actual_precision, from_unsigned2),
+ NULL);
mult_rhs2 = build_and_insert_cast (gsi, loc, tmp, mult_rhs2);
}
+ if (!useless_type_conversion_p (type, TREE_TYPE (add_rhs)))
+ add_rhs = build_and_insert_cast (gsi, loc, create_tmp_var (type, NULL),
+ add_rhs);
+
+ /* Handle constants. */
+ if (TREE_CODE (mult_rhs1) == INTEGER_CST)
+ mult_rhs1 = fold_convert (type1, mult_rhs1);
+ if (TREE_CODE (mult_rhs2) == INTEGER_CST)
+ mult_rhs2 = fold_convert (type2, mult_rhs2);
+
gimple_assign_set_rhs_with_ops_1 (gsi, wmult_code, mult_rhs1, mult_rhs2,
add_rhs);
update_stmt (gsi_stmt (*gsi));
if (optab_handler (fma_optab, TYPE_MODE (type)) == CODE_FOR_nothing)
return false;
+ /* If the multiplication has zero uses, it is probably kept around because
+ of -fnon-call-exceptions. Don't optimize it away in that case;
+ that is DCE's job. */
+ if (has_zero_uses (mul_result))
+ return false;
+
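For context, a sketch (mine, not from the patch) of the shape the FMA conversion looks for: a multiplication whose uses feed additions, which can be fused when fma_optab supports the mode:

/* Illustrative only: with a usable hardware fma for double, the multiply
   below becomes dead once its single use is folded into the fused
   multiply-add.  */
double
fma_candidate (double x, double y, double z)
{
  return x * y + z;
}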
/* Make sure that the multiplication statement becomes dead after
the transformation, so that all uses are transformed to FMAs.
This means we assume that an FMA operation has the same cost