/* Global, SSA-based optimizations using mathematical identities.
- Copyright (C) 2005, 2006, 2007, 2008, 2009 Free Software Foundation, Inc.
-
+ Copyright (C) 2005, 2006, 2007, 2008, 2009, 2010
+ Free Software Foundation, Inc.
+
This file is part of GCC.
-
+
GCC is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 3, or (at your option) any
later version.
-
+
GCC is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
for more details.
-
+
You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3. If not see
<http://www.gnu.org/licenses/>. */
#include "flags.h"
#include "tree.h"
#include "tree-flow.h"
-#include "real.h"
#include "timevar.h"
#include "tree-pass.h"
#include "alloc-pool.h"
#include "basic-block.h"
#include "target.h"
#include "diagnostic.h"
-#include "rtl.h"
-#include "expr.h"
+#include "gimple-pretty-print.h"
+
+/* FIXME: RTL headers have to be included here for optabs. */
+#include "rtl.h" /* Because optabs.h wants enum rtx_code. */
+#include "expr.h" /* Because optabs.h wants sepops. */
#include "optabs.h"
/* This structure represents one basic block that either computes a
recip_def = make_rename_temp (type, "reciptmp");
new_stmt = gimple_build_assign_with_ops (RDIV_EXPR, recip_def,
build_one_cst (type), def);
-
+
if (occ->bb_has_division)
{
/* Case 1: insert before an existing division. */
count++;
}
}
-
+
/* Do the expensive part only if we can hope to optimize something. */
threshold = targetm.min_divisions_for_recip_mul (TYPE_MODE (TREE_TYPE (def)));
if (count >= threshold)
|| DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_MD))
{
enum built_in_function code;
- bool md_code;
+ bool md_code, fail;
+ imm_use_iterator ui;
+ use_operand_p use_p;
code = DECL_FUNCTION_CODE (fndecl);
md_code = DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_MD;
if (!fndecl)
continue;
+ /* Check that all uses of the SSA name are divisions,
+ otherwise replacing the defining statement will do
+ the wrong thing. */
+ fail = false;
+ FOR_EACH_IMM_USE_FAST (use_p, ui, arg1)
+ {
+ gimple stmt2 = USE_STMT (use_p);
+ if (is_gimple_debug (stmt2))
+ continue;
+ if (!is_gimple_assign (stmt2)
+ || gimple_assign_rhs_code (stmt2) != RDIV_EXPR
+ || gimple_assign_rhs1 (stmt2) == arg1
+ || gimple_assign_rhs2 (stmt2) != arg1)
+ {
+ fail = true;
+ break;
+ }
+ }
+ if (fail)
+ continue;
+
+ gimple_replace_lhs (stmt1, arg1);
gimple_call_set_fndecl (stmt1, fndecl);
update_stmt (stmt1);
- gimple_assign_set_rhs_code (stmt, MULT_EXPR);
- fold_stmt_inplace (stmt);
- update_stmt (stmt);
+ FOR_EACH_IMM_USE_STMT (stmt, ui, arg1)
+ {
+ gimple_assign_set_rhs_code (stmt, MULT_EXPR);
+ fold_stmt_inplace (stmt);
+ update_stmt (stmt);
+ }
}
}
}
gsi = gsi_for_stmt (use_stmt);
gsi_insert_after (&gsi, stmt, GSI_SAME_STMT);
- gsi_remove (&gsi, true);
+ gsi_remove (&gsi, true);
}
VEC_free(gimple, heap, stmts);
}
};
-/* Find all expressions in the form of sqrt(a/b) and
- convert them to rsqrt(b/a). */
-
-static unsigned int
-execute_convert_to_rsqrt (void)
-{
- basic_block bb;
-
- FOR_EACH_BB (bb)
- {
- gimple_stmt_iterator gsi;
-
- for (gsi = gsi_after_labels (bb); !gsi_end_p (gsi); gsi_next (&gsi))
- {
- gimple stmt = gsi_stmt (gsi);
- tree fndecl;
-
- if (is_gimple_call (stmt)
- && gimple_call_lhs (stmt)
- && (fndecl = gimple_call_fndecl (stmt))
- && (DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL
- || DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_MD))
- {
- enum built_in_function code;
- bool md_code;
- tree arg1;
- gimple stmt1;
-
- code = DECL_FUNCTION_CODE (fndecl);
- md_code = DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_MD;
-
- fndecl = targetm.builtin_reciprocal (code, md_code, true);
- if (!fndecl)
- continue;
-
- arg1 = gimple_call_arg (stmt, 0);
-
- if (TREE_CODE (arg1) != SSA_NAME)
- continue;
-
- stmt1 = SSA_NAME_DEF_STMT (arg1);
-
- if (is_gimple_assign (stmt1)
- && gimple_assign_rhs_code (stmt1) == RDIV_EXPR)
- {
- tree arg10, arg11;
-
- arg10 = gimple_assign_rhs1 (stmt1);
- arg11 = gimple_assign_rhs2 (stmt1);
-
- /* Swap operands of RDIV_EXPR. */
- gimple_assign_set_rhs1 (stmt1, arg11);
- gimple_assign_set_rhs2 (stmt1, arg10);
- fold_stmt_inplace (stmt1);
- update_stmt (stmt1);
-
- gimple_call_set_fndecl (stmt, fndecl);
- update_stmt (stmt);
- }
- }
- }
- }
-
- return 0;
-}
-
-static bool
-gate_convert_to_rsqrt (void)
-{
- return flag_unsafe_math_optimizations && optimize;
-}
-
-struct gimple_opt_pass pass_convert_to_rsqrt =
-{
- {
- GIMPLE_PASS,
- "rsqrt", /* name */
- gate_convert_to_rsqrt, /* gate */
- execute_convert_to_rsqrt, /* execute */
- NULL, /* sub */
- NULL, /* next */
- 0, /* static_pass_number */
- TV_NONE, /* tv_id */
- PROP_ssa, /* properties_required */
- 0, /* properties_provided */
- 0, /* properties_destroyed */
- 0, /* todo_flags_start */
- TODO_dump_func | TODO_update_ssa | TODO_verify_ssa
- | TODO_verify_stmts /* todo_flags_finish */
- }
-};
-
/* A symbolic number is used to detect byte permutation and selection
patterns. Therefore the field N contains an artificial number
consisting of byte size markers:
{
/* Set up the symbolic number N by setting each byte to a
value between 1 and the byte size of rhs1. The highest
- order byte is set to 1 and the lowest order byte to
- n.size. */
+ order byte is set to n->size and the lowest order
+ byte to 1. */
n->size = TYPE_PRECISION (TREE_TYPE (rhs1));
if (n->size % BITS_PER_UNIT != 0)
return NULL_TREE;
n->size /= BITS_PER_UNIT;
n->n = (sizeof (HOST_WIDEST_INT) < 8 ? 0 :
- (unsigned HOST_WIDEST_INT)0x01020304 << 32 | 0x05060708);
- n->n >>= (sizeof (HOST_WIDEST_INT) - n->size) * BITS_PER_UNIT;
+ (unsigned HOST_WIDEST_INT)0x08070605 << 32 | 0x04030201);
+
+ if (n->size < (int)sizeof (HOST_WIDEST_INT))
+ n->n &= ((unsigned HOST_WIDEST_INT)1 <<
+ (n->size * BITS_PER_UNIT)) - 1;
source_expr1 = rhs1;
}
{
/* If STMT casts to a smaller type mask out the bits not
belonging to the target type. */
- n->size = type_size / BITS_PER_UNIT;
n->n &= ((unsigned HOST_WIDEST_INT)1 << type_size) - 1;
}
+ n->size = type_size / BITS_PER_UNIT;
}
break;
default:
find_bswap (gimple stmt)
{
/* The number which the find_bswap result should match in order to
- have a full byte swap. The insignificant bytes are masked out
- before using it. */
+ have a full byte swap. The number is shifted to the left according
+ to the size of the symbolic number before using it. */
unsigned HOST_WIDEST_INT cmp =
sizeof (HOST_WIDEST_INT) < 8 ? 0 :
- (unsigned HOST_WIDEST_INT)0x08070605 << 32 | 0x04030201;
+ (unsigned HOST_WIDEST_INT)0x01020304 << 32 | 0x05060708;
struct symbolic_number n;
tree source_expr;
+ /* The last parameter determines the depth search limit. It usually
+ correlates directly to the number of bytes to be touched. We
+ increase that number by one here in order to also cover signed ->
+ unsigned conversions of the src operand as can be seen in
+ libgcc. */
source_expr = find_bswap_1 (stmt, &n,
TREE_INT_CST_LOW (
- TYPE_SIZE_UNIT (gimple_expr_type (stmt))));
+ TYPE_SIZE_UNIT (gimple_expr_type (stmt))) + 1);
if (!source_expr)
return NULL_TREE;
((unsigned HOST_WIDEST_INT)1 << (n.size * BITS_PER_UNIT)) - 1;
n.n &= mask;
- cmp &= mask;
+ cmp >>= (sizeof (HOST_WIDEST_INT) - n.size) * BITS_PER_UNIT;
}
/* A complete byte swap should make the symbolic number to start
basic_block bb;
bool bswap32_p, bswap64_p;
bool changed = false;
+ tree bswap32_type = NULL_TREE, bswap64_type = NULL_TREE;
if (BITS_PER_UNIT != 8)
return 0;
&& optab_handler (bswap_optab, SImode)->insn_code !=
CODE_FOR_nothing);
bswap64_p = (built_in_decls[BUILT_IN_BSWAP64]
- && optab_handler (bswap_optab, DImode)->insn_code !=
- CODE_FOR_nothing);
+ && (optab_handler (bswap_optab, DImode)->insn_code !=
+ CODE_FOR_nothing
+ || (bswap32_p && word_mode == SImode)));
if (!bswap32_p && !bswap64_p)
return 0;
+ /* Determine the argument type of the builtins. The code later on
+ assumes that the return and argument type are the same. */
+ if (bswap32_p)
+ {
+ tree fndecl = built_in_decls[BUILT_IN_BSWAP32];
+ bswap32_type = TREE_VALUE (TYPE_ARG_TYPES (TREE_TYPE (fndecl)));
+ }
+
+ if (bswap64_p)
+ {
+ tree fndecl = built_in_decls[BUILT_IN_BSWAP64];
+ bswap64_type = TREE_VALUE (TYPE_ARG_TYPES (TREE_TYPE (fndecl)));
+ }
+
FOR_EACH_BB (bb)
{
gimple_stmt_iterator gsi;
for (gsi = gsi_after_labels (bb); !gsi_end_p (gsi); gsi_next (&gsi))
{
gimple stmt = gsi_stmt (gsi);
- tree bswap_src;
+ tree bswap_src, bswap_type;
+ tree bswap_tmp;
tree fndecl = NULL_TREE;
int type_size;
gimple call;
{
case 32:
if (bswap32_p)
- fndecl = built_in_decls[BUILT_IN_BSWAP32];
+ {
+ fndecl = built_in_decls[BUILT_IN_BSWAP32];
+ bswap_type = bswap32_type;
+ }
break;
case 64:
if (bswap64_p)
- fndecl = built_in_decls[BUILT_IN_BSWAP64];
+ {
+ fndecl = built_in_decls[BUILT_IN_BSWAP64];
+ bswap_type = bswap64_type;
+ }
break;
default:
continue;
continue;
changed = true;
- call = gimple_build_call (fndecl, 1, bswap_src);
- gimple_call_set_lhs (call, gimple_assign_lhs (stmt));
+
+ bswap_tmp = bswap_src;
+
+ /* Convert the src expression if necessary. */
+ if (!useless_type_conversion_p (TREE_TYPE (bswap_tmp), bswap_type))
+ {
+ gimple convert_stmt;
+
+ bswap_tmp = create_tmp_var (bswap_type, "bswapsrc");
+ add_referenced_var (bswap_tmp);
+ bswap_tmp = make_ssa_name (bswap_tmp, NULL);
+
+ convert_stmt = gimple_build_assign_with_ops (
+ CONVERT_EXPR, bswap_tmp, bswap_src, NULL);
+ gsi_insert_before (&gsi, convert_stmt, GSI_SAME_STMT);
+ }
+
+ call = gimple_build_call (fndecl, 1, bswap_tmp);
+
+ bswap_tmp = gimple_assign_lhs (stmt);
+
+ /* Convert the result if necessary. */
+ if (!useless_type_conversion_p (TREE_TYPE (bswap_tmp), bswap_type))
+ {
+ gimple convert_stmt;
+
+ bswap_tmp = create_tmp_var (bswap_type, "bswapdst");
+ add_referenced_var (bswap_tmp);
+ bswap_tmp = make_ssa_name (bswap_tmp, NULL);
+ convert_stmt = gimple_build_assign_with_ops (
+ CONVERT_EXPR, gimple_assign_lhs (stmt), bswap_tmp, NULL);
+ gsi_insert_after (&gsi, convert_stmt, GSI_SAME_STMT);
+ }
+
+ gimple_call_set_lhs (call, bswap_tmp);
if (dump_file)
{
0 /* todo_flags_finish */
}
};
+
+/* Find integer multiplications where the operands are extended from
+   smaller types, and replace the MULT_EXPR with a WIDEN_MULT_EXPR
+   where appropriate.  One operand may also be an INTEGER_CST that
+   fits the narrow type of the other operand.  Returns the TODO flags
+   needed to rewrite the IL if any statement was changed, zero
+   otherwise.  */
+
+static unsigned int
+execute_optimize_widening_mul (void)
+{
+  bool changed = false;
+  basic_block bb;
+
+  FOR_EACH_BB (bb)
+    {
+      gimple_stmt_iterator gsi;
+
+      for (gsi = gsi_after_labels (bb); !gsi_end_p (gsi); gsi_next (&gsi))
+        {
+	  gimple stmt = gsi_stmt (gsi);
+	  gimple rhs1_stmt = NULL, rhs2_stmt = NULL;
+	  tree type, type1 = NULL, type2 = NULL;
+	  tree rhs1, rhs2, rhs1_convop = NULL, rhs2_convop = NULL;
+	  enum tree_code rhs1_code, rhs2_code;
+
+	  if (!is_gimple_assign (stmt)
+	      || gimple_assign_rhs_code (stmt) != MULT_EXPR)
+	    continue;
+
+	  type = TREE_TYPE (gimple_assign_lhs (stmt));
+
+	  if (TREE_CODE (type) != INTEGER_TYPE)
+	    continue;
+
+	  rhs1 = gimple_assign_rhs1 (stmt);
+	  rhs2 = gimple_assign_rhs2 (stmt);
+
+	  /* An SSA operand must be defined by a conversion from a type
+	     of exactly half the precision of the product; rhs1_stmt
+	     stays NULL when the operand is an INTEGER_CST instead.  */
+	  if (TREE_CODE (rhs1) == SSA_NAME)
+	    {
+	      rhs1_stmt = SSA_NAME_DEF_STMT (rhs1);
+	      if (!is_gimple_assign (rhs1_stmt))
+		continue;
+	      rhs1_code = gimple_assign_rhs_code (rhs1_stmt);
+	      if (!CONVERT_EXPR_CODE_P (rhs1_code))
+		continue;
+	      rhs1_convop = gimple_assign_rhs1 (rhs1_stmt);
+	      type1 = TREE_TYPE (rhs1_convop);
+	      if (TYPE_PRECISION (type1) * 2 != TYPE_PRECISION (type))
+		continue;
+	    }
+	  else if (TREE_CODE (rhs1) != INTEGER_CST)
+	    continue;
+
+	  if (TREE_CODE (rhs2) == SSA_NAME)
+	    {
+	      rhs2_stmt = SSA_NAME_DEF_STMT (rhs2);
+	      if (!is_gimple_assign (rhs2_stmt))
+		continue;
+	      rhs2_code = gimple_assign_rhs_code (rhs2_stmt);
+	      if (!CONVERT_EXPR_CODE_P (rhs2_code))
+		continue;
+	      rhs2_convop = gimple_assign_rhs1 (rhs2_stmt);
+	      type2 = TREE_TYPE (rhs2_convop);
+	      if (TYPE_PRECISION (type2) * 2 != TYPE_PRECISION (type))
+		continue;
+	    }
+	  else if (TREE_CODE (rhs2) != INTEGER_CST)
+	    continue;
+
+	  /* At least one operand has to come from a widening
+	     conversion; two constants would have been folded.  */
+	  if (rhs1_stmt == NULL && rhs2_stmt == NULL)
+	    continue;
+
+	  /* Verify that the machine can perform a widening multiply in this
+	     mode/signedness combination, otherwise this transformation is
+	     likely to pessimize code.  */
+	  if ((rhs1_stmt == NULL || TYPE_UNSIGNED (type1))
+	      && (rhs2_stmt == NULL || TYPE_UNSIGNED (type2))
+	      && (optab_handler (umul_widen_optab, TYPE_MODE (type))
+		  ->insn_code == CODE_FOR_nothing))
+	    continue;
+	  else if ((rhs1_stmt == NULL || !TYPE_UNSIGNED (type1))
+		   && (rhs2_stmt == NULL || !TYPE_UNSIGNED (type2))
+		   && (optab_handler (smul_widen_optab, TYPE_MODE (type))
+		       ->insn_code == CODE_FOR_nothing))
+	    continue;
+	  else if (rhs1_stmt != NULL && rhs2_stmt != NULL
+		   && (TYPE_UNSIGNED (type1) != TYPE_UNSIGNED (type2))
+		   && (optab_handler (usmul_widen_optab, TYPE_MODE (type))
+		       ->insn_code == CODE_FOR_nothing))
+	    continue;
+
+	  /* A constant operand must be representable in the narrow
+	     type of the other operand, or the product would differ.  */
+	  if ((rhs1_stmt == NULL && !int_fits_type_p (rhs1, type2))
+	      || (rhs2_stmt == NULL && !int_fits_type_p (rhs2, type1)))
+	    continue;
+
+	  /* Rewrite the multiply to take the narrow operands directly
+	     (or the constant folded to the narrow type).  */
+	  if (rhs1_stmt == NULL)
+	    gimple_assign_set_rhs1 (stmt, fold_convert (type2, rhs1));
+	  else
+	    gimple_assign_set_rhs1 (stmt, rhs1_convop);
+	  if (rhs2_stmt == NULL)
+	    gimple_assign_set_rhs2 (stmt, fold_convert (type1, rhs2));
+	  else
+	    gimple_assign_set_rhs2 (stmt, rhs2_convop);
+	  gimple_assign_set_rhs_code (stmt, WIDEN_MULT_EXPR);
+	  update_stmt (stmt);
+	  changed = true;
+	}
+    }
+  return (changed ? TODO_dump_func | TODO_update_ssa | TODO_verify_ssa
+	  | TODO_verify_stmts : 0);
+}
+
+/* Gate for the widening-multiply pass: run only when optimizing and
+   -fexpensive-optimizations is enabled.  */
+static bool
+gate_optimize_widening_mul (void)
+{
+  return flag_expensive_optimizations && optimize;
+}
+
+/* Pass descriptor for the widening-multiply optimization.  No finish
+   TODOs are listed here; execute_optimize_widening_mul returns the
+   required TODO flags itself when it changes anything.  */
+struct gimple_opt_pass pass_optimize_widening_mul =
+{
+ {
+  GIMPLE_PASS,
+  "widening_mul",			/* name */
+  gate_optimize_widening_mul,		/* gate */
+  execute_optimize_widening_mul,	/* execute */
+  NULL,					/* sub */
+  NULL,					/* next */
+  0,					/* static_pass_number */
+  TV_NONE,				/* tv_id */
+  PROP_ssa,				/* properties_required */
+  0,					/* properties_provided */
+  0,					/* properties_destroyed */
+  0,					/* todo_flags_start */
+  0					/* todo_flags_finish */
+ }
+};