+/* The function minmax_replacement does the main work of the min/max
+ replacement. Return true if the replacement is done. Otherwise return
+ false.
+ COND_BB is the basic block where the replacement is going to be done.
+ MIDDLE_BB is the block on the conditional path. ARG0 is argument 0
+ from the PHI. Likewise for ARG1. */
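+
+/* For instance (an illustrative sketch), the simple case
+
+ if (a < b)
+ x = a;
+ else
+ x = b;
+
+ is turned into x = MIN_EXPR <a, b>. */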
+
+static bool
+minmax_replacement (basic_block cond_bb, basic_block middle_bb,
+ edge e0, edge e1, tree phi,
+ tree arg0, tree arg1)
+{
+ tree result, type;
+ tree cond, new_stmt;
+ edge true_edge, false_edge;
+ enum tree_code cmp, minmax, ass_code;
+ tree smaller, larger, arg_true, arg_false;
+ block_stmt_iterator bsi, bsi_from;
+
+ type = TREE_TYPE (PHI_RESULT (phi));
+
+ /* The optimization may be unsafe due to NaNs. */
+ if (HONOR_NANS (TYPE_MODE (type)))
+ return false;
+
+ cond = COND_EXPR_COND (last_stmt (cond_bb));
+ cmp = TREE_CODE (cond);
+ result = PHI_RESULT (phi);
+
+ /* This transformation is only valid for order comparisons. Record which
+ operand is smaller/larger if the result of the comparison is true. */
+ if (cmp == LT_EXPR || cmp == LE_EXPR)
+ {
+ smaller = TREE_OPERAND (cond, 0);
+ larger = TREE_OPERAND (cond, 1);
+ }
+ else if (cmp == GT_EXPR || cmp == GE_EXPR)
+ {
+ smaller = TREE_OPERAND (cond, 1);
+ larger = TREE_OPERAND (cond, 0);
+ }
+ else
+ return false;
+
+ /* We need to know which is the true edge and which is the false
+ edge so that we know which PHI argument corresponds to the true
+ and which to the false path. */
+ extract_true_false_edges_from_block (cond_bb, &true_edge, &false_edge);
+
+ /* Forward the edges over the middle basic block. */
+ if (true_edge->dest == middle_bb)
+ true_edge = EDGE_SUCC (true_edge->dest, 0);
+ if (false_edge->dest == middle_bb)
+ false_edge = EDGE_SUCC (false_edge->dest, 0);
+
+ if (true_edge == e0)
+ {
+ gcc_assert (false_edge == e1);
+ arg_true = arg0;
+ arg_false = arg1;
+ }
+ else
+ {
+ gcc_assert (false_edge == e0);
+ gcc_assert (true_edge == e1);
+ arg_true = arg1;
+ arg_false = arg0;
+ }
+
+ if (empty_block_p (middle_bb))
+ {
+ if (operand_equal_for_phi_arg_p (arg_true, smaller)
+ && operand_equal_for_phi_arg_p (arg_false, larger))
+ {
+ /* Case
+
+ if (smaller < larger)
+ rslt = smaller;
+ else
+ rslt = larger; */
+ minmax = MIN_EXPR;
+ }
+ else if (operand_equal_for_phi_arg_p (arg_false, smaller)
+ && operand_equal_for_phi_arg_p (arg_true, larger))
+ minmax = MAX_EXPR;
+ else
+ return false;
+ }
+ else
+ {
+ /* Recognize the following case, assuming d <= u:
+
+ if (a <= u)
+ b = MAX (a, d);
+ x = PHI <b, u>
+
+ This is equivalent to
+
+ b = MAX (a, d);
+ x = MIN (b, u); */
+
+ tree assign = last_and_only_stmt (middle_bb);
+ tree lhs, rhs, op0, op1, bound;
+
+ if (!assign
+ || TREE_CODE (assign) != GIMPLE_MODIFY_STMT)
+ return false;
+
+ lhs = GIMPLE_STMT_OPERAND (assign, 0);
+ rhs = GIMPLE_STMT_OPERAND (assign, 1);
+ ass_code = TREE_CODE (rhs);
+ if (ass_code != MAX_EXPR && ass_code != MIN_EXPR)
+ return false;
+ op0 = TREE_OPERAND (rhs, 0);
+ op1 = TREE_OPERAND (rhs, 1);
+
+ if (true_edge->src == middle_bb)
+ {
+ /* We got here if the condition is true, i.e., SMALLER < LARGER. */
+ if (!operand_equal_for_phi_arg_p (lhs, arg_true))
+ return false;
+
+ if (operand_equal_for_phi_arg_p (arg_false, larger))
+ {
+ /* Case
+
+ if (smaller < larger)
+ {
+ r' = MAX_EXPR (smaller, bound)
+ }
+ r = PHI <r', larger> --> to be turned to MIN_EXPR. */
+ if (ass_code != MAX_EXPR)
+ return false;
+
+ minmax = MIN_EXPR;
+ if (operand_equal_for_phi_arg_p (op0, smaller))
+ bound = op1;
+ else if (operand_equal_for_phi_arg_p (op1, smaller))
+ bound = op0;
+ else
+ return false;
+
+ /* We need BOUND <= LARGER. */
+ if (!integer_nonzerop (fold_build2 (LE_EXPR, boolean_type_node,
+ bound, larger)))
+ return false;
+ }
+ else if (operand_equal_for_phi_arg_p (arg_false, smaller))
+ {
+ /* Case
+
+ if (smaller < larger)
+ {
+ r' = MIN_EXPR (larger, bound)
+ }
+ r = PHI <r', smaller> --> to be turned to MAX_EXPR. */
+ if (ass_code != MIN_EXPR)
+ return false;
+
+ minmax = MAX_EXPR;
+ if (operand_equal_for_phi_arg_p (op0, larger))
+ bound = op1;
+ else if (operand_equal_for_phi_arg_p (op1, larger))
+ bound = op0;
+ else
+ return false;
+
+ /* We need BOUND >= SMALLER. */
+ if (!integer_nonzerop (fold_build2 (GE_EXPR, boolean_type_node,
+ bound, smaller)))
+ return false;
+ }
+ else
+ return false;
+ }
+ else
+ {
+ /* We got here if the condition is false, i.e., SMALLER > LARGER. */
+ if (!operand_equal_for_phi_arg_p (lhs, arg_false))
+ return false;
+
+ if (operand_equal_for_phi_arg_p (arg_true, larger))
+ {
+ /* Case
+
+ if (smaller > larger)
+ {
+ r' = MIN_EXPR (smaller, bound)
+ }
+ r = PHI <r', larger> --> to be turned to MAX_EXPR. */
+ if (ass_code != MIN_EXPR)
+ return false;
+
+ minmax = MAX_EXPR;
+ if (operand_equal_for_phi_arg_p (op0, smaller))
+ bound = op1;
+ else if (operand_equal_for_phi_arg_p (op1, smaller))
+ bound = op0;
+ else
+ return false;
+
+ /* We need BOUND >= LARGER. */
+ if (!integer_nonzerop (fold_build2 (GE_EXPR, boolean_type_node,
+ bound, larger)))
+ return false;
+ }
+ else if (operand_equal_for_phi_arg_p (arg_true, smaller))
+ {
+ /* Case
+
+ if (smaller > larger)
+ {
+ r' = MAX_EXPR (larger, bound)
+ }
+ r = PHI <r', smaller> --> to be turned to MIN_EXPR. */
+ if (ass_code != MAX_EXPR)
+ return false;
+
+ minmax = MIN_EXPR;
+ if (operand_equal_for_phi_arg_p (op0, larger))
+ bound = op1;
+ else if (operand_equal_for_phi_arg_p (op1, larger))
+ bound = op0;
+ else
+ return false;
+
+ /* We need BOUND <= SMALLER. */
+ if (!integer_nonzerop (fold_build2 (LE_EXPR, boolean_type_node,
+ bound, smaller)))
+ return false;
+ }
+ else
+ return false;
+ }
+
+ /* Move the statement from the middle block to just before the
+ condition in COND_BB. */
+ bsi = bsi_last (cond_bb);
+ bsi_from = bsi_last (middle_bb);
+ bsi_move_before (&bsi_from, &bsi);
+ }
+
+ /* Emit the statement to compute min/max. */
+ result = duplicate_ssa_name (PHI_RESULT (phi), NULL);
+ new_stmt = build_gimple_modify_stmt (result,
+ build2 (minmax, type, arg0, arg1));
+ SSA_NAME_DEF_STMT (result) = new_stmt;
+ bsi = bsi_last (cond_bb);
+ bsi_insert_before (&bsi, new_stmt, BSI_NEW_STMT);
+
+ replace_phi_edge_with_variable (cond_bb, e1, phi, result);
+ return true;
+}
+
+/* The function abs_replacement does the main work of the absolute value
+ replacement. Return true if the replacement is done. Otherwise return
+ false.
+ COND_BB is the basic block where the replacement is going to be done.
+ MIDDLE_BB is the block with the negation. ARG0 is argument 0 from the
+ PHI. Likewise for ARG1. */
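+
+/* For instance (an illustrative sketch):
+
+ if (a >= 0)
+ x = a;
+ else
+ x = -a;
+
+ is turned into x = ABS_EXPR <a>. */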
+
+static bool
+abs_replacement (basic_block cond_bb, basic_block middle_bb,
+ edge e0 ATTRIBUTE_UNUSED, edge e1,
+ tree phi, tree arg0, tree arg1)
+{
+ tree result;
+ tree new_stmt, cond;
+ block_stmt_iterator bsi;
+ edge true_edge, false_edge;
+ tree assign;
+ edge e;
+ tree rhs, lhs;
+ bool negate;
+ enum tree_code cond_code;
+
+ /* If the type says honor signed zeros we cannot do this
+ optimization. */
+ if (HONOR_SIGNED_ZEROS (TYPE_MODE (TREE_TYPE (arg1))))
+ return false;
+
+ /* MIDDLE_BB must have only one executable statement, which must have
+ the form arg0 = -arg1 or arg1 = -arg0. */
+
+ assign = last_and_only_stmt (middle_bb);
+ /* If we did not find the proper negation assignment, then we cannot
+ optimize. */
+ if (assign == NULL)
+ return false;
+
+ /* If we got here, then we have found the only executable statement
+ in MIDDLE_BB. If it is anything other than arg0 = -arg1 or
+ arg1 = -arg0, then we cannot optimize. */
+ if (TREE_CODE (assign) != GIMPLE_MODIFY_STMT)
+ return false;
+
+ lhs = GIMPLE_STMT_OPERAND (assign, 0);
+ rhs = GIMPLE_STMT_OPERAND (assign, 1);
+
+ if (TREE_CODE (rhs) != NEGATE_EXPR)
+ return false;
+
+ rhs = TREE_OPERAND (rhs, 0);
+
+ /* The assignment has to be arg0 = -arg1 or arg1 = -arg0. */
+ if (!(lhs == arg0 && rhs == arg1)
+ && !(lhs == arg1 && rhs == arg0))
+ return false;
+
+ cond = COND_EXPR_COND (last_stmt (cond_bb));
+ result = PHI_RESULT (phi);
+
+ /* Only relationals comparing arg[01] against zero are interesting. */
+ cond_code = TREE_CODE (cond);
+ if (cond_code != GT_EXPR && cond_code != GE_EXPR
+ && cond_code != LT_EXPR && cond_code != LE_EXPR)
+ return false;
+
+ /* Make sure the conditional is arg[01] OP y. */
+ if (TREE_OPERAND (cond, 0) != rhs)
+ return false;
+
+ if (FLOAT_TYPE_P (TREE_TYPE (TREE_OPERAND (cond, 1)))
+ ? real_zerop (TREE_OPERAND (cond, 1))
+ : integer_zerop (TREE_OPERAND (cond, 1)))
+ ;
+ else
+ return false;
+
+ /* We need to know which is the true edge and which is the false
+ edge so that we know whether we have abs or negative abs. */
+ extract_true_false_edges_from_block (cond_bb, &true_edge, &false_edge);
+
+ /* For GT_EXPR/GE_EXPR, if the true edge goes to MIDDLE_BB, then we
+ will need to negate the result. Similarly for LT_EXPR/LE_EXPR if
+ the false edge goes to MIDDLE_BB. */
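+ /* For instance (an illustrative sketch), with
+
+ if (a >= 0)
+ x = -a;
+ else
+ x = a;
+
+ the negation is in MIDDLE_BB on the true edge, so the PHI selects
+ -a exactly when a is nonnegative; the result is -ABS (a) and the
+ ABS_EXPR result must be negated. */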
+ if (cond_code == GT_EXPR || cond_code == GE_EXPR)
+ e = true_edge;
+ else
+ e = false_edge;
+
+ if (e->dest == middle_bb)
+ negate = true;
+ else
+ negate = false;
+
+ result = duplicate_ssa_name (result, NULL);
+
+ if (negate)
+ {
+ tree tmp = create_tmp_var (TREE_TYPE (result), NULL);
+ add_referenced_var (tmp);
+ lhs = make_ssa_name (tmp, NULL);
+ }
+ else
+ lhs = result;
+
+ /* Build the modify expression with the ABS_EXPR. */
+ new_stmt = build_gimple_modify_stmt (lhs,
+ build1 (ABS_EXPR, TREE_TYPE (lhs), rhs));
+ SSA_NAME_DEF_STMT (lhs) = new_stmt;
+
+ bsi = bsi_last (cond_bb);
+ bsi_insert_before (&bsi, new_stmt, BSI_NEW_STMT);
+
+ if (negate)
+ {
+ /* Get the right BSI. We want to insert after the recently
+ added ABS_EXPR statement (which BSI still points at, courtesy
+ of BSI_NEW_STMT above). */
+ new_stmt = build_gimple_modify_stmt (result,
+ build1 (NEGATE_EXPR, TREE_TYPE (lhs),
+ lhs));
+ SSA_NAME_DEF_STMT (result) = new_stmt;
+
+ bsi_insert_after (&bsi, new_stmt, BSI_NEW_STMT);
+ }
+
+ replace_phi_edge_with_variable (cond_bb, e1, phi, result);
+
+ /* Note that we optimized this PHI. */
+ return true;
+}
+
+/* Auxiliary functions to determine the set of memory accesses which
+ can't trap because they are preceded by accesses to the same memory
+ portion. We do that for INDIRECT_REFs, so we only need to track
+ the SSA_NAME of the pointer indirectly referenced. The algorithm
+ is simply a walk over all instructions in dominator order. When
+ we see an INDIRECT_REF we determine whether we've already seen the
+ same ref anywhere up to the root of the dominator tree. If we have,
+ the current access can't trap. If we don't see any dominating access,
+ the current access might trap, but it might also make later accesses
+ non-trapping, so we remember it. We need to be careful with loads
+ and stores: for instance a load might not trap, while a store would,
+ so seeing a dominating read access doesn't mean that a later write
+ access can't trap. Hence we also need to differentiate the type of
+ access(es) seen.
+
+ ??? We currently are very conservative and assume that a load might
+ trap even if a store doesn't (write-only memory). This probably is
+ overly conservative. */
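+
+/* For example (an illustrative sketch):
+
+ *p_1 = x; <-- might trap; remember <p_1, store> in this BB
+ ...
+ if (cond)
+ *p_1 = y; <-- dominated by the store above, so it can't trap */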
+
+/* A hash-table of SSA_NAMEs, each paired with the basic block in which
+ an INDIRECT_REF through that name was last seen; such a block starts
+ a no-trap region for identical accesses. */
+struct name_to_bb
+{
+ tree ssa_name;
+ basic_block bb;
+ unsigned store : 1;
+};
+
+/* The hash table for remembering what we've seen. */
+static htab_t seen_ssa_names;
+
+/* The set of INDIRECT_REFs which can't trap. */
+static struct pointer_set_t *nontrap_set;
+
+/* The hash function, based on the pointer value of the pointer
+ SSA_NAME, combined with the store flag. */
+static hashval_t
+name_to_bb_hash (const void *p)
+{
+ tree n = ((struct name_to_bb *)p)->ssa_name;
+ return htab_hash_pointer (n) ^ ((struct name_to_bb *)p)->store;
+}
+
+/* The equality function of *P1 and *P2. SSA_NAMEs are shared, so
+ it's enough to simply compare them for equality. */
+static int
+name_to_bb_eq (const void *p1, const void *p2)
+{
+ const struct name_to_bb *n1 = (const struct name_to_bb *)p1;
+ const struct name_to_bb *n2 = (const struct name_to_bb *)p2;
+
+ return n1->ssa_name == n2->ssa_name && n1->store == n2->store;
+}
+
+/* We see the expression EXP in basic block BB. If it's an interesting
+ expression (an INDIRECT_REF through an SSA_NAME), possibly insert the
+ expression into the set NONTRAP or the hash table of seen expressions.
+ STORE is true if this expression is on the LHS, otherwise it's on
+ the RHS. */
+static void
+add_or_mark_expr (basic_block bb, tree exp,
+ struct pointer_set_t *nontrap, bool store)
+{
+ if (INDIRECT_REF_P (exp)
+ && TREE_CODE (TREE_OPERAND (exp, 0)) == SSA_NAME)
+ {
+ tree name = TREE_OPERAND (exp, 0);
+ struct name_to_bb map;
+ void **slot;
+ struct name_to_bb *n2bb;
+ basic_block found_bb = 0;
+
+ /* Try to find the last seen INDIRECT_REF through the same
+ SSA_NAME, which can trap. */
+ map.ssa_name = name;
+ map.bb = 0;
+ map.store = store;
+ slot = htab_find_slot (seen_ssa_names, &map, INSERT);
+ n2bb = (struct name_to_bb *) *slot;
+ if (n2bb)
+ found_bb = n2bb->bb;
+
+ /* If we've found a trapping INDIRECT_REF, _and_ it dominates EXP
+ (it's in a basic block on the path from us to the dominator root),
+ then the current access can't trap. */
+ if (found_bb && found_bb->aux == (void *)1)
+ {
+ pointer_set_insert (nontrap, exp);
+ }
+ else
+ {
+ /* EXP might trap, so insert it into the hash table. */
+ if (n2bb)
+ {
+ n2bb->bb = bb;
+ }
+ else
+ {
+ n2bb = XNEW (struct name_to_bb);
+ n2bb->ssa_name = name;
+ n2bb->bb = bb;
+ n2bb->store = store;
+ *slot = n2bb;
+ }
+ }
+ }
+}
+
+/* Called by walk_dominator_tree, when entering the block BB. */
+static void
+nt_init_block (struct dom_walk_data *data ATTRIBUTE_UNUSED, basic_block bb)
+{
+ block_stmt_iterator bsi;
+ /* Mark this BB as being on the path to dominator root. */
+ bb->aux = (void*)1;
+
+ /* And walk the statements in order. */
+ for (bsi = bsi_start (bb); !bsi_end_p (bsi); bsi_next (&bsi))
+ {
+ tree stmt = bsi_stmt (bsi);
+
+ if (TREE_CODE (stmt) == GIMPLE_MODIFY_STMT)
+ {
+ tree lhs = GIMPLE_STMT_OPERAND (stmt, 0);
+ tree rhs = GIMPLE_STMT_OPERAND (stmt, 1);
+ add_or_mark_expr (bb, rhs, nontrap_set, false);
+ add_or_mark_expr (bb, lhs, nontrap_set, true);
+ }
+ }
+}
+
+/* Called by walk_dominator_tree, when basic block BB is exited. */
+static void
+nt_fini_block (struct dom_walk_data *data ATTRIBUTE_UNUSED, basic_block bb)
+{
+ /* This BB isn't on the path to dominator root anymore. */
+ bb->aux = NULL;
+}
+
+/* This is the entry point for gathering non-trapping memory accesses.
+ It will do a dominator walk over the whole function, and it will
+ make use of the bb->aux pointers. It returns a set of trees
+ (the INDIRECT_REFs themselves) which can't trap. */
+static struct pointer_set_t *
+get_non_trapping (void)
+{
+ struct pointer_set_t *nontrap;
+ struct dom_walk_data walk_data;
+
+ nontrap = pointer_set_create ();
+ seen_ssa_names = htab_create (128, name_to_bb_hash, name_to_bb_eq,
+ free);
+ /* We're going to do a dominator walk, so ensure that we have
+ dominance information. */
+ calculate_dominance_info (CDI_DOMINATORS);
+
+ /* Setup callbacks for the generic dominator tree walker. */
+ nontrap_set = nontrap;
+ walk_data.walk_stmts_backward = false;
+ walk_data.dom_direction = CDI_DOMINATORS;
+ walk_data.initialize_block_local_data = NULL;
+ walk_data.before_dom_children_before_stmts = nt_init_block;
+ walk_data.before_dom_children_walk_stmts = NULL;
+ walk_data.before_dom_children_after_stmts = NULL;
+ walk_data.after_dom_children_before_stmts = NULL;
+ walk_data.after_dom_children_walk_stmts = NULL;
+ walk_data.after_dom_children_after_stmts = nt_fini_block;
+ walk_data.global_data = NULL;
+ walk_data.block_local_data_size = 0;
+ walk_data.interesting_blocks = NULL;
+
+ init_walk_dominator_tree (&walk_data);
+ walk_dominator_tree (&walk_data, ENTRY_BLOCK_PTR);
+ fini_walk_dominator_tree (&walk_data);
+ htab_delete (seen_ssa_names);
+
+ return nontrap;
+}
+
+/* Do the main work of conditional store replacement. We already know
+ that the recognized pattern looks like this:
+
+ split:
+ if (cond) goto MIDDLE_BB; else goto JOIN_BB (edge E1)
+ MIDDLE_BB:
+ something
+ fallthrough (edge E0)
+ JOIN_BB:
+ some more
+
+ We check that MIDDLE_BB contains only one store, that this store
+ doesn't trap (not via TREE_THIS_NOTRAP, but by checking whether an
+ access to the same memory location dominates us) and that the store
+ has a "simple" RHS. */
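+
+/* For example (an illustrative sketch), the pattern
+
+ if (cond)
+ *p_1 = rhs_2;
+
+ is turned into
+
+ if (cond) ; else cstore_3 = *p_1;
+ tmp_4 = PHI <rhs_2 (E0), cstore_3 (E1)>;
+ *p_1 = tmp_4;
+
+ which makes the store unconditional. */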
+
+static bool
+cond_store_replacement (basic_block middle_bb, basic_block join_bb,
+ edge e0, edge e1, struct pointer_set_t *nontrap)
+{
+ tree assign = last_and_only_stmt (middle_bb);
+ tree lhs, rhs, newexpr, name;
+ tree newphi;
+ block_stmt_iterator bsi;
+
+ /* Check if middle_bb consists of only one store. */
+ if (!assign
+ || TREE_CODE (assign) != GIMPLE_MODIFY_STMT)
+ return false;
+
+ lhs = GIMPLE_STMT_OPERAND (assign, 0);
+ if (!INDIRECT_REF_P (lhs))
+ return false;
+ rhs = GIMPLE_STMT_OPERAND (assign, 1);
+ if (TREE_CODE (rhs) != SSA_NAME && !is_gimple_min_invariant (rhs))
+ return false;
+ /* Prove that we can move the store down. We could also check
+ TREE_THIS_NOTRAP here, but in that case we would also move stores
+ whose value is not readily available, which we want to avoid. */
+ if (!pointer_set_contains (nontrap, lhs))
+ return false;
+
+ /* Now we've checked the constraints, so do the transformation:
+ 1) Remove the single store. */
+ mark_symbols_for_renaming (assign);
+ bsi = bsi_for_stmt (assign);
+ bsi_remove (&bsi, true);
+
+ /* 2) Create a temporary where we can store the old content
+ of the memory touched by the store, if we need to. */
+ if (!condstoretemp || TREE_TYPE (lhs) != TREE_TYPE (condstoretemp))
+ {
+ condstoretemp = create_tmp_var (TREE_TYPE (lhs), "cstore");
+ get_var_ann (condstoretemp);
+ if (TREE_CODE (TREE_TYPE (lhs)) == COMPLEX_TYPE
+ || TREE_CODE (TREE_TYPE (lhs)) == VECTOR_TYPE)
+ DECL_GIMPLE_REG_P (condstoretemp) = 1;
+ }
+ add_referenced_var (condstoretemp);
+
+ /* 3) Insert a load from the memory of the store to the temporary
+ on the edge which did not contain the store. */
+ lhs = unshare_expr (lhs);
+ newexpr = build_gimple_modify_stmt (condstoretemp, lhs);
+ name = make_ssa_name (condstoretemp, newexpr);
+ GIMPLE_STMT_OPERAND (newexpr, 0) = name;
+ mark_symbols_for_renaming (newexpr);
+ bsi_insert_on_edge (e1, newexpr);
+
+ /* 4) Create a PHI node at the join block, with one argument
+ holding the old RHS, and the other holding the temporary
+ where we stored the old memory contents. */
+ newphi = create_phi_node (condstoretemp, join_bb);
+ add_phi_arg (newphi, rhs, e0);
+ add_phi_arg (newphi, name, e1);
+
+ lhs = unshare_expr (lhs);
+ newexpr = build_gimple_modify_stmt (lhs, PHI_RESULT (newphi));
+ mark_symbols_for_renaming (newexpr);
+
+ /* 5) Insert the new store into the join block, after any leading
+ labels. */
+ bsi = bsi_start (join_bb);
+ while (!bsi_end_p (bsi) && TREE_CODE (bsi_stmt (bsi)) == LABEL_EXPR)
+ bsi_next (&bsi);
+ if (bsi_end_p (bsi))
+ {
+ bsi = bsi_last (join_bb);
+ bsi_insert_after (&bsi, newexpr, BSI_NEW_STMT);
+ }
+ else
+ bsi_insert_before (&bsi, newexpr, BSI_NEW_STMT);
+
+ return true;
+}