1 /* Global, SSA-based optimizations using mathematical identities.
2 Copyright (C) 2005, 2006, 2007, 2008, 2009, 2010, 2011
3 Free Software Foundation, Inc.
5 This file is part of GCC.
7 GCC is free software; you can redistribute it and/or modify it
8 under the terms of the GNU General Public License as published by the
9 Free Software Foundation; either version 3, or (at your option) any
10 later version.
12 GCC is distributed in the hope that it will be useful, but WITHOUT
13 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
15 for more details.
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING3. If not, see
19 <http://www.gnu.org/licenses/>. */
21 /* Currently, the only mini-pass in this file tries to CSE reciprocal
22 operations. These are common in sequences such as this one:
24 modulus = sqrt(x*x + y*y + z*z);
25 x = x / modulus;
26 y = y / modulus;
27 z = z / modulus;
29 that can be optimized to
31 modulus = sqrt(x*x + y*y + z*z);
32 rmodulus = 1.0 / modulus;
33 x = x * rmodulus;
34 y = y * rmodulus;
35 z = z * rmodulus;
37 We do this for loop invariant divisors, and with this pass whenever
38 we notice that a division has the same divisor multiple times.
40 Of course, like in PRE, we don't insert a division if a dominator
41 already has one. However, this cannot be done as an extension of
42 PRE for several reasons.
44 First of all, experiments have shown that the
45 transformation is not always useful if there are only two divisions
46 by the same divisor. This is probably because modern processors
47 can pipeline the divisions; on older, in-order processors it should
48 still be effective to optimize two divisions by the same number.
49 We make this a param, and it shall be called N in the remainder of
50 this comment.
52 Second, if trapping math is active, we have less freedom on where
53 to insert divisions: we can only do so in basic blocks that already
54 contain one. (If divisions don't trap, instead, we can insert
55 divisions elsewhere, which will be in blocks that are common dominators
56 of those that have the division).
58 We really don't want to compute the reciprocal unless a division will
59 be found. To do this, we won't insert the division in a basic block
60 that has less than N divisions *post-dominating* it.
62 The algorithm constructs a subset of the dominator tree, holding the
63 blocks containing the divisions and the common dominators to them,
64 and walks it twice. The first walk is in post-order, and it annotates
65 each block with the number of divisions that post-dominate it: this
66 gives information on where divisions can be inserted profitably.
67 The second walk is in pre-order, and it inserts divisions as explained
68 above, and replaces divisions by multiplications.
70 In the best case, the cost of the pass is O(n_statements). In the
71 worst case, the cost is due to creating the dominator tree subset,
72 with a cost of O(n_basic_blocks ^ 2); however, this can only happen
73 for n_statements / n_basic_blocks statements. So, the amortized cost
74 of creating the dominator tree subset is O(n_basic_blocks) and the
75 worst-case cost of the pass is O(n_statements * n_basic_blocks).
77 More practically, the cost will be small because there are few
78 divisions, and they tend to be in the same basic block, so insert_bb
79 is called very few times.
81 If we did this using domwalk.c, an efficient implementation would have
82 to work on all the variables in a single pass, because we could not
83 work on just a subset of the dominator tree, as we do now, and the
84 cost would also be something like O(n_statements * n_basic_blocks).
85 The data structures would be more complex in order to work on all the
86 variables in a single pass. */
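
/* Editorial worked example (not from the original sources): with N = 2
   and a division by D in blocks B2 and B3, where B2 dominates B3 and
   B3 post-dominates B2, the post-order walk credits B2 with two
   divisions, so the pre-order walk inserts

     recip = 1.0 / D;

   in B2 immediately before its own division, and both divisions are
   then rewritten as multiplications by recip.  */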
90 #include "coretypes.h"
94 #include "tree-flow.h"
96 #include "tree-pass.h"
97 #include "alloc-pool.h"
98 #include "basic-block.h"
100 #include "gimple-pretty-print.h"
102 /* FIXME: RTL headers have to be included here for optabs. */
103 #include "rtl.h" /* Because optabs.h wants enum rtx_code. */
104 #include "expr.h" /* Because optabs.h wants sepops. */
107 /* This structure represents one basic block that either computes a
108 division, or is a common dominator for basic blocks that compute a
109 division. */
111 /* The basic block represented by this structure. */
114 /* If non-NULL, the SSA_NAME holding the definition for a reciprocal
115 inserted in BB. */
116 tree recip_def;
118 /* If non-NULL, the GIMPLE_ASSIGN for a reciprocal computation that
119 was inserted in BB. */
120 gimple recip_def_stmt;
122 /* Pointer to a list of "struct occurrence"s for blocks dominated
123 by BB. */
124 struct occurrence *children;
126 /* Pointer to the next "struct occurrence"s in the list of blocks
127 sharing a common dominator. */
128 struct occurrence *next;
130 /* The number of divisions that are in BB before compute_merit. The
131 number of divisions that are in BB or post-dominate it after
132 compute_merit. */
133 int num_divisions;
135 /* True if the basic block has a division, false if it is a common
136 dominator for basic blocks that do. If it is false and trapping
137 math is active, BB is not a candidate for inserting a reciprocal. */
138 bool bb_has_division;
143 /* Number of 1.0/X ops inserted. */
144 int rdivs_inserted;
146 /* Number of 1.0/FUNC ops inserted. */
147 int rfuncs_inserted;
152 /* Number of cexpi calls inserted. */
153 int inserted;
158 /* Number of hand-written 32-bit bswaps found. */
159 int found_32bit;
161 /* Number of hand-written 64-bit bswaps found. */
162 int found_64bit;
167 /* Number of widening multiplication ops inserted. */
168 int widen_mults_inserted;
170 /* Number of integer multiply-and-accumulate ops inserted. */
171 int maccs_inserted;
173 /* Number of fp fused multiply-add ops inserted. */
174 int fmas_inserted;
177 /* The instance of "struct occurrence" representing the highest
178 interesting block in the dominator tree. */
179 static struct occurrence *occ_head;
181 /* Allocation pool for getting instances of "struct occurrence". */
182 static alloc_pool occ_pool;
186 /* Allocate and return a new struct occurrence for basic block BB, and
187 whose children list is headed by CHILDREN. */
188 static struct occurrence *
189 occ_new (basic_block bb, struct occurrence *children)
191 struct occurrence *occ;
193 bb->aux = occ = (struct occurrence *) pool_alloc (occ_pool);
194 memset (occ, 0, sizeof (struct occurrence));
197 occ->children = children;
202 /* Insert NEW_OCC into our subset of the dominator tree. P_HEAD points to a
203 list of "struct occurrence"s, one per basic block, having IDOM as
204 their common dominator.
206 We try to insert NEW_OCC as deep as possible in the tree, and we also
207 insert any other block that is a common dominator for BB and one
208 block already in the tree. */
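
/* Editorial example on an assumed CFG: if blocks B5 and B7 each
   contain a division and their nearest common dominator is B3, then
   inserting the occurrence for B7 after the one for B5 creates a new
   "struct occurrence" for B3, with the B5 and B7 occurrences as its
   children, even though B3 itself contains no division.  */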
211 insert_bb (struct occurrence *new_occ, basic_block idom,
212 struct occurrence **p_head)
214 struct occurrence *occ, **p_occ;
216 for (p_occ = p_head; (occ = *p_occ) != NULL; )
218 basic_block bb = new_occ->bb, occ_bb = occ->bb;
219 basic_block dom = nearest_common_dominator (CDI_DOMINATORS, occ_bb, bb);
222 /* BB dominates OCC_BB. OCC becomes NEW_OCC's child: remove OCC
225 occ->next = new_occ->children;
226 new_occ->children = occ;
228 /* Try the next block (it may as well be dominated by BB). */
231 else if (dom == occ_bb)
233 /* OCC_BB dominates BB. Tail recurse to look deeper. */
234 insert_bb (new_occ, dom, &occ->children);
238 else if (dom != idom)
240 gcc_assert (!dom->aux);
242 /* There is a dominator between IDOM and BB, add it and make
243 two children out of NEW_OCC and OCC. First, remove OCC from
249 /* None of the previous blocks has DOM as a dominator: if we tail
250 recursed, we would reexamine them uselessly. Just switch BB with
251 DOM, and go on looking for blocks dominated by DOM. */
252 new_occ = occ_new (dom, new_occ);
257 /* Nothing special, go on with the next element. */
262 /* No place was found as a child of IDOM. Make BB a sibling of IDOM. */
263 new_occ->next = *p_head;
267 /* Register that we found a division in BB. */
270 register_division_in (basic_block bb)
272 struct occurrence *occ;
274 occ = (struct occurrence *) bb->aux;
277 occ = occ_new (bb, NULL);
278 insert_bb (occ, ENTRY_BLOCK_PTR, &occ_head);
281 occ->bb_has_division = true;
282 occ->num_divisions++;
286 /* Compute the number of divisions that postdominate each block in OCC and
287 its children. */
290 compute_merit (struct occurrence *occ)
292 struct occurrence *occ_child;
293 basic_block dom = occ->bb;
295 for (occ_child = occ->children; occ_child; occ_child = occ_child->next)
298 if (occ_child->children)
299 compute_merit (occ_child);
302 bb = single_noncomplex_succ (dom);
306 if (dominated_by_p (CDI_POST_DOMINATORS, bb, occ_child->bb))
307 occ->num_divisions += occ_child->num_divisions;
312 /* Return whether USE_STMT is a floating-point division by DEF. */
314 is_division_by (gimple use_stmt, tree def)
316 return is_gimple_assign (use_stmt)
317 && gimple_assign_rhs_code (use_stmt) == RDIV_EXPR
318 && gimple_assign_rhs2 (use_stmt) == def
319 /* Do not recognize x / x as valid division, as we are getting
320 confused later by replacing all immediate uses x in such
321 statements. */
322 && gimple_assign_rhs1 (use_stmt) != def;
325 /* Walk the subset of the dominator tree rooted at OCC, setting the
326 RECIP_DEF field to a definition of 1.0 / DEF that can be used in
327 the given basic block. The field may be left NULL, of course,
328 if it is not possible or profitable to do the optimization.
330 DEF_GSI is an iterator pointing at the statement defining DEF.
331 If RECIP_DEF is set, a dominator already has a computation that can
332 be used. */
335 insert_reciprocals (gimple_stmt_iterator *def_gsi, struct occurrence *occ,
336 tree def, tree recip_def, int threshold)
340 gimple_stmt_iterator gsi;
341 struct occurrence *occ_child;
344 && (occ->bb_has_division || !flag_trapping_math)
345 && occ->num_divisions >= threshold)
347 /* Make a variable with the replacement and substitute it. */
348 type = TREE_TYPE (def);
349 recip_def = make_rename_temp (type, "reciptmp");
350 new_stmt = gimple_build_assign_with_ops (RDIV_EXPR, recip_def,
351 build_one_cst (type), def);
353 if (occ->bb_has_division)
355 /* Case 1: insert before an existing division. */
356 gsi = gsi_after_labels (occ->bb);
357 while (!gsi_end_p (gsi) && !is_division_by (gsi_stmt (gsi), def))
360 gsi_insert_before (&gsi, new_stmt, GSI_SAME_STMT);
362 else if (def_gsi && occ->bb == def_gsi->bb)
364 /* Case 2: insert right after the definition. Note that this will
365 never happen if the definition statement can throw, because in
366 that case the sole successor of the statement's basic block will
367 dominate all the uses as well. */
368 gsi_insert_after (def_gsi, new_stmt, GSI_NEW_STMT);
372 /* Case 3: insert in a basic block not containing defs/uses. */
373 gsi = gsi_after_labels (occ->bb);
374 gsi_insert_before (&gsi, new_stmt, GSI_SAME_STMT);
377 reciprocal_stats.rdivs_inserted++;
379 occ->recip_def_stmt = new_stmt;
382 occ->recip_def = recip_def;
383 for (occ_child = occ->children; occ_child; occ_child = occ_child->next)
384 insert_reciprocals (def_gsi, occ_child, def, recip_def, threshold);
388 /* Replace the division at USE_P with a multiplication by the reciprocal, if
392 replace_reciprocal (use_operand_p use_p)
394 gimple use_stmt = USE_STMT (use_p);
395 basic_block bb = gimple_bb (use_stmt);
396 struct occurrence *occ = (struct occurrence *) bb->aux;
398 if (optimize_bb_for_speed_p (bb)
399 && occ->recip_def && use_stmt != occ->recip_def_stmt)
401 gimple_assign_set_rhs_code (use_stmt, MULT_EXPR);
402 SET_USE (use_p, occ->recip_def);
403 fold_stmt_inplace (use_stmt);
404 update_stmt (use_stmt);
409 /* Free OCC and return one more "struct occurrence" to be freed. */
411 static struct occurrence *
412 free_bb (struct occurrence *occ)
414 struct occurrence *child, *next;
416 /* First get the two pointers hanging off OCC. */
418 child = occ->children;
420 pool_free (occ_pool, occ);
422 /* Now ensure that we don't recurse unless it is necessary. */
428 next = free_bb (next);
435 /* Look for floating-point divisions among DEF's uses, and try to
436 replace them by multiplications with the reciprocal. Add
437 as many statements computing the reciprocal as needed.
439 DEF must be a GIMPLE register of a floating-point type. */
442 execute_cse_reciprocals_1 (gimple_stmt_iterator *def_gsi, tree def)
445 imm_use_iterator use_iter;
446 struct occurrence *occ;
447 int count = 0, threshold;
449 gcc_assert (FLOAT_TYPE_P (TREE_TYPE (def)) && is_gimple_reg (def));
451 FOR_EACH_IMM_USE_FAST (use_p, use_iter, def)
453 gimple use_stmt = USE_STMT (use_p);
454 if (is_division_by (use_stmt, def))
456 register_division_in (gimple_bb (use_stmt));
461 /* Do the expensive part only if we can hope to optimize something. */
462 threshold = targetm.min_divisions_for_recip_mul (TYPE_MODE (TREE_TYPE (def)));
463 if (count >= threshold)
466 for (occ = occ_head; occ; occ = occ->next)
469 insert_reciprocals (def_gsi, occ, def, NULL, threshold);
472 FOR_EACH_IMM_USE_STMT (use_stmt, use_iter, def)
474 if (is_division_by (use_stmt, def))
476 FOR_EACH_IMM_USE_ON_STMT (use_p, use_iter)
477 replace_reciprocal (use_p);
482 for (occ = occ_head; occ; )
489 gate_cse_reciprocals (void)
491 return optimize && flag_reciprocal_math;
494 /* Go through all the floating-point SSA_NAMEs, and call
495 execute_cse_reciprocals_1 on each of them. */
497 execute_cse_reciprocals (void)
502 occ_pool = create_alloc_pool ("dominators for recip",
503 sizeof (struct occurrence),
504 n_basic_blocks / 3 + 1);
506 memset (&reciprocal_stats, 0, sizeof (reciprocal_stats));
507 calculate_dominance_info (CDI_DOMINATORS);
508 calculate_dominance_info (CDI_POST_DOMINATORS);
510 #ifdef ENABLE_CHECKING
512 gcc_assert (!bb->aux);
515 for (arg = DECL_ARGUMENTS (cfun->decl); arg; arg = DECL_CHAIN (arg))
516 if (gimple_default_def (cfun, arg)
517 && FLOAT_TYPE_P (TREE_TYPE (arg))
518 && is_gimple_reg (arg))
519 execute_cse_reciprocals_1 (NULL, gimple_default_def (cfun, arg));
523 gimple_stmt_iterator gsi;
527 for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi))
529 phi = gsi_stmt (gsi);
530 def = PHI_RESULT (phi);
531 if (FLOAT_TYPE_P (TREE_TYPE (def))
532 && is_gimple_reg (def))
533 execute_cse_reciprocals_1 (NULL, def);
536 for (gsi = gsi_after_labels (bb); !gsi_end_p (gsi); gsi_next (&gsi))
538 gimple stmt = gsi_stmt (gsi);
540 if (gimple_has_lhs (stmt)
541 && (def = SINGLE_SSA_TREE_OPERAND (stmt, SSA_OP_DEF)) != NULL
542 && FLOAT_TYPE_P (TREE_TYPE (def))
543 && TREE_CODE (def) == SSA_NAME)
544 execute_cse_reciprocals_1 (&gsi, def);
547 if (optimize_bb_for_size_p (bb))
550 /* Scan for a/func(b) and convert it to reciprocal a*rfunc(b). */
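/* Editorial sketch of the intended rewrite, assuming the target
   advertises a reciprocal variant of FUNC through
   targetm.builtin_reciprocal (for example a reciprocal square root
   for sqrt under -ffast-math); rfunc below is that hypothetical
   variant:

     t = func (b);            t = rfunc (b);
     x = a / t;         -->   x = a * t;
     y = c / t;               y = c * t;

   The rewrite is only performed when every use of t is a division.  */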
551 for (gsi = gsi_after_labels (bb); !gsi_end_p (gsi); gsi_next (&gsi))
553 gimple stmt = gsi_stmt (gsi);
556 if (is_gimple_assign (stmt)
557 && gimple_assign_rhs_code (stmt) == RDIV_EXPR)
559 tree arg1 = gimple_assign_rhs2 (stmt);
562 if (TREE_CODE (arg1) != SSA_NAME)
565 stmt1 = SSA_NAME_DEF_STMT (arg1);
567 if (is_gimple_call (stmt1)
568 && gimple_call_lhs (stmt1)
569 && (fndecl = gimple_call_fndecl (stmt1))
570 && (DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL
571 || DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_MD))
573 enum built_in_function code;
578 code = DECL_FUNCTION_CODE (fndecl);
579 md_code = DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_MD;
581 fndecl = targetm.builtin_reciprocal (code, md_code, false);
585 /* Check that all uses of the SSA name are divisions,
586 otherwise replacing the defining statement will do
587 the wrong thing. */
589 FOR_EACH_IMM_USE_FAST (use_p, ui, arg1)
591 gimple stmt2 = USE_STMT (use_p);
592 if (is_gimple_debug (stmt2))
594 if (!is_gimple_assign (stmt2)
595 || gimple_assign_rhs_code (stmt2) != RDIV_EXPR
596 || gimple_assign_rhs1 (stmt2) == arg1
597 || gimple_assign_rhs2 (stmt2) != arg1)
606 gimple_replace_lhs (stmt1, arg1);
607 gimple_call_set_fndecl (stmt1, fndecl);
609 reciprocal_stats.rfuncs_inserted++;
611 FOR_EACH_IMM_USE_STMT (stmt, ui, arg1)
613 gimple_assign_set_rhs_code (stmt, MULT_EXPR);
614 fold_stmt_inplace (stmt);
622 statistics_counter_event (cfun, "reciprocal divs inserted",
623 reciprocal_stats.rdivs_inserted);
624 statistics_counter_event (cfun, "reciprocal functions inserted",
625 reciprocal_stats.rfuncs_inserted);
627 free_dominance_info (CDI_DOMINATORS);
628 free_dominance_info (CDI_POST_DOMINATORS);
629 free_alloc_pool (occ_pool);
633 struct gimple_opt_pass pass_cse_reciprocals =
638 gate_cse_reciprocals, /* gate */
639 execute_cse_reciprocals, /* execute */
642 0, /* static_pass_number */
644 PROP_ssa, /* properties_required */
645 0, /* properties_provided */
646 0, /* properties_destroyed */
647 0, /* todo_flags_start */
648 TODO_dump_func | TODO_update_ssa | TODO_verify_ssa
649 | TODO_verify_stmts /* todo_flags_finish */
653 /* Records an occurrence at statement USE_STMT in the vector of trees
654 STMTS if it is dominated by *TOP_BB, dominates it, or *TOP_BB is
655 not yet initialized. Returns true if the occurrence was pushed on
656 the vector. Adjusts *TOP_BB to be the basic block dominating all
657 statements in the vector. */
660 maybe_record_sincos (VEC(gimple, heap) **stmts,
661 basic_block *top_bb, gimple use_stmt)
663 basic_block use_bb = gimple_bb (use_stmt);
665 && (*top_bb == use_bb
666 || dominated_by_p (CDI_DOMINATORS, use_bb, *top_bb)))
667 VEC_safe_push (gimple, heap, *stmts, use_stmt);
669 || dominated_by_p (CDI_DOMINATORS, *top_bb, use_bb))
671 VEC_safe_push (gimple, heap, *stmts, use_stmt);
680 /* Look for sin, cos and cexpi calls with the same argument NAME and
681 create a single call to cexpi CSEing the result in this case.
682 We first walk over all immediate uses of the argument collecting
683 statements that we can CSE in a vector and in a second pass replace
684 the statement rhs with a REALPART or IMAGPART expression on the
685 result of the cexpi call we insert before the use statement that
686 dominates all other candidates. */
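
/* Editorial example of the rewrite on GIMPLE-like pseudocode:

     s = sin (x);             t = cexpi (x);
     c = cos (x);       -->   s = IMAGPART_EXPR <t>;
                              c = REALPART_EXPR <t>;

   One cexpi call replaces the two calls, and the sin/cos results
   become cheap component extractions.  */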
689 execute_cse_sincos_1 (tree name)
691 gimple_stmt_iterator gsi;
692 imm_use_iterator use_iter;
693 tree fndecl, res, type;
694 gimple def_stmt, use_stmt, stmt;
695 int seen_cos = 0, seen_sin = 0, seen_cexpi = 0;
696 VEC(gimple, heap) *stmts = NULL;
697 basic_block top_bb = NULL;
699 bool cfg_changed = false;
701 type = TREE_TYPE (name);
702 FOR_EACH_IMM_USE_STMT (use_stmt, use_iter, name)
704 if (gimple_code (use_stmt) != GIMPLE_CALL
705 || !gimple_call_lhs (use_stmt)
706 || !(fndecl = gimple_call_fndecl (use_stmt))
707 || DECL_BUILT_IN_CLASS (fndecl) != BUILT_IN_NORMAL)
710 switch (DECL_FUNCTION_CODE (fndecl))
712 CASE_FLT_FN (BUILT_IN_COS):
713 seen_cos |= maybe_record_sincos (&stmts, &top_bb, use_stmt) ? 1 : 0;
716 CASE_FLT_FN (BUILT_IN_SIN):
717 seen_sin |= maybe_record_sincos (&stmts, &top_bb, use_stmt) ? 1 : 0;
720 CASE_FLT_FN (BUILT_IN_CEXPI):
721 seen_cexpi |= maybe_record_sincos (&stmts, &top_bb, use_stmt) ? 1 : 0;
728 if (seen_cos + seen_sin + seen_cexpi <= 1)
730 VEC_free(gimple, heap, stmts);
734 /* Simply insert cexpi at the beginning of top_bb but not earlier than
735 the name def statement. */
736 fndecl = mathfn_built_in (type, BUILT_IN_CEXPI);
739 res = create_tmp_reg (TREE_TYPE (TREE_TYPE (fndecl)), "sincostmp");
740 stmt = gimple_build_call (fndecl, 1, name);
741 res = make_ssa_name (res, stmt);
742 gimple_call_set_lhs (stmt, res);
744 def_stmt = SSA_NAME_DEF_STMT (name);
745 if (!SSA_NAME_IS_DEFAULT_DEF (name)
746 && gimple_code (def_stmt) != GIMPLE_PHI
747 && gimple_bb (def_stmt) == top_bb)
749 gsi = gsi_for_stmt (def_stmt);
750 gsi_insert_after (&gsi, stmt, GSI_SAME_STMT);
754 gsi = gsi_after_labels (top_bb);
755 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
758 sincos_stats.inserted++;
760 /* And adjust the recorded old call sites. */
761 for (i = 0; VEC_iterate(gimple, stmts, i, use_stmt); ++i)
764 fndecl = gimple_call_fndecl (use_stmt);
766 switch (DECL_FUNCTION_CODE (fndecl))
768 CASE_FLT_FN (BUILT_IN_COS):
769 rhs = fold_build1 (REALPART_EXPR, type, res);
772 CASE_FLT_FN (BUILT_IN_SIN):
773 rhs = fold_build1 (IMAGPART_EXPR, type, res);
776 CASE_FLT_FN (BUILT_IN_CEXPI):
784 /* Replace call with a copy. */
785 stmt = gimple_build_assign (gimple_call_lhs (use_stmt), rhs);
787 gsi = gsi_for_stmt (use_stmt);
788 gsi_replace (&gsi, stmt, true);
789 if (gimple_purge_dead_eh_edges (gimple_bb (stmt)))
793 VEC_free(gimple, heap, stmts);
798 /* To evaluate powi(x,n), the floating point value x raised to the
799 constant integer exponent n, we use a hybrid algorithm that
800 combines the "window method" with look-up tables. For an
801 introduction to exponentiation algorithms and "addition chains",
802 see section 4.6.3, "Evaluation of Powers" of Donald E. Knuth,
803 "Seminumerical Algorithms", Vol. 2, "The Art of Computer Programming",
804 3rd Edition, 1998, and Daniel M. Gordon, "A Survey of Fast Exponentiation
805 Methods", Journal of Algorithms, Vol. 27, pp. 129-146, 1998. */
807 /* Provide a default value for POWI_MAX_MULTS, the maximum number of
808 multiplications to inline before calling the system library's pow
809 function. powi(x,n) requires at worst 2*bits(n)-2 multiplications,
810 so this default never requires calling pow, powf or powl. */
812 #ifndef POWI_MAX_MULTS
813 #define POWI_MAX_MULTS (2*HOST_BITS_PER_WIDE_INT-2)
816 /* The size of the "optimal power tree" lookup table. All
817 exponents less than this value are simply looked up in the
818 powi_table below. This threshold is also used to size the
819 cache of pseudo registers that hold intermediate results. */
820 #define POWI_TABLE_SIZE 256
822 /* The size, in bits of the window, used in the "window method"
823 exponentiation algorithm. This is equivalent to a radix of
824 (1<<POWI_WINDOW_SIZE) in the corresponding "m-ary method". */
825 #define POWI_WINDOW_SIZE 3
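
/* Editorial worked example: for the exponent n = 301 (binary
   100101101), the cost loop in powi_cost below strips the low window
   301 & 7 = 5, charges its table-lookup cost plus POWI_WINDOW_SIZE + 1
   multiplications, and continues with 301 >> 3 = 37, which is then
   handled through powi_table.  */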
827 /* The following table is an efficient representation of an
828 "optimal power tree". For each value, i, the corresponding
829 value, j, in the table states that an optimal evaluation
830 sequence for calculating pow(x,i) can be found by evaluating
831 pow(x,j)*pow(x,i-j). An optimal power tree for the first
832 100 integers is given in Knuth's "Seminumerical algorithms". */
834 static const unsigned char powi_table[POWI_TABLE_SIZE] =
836 0, 1, 1, 2, 2, 3, 3, 4, /* 0 - 7 */
837 4, 6, 5, 6, 6, 10, 7, 9, /* 8 - 15 */
838 8, 16, 9, 16, 10, 12, 11, 13, /* 16 - 23 */
839 12, 17, 13, 18, 14, 24, 15, 26, /* 24 - 31 */
840 16, 17, 17, 19, 18, 33, 19, 26, /* 32 - 39 */
841 20, 25, 21, 40, 22, 27, 23, 44, /* 40 - 47 */
842 24, 32, 25, 34, 26, 29, 27, 44, /* 48 - 55 */
843 28, 31, 29, 34, 30, 60, 31, 36, /* 56 - 63 */
844 32, 64, 33, 34, 34, 46, 35, 37, /* 64 - 71 */
845 36, 65, 37, 50, 38, 48, 39, 69, /* 72 - 79 */
846 40, 49, 41, 43, 42, 51, 43, 58, /* 80 - 87 */
847 44, 64, 45, 47, 46, 59, 47, 76, /* 88 - 95 */
848 48, 65, 49, 66, 50, 67, 51, 66, /* 96 - 103 */
849 52, 70, 53, 74, 54, 104, 55, 74, /* 104 - 111 */
850 56, 64, 57, 69, 58, 78, 59, 68, /* 112 - 119 */
851 60, 61, 61, 80, 62, 75, 63, 68, /* 120 - 127 */
852 64, 65, 65, 128, 66, 129, 67, 90, /* 128 - 135 */
853 68, 73, 69, 131, 70, 94, 71, 88, /* 136 - 143 */
854 72, 128, 73, 98, 74, 132, 75, 121, /* 144 - 151 */
855 76, 102, 77, 124, 78, 132, 79, 106, /* 152 - 159 */
856 80, 97, 81, 160, 82, 99, 83, 134, /* 160 - 167 */
857 84, 86, 85, 95, 86, 160, 87, 100, /* 168 - 175 */
858 88, 113, 89, 98, 90, 107, 91, 122, /* 176 - 183 */
859 92, 111, 93, 102, 94, 126, 95, 150, /* 184 - 191 */
860 96, 128, 97, 130, 98, 133, 99, 195, /* 192 - 199 */
861 100, 128, 101, 123, 102, 164, 103, 138, /* 200 - 207 */
862 104, 145, 105, 146, 106, 109, 107, 149, /* 208 - 215 */
863 108, 200, 109, 146, 110, 170, 111, 157, /* 216 - 223 */
864 112, 128, 113, 130, 114, 182, 115, 132, /* 224 - 231 */
865 116, 200, 117, 132, 118, 158, 119, 206, /* 232 - 239 */
866 120, 240, 121, 162, 122, 147, 123, 152, /* 240 - 247 */
867 124, 166, 125, 214, 126, 138, 127, 153, /* 248 - 255 */
868 };
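
/* Editorial worked example: powi_table[10] == 5, so x**10 is computed
   as x**5 * x**5; powi_table[5] == 3 gives x**5 = x**3 * x**2, and
   powi_table[3] == 2 gives x**3 = x**2 * x.  Since the cache shares
   the single x**2, the whole expansion costs four multiplications.  */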
871 /* Return the number of multiplications required to calculate
872 powi(x,n) where n is less than POWI_TABLE_SIZE. This is a
873 subroutine of powi_cost. CACHE is an array indicating
874 which exponents have already been calculated. */
877 powi_lookup_cost (unsigned HOST_WIDE_INT n, bool *cache)
879 /* If we've already calculated this exponent, then this evaluation
880 doesn't require any additional multiplications. */
885 return powi_lookup_cost (n - powi_table[n], cache)
886 + powi_lookup_cost (powi_table[n], cache) + 1;
889 /* Return the number of multiplications required to calculate
890 powi(x,n) for an arbitrary x, given the exponent N. This
891 function needs to be kept in sync with powi_as_mults below. */
894 powi_cost (HOST_WIDE_INT n)
896 bool cache[POWI_TABLE_SIZE];
897 unsigned HOST_WIDE_INT digit;
898 unsigned HOST_WIDE_INT val;
904 /* Ignore the reciprocal when calculating the cost. */
905 val = (n < 0) ? -n : n;
907 /* Initialize the exponent cache. */
908 memset (cache, 0, POWI_TABLE_SIZE * sizeof (bool));
913 while (val >= POWI_TABLE_SIZE)
917 digit = val & ((1 << POWI_WINDOW_SIZE) - 1);
918 result += powi_lookup_cost (digit, cache)
919 + POWI_WINDOW_SIZE + 1;
920 val >>= POWI_WINDOW_SIZE;
929 return result + powi_lookup_cost (val, cache);
932 /* Recursive subroutine of powi_as_mults. This function takes the
933 array, CACHE, of already calculated exponents and an exponent N and
934 returns a tree that corresponds to CACHE[1]**N, with type TYPE. */
937 powi_as_mults_1 (gimple_stmt_iterator *gsi, location_t loc, tree type,
938 HOST_WIDE_INT n, tree *cache, tree target)
940 tree op0, op1, ssa_target;
941 unsigned HOST_WIDE_INT digit;
944 if (n < POWI_TABLE_SIZE && cache[n])
945 return cache[n];
947 ssa_target = make_ssa_name (target, NULL);
949 if (n < POWI_TABLE_SIZE)
951 cache[n] = ssa_target;
952 op0 = powi_as_mults_1 (gsi, loc, type, n - powi_table[n], cache, target);
953 op1 = powi_as_mults_1 (gsi, loc, type, powi_table[n], cache, target);
957 digit = n & ((1 << POWI_WINDOW_SIZE) - 1);
958 op0 = powi_as_mults_1 (gsi, loc, type, n - digit, cache, target);
959 op1 = powi_as_mults_1 (gsi, loc, type, digit, cache, target);
963 op0 = powi_as_mults_1 (gsi, loc, type, n >> 1, cache, target);
967 mult_stmt = gimple_build_assign_with_ops (MULT_EXPR, ssa_target, op0, op1);
968 gimple_set_location (mult_stmt, loc);
969 gsi_insert_before (gsi, mult_stmt, GSI_SAME_STMT);
974 /* Convert ARG0**N to a tree of multiplications of ARG0 with itself.
975 This function needs to be kept in sync with powi_cost above. */
978 powi_as_mults (gimple_stmt_iterator *gsi, location_t loc,
979 tree arg0, HOST_WIDE_INT n)
981 tree cache[POWI_TABLE_SIZE], result, type = TREE_TYPE (arg0), target;
984 if (n == 0)
985 return build_real (type, dconst1);
987 memset (cache, 0, sizeof (cache));
988 cache[1] = arg0;
990 target = create_tmp_var (type, "powmult");
991 add_referenced_var (target);
993 result = powi_as_mults_1 (gsi, loc, type, (n < 0) ? -n : n, cache, target);
995 if (n >= 0)
996 return result;
998 /* If the original exponent was negative, reciprocate the result. */
999 target = make_ssa_name (target, NULL);
1000 div_stmt = gimple_build_assign_with_ops (RDIV_EXPR, target,
1001 build_real (type, dconst1),
1003 gimple_set_location (div_stmt, loc);
1004 gsi_insert_before (gsi, div_stmt, GSI_SAME_STMT);
1009 /* ARG0 and N are the two arguments to a powi builtin in GSI with
1010 location info LOC. If the arguments are appropriate, create an
1011 equivalent sequence of statements prior to GSI using an optimal
1012 number of multiplications, and return an expression holding the
1013 result. */
1016 gimple_expand_builtin_powi (gimple_stmt_iterator *gsi, location_t loc,
1017 tree arg0, HOST_WIDE_INT n)
1019 /* Avoid largest negative number. */
1020 if (n != -n
1021 && ((n >= -1 && n <= 2)
1022 || (optimize_function_for_speed_p (cfun)
1023 && powi_cost (n) <= POWI_MAX_MULTS)))
1024 return powi_as_mults (gsi, loc, arg0, n);
1029 /* Build a gimple call statement that calls FN with argument ARG.
1030 Set the lhs of the call statement to a fresh SSA name for
1031 variable VAR. If VAR is NULL, first allocate it. Insert the
1032 statement prior to GSI's current position, and return the fresh
1033 SSA name. */
1036 build_and_insert_call (gimple_stmt_iterator *gsi, location_t loc,
1037 tree *var, tree fn, tree arg)
1044 *var = create_tmp_var (TREE_TYPE (arg), "powroot");
1045 add_referenced_var (*var);
1048 call_stmt = gimple_build_call (fn, 1, arg);
1049 ssa_target = make_ssa_name (*var, NULL);
1050 gimple_set_lhs (call_stmt, ssa_target);
1051 gimple_set_location (call_stmt, loc);
1052 gsi_insert_before (gsi, call_stmt, GSI_SAME_STMT);
1057 /* Build a gimple binary operation with the given CODE and arguments
1058 ARG0, ARG1, assigning the result to a new SSA name for variable
1059 TARGET. Insert the statement prior to GSI's current position, and
1060 return the fresh SSA name. */
1063 build_and_insert_binop (gimple_stmt_iterator *gsi, location_t loc,
1064 tree target, enum tree_code code, tree arg0, tree arg1)
1066 tree result = make_ssa_name (target, NULL);
1067 gimple stmt = gimple_build_assign_with_ops (code, result, arg0, arg1);
1068 gimple_set_location (stmt, loc);
1069 gsi_insert_before (gsi, stmt, GSI_SAME_STMT);
1073 /* ARG0 and ARG1 are the two arguments to a pow builtin call in GSI
1074 with location info LOC. If possible, create an equivalent and
1075 less expensive sequence of statements prior to GSI, and return an
1076 expression holding the result. */
1079 gimple_expand_builtin_pow (gimple_stmt_iterator *gsi, location_t loc,
1080 tree arg0, tree arg1)
1082 REAL_VALUE_TYPE c, cint, dconst1_4, dconst3_4, dconst1_3, dconst1_6;
1083 REAL_VALUE_TYPE c2, dconst3;
1085 tree type, sqrtfn, cbrtfn, sqrt_arg0, sqrt_sqrt, result, cbrt_x, powi_cbrt_x;
1086 tree target = NULL_TREE;
1087 enum machine_mode mode;
1088 bool hw_sqrt_exists;
1090 /* If the exponent isn't a constant, there's nothing of interest
1091 to be done. */
1092 if (TREE_CODE (arg1) != REAL_CST)
1095 /* If the exponent is equivalent to an integer, expand to an optimal
1096 multiplication sequence when profitable. */
1097 c = TREE_REAL_CST (arg1);
1098 n = real_to_integer (&c);
1099 real_from_integer (&cint, VOIDmode, n, n < 0 ? -1 : 0, 0);
1101 if (real_identical (&c, &cint)
1102 && ((n >= -1 && n <= 2)
1103 || (flag_unsafe_math_optimizations
1104 && optimize_insn_for_speed_p ()
1105 && powi_cost (n) <= POWI_MAX_MULTS)))
1106 return gimple_expand_builtin_powi (gsi, loc, arg0, n);
1108 /* Attempt various optimizations using sqrt and cbrt. */
1109 type = TREE_TYPE (arg0);
1110 mode = TYPE_MODE (type);
1111 sqrtfn = mathfn_built_in (type, BUILT_IN_SQRT);
1113 /* Optimize pow(x,0.5) = sqrt(x). This replacement is always safe
1114 unless signed zeros must be maintained. pow(-0,0.5) = +0, while
1115 sqrt(-0) = -0. */
1116 if (sqrtfn
1117 && REAL_VALUES_EQUAL (c, dconsthalf)
1118 && !HONOR_SIGNED_ZEROS (mode))
1119 return build_and_insert_call (gsi, loc, &target, sqrtfn, arg0);
1121 /* Optimize pow(x,0.25) = sqrt(sqrt(x)). Assume on most machines that
1122 a builtin sqrt instruction is smaller than a call to pow with 0.25,
1123 so do this optimization even if -Os. Don't do this optimization
1124 if we don't have a hardware sqrt insn. */
1125 dconst1_4 = dconst1;
1126 SET_REAL_EXP (&dconst1_4, REAL_EXP (&dconst1_4) - 2);
1127 hw_sqrt_exists = optab_handler(sqrt_optab, mode) != CODE_FOR_nothing;
1129 if (flag_unsafe_math_optimizations
1131 && REAL_VALUES_EQUAL (c, dconst1_4)
1135 sqrt_arg0 = build_and_insert_call (gsi, loc, &target, sqrtfn, arg0);
1138 return build_and_insert_call (gsi, loc, &target, sqrtfn, sqrt_arg0);
1141 /* Optimize pow(x,0.75) = sqrt(x) * sqrt(sqrt(x)) unless we are
1142 optimizing for space. Don't do this optimization if we don't have
1143 a hardware sqrt insn. */
1144 real_from_integer (&dconst3_4, VOIDmode, 3, 0, 0);
1145 SET_REAL_EXP (&dconst3_4, REAL_EXP (&dconst3_4) - 2);
1147 if (flag_unsafe_math_optimizations
1149 && optimize_function_for_speed_p (cfun)
1150 && REAL_VALUES_EQUAL (c, dconst3_4)
1154 sqrt_arg0 = build_and_insert_call (gsi, loc, &target, sqrtfn, arg0);
1157 sqrt_sqrt = build_and_insert_call (gsi, loc, &target, sqrtfn, sqrt_arg0);
1159 /* sqrt(x) * sqrt(sqrt(x)) */
1160 return build_and_insert_binop (gsi, loc, target, MULT_EXPR,
1161 sqrt_arg0, sqrt_sqrt);
1164 /* Optimize pow(x,1./3.) = cbrt(x). This requires unsafe math
1165 optimizations since 1./3. is not exactly representable. If x
1166 is negative and finite, the correct value of pow(x,1./3.) is
1167 a NaN with the "invalid" exception raised, because the value
1168 of 1./3. actually has an even denominator. The correct value
1169 of cbrt(x) is a negative real value. */
1170 cbrtfn = mathfn_built_in (type, BUILT_IN_CBRT);
1171 dconst1_3 = real_value_truncate (mode, dconst_third ());
1173 if (flag_unsafe_math_optimizations
1175 && (gimple_val_nonnegative_real_p (arg0) || !HONOR_NANS (mode))
1176 && REAL_VALUES_EQUAL (c, dconst1_3))
1177 return build_and_insert_call (gsi, loc, &target, cbrtfn, arg0);
1179 /* Optimize pow(x,1./6.) = cbrt(sqrt(x)). Don't do this optimization
1180 if we don't have a hardware sqrt insn. */
1181 dconst1_6 = dconst1_3;
1182 SET_REAL_EXP (&dconst1_6, REAL_EXP (&dconst1_6) - 1);
1184 if (flag_unsafe_math_optimizations
1187 && (gimple_val_nonnegative_real_p (arg0) || !HONOR_NANS (mode))
1188 && optimize_function_for_speed_p (cfun)
1190 && REAL_VALUES_EQUAL (c, dconst1_6))
1193 sqrt_arg0 = build_and_insert_call (gsi, loc, &target, sqrtfn, arg0);
1196 return build_and_insert_call (gsi, loc, &target, cbrtfn, sqrt_arg0);
1199 /* Optimize pow(x,c), where n = 2c for some nonzero integer n, into
1201 sqrt(x) * powi(x, n/2), n > 0;
1202 1.0 / (sqrt(x) * powi(x, abs(n/2))), n < 0.
1204 Do not calculate the powi factor when n/2 = 0. */
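
/* Editorial example: c = 3.5 gives n = 7, so pow (x, 3.5) becomes
   sqrt (x) * powi (x, 3), with the powi factor expanded to x*x*x by
   gimple_expand_builtin_powi below.  */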
1205 real_arithmetic (&c2, MULT_EXPR, &c, &dconst2);
1206 n = real_to_integer (&c2);
1207 real_from_integer (&cint, VOIDmode, n, n < 0 ? -1 : 0, 0);
1209 if (flag_unsafe_math_optimizations
1211 && real_identical (&c2, &cint))
1213 tree powi_x_ndiv2 = NULL_TREE;
1215 /* Attempt to fold powi(arg0, abs(n/2)) into multiplies. If not
1216 possible or profitable, give up. Skip the degenerate case when
1217 n is 1 or -1, where the result is always 1. */
1220 powi_x_ndiv2 = gimple_expand_builtin_powi (gsi, loc, arg0, abs(n/2));
1225 /* Calculate sqrt(x). When n is not 1 or -1, multiply it by the
1226 result of the optimal multiply sequence just calculated. */
1227 sqrt_arg0 = build_and_insert_call (gsi, loc, &target, sqrtfn, arg0);
1232 result = build_and_insert_binop (gsi, loc, target, MULT_EXPR,
1233 sqrt_arg0, powi_x_ndiv2);
1235 /* If n is negative, reciprocate the result. */
1237 result = build_and_insert_binop (gsi, loc, target, RDIV_EXPR,
1238 build_real (type, dconst1), result);
1242 /* Optimize pow(x,c), where 3c = n for some nonzero integer n, into
1244 powi(x, n/3) * powi(cbrt(x), n%3), n > 0;
1245 1.0 / (powi(x, abs(n)/3) * powi(cbrt(x), abs(n)%3)), n < 0.
1247 Do not calculate the first factor when n/3 = 0. As cbrt(x) is
1248 different from pow(x, 1./3.) due to rounding and behavior with
1249 negative x, we need to constrain this transformation to unsafe
1250 math and positive x or finite math. */
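
/* Editorial example: c = 4./3. gives n = 4, so pow (x, 4./3.) becomes
   powi (x, 4/3) * powi (cbrt (x), 4%3), i.e. x * cbrt (x).  */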
1251 real_from_integer (&dconst3, VOIDmode, 3, 0, 0);
1252 real_arithmetic (&c2, MULT_EXPR, &c, &dconst3);
1253 real_round (&c2, mode, &c2);
1254 n = real_to_integer (&c2);
1255 real_from_integer (&cint, VOIDmode, n, n < 0 ? -1 : 0, 0);
1256 real_arithmetic (&c2, RDIV_EXPR, &cint, &dconst3);
1257 real_convert (&c2, mode, &c2);
1259 if (flag_unsafe_math_optimizations
1261 && (gimple_val_nonnegative_real_p (arg0) || !HONOR_NANS (mode))
1262 && real_identical (&c2, &c)
1263 && optimize_function_for_speed_p (cfun)
1264 && powi_cost (n / 3) <= POWI_MAX_MULTS)
1266 tree powi_x_ndiv3 = NULL_TREE;
1268 /* Attempt to fold powi(arg0, abs(n/3)) into multiplies. If not
1269 possible or profitable, give up. Skip the degenerate case when
1270 abs(n) < 3, where the result is always 1. */
1273 powi_x_ndiv3 = gimple_expand_builtin_powi (gsi, loc, arg0,
1279 /* Calculate powi(cbrt(x), n%3). Don't use gimple_expand_builtin_powi
1280 as that creates an unnecessary variable. Instead, just produce
1281 either cbrt(x) or cbrt(x) * cbrt(x). */
1282 cbrt_x = build_and_insert_call (gsi, loc, &target, cbrtfn, arg0);
1284 if (abs (n) % 3 == 1)
1285 powi_cbrt_x = cbrt_x;
1287 powi_cbrt_x = build_and_insert_binop (gsi, loc, target, MULT_EXPR,
1290 /* Multiply the two subexpressions, unless powi(x,abs(n)/3) = 1. */
1292 result = powi_cbrt_x;
1294 result = build_and_insert_binop (gsi, loc, target, MULT_EXPR,
1295 powi_x_ndiv3, powi_cbrt_x);
1297 /* If n is negative, reciprocate the result. */
1299 result = build_and_insert_binop (gsi, loc, target, RDIV_EXPR,
1300 build_real (type, dconst1), result);
1305 /* No optimizations succeeded. */
1309 /* Go through all calls to sin, cos and cexpi and call execute_cse_sincos_1
1310 on the SSA_NAME argument of each of them. Also expand powi(x,n) into
1311 an optimal number of multiplies, when n is a constant. */
1314 execute_cse_sincos (void)
1317 bool cfg_changed = false;
1319 calculate_dominance_info (CDI_DOMINATORS);
1320 memset (&sincos_stats, 0, sizeof (sincos_stats));
1324 gimple_stmt_iterator gsi;
1326 for (gsi = gsi_after_labels (bb); !gsi_end_p (gsi); gsi_next (&gsi))
1328 gimple stmt = gsi_stmt (gsi);
1331 if (is_gimple_call (stmt)
1332 && gimple_call_lhs (stmt)
1333 && (fndecl = gimple_call_fndecl (stmt))
1334 && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL)
1336 tree arg, arg0, arg1, result;
1340 switch (DECL_FUNCTION_CODE (fndecl))
1342 CASE_FLT_FN (BUILT_IN_COS):
1343 CASE_FLT_FN (BUILT_IN_SIN):
1344 CASE_FLT_FN (BUILT_IN_CEXPI):
1345 /* Make sure we have either sincos or cexp. */
1346 if (!TARGET_HAS_SINCOS && !TARGET_C99_FUNCTIONS)
1349 arg = gimple_call_arg (stmt, 0);
1350 if (TREE_CODE (arg) == SSA_NAME)
1351 cfg_changed |= execute_cse_sincos_1 (arg);
1354 CASE_FLT_FN (BUILT_IN_POW):
1355 arg0 = gimple_call_arg (stmt, 0);
1356 arg1 = gimple_call_arg (stmt, 1);
1358 loc = gimple_location (stmt);
1359 result = gimple_expand_builtin_pow (&gsi, loc, arg0, arg1);
1363 tree lhs = gimple_get_lhs (stmt);
1364 gimple new_stmt = gimple_build_assign (lhs, result);
1365 gimple_set_location (new_stmt, loc);
1366 unlink_stmt_vdef (stmt);
1367 gsi_replace (&gsi, new_stmt, true);
1371 CASE_FLT_FN (BUILT_IN_POWI):
1372 arg0 = gimple_call_arg (stmt, 0);
1373 arg1 = gimple_call_arg (stmt, 1);
1374 if (!host_integerp (arg1, 0))
1377 n = TREE_INT_CST_LOW (arg1);
1378 loc = gimple_location (stmt);
1379 result = gimple_expand_builtin_powi (&gsi, loc, arg0, n);
1383 tree lhs = gimple_get_lhs (stmt);
1384 gimple new_stmt = gimple_build_assign (lhs, result);
1385 gimple_set_location (new_stmt, loc);
1386 unlink_stmt_vdef (stmt);
1387 gsi_replace (&gsi, new_stmt, true);
1397 statistics_counter_event (cfun, "sincos statements inserted",
1398 sincos_stats.inserted);
1400 free_dominance_info (CDI_DOMINATORS);
1401 return cfg_changed ? TODO_cleanup_cfg : 0;
1405 gate_cse_sincos (void)
1407 /* We no longer require either sincos or cexp, since powi expansion
1408 piggybacks on this pass. */
1409 return optimize;
1412 struct gimple_opt_pass pass_cse_sincos =
1416 "sincos", /* name */
1417 gate_cse_sincos, /* gate */
1418 execute_cse_sincos, /* execute */
1421 0, /* static_pass_number */
1422 TV_NONE, /* tv_id */
1423 PROP_ssa, /* properties_required */
1424 0, /* properties_provided */
1425 0, /* properties_destroyed */
1426 0, /* todo_flags_start */
1427 TODO_dump_func | TODO_update_ssa | TODO_verify_ssa
1428 | TODO_verify_stmts /* todo_flags_finish */
1432 /* A symbolic number is used to detect byte permutation and selection
1433 patterns. Therefore the field N contains an artificial number
1434 consisting of byte size markers:
1436 0 - byte has the value 0
1437 1..size - byte contains the content of the byte
1438 number indexed with that value minus one */
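
/* Editorial example: for a 4-byte operand the initial symbolic number
   is 0x04030201 (byte i carries the marker i + 1).  Matching

     (x >> 24) | ((x >> 8) & 0xff00) | ((x << 8) & 0xff0000) | (x << 24)

   shuffles the markers into 0x01020304, the fully reversed pattern
   that find_bswap below compares against.  */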
1440 struct symbolic_number {
1441 unsigned HOST_WIDEST_INT n;
1445 /* Perform a SHIFT or ROTATE operation by COUNT bits on symbolic
1446 number N. Return false if the requested operation is not permitted
1447 on a symbolic number. */
1450 do_shift_rotate (enum tree_code code,
1451 struct symbolic_number *n,
1457 /* Zero out the extra bits of N in order to avoid them being shifted
1458 into the significant bits. */
1459 if (n->size < (int)sizeof (HOST_WIDEST_INT))
1460 n->n &= ((unsigned HOST_WIDEST_INT)1 << (n->size * BITS_PER_UNIT)) - 1;
1471 n->n = (n->n << count) | (n->n >> ((n->size * BITS_PER_UNIT) - count));
1474 n->n = (n->n >> count) | (n->n << ((n->size * BITS_PER_UNIT) - count));
1482 /* Perform sanity checking for the symbolic number N and the gimple
1483 statement STMT. */
1486 verify_symbolic_number_p (struct symbolic_number *n, gimple stmt)
1490 lhs_type = gimple_expr_type (stmt);
1492 if (TREE_CODE (lhs_type) != INTEGER_TYPE)
1495 if (TYPE_PRECISION (lhs_type) != n->size * BITS_PER_UNIT)
1501 /* find_bswap_1 invokes itself recursively with N and tries to perform
1502 the operation given by the rhs of STMT on the result. If the
1503 operation could successfully be executed the function returns the
1504 tree expression of the source operand and NULL otherwise. */
1507 find_bswap_1 (gimple stmt, struct symbolic_number *n, int limit)
1509 enum tree_code code;
1510 tree rhs1, rhs2 = NULL;
1511 gimple rhs1_stmt, rhs2_stmt;
1513 enum gimple_rhs_class rhs_class;
1515 if (!limit || !is_gimple_assign (stmt))
1518 rhs1 = gimple_assign_rhs1 (stmt);
1520 if (TREE_CODE (rhs1) != SSA_NAME)
1523 code = gimple_assign_rhs_code (stmt);
1524 rhs_class = gimple_assign_rhs_class (stmt);
1525 rhs1_stmt = SSA_NAME_DEF_STMT (rhs1);
1527 if (rhs_class == GIMPLE_BINARY_RHS)
1528 rhs2 = gimple_assign_rhs2 (stmt);
1530 /* Handle unary rhs and binary rhs with integer constants as second
1531 operand. */
1533 if (rhs_class == GIMPLE_UNARY_RHS
1534 || (rhs_class == GIMPLE_BINARY_RHS
1535 && TREE_CODE (rhs2) == INTEGER_CST))
1537 if (code != BIT_AND_EXPR
1538 && code != LSHIFT_EXPR
1539 && code != RSHIFT_EXPR
1540 && code != LROTATE_EXPR
1541 && code != RROTATE_EXPR
1543 && code != CONVERT_EXPR)
1546 source_expr1 = find_bswap_1 (rhs1_stmt, n, limit - 1);
1548 /* If find_bswap_1 returned NULL, STMT is a leaf node and we have
1549 to initialize the symbolic number. */
1552 /* Set up the symbolic number N by setting each byte to a
1553 value between 1 and the byte size of rhs1. The highest
1554 order byte is set to n->size and the lowest order
1555 byte to one. */
1556 n->size = TYPE_PRECISION (TREE_TYPE (rhs1));
1557 if (n->size % BITS_PER_UNIT != 0)
1559 n->size /= BITS_PER_UNIT;
1560 n->n = (sizeof (HOST_WIDEST_INT) < 8 ? 0 :
1561 (unsigned HOST_WIDEST_INT)0x08070605 << 32 | 0x04030201);
1563 if (n->size < (int)sizeof (HOST_WIDEST_INT))
1564 n->n &= ((unsigned HOST_WIDEST_INT)1 <<
1565 (n->size * BITS_PER_UNIT)) - 1;
1567 source_expr1 = rhs1;
1575 unsigned HOST_WIDEST_INT val = widest_int_cst_value (rhs2);
1576 unsigned HOST_WIDEST_INT tmp = val;
1578 /* Only constants masking full bytes are allowed. */
1579 for (i = 0; i < n->size; i++, tmp >>= BITS_PER_UNIT)
1580 if ((tmp & 0xff) != 0 && (tmp & 0xff) != 0xff)
1590 if (!do_shift_rotate (code, n, (int)TREE_INT_CST_LOW (rhs2)))
1597 type_size = TYPE_PRECISION (gimple_expr_type (stmt));
1598 if (type_size % BITS_PER_UNIT != 0)
1601 if (type_size / BITS_PER_UNIT < (int)(sizeof (HOST_WIDEST_INT)))
1603 /* If STMT casts to a smaller type, mask out the bits not
1604 belonging to the target type. */
1605 n->n &= ((unsigned HOST_WIDEST_INT)1 << type_size) - 1;
1607 n->size = type_size / BITS_PER_UNIT;
1613 return verify_symbolic_number_p (n, stmt) ? source_expr1 : NULL;
1616 /* Handle binary rhs. */
1618 if (rhs_class == GIMPLE_BINARY_RHS)
1620 struct symbolic_number n1, n2;
1623 if (code != BIT_IOR_EXPR)
1626 if (TREE_CODE (rhs2) != SSA_NAME)
1629 rhs2_stmt = SSA_NAME_DEF_STMT (rhs2);
1634 source_expr1 = find_bswap_1 (rhs1_stmt, &n1, limit - 1);
1639 source_expr2 = find_bswap_1 (rhs2_stmt, &n2, limit - 1);
1641 if (source_expr1 != source_expr2
1642 || n1.size != n2.size)
1648 if (!verify_symbolic_number_p (n, stmt))
1655 return source_expr1;
1660 /* Check if STMT completes a bswap implementation consisting of ORs,
1661 SHIFTs and ANDs. Return the source tree expression on which the
1662 byte swap is performed and NULL if no bswap was found. */
1665 find_bswap (gimple stmt)
1667 /* The number which the find_bswap result should match in order to
1668 have a full byte swap. The number is shifted right according
1669 to the size of the symbolic number before using it. */
1670 unsigned HOST_WIDEST_INT cmp =
1671 sizeof (HOST_WIDEST_INT) < 8 ? 0 :
1672 (unsigned HOST_WIDEST_INT)0x01020304 << 32 | 0x05060708;
1674 struct symbolic_number n;
1677 /* The last parameter determines the depth search limit. It usually
1678 correlates directly to the number of bytes to be touched. We
1679 increase that number by one here in order to also cover signed ->
1680 unsigned conversions of the src operand as can be seen in
1682 source_expr = find_bswap_1 (stmt, &n,
1683 TREE_INT_CST_LOW (
1684 TYPE_SIZE_UNIT (gimple_expr_type (stmt))) + 1);
1689 /* Zero out the extra bits of N and CMP. */
1690 if (n.size < (int)sizeof (HOST_WIDEST_INT))
1692 unsigned HOST_WIDEST_INT mask =
1693 ((unsigned HOST_WIDEST_INT)1 << (n.size * BITS_PER_UNIT)) - 1;
1696 cmp >>= (sizeof (HOST_WIDEST_INT) - n.size) * BITS_PER_UNIT;
1699 /* A complete byte swap should make the symbolic number start
1700 with the largest digit in the highest order byte. */
1707 /* Find manual byte swap implementations and turn them into a bswap
1708 builtin invocation. */
1711 execute_optimize_bswap (void)
1714 bool bswap32_p, bswap64_p;
1715 bool changed = false;
1716 tree bswap32_type = NULL_TREE, bswap64_type = NULL_TREE;
1718 if (BITS_PER_UNIT != 8)
1721 if (sizeof (HOST_WIDEST_INT) < 8)
1724 bswap32_p = (built_in_decls[BUILT_IN_BSWAP32]
1725 && optab_handler (bswap_optab, SImode) != CODE_FOR_nothing);
1726 bswap64_p = (built_in_decls[BUILT_IN_BSWAP64]
1727 && (optab_handler (bswap_optab, DImode) != CODE_FOR_nothing
1728 || (bswap32_p && word_mode == SImode)));
1730 if (!bswap32_p && !bswap64_p)
1733 /* Determine the argument type of the builtins. The code later on
1734 assumes that the return and argument type are the same. */
1737 tree fndecl = built_in_decls[BUILT_IN_BSWAP32];
1738 bswap32_type = TREE_VALUE (TYPE_ARG_TYPES (TREE_TYPE (fndecl)));
1743 tree fndecl = built_in_decls[BUILT_IN_BSWAP64];
1744 bswap64_type = TREE_VALUE (TYPE_ARG_TYPES (TREE_TYPE (fndecl)));
1747 memset (&bswap_stats, 0, sizeof (bswap_stats));
1751 gimple_stmt_iterator gsi;
1753 for (gsi = gsi_after_labels (bb); !gsi_end_p (gsi); gsi_next (&gsi))
1755 gimple stmt = gsi_stmt (gsi);
1756 tree bswap_src, bswap_type;
1758 tree fndecl = NULL_TREE;
1762 if (!is_gimple_assign (stmt)
1763 || gimple_assign_rhs_code (stmt) != BIT_IOR_EXPR)
1766 type_size = TYPE_PRECISION (gimple_expr_type (stmt));
1773 fndecl = built_in_decls[BUILT_IN_BSWAP32];
1774 bswap_type = bswap32_type;
1780 fndecl = built_in_decls[BUILT_IN_BSWAP64];
1781 bswap_type = bswap64_type;
1791 bswap_src = find_bswap (stmt);
1797 if (type_size == 32)
1798 bswap_stats.found_32bit++;
1800 bswap_stats.found_64bit++;
1802 bswap_tmp = bswap_src;
1804 /* Convert the src expression if necessary. */
1805 if (!useless_type_conversion_p (TREE_TYPE (bswap_tmp), bswap_type))
1807 gimple convert_stmt;
1809 bswap_tmp = create_tmp_var (bswap_type, "bswapsrc");
1810 add_referenced_var (bswap_tmp);
1811 bswap_tmp = make_ssa_name (bswap_tmp, NULL);
1813 convert_stmt = gimple_build_assign_with_ops (
1814 CONVERT_EXPR, bswap_tmp, bswap_src, NULL);
1815 gsi_insert_before (&gsi, convert_stmt, GSI_SAME_STMT);
1818 call = gimple_build_call (fndecl, 1, bswap_tmp);
1820 bswap_tmp = gimple_assign_lhs (stmt);
1822 /* Convert the result if necessary. */
1823 if (!useless_type_conversion_p (TREE_TYPE (bswap_tmp), bswap_type))
1825 gimple convert_stmt;
1827 bswap_tmp = create_tmp_var (bswap_type, "bswapdst");
1828 add_referenced_var (bswap_tmp);
1829 bswap_tmp = make_ssa_name (bswap_tmp, NULL);
1830 convert_stmt = gimple_build_assign_with_ops (
1831 CONVERT_EXPR, gimple_assign_lhs (stmt), bswap_tmp, NULL);
1832 gsi_insert_after (&gsi, convert_stmt, GSI_SAME_STMT);
1835 gimple_call_set_lhs (call, bswap_tmp);
1839 fprintf (dump_file, "%d bit bswap implementation found at: ",
1841 print_gimple_stmt (dump_file, stmt, 0, 0);
1844 gsi_insert_after (&gsi, call, GSI_SAME_STMT);
1845 gsi_remove (&gsi, true);
1849 statistics_counter_event (cfun, "32-bit bswap implementations found",
1850 bswap_stats.found_32bit);
1851 statistics_counter_event (cfun, "64-bit bswap implementations found",
1852 bswap_stats.found_64bit);
1854 return (changed ? TODO_dump_func | TODO_update_ssa | TODO_verify_ssa
1855 | TODO_verify_stmts : 0);
1859 gate_optimize_bswap (void)
1861 return flag_expensive_optimizations && optimize;
1864 struct gimple_opt_pass pass_optimize_bswap =
1869 gate_optimize_bswap, /* gate */
1870 execute_optimize_bswap, /* execute */
1873 0, /* static_pass_number */
1874 TV_NONE, /* tv_id */
1875 PROP_ssa, /* properties_required */
1876 0, /* properties_provided */
1877 0, /* properties_destroyed */
1878 0, /* todo_flags_start */
1879 0 /* todo_flags_finish */
1883 /* Return true if RHS is a suitable operand for a widening multiplication.
1884 There are two cases:
1886 - RHS makes some value twice as wide. Store that value in *NEW_RHS_OUT
1887 if so, and store its type in *TYPE_OUT.
1889 - RHS is an integer constant. Store that value in *NEW_RHS_OUT if so,
1890 but leave *TYPE_OUT untouched. */
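
/* Editorial example, assuming 32-bit int and 64-bit long: in

     long c = (long) a * (long) b;

   each cast satisfies is_widening_mult_rhs_p, which stores int in
   *TYPE_OUT and the uncast operand in *NEW_RHS_OUT, letting the caller
   turn the MULT_EXPR into a WIDEN_MULT_EXPR.  */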
1893 is_widening_mult_rhs_p (tree rhs, tree *type_out, tree *new_rhs_out)
1896 tree type, type1, rhs1;
1897 enum tree_code rhs_code;
1899 if (TREE_CODE (rhs) == SSA_NAME)
1901 type = TREE_TYPE (rhs);
1902 stmt = SSA_NAME_DEF_STMT (rhs);
1903 if (!is_gimple_assign (stmt))
1906 rhs_code = gimple_assign_rhs_code (stmt);
1907 if (TREE_CODE (type) == INTEGER_TYPE
1908 ? !CONVERT_EXPR_CODE_P (rhs_code)
1909 : rhs_code != FIXED_CONVERT_EXPR)
1912 rhs1 = gimple_assign_rhs1 (stmt);
1913 type1 = TREE_TYPE (rhs1);
1914 if (TREE_CODE (type1) != TREE_CODE (type)
1915 || TYPE_PRECISION (type1) * 2 != TYPE_PRECISION (type))
1918 *new_rhs_out = rhs1;
1923 if (TREE_CODE (rhs) == INTEGER_CST)
1933 /* Return true if STMT performs a widening multiplication. If so,
1934 store the unwidened types of the operands in *TYPE1_OUT and *TYPE2_OUT
1935 respectively. Also fill *RHS1_OUT and *RHS2_OUT such that converting
1936 those operands to types *TYPE1_OUT and *TYPE2_OUT would give the
1937 operands of the multiplication. */
1940 is_widening_mult_p (gimple stmt,
1941 tree *type1_out, tree *rhs1_out,
1942 tree *type2_out, tree *rhs2_out)
1946 type = TREE_TYPE (gimple_assign_lhs (stmt));
1947 if (TREE_CODE (type) != INTEGER_TYPE
1948 && TREE_CODE (type) != FIXED_POINT_TYPE)
1951 if (!is_widening_mult_rhs_p (gimple_assign_rhs1 (stmt), type1_out, rhs1_out))
1954 if (!is_widening_mult_rhs_p (gimple_assign_rhs2 (stmt), type2_out, rhs2_out))
1957 if (*type1_out == NULL)
1959 if (*type2_out == NULL || !int_fits_type_p (*rhs1_out, *type2_out))
1961 *type1_out = *type2_out;
1964 if (*type2_out == NULL)
1966 if (!int_fits_type_p (*rhs2_out, *type1_out))
1968 *type2_out = *type1_out;
1974 /* Process a single gimple statement STMT, which has a MULT_EXPR as
1975 its rhs, and try to convert it into a WIDEN_MULT_EXPR. The return
1976 value is true iff we converted the statement. */
1979 convert_mult_to_widen (gimple stmt)
1981 tree lhs, rhs1, rhs2, type, type1, type2;
1982 enum insn_code handler;
1984 lhs = gimple_assign_lhs (stmt);
1985 type = TREE_TYPE (lhs);
1986 if (TREE_CODE (type) != INTEGER_TYPE)
1989 if (!is_widening_mult_p (stmt, &type1, &rhs1, &type2, &rhs2))
1992 if (TYPE_UNSIGNED (type1) && TYPE_UNSIGNED (type2))
1993 handler = optab_handler (umul_widen_optab, TYPE_MODE (type));
1994 else if (!TYPE_UNSIGNED (type1) && !TYPE_UNSIGNED (type2))
1995 handler = optab_handler (smul_widen_optab, TYPE_MODE (type));
1997 handler = optab_handler (usmul_widen_optab, TYPE_MODE (type));
1999 if (handler == CODE_FOR_nothing)
2002 gimple_assign_set_rhs1 (stmt, fold_convert (type1, rhs1));
2003 gimple_assign_set_rhs2 (stmt, fold_convert (type2, rhs2));
2004 gimple_assign_set_rhs_code (stmt, WIDEN_MULT_EXPR);
2006 widen_mul_stats.widen_mults_inserted++;
2010 /* Process a single gimple statement STMT, which is found at the
2011 iterator GSI and has either a PLUS_EXPR or a MINUS_EXPR as its
2012 rhs (given by CODE), and try to convert it into a
2013 WIDEN_MULT_PLUS_EXPR or a WIDEN_MULT_MINUS_EXPR. The return value
2014 is true iff we converted the statement. */
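
/* Editorial example: given

     tmp = (long) a * (long) b;
     res = tmp + c;

   where the multiplication is (or can become) a widening multiply,
   the PLUS_EXPR is rewritten as res = WIDEN_MULT_PLUS_EXPR <a, b, c>,
   provided the target implements the matching multiply-and-accumulate
   optab.  */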
2017 convert_plusminus_to_widen (gimple_stmt_iterator *gsi, gimple stmt,
2018 enum tree_code code)
2020 gimple rhs1_stmt = NULL, rhs2_stmt = NULL;
2021 tree type, type1, type2;
2022 tree lhs, rhs1, rhs2, mult_rhs1, mult_rhs2, add_rhs;
2023 enum tree_code rhs1_code = ERROR_MARK, rhs2_code = ERROR_MARK;
2025 enum tree_code wmult_code;
2027 lhs = gimple_assign_lhs (stmt);
2028 type = TREE_TYPE (lhs);
2029 if (TREE_CODE (type) != INTEGER_TYPE
2030 && TREE_CODE (type) != FIXED_POINT_TYPE)
2033 if (code == MINUS_EXPR)
2034 wmult_code = WIDEN_MULT_MINUS_EXPR;
2036 wmult_code = WIDEN_MULT_PLUS_EXPR;
2038 rhs1 = gimple_assign_rhs1 (stmt);
2039 rhs2 = gimple_assign_rhs2 (stmt);
2041 if (TREE_CODE (rhs1) == SSA_NAME)
2043 rhs1_stmt = SSA_NAME_DEF_STMT (rhs1);
2044 if (is_gimple_assign (rhs1_stmt))
2045 rhs1_code = gimple_assign_rhs_code (rhs1_stmt);
2050 if (TREE_CODE (rhs2) == SSA_NAME)
2052 rhs2_stmt = SSA_NAME_DEF_STMT (rhs2);
2053 if (is_gimple_assign (rhs2_stmt))
2054 rhs2_code = gimple_assign_rhs_code (rhs2_stmt);
2059 if (code == PLUS_EXPR && rhs1_code == MULT_EXPR)
2061 if (!is_widening_mult_p (rhs1_stmt, &type1, &mult_rhs1,
2062 &type2, &mult_rhs2))
2066 else if (rhs2_code == MULT_EXPR)
2068 if (!is_widening_mult_p (rhs2_stmt, &type1, &mult_rhs1,
2069 &type2, &mult_rhs2))
2073 else if (code == PLUS_EXPR && rhs1_code == WIDEN_MULT_EXPR)
2075 mult_rhs1 = gimple_assign_rhs1 (rhs1_stmt);
2076 mult_rhs2 = gimple_assign_rhs2 (rhs1_stmt);
2077 type1 = TREE_TYPE (mult_rhs1);
2078 type2 = TREE_TYPE (mult_rhs2);
2081 else if (rhs2_code == WIDEN_MULT_EXPR)
2083 mult_rhs1 = gimple_assign_rhs1 (rhs2_stmt);
2084 mult_rhs2 = gimple_assign_rhs2 (rhs2_stmt);
2085 type1 = TREE_TYPE (mult_rhs1);
2086 type2 = TREE_TYPE (mult_rhs2);
2092 if (TYPE_UNSIGNED (type1) != TYPE_UNSIGNED (type2))
2095 /* Verify that the machine can perform a widening multiply
2096 accumulate in this mode/signedness combination, otherwise
2097 this transformation is likely to pessimize code. */
2098 this_optab = optab_for_tree_code (wmult_code, type1, optab_default);
2099 if (optab_handler (this_optab, TYPE_MODE (type)) == CODE_FOR_nothing)
2102 /* ??? May need some type verification here? */
2104 gimple_assign_set_rhs_with_ops_1 (gsi, wmult_code,
2105 fold_convert (type1, mult_rhs1),
2106 fold_convert (type2, mult_rhs2),
2108 update_stmt (gsi_stmt (*gsi));
2109 widen_mul_stats.maccs_inserted++;
2113 /* Combine the multiplication at MUL_STMT with operands MULOP1 and MULOP2
2114 with uses in additions and subtractions to form fused multiply-add
2115 operations. Returns true if successful and MUL_STMT should be removed. */
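
/* Editorial example: for

     tmp = a * b;
     res1 = tmp + c;
     res2 = d - tmp;

   every use of tmp is an addition or subtraction, so the uses become
   res1 = FMA <a, b, c> and res2 = FMA <-a, b, d>, and the original
   multiplication goes dead.  */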
2118 convert_mult_to_fma (gimple mul_stmt, tree op1, tree op2)
2120 tree mul_result = gimple_get_lhs (mul_stmt);
2121 tree type = TREE_TYPE (mul_result);
2122 gimple use_stmt, neguse_stmt, fma_stmt;
2123 use_operand_p use_p;
2124 imm_use_iterator imm_iter;
2126 if (FLOAT_TYPE_P (type)
2127 && flag_fp_contract_mode == FP_CONTRACT_OFF)
2130 /* We don't want to do bitfield reduction ops. */
2131 if (INTEGRAL_TYPE_P (type)
2132 && (TYPE_PRECISION (type)
2133 != GET_MODE_PRECISION (TYPE_MODE (type))))
2136 /* If the target doesn't support it, don't generate it. We assume that
2137 if fma isn't available then fms, fnma or fnms are not either. */
2138 if (optab_handler (fma_optab, TYPE_MODE (type)) == CODE_FOR_nothing)
2141 /* Make sure that the multiplication statement becomes dead after
2142 the transformation, thus that all uses are transformed to FMAs.
2143 This means we assume that an FMA operation has the same cost
2144 as an addition. */
2145 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, mul_result)
2147 enum tree_code use_code;
2148 tree result = mul_result;
2149 bool negate_p = false;
2151 use_stmt = USE_STMT (use_p);
2153 if (is_gimple_debug (use_stmt))
2156 /* For now restrict these operations to single basic blocks. In theory
2157 we would want to support sinking the multiplication in
2158 m = a*b;
2159 if ()
2160 ma = m + c;
2161 else
2162 d = m;
2163 to form a fma in the then block and sink the multiplication to the
2164 else block. */
2165 if (gimple_bb (use_stmt) != gimple_bb (mul_stmt))
2168 if (!is_gimple_assign (use_stmt))
2171 use_code = gimple_assign_rhs_code (use_stmt);
2173 /* A negate on the multiplication leads to FNMA. */
2174 if (use_code == NEGATE_EXPR)
2179 result = gimple_assign_lhs (use_stmt);
2181 /* Make sure the negate statement becomes dead with this
2182 single transformation. */
2183 if (!single_imm_use (gimple_assign_lhs (use_stmt),
2184 &use_p, &neguse_stmt))
2187 /* Make sure the multiplication isn't also used on that stmt. */
2188 FOR_EACH_SSA_TREE_OPERAND (use, neguse_stmt, iter, SSA_OP_USE)
2189 if (use == mul_result)
2193 use_stmt = neguse_stmt;
2194 if (gimple_bb (use_stmt) != gimple_bb (mul_stmt))
2196 if (!is_gimple_assign (use_stmt))
2199 use_code = gimple_assign_rhs_code (use_stmt);
2206 if (gimple_assign_rhs2 (use_stmt) == result)
2207 negate_p = !negate_p;
2212 /* FMA can only be formed from PLUS and MINUS. */
2216 /* We can't handle a * b + a * b. */
2217 if (gimple_assign_rhs1 (use_stmt) == gimple_assign_rhs2 (use_stmt))
2220 /* While it is possible to validate whether or not the exact form
2221 that we've recognized is available in the backend, the assumption
2222 is that the transformation is never a loss. For instance, suppose
2223 the target only has the plain FMA pattern available. Consider
2224 a*b-c -> fma(a,b,-c): we've exchanged MUL+SUB for FMA+NEG, which
2225 is still two operations. Consider -(a*b)-c -> fma(-a,b,-c): we
2226 still have 3 operations, but in the FMA form the two NEGs are
2227 independent and could be run in parallel. */
2230 FOR_EACH_IMM_USE_STMT (use_stmt, imm_iter, mul_result)
2232 gimple_stmt_iterator gsi = gsi_for_stmt (use_stmt);
2233 enum tree_code use_code;
2234 tree addop, mulop1 = op1, result = mul_result;
2235 bool negate_p = false;
2237 if (is_gimple_debug (use_stmt))
2240 use_code = gimple_assign_rhs_code (use_stmt);
2241 if (use_code == NEGATE_EXPR)
2243 result = gimple_assign_lhs (use_stmt);
2244 single_imm_use (gimple_assign_lhs (use_stmt), &use_p, &neguse_stmt);
2245 gsi_remove (&gsi, true);
2246 release_defs (use_stmt);
2248 use_stmt = neguse_stmt;
2249 gsi = gsi_for_stmt (use_stmt);
2250 use_code = gimple_assign_rhs_code (use_stmt);
2254 if (gimple_assign_rhs1 (use_stmt) == result)
2256 addop = gimple_assign_rhs2 (use_stmt);
2257 /* a * b - c -> a * b + (-c) */
2258 if (gimple_assign_rhs_code (use_stmt) == MINUS_EXPR)
2259 addop = force_gimple_operand_gsi (&gsi,
2260 build1 (NEGATE_EXPR,
2262 true, NULL_TREE, true,
2267 addop = gimple_assign_rhs1 (use_stmt);
2268 /* a - b * c -> (-b) * c + a */
2269 if (gimple_assign_rhs_code (use_stmt) == MINUS_EXPR)
2270 negate_p = !negate_p;
2274 mulop1 = force_gimple_operand_gsi (&gsi,
2275 build1 (NEGATE_EXPR,
2277 true, NULL_TREE, true,
2280 fma_stmt = gimple_build_assign_with_ops3 (FMA_EXPR,
2281 gimple_assign_lhs (use_stmt),
2284 gsi_replace (&gsi, fma_stmt, true);
2285 widen_mul_stats.fmas_inserted++;
2291 /* Find integer multiplications where the operands are extended from
2292 smaller types, and replace the MULT_EXPR with a WIDEN_MULT_EXPR
2293 where appropriate. */
2296 execute_optimize_widening_mul (void)
2299 bool cfg_changed = false;
2301 memset (&widen_mul_stats, 0, sizeof (widen_mul_stats));
2305 gimple_stmt_iterator gsi;
2307 for (gsi = gsi_after_labels (bb); !gsi_end_p (gsi);)
2309 gimple stmt = gsi_stmt (gsi);
2310 enum tree_code code;
2312 if (is_gimple_assign (stmt))
2314 code = gimple_assign_rhs_code (stmt);
2318 if (!convert_mult_to_widen (stmt)
2319 && convert_mult_to_fma (stmt,
2320 gimple_assign_rhs1 (stmt),
2321 gimple_assign_rhs2 (stmt)))
2323 gsi_remove (&gsi, true);
2324 release_defs (stmt);
2331 convert_plusminus_to_widen (&gsi, stmt, code);
2337 else if (is_gimple_call (stmt)
2338 && gimple_call_lhs (stmt))
2340 tree fndecl = gimple_call_fndecl (stmt);
2341 if (fndecl
2342 && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL)
2344 switch (DECL_FUNCTION_CODE (fndecl))
2349 if (TREE_CODE (gimple_call_arg (stmt, 1)) == REAL_CST
2350 && REAL_VALUES_EQUAL
2351 (TREE_REAL_CST (gimple_call_arg (stmt, 1)),
2352 dconst2)
2353 && convert_mult_to_fma (stmt,
2354 gimple_call_arg (stmt, 0),
2355 gimple_call_arg (stmt, 0)))
2357 unlink_stmt_vdef (stmt);
2358 gsi_remove (&gsi, true);
2359 release_defs (stmt);
2360 if (gimple_purge_dead_eh_edges (bb))
2374 statistics_counter_event (cfun, "widening multiplications inserted",
2375 widen_mul_stats.widen_mults_inserted);
2376 statistics_counter_event (cfun, "widening maccs inserted",
2377 widen_mul_stats.maccs_inserted);
2378 statistics_counter_event (cfun, "fused multiply-adds inserted",
2379 widen_mul_stats.fmas_inserted);
2381 return cfg_changed ? TODO_cleanup_cfg : 0;
2385 gate_optimize_widening_mul (void)
2387 return flag_expensive_optimizations && optimize;
2390 struct gimple_opt_pass pass_optimize_widening_mul =
2394 "widening_mul", /* name */
2395 gate_optimize_widening_mul, /* gate */
2396 execute_optimize_widening_mul, /* execute */
2399 0, /* static_pass_number */
2400 TV_NONE, /* tv_id */
2401 PROP_ssa, /* properties_required */
2402 0, /* properties_provided */
2403 0, /* properties_destroyed */
2404 0, /* todo_flags_start */
2408 | TODO_update_ssa /* todo_flags_finish */