1 /* Expand the basic unary and binary arithmetic operations, for GNU compiler.
2 Copyright (C) 1987, 1988, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
3 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007
4 Free Software Foundation, Inc.
6 This file is part of GCC.
8 GCC is free software; you can redistribute it and/or modify it under
9 the terms of the GNU General Public License as published by the Free
10 Software Foundation; either version 3, or (at your option) any later
13 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
14 WARRANTY; without even the implied warranty of MERCHANTABILITY or
15 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING3. If not see
20 <http://www.gnu.org/licenses/>. */
25 #include "coretypes.h"
29 /* Include insn-config.h before expr.h so that HAVE_conditional_move
30 is properly defined. */
31 #include "insn-config.h"
45 #include "basic-block.h"
/* NOTE(review): this extraction elides some original source lines (gaps in
   the embedded numbering), e.g. the #endif matching HAVE_conditional_move
   below is not visible here — confirm against the full file.  */
48 /* Each optab contains info on how this target machine
49 can perform a particular operation
50 for all sizes and kinds of operands.
52 The operation to be performed is often specified
53 by passing one of these optabs as an argument.
55 See expr.h for documentation of these optabs. */
57 optab optab_table[OTI_MAX];
/* Library-function rtxs, indexed by libfunc index.  */
59 rtx libfunc_table[LTI_MAX];
61 /* Tables of patterns for converting one mode to another. */
62 convert_optab convert_optab_table[COI_MAX];
64 /* Contains the optab used for each rtx code. */
65 optab code_to_optab[NUM_RTX_CODE + 1];
67 /* Indexed by the rtx-code for a conditional (eg. EQ, LT,...)
68 gives the gen_function to make a branch to test that condition. */
70 rtxfun bcc_gen_fctn[NUM_RTX_CODE];
72 /* Indexed by the rtx-code for a conditional (eg. EQ, LT,...)
73 gives the insn code to make a store-condition insn
74 to test that condition. */
76 enum insn_code setcc_gen_code[NUM_RTX_CODE];
78 #ifdef HAVE_conditional_move
79 /* Indexed by the machine mode, gives the insn code to make a conditional
80 move insn. This is not indexed by the rtx-code like bcc_gen_fctn and
81 setcc_gen_code to cut down on the number of named patterns. Consider a day
82 when a lot more rtx codes are conditional (eg: for the ARM). */
84 enum insn_code movcc_gen_code[NUM_MACHINE_MODES];
87 /* Indexed by the machine mode, gives the insn code for vector conditional
90 enum insn_code vcond_gen_code[NUM_MACHINE_MODES];
91 enum insn_code vcondu_gen_code[NUM_MACHINE_MODES];
93 /* The insn generating function can not take an rtx_code argument.
94 TRAP_RTX is used as an rtx argument. Its code is replaced with
95 the code to be used in the trap insn and all other fields are ignored. */
96 static GTY(()) rtx trap_rtx;
/* Forward declarations for the static helpers defined later in this file.
   NOTE(review): some prototypes below are truncated by elided source lines
   (gaps in the embedded numbering) — confirm against the full file.  */
98 static int add_equal_note (rtx, rtx, enum rtx_code, rtx, rtx);
99 static rtx widen_operand (rtx, enum machine_mode, enum machine_mode, int,
101 static void prepare_cmp_insn (rtx *, rtx *, enum rtx_code *, rtx,
102 enum machine_mode *, int *,
103 enum can_compare_purpose);
104 static enum insn_code can_fix_p (enum machine_mode, enum machine_mode, int,
106 static enum insn_code can_float_p (enum machine_mode, enum machine_mode, int);
107 static optab new_optab (void);
108 static convert_optab new_convert_optab (void);
109 static inline optab init_optab (enum rtx_code);
110 static inline optab init_optabv (enum rtx_code);
111 static inline convert_optab init_convert_optab (enum rtx_code);
112 static void init_libfuncs (optab, int, int, const char *, int);
113 static void init_integral_libfuncs (optab, const char *, int);
114 static void init_floating_libfuncs (optab, const char *, int);
115 static void init_interclass_conv_libfuncs (convert_optab, const char *,
116 enum mode_class, enum mode_class);
117 static void init_intraclass_conv_libfuncs (convert_optab, const char *,
118 enum mode_class, bool);
119 static void emit_cmp_and_jump_insn_1 (rtx, rtx, enum machine_mode,
120 enum rtx_code, int, rtx);
121 static void prepare_float_lib_cmp (rtx *, rtx *, enum rtx_code *,
122 enum machine_mode *, int *);
123 static rtx widen_clz (enum machine_mode, rtx, rtx);
124 static rtx expand_parity (enum machine_mode, rtx, rtx);
125 static rtx expand_ffs (enum machine_mode, rtx, rtx);
126 static rtx expand_ctz (enum machine_mode, rtx, rtx);
127 static enum rtx_code get_rtx_code (enum tree_code, bool);
128 static rtx vector_compare_rtx (tree, bool, enum insn_code);
130 /* Current libcall id. It doesn't matter what these are, as long
131 as they are unique to each libcall that is emitted. */
132 static HOST_WIDE_INT libcall_id = 0;
/* Fallback stubs for targets without a conditional-trap pattern; the
   matching #endif and the #else of the #if below are elided here.  */
134 #ifndef HAVE_conditional_trap
135 #define HAVE_conditional_trap 0
136 #define gen_conditional_trap(a,b) (gcc_unreachable (), NULL_RTX)
139 /* Prefixes for the current version of decimal floating point (BID vs. DPD) */
140 #if ENABLE_DECIMAL_BID_FORMAT
141 #define DECIMAL_PREFIX "bid_"
143 #define DECIMAL_PREFIX "dpd_"
147 /* Add a REG_EQUAL note to the last insn in INSNS. TARGET is being set to
148 the result of operation CODE applied to OP0 (and OP1 if it is a binary
151 If the last insn does not set TARGET, don't do anything, but return 1.
153 If a previous insn sets TARGET and TARGET is one of OP0 or OP1,
154 don't add the REG_EQUAL note but return 0. Our caller can then try
155 again, ensuring that TARGET is not one of the operands. */
/* NOTE(review): this extraction elides several original lines of the body
   (gaps in the embedded numbering: return statements, braces); the code
   below is incomplete as shown — confirm against the full file.  */
158 add_equal_note (rtx insns, rtx target, enum rtx_code code, rtx op0, rtx op1)
160 rtx last_insn, insn, set;
163 gcc_assert (insns && INSN_P (insns) && NEXT_INSN (insns));
/* Only arithmetic, comparison and unary codes get a REG_EQUAL note.  */
165 if (GET_RTX_CLASS (code) != RTX_COMM_ARITH
166 && GET_RTX_CLASS (code) != RTX_BIN_ARITH
167 && GET_RTX_CLASS (code) != RTX_COMM_COMPARE
168 && GET_RTX_CLASS (code) != RTX_COMPARE
169 && GET_RTX_CLASS (code) != RTX_UNARY)
172 if (GET_CODE (target) == ZERO_EXTRACT)
/* Walk forward to the last insn of the sequence.  */
175 for (last_insn = insns;
176 NEXT_INSN (last_insn) != NULL_RTX;
177 last_insn = NEXT_INSN (last_insn))
180 set = single_set (last_insn);
184 if (! rtx_equal_p (SET_DEST (set), target)
185 /* For a STRICT_LOW_PART, the REG_NOTE applies to what is inside it. */
186 && (GET_CODE (SET_DEST (set)) != STRICT_LOW_PART
187 || ! rtx_equal_p (XEXP (SET_DEST (set), 0), target)))
190 /* If TARGET is in OP0 or OP1, check if anything in SEQ sets TARGET
191 besides the last insn. */
192 if (reg_overlap_mentioned_p (target, op0)
193 || (op1 && reg_overlap_mentioned_p (target, op1)))
195 insn = PREV_INSN (last_insn);
196 while (insn != NULL_RTX)
198 if (reg_set_p (target, insn))
201 insn = PREV_INSN (insn);
/* Build the note rtx: unary codes take one operand, binary codes two.  */
205 if (GET_RTX_CLASS (code) == RTX_UNARY)
206 note = gen_rtx_fmt_e (code, GET_MODE (target), copy_rtx (op0));
208 note = gen_rtx_fmt_ee (code, GET_MODE (target), copy_rtx (op0), copy_rtx (op1));
210 set_unique_reg_note (last_insn, REG_EQUAL, note);
215 /* Widen OP to MODE and return the rtx for the widened operand. UNSIGNEDP
216 says whether OP is signed or unsigned. NO_EXTEND is nonzero if we need
217 not actually do a sign-extend or zero-extend, but can leave the
218 higher-order bits of the result rtx undefined, for example, in the case
219 of logical operations, but not right shifts. */
/* NOTE(review): some original lines are elided in this extraction (gaps in
   the embedded numbering), so the body is incomplete as shown.  */
222 widen_operand (rtx op, enum machine_mode mode, enum machine_mode oldmode,
223 int unsignedp, int no_extend)
227 /* If we don't have to extend and this is a constant, return it. */
228 if (no_extend && GET_MODE (op) == VOIDmode)
231 /* If we must extend do so. If OP is a SUBREG for a promoted object, also
232 extend since it will be more efficient to do so unless the signedness of
233 a promoted object differs from our extension. */
235 || (GET_CODE (op) == SUBREG && SUBREG_PROMOTED_VAR_P (op)
236 && SUBREG_PROMOTED_UNSIGNED_P (op) == unsignedp)
237 return convert_modes (mode, oldmode, op, unsignedp);
239 /* If MODE is no wider than a single word, we return a paradoxical
241 if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
242 return gen_rtx_SUBREG (mode, force_reg (GET_MODE (op), op), 0);
244 /* Otherwise, get an object of MODE, clobber it, and set the low-order
247 result = gen_reg_rtx (mode);
/* The CLOBBER tells the optimizers the high bits are intentionally
   undefined; only the low part is then copied from OP.  */
248 emit_insn (gen_rtx_CLOBBER (VOIDmode, result));
249 emit_move_insn (gen_lowpart (GET_MODE (op), result), op);
253 /* Return the optab used for computing the operation given by
254 the tree code, CODE. This function is not always usable (for
255 example, it cannot give complete results for multiplication
256 or division) but probably ought to be relied on more widely
257 throughout the expander. */
/* NOTE(review): many case labels and the switch scaffolding are elided in
   this extraction (gaps in the embedded numbering) — only the return
   expressions of the visible cases remain.  */
259 optab_for_tree_code (enum tree_code code, tree type)
271 return one_cmpl_optab;
280 return TYPE_UNSIGNED (type) ? umod_optab : smod_optab;
288 return TYPE_UNSIGNED (type) ? udiv_optab : sdiv_optab;
294 return TYPE_UNSIGNED (type) ? lshr_optab : ashr_optab;
303 return TYPE_UNSIGNED (type) ? umax_optab : smax_optab;
306 return TYPE_UNSIGNED (type) ? umin_optab : smin_optab;
308 case REALIGN_LOAD_EXPR:
309 return vec_realign_load_optab;
312 return TYPE_UNSIGNED (type) ? usum_widen_optab : ssum_widen_optab;
315 return TYPE_UNSIGNED (type) ? udot_prod_optab : sdot_prod_optab;
318 return TYPE_UNSIGNED (type) ? reduc_umax_optab : reduc_smax_optab;
321 return TYPE_UNSIGNED (type) ? reduc_umin_optab : reduc_smin_optab;
323 case REDUC_PLUS_EXPR:
324 return TYPE_UNSIGNED (type) ? reduc_uplus_optab : reduc_splus_optab;
326 case VEC_LSHIFT_EXPR:
327 return vec_shl_optab;
329 case VEC_RSHIFT_EXPR:
330 return vec_shr_optab;
332 case VEC_WIDEN_MULT_HI_EXPR:
333 return TYPE_UNSIGNED (type) ?
334 vec_widen_umult_hi_optab : vec_widen_smult_hi_optab;
336 case VEC_WIDEN_MULT_LO_EXPR:
337 return TYPE_UNSIGNED (type) ?
338 vec_widen_umult_lo_optab : vec_widen_smult_lo_optab;
340 case VEC_UNPACK_HI_EXPR:
341 return TYPE_UNSIGNED (type) ?
342 vec_unpacku_hi_optab : vec_unpacks_hi_optab;
344 case VEC_UNPACK_LO_EXPR:
345 return TYPE_UNSIGNED (type) ?
346 vec_unpacku_lo_optab : vec_unpacks_lo_optab;
348 case VEC_UNPACK_FLOAT_HI_EXPR:
349 /* The signedness is determined from input operand. */
350 return TYPE_UNSIGNED (type) ?
351 vec_unpacku_float_hi_optab : vec_unpacks_float_hi_optab;
353 case VEC_UNPACK_FLOAT_LO_EXPR:
354 /* The signedness is determined from input operand. */
355 return TYPE_UNSIGNED (type) ?
356 vec_unpacku_float_lo_optab : vec_unpacks_float_lo_optab;
358 case VEC_PACK_TRUNC_EXPR:
359 return vec_pack_trunc_optab;
361 case VEC_PACK_SAT_EXPR:
362 return TYPE_UNSIGNED (type) ? vec_pack_usat_optab : vec_pack_ssat_optab;
364 case VEC_PACK_FIX_TRUNC_EXPR:
365 /* The signedness is determined from output operand. */
366 return TYPE_UNSIGNED (type) ?
367 vec_pack_ufix_trunc_optab : vec_pack_sfix_trunc_optab;
/* For the trapping-arithmetic codes below, pick the -v (overflow-trapping)
   optab when the type traps on overflow.  */
373 trapv = INTEGRAL_TYPE_P (type) && TYPE_OVERFLOW_TRAPS (type);
376 case POINTER_PLUS_EXPR:
378 return trapv ? addv_optab : add_optab;
381 return trapv ? subv_optab : sub_optab;
384 return trapv ? smulv_optab : smul_optab;
387 return trapv ? negv_optab : neg_optab;
390 return trapv ? absv_optab : abs_optab;
392 case VEC_EXTRACT_EVEN_EXPR:
393 return vec_extract_even_optab;
395 case VEC_EXTRACT_ODD_EXPR:
396 return vec_extract_odd_optab;
398 case VEC_INTERLEAVE_HIGH_EXPR:
399 return vec_interleave_high_optab;
401 case VEC_INTERLEAVE_LOW_EXPR:
402 return vec_interleave_low_optab;
410 /* Expand vector widening operations.
412 There are two different classes of operations handled here:
413 1) Operations whose result is wider than all the arguments to the operation.
414 Examples: VEC_UNPACK_HI/LO_EXPR, VEC_WIDEN_MULT_HI/LO_EXPR
415 In this case OP0 and optionally OP1 would be initialized,
416 but WIDE_OP wouldn't (not relevant for this case).
417 2) Operations whose result is of the same size as the last argument to the
418 operation, but wider than all the other arguments to the operation.
419 Examples: WIDEN_SUM_EXPR, VEC_DOT_PROD_EXPR.
420 In the case WIDE_OP, OP0 and optionally OP1 would be initialized.
422 E.g, when called to expand the following operations, this is how
423 the arguments will be initialized:
425 widening-sum 2 oprnd0 - oprnd1
426 widening-dot-product 3 oprnd0 oprnd1 oprnd2
427 widening-mult 2 oprnd0 oprnd1 -
428 type-promotion (vec-unpack) 1 oprnd0 - - */
/* NOTE(review): numerous original lines are elided in this extraction
   (gaps in the embedded numbering: branch structure, emit/return code);
   the body below is incomplete as shown — confirm against the full file.  */
431 expand_widen_pattern_expr (tree exp, rtx op0, rtx op1, rtx wide_op, rtx target,
434 tree oprnd0, oprnd1, oprnd2;
435 enum machine_mode wmode = 0, tmode0, tmode1 = 0;
436 optab widen_pattern_optab;
438 enum machine_mode xmode0, xmode1 = 0, wxmode = 0;
441 rtx xop0, xop1, wxop;
442 int nops = TREE_OPERAND_LENGTH (exp);
/* Look up the insn for this widening pattern from operand 0's mode.  */
444 oprnd0 = TREE_OPERAND (exp, 0);
445 tmode0 = TYPE_MODE (TREE_TYPE (oprnd0));
446 widen_pattern_optab =
447 optab_for_tree_code (TREE_CODE (exp), TREE_TYPE (oprnd0));
448 icode = (int) optab_handler (widen_pattern_optab, tmode0)->insn_code;
449 gcc_assert (icode != CODE_FOR_nothing);
450 xmode0 = insn_data[icode].operand[1].mode;
454 oprnd1 = TREE_OPERAND (exp, 1);
455 tmode1 = TYPE_MODE (TREE_TYPE (oprnd1));
456 xmode1 = insn_data[icode].operand[2].mode;
459 /* The last operand is of a wider mode than the rest of the operands. */
467 gcc_assert (tmode1 == tmode0);
469 oprnd2 = TREE_OPERAND (exp, 2);
470 wmode = TYPE_MODE (TREE_TYPE (oprnd2));
471 wxmode = insn_data[icode].operand[3].mode;
475 wmode = wxmode = insn_data[icode].operand[0].mode;
478 || ! (*insn_data[icode].operand[0].predicate) (target, wmode))
479 temp = gen_reg_rtx (wmode);
487 /* In case the insn wants input operands in modes different from
488 those of the actual operands, convert the operands. It would
489 seem that we don't need to convert CONST_INTs, but we do, so
490 that they're properly zero-extended, sign-extended or truncated
493 if (GET_MODE (op0) != xmode0 && xmode0 != VOIDmode)
494 xop0 = convert_modes (xmode0,
495 GET_MODE (op0) != VOIDmode
501 if (GET_MODE (op1) != xmode1 && xmode1 != VOIDmode)
502 xop1 = convert_modes (xmode1,
503 GET_MODE (op1) != VOIDmode
509 if (GET_MODE (wide_op) != wxmode && wxmode != VOIDmode)
510 wxop = convert_modes (wxmode,
511 GET_MODE (wide_op) != VOIDmode
516 /* Now, if insn's predicates don't allow our operands, put them into
519 if (! (*insn_data[icode].operand[1].predicate) (xop0, xmode0)
520 && xmode0 != VOIDmode)
521 xop0 = copy_to_mode_reg (xmode0, xop0);
525 if (! (*insn_data[icode].operand[2].predicate) (xop1, xmode1)
526 && xmode1 != VOIDmode)
527 xop1 = copy_to_mode_reg (xmode1, xop1);
531 if (! (*insn_data[icode].operand[3].predicate) (wxop, wxmode)
532 && wxmode != VOIDmode)
533 wxop = copy_to_mode_reg (wxmode, wxop);
/* Emit the pattern, choosing an arity that matches the operands present:
   3 inputs, 2 inputs (with or without the wide operand), or 1 input.  */
535 pat = GEN_FCN (icode) (temp, xop0, xop1, wxop);
538 pat = GEN_FCN (icode) (temp, xop0, xop1);
544 if (! (*insn_data[icode].operand[2].predicate) (wxop, wxmode)
545 && wxmode != VOIDmode)
546 wxop = copy_to_mode_reg (wxmode, wxop);
548 pat = GEN_FCN (icode) (temp, xop0, wxop);
551 pat = GEN_FCN (icode) (temp, xop0);
558 /* Generate code to perform an operation specified by TERNARY_OPTAB
559 on operands OP0, OP1 and OP2, with result having machine-mode MODE.
561 UNSIGNEDP is for the case where we have to widen the operands
562 to perform the operation. It says to use zero-extension.
564 If TARGET is nonzero, the value
565 is generated there, if it is convenient to do so.
566 In all cases an rtx is returned for the locus of the value;
567 this may or may not be TARGET. */
/* NOTE(review): some original lines are elided in this extraction (gaps in
   the embedded numbering), so the body is incomplete as shown.  */
570 expand_ternary_op (enum machine_mode mode, optab ternary_optab, rtx op0,
571 rtx op1, rtx op2, rtx target, int unsignedp)
573 int icode = (int) optab_handler (ternary_optab, mode)->insn_code;
574 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
575 enum machine_mode mode1 = insn_data[icode].operand[2].mode;
576 enum machine_mode mode2 = insn_data[icode].operand[3].mode;
579 rtx xop0 = op0, xop1 = op1, xop2 = op2;
/* Caller must guarantee the pattern exists for MODE.  */
581 gcc_assert (optab_handler (ternary_optab, mode)->insn_code
582 != CODE_FOR_nothing);
584 if (!target || !insn_data[icode].operand[0].predicate (target, mode))
585 temp = gen_reg_rtx (mode);
589 /* In case the insn wants input operands in modes different from
590 those of the actual operands, convert the operands. It would
591 seem that we don't need to convert CONST_INTs, but we do, so
592 that they're properly zero-extended, sign-extended or truncated
595 if (GET_MODE (op0) != mode0 && mode0 != VOIDmode)
596 xop0 = convert_modes (mode0,
597 GET_MODE (op0) != VOIDmode
602 if (GET_MODE (op1) != mode1 && mode1 != VOIDmode)
603 xop1 = convert_modes (mode1,
604 GET_MODE (op1) != VOIDmode
609 if (GET_MODE (op2) != mode2 && mode2 != VOIDmode)
610 xop2 = convert_modes (mode2,
611 GET_MODE (op2) != VOIDmode
616 /* Now, if insn's predicates don't allow our operands, put them into
619 if (!insn_data[icode].operand[1].predicate (xop0, mode0)
620 && mode0 != VOIDmode)
621 xop0 = copy_to_mode_reg (mode0, xop0);
623 if (!insn_data[icode].operand[2].predicate (xop1, mode1)
624 && mode1 != VOIDmode)
625 xop1 = copy_to_mode_reg (mode1, xop1);
627 if (!insn_data[icode].operand[3].predicate (xop2, mode2)
628 && mode2 != VOIDmode)
629 xop2 = copy_to_mode_reg (mode2, xop2);
631 pat = GEN_FCN (icode) (temp, xop0, xop1, xop2);
638 /* Like expand_binop, but return a constant rtx if the result can be
639 calculated at compile time. The arguments and return value are
640 otherwise the same as for expand_binop. */
643 simplify_expand_binop (enum machine_mode mode, optab binoptab,
644 rtx op0, rtx op1, rtx target, int unsignedp,
645 enum optab_methods methods)
647 if (CONSTANT_P (op0) && CONSTANT_P (op1))
649 rtx x = simplify_binary_operation (binoptab->code, mode, op0, op1);
655 return expand_binop (mode, binoptab, op0, op1, target, unsignedp, methods);
658 /* Like simplify_expand_binop, but always put the result in TARGET.
659 Return true if the expansion succeeded. */
662 force_expand_binop (enum machine_mode mode, optab binoptab,
663 rtx op0, rtx op1, rtx target, int unsignedp,
664 enum optab_methods methods)
666 rtx x = simplify_expand_binop (mode, binoptab, op0, op1,
667 target, unsignedp, methods);
671 emit_move_insn (target, x);
675 /* Generate insns for VEC_LSHIFT_EXPR, VEC_RSHIFT_EXPR. */
/* NOTE(review): some original lines are elided in this extraction (gaps in
   the embedded numbering: return type, braces, break/default statements);
   the body below is incomplete as shown.  */
678 expand_vec_shift_expr (tree vec_shift_expr, rtx target)
680 enum insn_code icode;
681 rtx rtx_op1, rtx_op2;
682 enum machine_mode mode1;
683 enum machine_mode mode2;
684 enum machine_mode mode = TYPE_MODE (TREE_TYPE (vec_shift_expr));
685 tree vec_oprnd = TREE_OPERAND (vec_shift_expr, 0);
686 tree shift_oprnd = TREE_OPERAND (vec_shift_expr, 1);
/* Pick the whole-vector shift optab matching the tree code.  */
690 switch (TREE_CODE (vec_shift_expr))
692 case VEC_RSHIFT_EXPR:
693 shift_optab = vec_shr_optab;
695 case VEC_LSHIFT_EXPR:
696 shift_optab = vec_shl_optab;
702 icode = (int) optab_handler (shift_optab, mode)->insn_code;
703 gcc_assert (icode != CODE_FOR_nothing);
705 mode1 = insn_data[icode].operand[1].mode;
706 mode2 = insn_data[icode].operand[2].mode;
/* Expand the operands and coerce them to modes the insn accepts.  */
708 rtx_op1 = expand_normal (vec_oprnd);
709 if (!(*insn_data[icode].operand[1].predicate) (rtx_op1, mode1)
710 && mode1 != VOIDmode)
711 rtx_op1 = force_reg (mode1, rtx_op1);
713 rtx_op2 = expand_normal (shift_oprnd);
714 if (!(*insn_data[icode].operand[2].predicate) (rtx_op2, mode2)
715 && mode2 != VOIDmode)
716 rtx_op2 = force_reg (mode2, rtx_op2);
719 || ! (*insn_data[icode].operand[0].predicate) (target, mode))
720 target = gen_reg_rtx (mode);
722 /* Emit instruction */
723 pat = GEN_FCN (icode) (target, rtx_op1, rtx_op2);
730 /* This subroutine of expand_doubleword_shift handles the cases in which
731 the effective shift value is >= BITS_PER_WORD. The arguments and return
732 value are the same as for the parent routine, except that SUPERWORD_OP1
733 is the shift count to use when shifting OUTOF_INPUT into INTO_TARGET.
734 INTO_TARGET may be null if the caller has decided to calculate it. */
737 expand_superword_shift (optab binoptab, rtx outof_input, rtx superword_op1,
738 rtx outof_target, rtx into_target,
739 int unsignedp, enum optab_methods methods)
741 if (into_target != 0)
742 if (!force_expand_binop (word_mode, binoptab, outof_input, superword_op1,
743 into_target, unsignedp, methods))
746 if (outof_target != 0)
748 /* For a signed right shift, we must fill OUTOF_TARGET with copies
749 of the sign bit, otherwise we must fill it with zeros. */
750 if (binoptab != ashr_optab)
751 emit_move_insn (outof_target, CONST0_RTX (word_mode));
753 if (!force_expand_binop (word_mode, binoptab,
754 outof_input, GEN_INT (BITS_PER_WORD - 1),
755 outof_target, unsignedp, methods))
761 /* This subroutine of expand_doubleword_shift handles the cases in which
762 the effective shift value is < BITS_PER_WORD. The arguments and return
763 value are the same as for the parent routine. */
/* NOTE(review): some original lines are elided in this extraction (gaps in
   the embedded numbering: return type, braces, failure returns); the body
   below is incomplete as shown.  */
766 expand_subword_shift (enum machine_mode op1_mode, optab binoptab,
767 rtx outof_input, rtx into_input, rtx op1,
768 rtx outof_target, rtx into_target,
769 int unsignedp, enum optab_methods methods,
770 unsigned HOST_WIDE_INT shift_mask)
772 optab reverse_unsigned_shift, unsigned_shift;
/* The carry bits move in the opposite direction to the main shift, and
   the into-half itself is always shifted logically.  */
775 reverse_unsigned_shift = (binoptab == ashl_optab ? lshr_optab : ashl_optab);
776 unsigned_shift = (binoptab == ashl_optab ? ashl_optab : lshr_optab);
778 /* The low OP1 bits of INTO_TARGET come from the high bits of OUTOF_INPUT.
779 We therefore need to shift OUTOF_INPUT by (BITS_PER_WORD - OP1) bits in
780 the opposite direction to BINOPTAB. */
781 if (CONSTANT_P (op1) || shift_mask >= BITS_PER_WORD)
783 carries = outof_input;
784 tmp = immed_double_const (BITS_PER_WORD, 0, op1_mode);
785 tmp = simplify_expand_binop (op1_mode, sub_optab, tmp, op1,
790 /* We must avoid shifting by BITS_PER_WORD bits since that is either
791 the same as a zero shift (if shift_mask == BITS_PER_WORD - 1) or
792 has unknown behavior. Do a single shift first, then shift by the
793 remainder. It's OK to use ~OP1 as the remainder if shift counts
794 are truncated to the mode size. */
795 carries = expand_binop (word_mode, reverse_unsigned_shift,
796 outof_input, const1_rtx, 0, unsignedp, methods);
797 if (shift_mask == BITS_PER_WORD - 1)
799 tmp = immed_double_const (-1, -1, op1_mode);
800 tmp = simplify_expand_binop (op1_mode, xor_optab, op1, tmp,
805 tmp = immed_double_const (BITS_PER_WORD - 1, 0, op1_mode);
806 tmp = simplify_expand_binop (op1_mode, sub_optab, tmp, op1,
810 if (tmp == 0 || carries == 0)
812 carries = expand_binop (word_mode, reverse_unsigned_shift,
813 carries, tmp, 0, unsignedp, methods);
817 /* Shift INTO_INPUT logically by OP1. This is the last use of INTO_INPUT
818 so the result can go directly into INTO_TARGET if convenient. */
819 tmp = expand_binop (word_mode, unsigned_shift, into_input, op1,
820 into_target, unsignedp, methods);
824 /* Now OR in the bits carried over from OUTOF_INPUT. */
825 if (!force_expand_binop (word_mode, ior_optab, tmp, carries,
826 into_target, unsignedp, methods))
829 /* Use a standard word_mode shift for the out-of half. */
830 if (outof_target != 0)
831 if (!force_expand_binop (word_mode, binoptab, outof_input, op1,
832 outof_target, unsignedp, methods))
839 #ifdef HAVE_conditional_move
840 /* Try implementing expand_doubleword_shift using conditional moves.
841 The shift is by < BITS_PER_WORD if (CMP_CODE CMP1 CMP2) is true,
842 otherwise it is by >= BITS_PER_WORD. SUBWORD_OP1 and SUPERWORD_OP1
843 are the shift counts to use in the former and latter case. All other
844 arguments are the same as the parent routine. */
/* NOTE(review): some original lines are elided in this extraction (gaps in
   the embedded numbering: return type, braces, returns and the closing
   #endif); the body below is incomplete as shown.  */
847 expand_doubleword_shift_condmove (enum machine_mode op1_mode, optab binoptab,
848 enum rtx_code cmp_code, rtx cmp1, rtx cmp2,
849 rtx outof_input, rtx into_input,
850 rtx subword_op1, rtx superword_op1,
851 rtx outof_target, rtx into_target,
852 int unsignedp, enum optab_methods methods,
853 unsigned HOST_WIDE_INT shift_mask)
855 rtx outof_superword, into_superword;
857 /* Put the superword version of the output into OUTOF_SUPERWORD and
859 outof_superword = outof_target != 0 ? gen_reg_rtx (word_mode) : 0;
860 if (outof_target != 0 && subword_op1 == superword_op1)
862 /* The value INTO_TARGET >> SUBWORD_OP1, which we later store in
863 OUTOF_TARGET, is the same as the value of INTO_SUPERWORD. */
864 into_superword = outof_target;
865 if (!expand_superword_shift (binoptab, outof_input, superword_op1,
866 outof_superword, 0, unsignedp, methods))
871 into_superword = gen_reg_rtx (word_mode);
872 if (!expand_superword_shift (binoptab, outof_input, superword_op1,
873 outof_superword, into_superword,
878 /* Put the subword version directly in OUTOF_TARGET and INTO_TARGET. */
879 if (!expand_subword_shift (op1_mode, binoptab,
880 outof_input, into_input, subword_op1,
881 outof_target, into_target,
882 unsignedp, methods, shift_mask))
885 /* Select between them. Do the INTO half first because INTO_SUPERWORD
886 might be the current value of OUTOF_TARGET. */
887 if (!emit_conditional_move (into_target, cmp_code, cmp1, cmp2, op1_mode,
888 into_target, into_superword, word_mode, false))
891 if (outof_target != 0)
892 if (!emit_conditional_move (outof_target, cmp_code, cmp1, cmp2, op1_mode,
893 outof_target, outof_superword,
901 /* Expand a doubleword shift (ashl, ashr or lshr) using word-mode shifts.
902 OUTOF_INPUT and INTO_INPUT are the two word-sized halves of the first
903 input operand; the shift moves bits in the direction OUTOF_INPUT->
904 INTO_TARGET. OUTOF_TARGET and INTO_TARGET are the equivalent words
905 of the target. OP1 is the shift count and OP1_MODE is its mode.
906 If OP1 is constant, it will have been truncated as appropriate
907 and is known to be nonzero.
909 If SHIFT_MASK is zero, the result of word shifts is undefined when the
910 shift count is outside the range [0, BITS_PER_WORD). This routine must
911 avoid generating such shifts for OP1s in the range [0, BITS_PER_WORD * 2).
913 If SHIFT_MASK is nonzero, all word-mode shift counts are effectively
914 masked by it and shifts in the range [BITS_PER_WORD, SHIFT_MASK) will
915 fill with zeros or sign bits as appropriate.
917 If SHIFT_MASK is BITS_PER_WORD - 1, this routine will synthesize
918 a doubleword shift whose equivalent mask is BITS_PER_WORD * 2 - 1.
919 Doing this preserves semantics required by SHIFT_COUNT_TRUNCATED.
920 In all other cases, shifts by values outside [0, BITS_PER_UNIT * 2)
923 BINOPTAB, UNSIGNEDP and METHODS are as for expand_binop. This function
924 may not use INTO_INPUT after modifying INTO_TARGET, and similarly for
925 OUTOF_INPUT and OUTOF_TARGET. OUTOF_TARGET can be null if the parent
926 function wants to calculate it itself.
928 Return true if the shift could be successfully synthesized. */
/* NOTE(review): some original lines are elided in this extraction (gaps in
   the embedded numbering: return type, braces, cmp_code assignments and
   several returns); the body below is incomplete as shown.  */
931 expand_doubleword_shift (enum machine_mode op1_mode, optab binoptab,
932 rtx outof_input, rtx into_input, rtx op1,
933 rtx outof_target, rtx into_target,
934 int unsignedp, enum optab_methods methods,
935 unsigned HOST_WIDE_INT shift_mask)
937 rtx superword_op1, tmp, cmp1, cmp2;
938 rtx subword_label, done_label;
939 enum rtx_code cmp_code;
941 /* See if word-mode shifts by BITS_PER_WORD...BITS_PER_WORD * 2 - 1 will
942 fill the result with sign or zero bits as appropriate. If so, the value
943 of OUTOF_TARGET will always be (SHIFT OUTOF_INPUT OP1). Recursively call
944 this routine to calculate INTO_TARGET (which depends on both OUTOF_INPUT
945 and INTO_INPUT), then emit code to set up OUTOF_TARGET.
947 This isn't worthwhile for constant shifts since the optimizers will
948 cope better with in-range shift counts. */
949 if (shift_mask >= BITS_PER_WORD
951 && !CONSTANT_P (op1))
953 if (!expand_doubleword_shift (op1_mode, binoptab,
954 outof_input, into_input, op1,
956 unsignedp, methods, shift_mask))
958 if (!force_expand_binop (word_mode, binoptab, outof_input, op1,
959 outof_target, unsignedp, methods))
964 /* Set CMP_CODE, CMP1 and CMP2 so that the rtx (CMP_CODE CMP1 CMP2)
965 is true when the effective shift value is less than BITS_PER_WORD.
966 Set SUPERWORD_OP1 to the shift count that should be used to shift
967 OUTOF_INPUT into INTO_TARGET when the condition is false. */
968 tmp = immed_double_const (BITS_PER_WORD, 0, op1_mode);
969 if (!CONSTANT_P (op1) && shift_mask == BITS_PER_WORD - 1)
971 /* Set CMP1 to OP1 & BITS_PER_WORD. The result is zero iff OP1
972 is a subword shift count. */
973 cmp1 = simplify_expand_binop (op1_mode, and_optab, op1, tmp,
975 cmp2 = CONST0_RTX (op1_mode);
981 /* Set CMP1 to OP1 - BITS_PER_WORD. */
982 cmp1 = simplify_expand_binop (op1_mode, sub_optab, op1, tmp,
984 cmp2 = CONST0_RTX (op1_mode);
986 superword_op1 = cmp1;
991 /* If we can compute the condition at compile time, pick the
992 appropriate subroutine. */
993 tmp = simplify_relational_operation (cmp_code, SImode, op1_mode, cmp1, cmp2);
994 if (tmp != 0 && GET_CODE (tmp) == CONST_INT)
996 if (tmp == const0_rtx)
997 return expand_superword_shift (binoptab, outof_input, superword_op1,
998 outof_target, into_target,
1001 return expand_subword_shift (op1_mode, binoptab,
1002 outof_input, into_input, op1,
1003 outof_target, into_target,
1004 unsignedp, methods, shift_mask);
1007 #ifdef HAVE_conditional_move
1008 /* Try using conditional moves to generate straight-line code. */
1010 rtx start = get_last_insn ();
1011 if (expand_doubleword_shift_condmove (op1_mode, binoptab,
1012 cmp_code, cmp1, cmp2,
1013 outof_input, into_input,
1015 outof_target, into_target,
1016 unsignedp, methods, shift_mask))
/* The condmove attempt failed: discard any insns it emitted.  */
1018 delete_insns_since (start);
1022 /* As a last resort, use branches to select the correct alternative. */
1023 subword_label = gen_label_rtx ();
1024 done_label = gen_label_rtx ();
1027 do_compare_rtx_and_jump (cmp1, cmp2, cmp_code, false, op1_mode,
1028 0, 0, subword_label);
1031 if (!expand_superword_shift (binoptab, outof_input, superword_op1,
1032 outof_target, into_target,
1033 unsignedp, methods))
1036 emit_jump_insn (gen_jump (done_label));
1038 emit_label (subword_label);
1040 if (!expand_subword_shift (op1_mode, binoptab,
1041 outof_input, into_input, op1,
1042 outof_target, into_target,
1043 unsignedp, methods, shift_mask))
1046 emit_label (done_label);
1050 /* Subroutine of expand_binop. Perform a double word multiplication of
1051 operands OP0 and OP1 both of mode MODE, which is exactly twice as wide
1052 as the target's word_mode. This function return NULL_RTX if anything
1053 goes wrong, in which case it may have already emitted instructions
1054 which need to be deleted.
1056 If we want to multiply two two-word values and have normal and widening
1057 multiplies of single-word values, we can do this with three smaller
1058 multiplications. Note that we do not make a REG_NO_CONFLICT block here
1059 because we are not operating on one word at a time.
1061 The multiplication proceeds as follows:
1062 _______________________
1063 [__op0_high_|__op0_low__]
1064 _______________________
1065 * [__op1_high_|__op1_low__]
1066 _______________________________________________
1067 _______________________
1068 (1) [__op0_low__*__op1_low__]
1069 _______________________
1070 (2a) [__op0_low__*__op1_high_]
1071 _______________________
1072 (2b) [__op0_high_*__op1_low__]
1073 _______________________
1074 (3) [__op0_high_*__op1_high_]
1077 This gives a 4-word result. Since we are only interested in the
1078 lower 2 words, partial result (3) and the upper words of (2a) and
1079 (2b) don't need to be calculated. Hence (2a) and (2b) can be
1080 calculated using non-widening multiplication.
1082 (1), however, needs to be calculated with an unsigned widening
1083 multiplication. If this operation is not directly supported we
1084 try using a signed widening multiplication and adjust the result.
1085 This adjustment works as follows:
1087 If both operands are positive then no adjustment is needed.
1089 If the operands have different signs, for example op0_low < 0 and
1090 op1_low >= 0, the instruction treats the most significant bit of
1091 op0_low as a sign bit instead of a bit with significance
1092 2**(BITS_PER_WORD-1), i.e. the instruction multiplies op1_low
1093 with 2**BITS_PER_WORD - op0_low, and two's complements the
1094 result. Conclusion: We need to add op1_low * 2**BITS_PER_WORD to
1097 Similarly, if both operands are negative, we need to add
1098 (op0_low + op1_low) * 2**BITS_PER_WORD.
1100 We use a trick to adjust quickly. We logically shift op0_low right
1101 (op1_low) BITS_PER_WORD-1 steps to get 0 or 1, and add this to
1102 op0_high (op1_high) before it is used to calculate 2b (2a). If no
1103 logical shift exists, we do an arithmetic right shift and subtract
/* Expand a multiply of two doubleword (2 * BITS_PER_WORD) values MODE-wide
   as three word_mode multiplies, per the algorithm described in the large
   comment block above.  UMULP is true when an unsigned widening multiply of
   the low words is available; otherwise a signed widening multiply is used
   and the product is adjusted.  TARGET, if a register, may receive the
   result; METHODS constrains which expansion strategies expand_binop may use.
   NOTE(review): interior lines of this function (braces, failure checks,
   returns) are missing from this copy — verify against upstream optabs.c.  */
1107 expand_doubleword_mult (enum machine_mode mode, rtx op0, rtx op1, rtx target,
1108 bool umulp, enum optab_methods methods)
1110 int low = (WORDS_BIG_ENDIAN ? 1 : 0);
1111 int high = (WORDS_BIG_ENDIAN ? 0 : 1);
1112 rtx wordm1 = umulp ? NULL_RTX : GEN_INT (BITS_PER_WORD - 1);
1113 rtx product, adjust, product_high, temp;
1115 rtx op0_high = operand_subword_force (op0, high, mode);
1116 rtx op0_low = operand_subword_force (op0, low, mode);
1117 rtx op1_high = operand_subword_force (op1, high, mode);
1118 rtx op1_low = operand_subword_force (op1, low, mode);
1120 /* If we're using an unsigned multiply to directly compute the product
1121 of the low-order words of the operands and perform any required
1122 adjustments of the operands, we begin by trying two more multiplications
1123 and then computing the appropriate sum.
1125 We have checked above that the required addition is provided.
1126 Full-word addition will normally always succeed, especially if
1127 it is provided at all, so we don't worry about its failure. The
1128 multiplication may well fail, however, so we do handle that. */
1132 /* ??? This could be done with emit_store_flag where available. */
1133 temp = expand_binop (word_mode, lshr_optab, op0_low, wordm1,
1134 NULL_RTX, 1, methods)
/* Add the sign-bit correction (0 or 1) into op0_high before forming
   partial product (2b); see the adjustment description above.  */
1136 op0_high = expand_binop (word_mode, add_optab, op0_high, temp,
1137 NULL_RTX, 0, OPTAB_DIRECT);
1140 temp = expand_binop (word_mode, ashr_optab, op0_low, wordm1,
1141 NULL_RTX, 0, methods);
1144 op0_high = expand_binop (word_mode, sub_optab, op0_high, temp,
1145 NULL_RTX, 0, OPTAB_DIRECT);
1152 adjust = expand_binop (word_mode, smul_optab, op0_high, op1_low,
1153 NULL_RTX, 0, OPTAB_DIRECT);
1157 /* OP0_HIGH should now be dead. */
1161 /* ??? This could be done with emit_store_flag where available. */
1162 temp = expand_binop (word_mode, lshr_optab, op1_low, wordm1,
1163 NULL_RTX, 1, methods);
1165 op1_high = expand_binop (word_mode, add_optab, op1_high, temp,
1166 NULL_RTX, 0, OPTAB_DIRECT);
1169 temp = expand_binop (word_mode, ashr_optab, op1_low, wordm1,
1170 NULL_RTX, 0, methods);
1173 op1_high = expand_binop (word_mode, sub_optab, op1_high, temp,
1174 NULL_RTX, 0, OPTAB_DIRECT);
1181 temp = expand_binop (word_mode, smul_optab, op1_high, op0_low,
1182 NULL_RTX, 0, OPTAB_DIRECT);
1186 /* OP1_HIGH should now be dead. */
1188 adjust = expand_binop (word_mode, add_optab, adjust, temp,
1189 adjust, 0, OPTAB_DIRECT);
1191 if (target && !REG_P (target))
/* Partial product (1): widening multiply of the two low words; this
   supplies both result words before the high-word adjustment.  */
1195 product = expand_binop (mode, umul_widen_optab, op0_low, op1_low,
1196 target, 1, OPTAB_DIRECT);
1198 product = expand_binop (mode, smul_widen_optab, op0_low, op1_low,
1199 target, 1, OPTAB_DIRECT);
/* Fold the accumulated high-word adjustment into the product's high word.  */
1204 product_high = operand_subword (product, high, 1, mode);
1205 adjust = expand_binop (word_mode, add_optab, product_high, adjust,
1206 REG_P (product_high) ? product_high : adjust,
1208 emit_move_insn (product_high, adjust);
1212 /* Wrapper around expand_binop which takes an rtx code to specify
1213 the operation to perform, not an optab pointer. All other
1214 arguments are the same. */
1216 expand_simple_binop (enum machine_mode mode, enum rtx_code code, rtx op0,
1217 rtx op1, rtx target, int unsignedp,
1218 enum optab_methods methods)
/* Map the rtx code to its optab via the global table, then delegate.  */
1220 optab binop = code_to_optab[(int) code];
1223 return expand_binop (mode, binop, op0, op1, target, unsignedp, methods);
1226 /* Return whether OP0 and OP1 should be swapped when expanding a commutative
1227 binop. Order them according to commutative_operand_precedence and, if
1228 possible, try to put TARGET or a pseudo first. */
1230 swap_commutative_operands_with_target (rtx target, rtx op0, rtx op1)
1232 int op0_prec = commutative_operand_precedence (op0);
1233 int op1_prec = commutative_operand_precedence (op1);
/* Higher precedence operand should come first; the two early branches
   decide on precedence alone.  NOTE(review): their return statements are
   not visible in this copy — confirm against upstream optabs.c.  */
1235 if (op0_prec < op1_prec)
1238 if (op0_prec > op1_prec)
1241 /* With equal precedence, both orders are ok, but it is better if the
1242 first operand is TARGET, or if both TARGET and OP0 are pseudos. */
1243 if (target == 0 || REG_P (target))
1244 return (REG_P (op1) && !REG_P (op0)) || target == op1;
1246 return rtx_equal_p (op1, target);
1250 /* Helper function for expand_binop: handle the case where there
1251 is an insn that directly implements the indicated operation.
1252 Returns null if this is not possible. */
1254 expand_binop_directly (enum machine_mode mode, optab binoptab,
1256 rtx target, int unsignedp, enum optab_methods methods,
1257 int commutative_op, rtx last)
/* Look up the target insn for this optab/mode and the operand modes it
   requires.  NOTE(review): interior lines of this function are missing
   from this copy — verify against upstream optabs.c before editing.  */
1259 int icode = (int) optab_handler (binoptab, mode)->insn_code;
1260 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
1261 enum machine_mode mode1 = insn_data[icode].operand[2].mode;
1262 enum machine_mode tmp_mode;
1264 rtx xop0 = op0, xop1 = op1;
1270 temp = gen_reg_rtx (mode);
1272 /* If it is a commutative operator and the modes would match
1273 if we would swap the operands, we can save the conversions. */
1276 if (GET_MODE (op0) != mode0 && GET_MODE (op1) != mode1
1277 && GET_MODE (op0) == mode1 && GET_MODE (op1) == mode0)
1281 tmp = op0; op0 = op1; op1 = tmp;
1282 tmp = xop0; xop0 = xop1; xop1 = tmp;
1286 /* In case the insn wants input operands in modes different from
1287 those of the actual operands, convert the operands. It would
1288 seem that we don't need to convert CONST_INTs, but we do, so
1289 that they're properly zero-extended, sign-extended or truncated
1292 if (GET_MODE (op0) != mode0 && mode0 != VOIDmode)
1293 xop0 = convert_modes (mode0,
1294 GET_MODE (op0) != VOIDmode
1299 if (GET_MODE (op1) != mode1 && mode1 != VOIDmode)
1300 xop1 = convert_modes (mode1,
1301 GET_MODE (op1) != VOIDmode
1306 /* Now, if insn's predicates don't allow our operands, put them into
1309 if (!insn_data[icode].operand[1].predicate (xop0, mode0)
1310 && mode0 != VOIDmode)
1311 xop0 = copy_to_mode_reg (mode0, xop0);
1313 if (!insn_data[icode].operand[2].predicate (xop1, mode1)
1314 && mode1 != VOIDmode)
1315 xop1 = copy_to_mode_reg (mode1, xop1);
/* Vector pack operations produce a result in a mode different from the
   operand mode (half as many, wider elements), so retarget TEMP.  */
1317 if (binoptab == vec_pack_trunc_optab
1318 || binoptab == vec_pack_usat_optab
1319 || binoptab == vec_pack_ssat_optab
1320 || binoptab == vec_pack_ufix_trunc_optab
1321 || binoptab == vec_pack_sfix_trunc_optab)
1323 /* The mode of the result is different then the mode of the
1325 tmp_mode = insn_data[icode].operand[0].mode;
1326 if (GET_MODE_NUNITS (tmp_mode) != 2 * GET_MODE_NUNITS (mode))
1332 if (!insn_data[icode].operand[0].predicate (temp, tmp_mode))
1333 temp = gen_reg_rtx (tmp_mode);
1335 pat = GEN_FCN (icode) (temp, xop0, xop1);
1338 /* If PAT is composed of more than one insn, try to add an appropriate
1339 REG_EQUAL note to it. If we can't because TEMP conflicts with an
1340 operand, call expand_binop again, this time without a target. */
1341 if (INSN_P (pat) && NEXT_INSN (pat) != NULL_RTX
1342 && ! add_equal_note (pat, temp, binoptab->code, xop0, xop1))
1344 delete_insns_since (last);
1345 return expand_binop (mode, binoptab, op0, op1, NULL_RTX,
1346 unsignedp, methods);
/* Fall-through failure path: discard anything emitted after LAST.  */
1353 delete_insns_since (last);
1357 /* Generate code to perform an operation specified by BINOPTAB
1358 on operands OP0 and OP1, with result having machine-mode MODE.
1360 UNSIGNEDP is for the case where we have to widen the operands
1361 to perform the operation. It says to use zero-extension.
1363 If TARGET is nonzero, the value
1364 is generated there, if it is convenient to do so.
1365 In all cases an rtx is returned for the locus of the value;
1366 this may or may not be TARGET.
 
   NOTE(review): this is the central binop expansion dispatcher.  It tries
   strategies in order: direct insn, reverse rotate, widening multiply,
   open-coding in a wider mode, word-at-a-time synthesis (logical ops,
   shifts, rotates, add/sub with carry, doubleword multiply), library
   call, and finally recursion at a wider mode.  Many interior lines
   (braces, returns, some conditions) are missing from this copy —
   verify any change against upstream optabs.c.  */
1369 expand_binop (enum machine_mode mode, optab binoptab, rtx op0, rtx op1,
1370 rtx target, int unsignedp, enum optab_methods methods)
1372 enum optab_methods next_methods
1373 = (methods == OPTAB_LIB || methods == OPTAB_LIB_WIDEN
1374 ? OPTAB_WIDEN : methods);
1375 enum mode_class class;
1376 enum machine_mode wider_mode;
1378 int commutative_op = 0;
1379 int shift_op = (binoptab->code == ASHIFT
1380 || binoptab->code == ASHIFTRT
1381 || binoptab->code == LSHIFTRT
1382 || binoptab->code == ROTATE
1383 || binoptab->code == ROTATERT);
1384 rtx entry_last = get_last_insn ();
1387 class = GET_MODE_CLASS (mode);
1389 /* If subtracting an integer constant, convert this into an addition of
1390 the negated constant. */
1392 if (binoptab == sub_optab && GET_CODE (op1) == CONST_INT)
1394 op1 = negate_rtx (mode, op1);
1395 binoptab = add_optab;
1398 /* If we are inside an appropriately-short loop and we are optimizing,
1399 force expensive constants into a register. */
1400 if (CONSTANT_P (op0) && optimize
1401 && rtx_cost (op0, binoptab->code) > COSTS_N_INSNS (1))
1403 if (GET_MODE (op0) != VOIDmode)
1404 op0 = convert_modes (mode, VOIDmode, op0, unsignedp);
1405 op0 = force_reg (mode, op0);
1408 if (CONSTANT_P (op1) && optimize
1409 && ! shift_op && rtx_cost (op1, binoptab->code) > COSTS_N_INSNS (1))
1411 if (GET_MODE (op1) != VOIDmode)
1412 op1 = convert_modes (mode, VOIDmode, op1, unsignedp)
1413 op1 = force_reg (mode, op1);
1416 /* Record where to delete back to if we backtrack. */
1417 last = get_last_insn ();
1419 /* If operation is commutative,
1420 try to make the first operand a register.
1421 Even better, try to make it the same as the target.
1422 Also try to make the last operand a constant. */
1423 if (GET_RTX_CLASS (binoptab->code) == RTX_COMM_ARITH
1424 || binoptab == smul_widen_optab
1425 || binoptab == umul_widen_optab
1426 || binoptab == smul_highpart_optab
1427 || binoptab == umul_highpart_optab)
1431 if (swap_commutative_operands_with_target (target, op0, op1))
1439 /* If we can do it with a three-operand insn, do so. */
1441 if (methods != OPTAB_MUST_WIDEN
1442 && optab_handler (binoptab, mode)->insn_code != CODE_FOR_nothing)
1444 temp = expand_binop_directly (mode, binoptab, op0, op1, target,
1445 unsignedp, methods, commutative_op, last);
1450 /* If we were trying to rotate, and that didn't work, try rotating
1451 the other direction before falling back to shifts and bitwise-or. */
1452 if (((binoptab == rotl_optab
1453 && optab_handler (rotr_optab, mode)->insn_code != CODE_FOR_nothing)
1454 || (binoptab == rotr_optab
1455 && optab_handler (rotl_optab, mode)->insn_code != CODE_FOR_nothing))
1456 && class == MODE_INT)
1458 optab otheroptab = (binoptab == rotl_optab ? rotr_optab : rotl_optab);
1460 unsigned int bits = GET_MODE_BITSIZE (mode);
/* Rotating left by N equals rotating right by BITS - N (and vice
   versa); compute the complementary count, cheaply when possible.  */
1462 if (GET_CODE (op1) == CONST_INT)
1463 newop1 = GEN_INT (bits - INTVAL (op1));
1464 else if (targetm.shift_truncation_mask (mode) == bits - 1)
1465 newop1 = negate_rtx (mode, op1);
1467 newop1 = expand_binop (mode, sub_optab,
1468 GEN_INT (bits), op1,
1469 NULL_RTX, unsignedp, OPTAB_DIRECT);
1471 temp = expand_binop_directly (mode, otheroptab, op0, newop1,
1472 target, unsignedp, methods,
1473 commutative_op, last);
1478 /* If this is a multiply, see if we can do a widening operation that
1479 takes operands of this mode and makes a wider mode. */
1481 if (binoptab == smul_optab
1482 && GET_MODE_WIDER_MODE (mode) != VOIDmode
1483 && ((optab_handler ((unsignedp ? umul_widen_optab : smul_widen_optab),
1484 GET_MODE_WIDER_MODE (mode))->insn_code)
1485 != CODE_FOR_nothing))
1487 temp = expand_binop (GET_MODE_WIDER_MODE (mode),
1488 unsignedp ? umul_widen_optab : smul_widen_optab,
1489 op0, op1, NULL_RTX, unsignedp, OPTAB_DIRECT);
1493 if (GET_MODE_CLASS (mode) == MODE_INT
1494 && TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode),
1495 GET_MODE_BITSIZE (GET_MODE (temp))))
1496 return gen_lowpart (mode, temp);
1498 return convert_to_mode (mode, temp, unsignedp);
1502 /* Look for a wider mode of the same class for which we think we
1503 can open-code the operation. Check for a widening multiply at the
1504 wider mode as well. */
1506 if (CLASS_HAS_WIDER_MODES_P (class)
1507 && methods != OPTAB_DIRECT && methods != OPTAB_LIB)
1508 for (wider_mode = GET_MODE_WIDER_MODE (mode);
1509 wider_mode != VOIDmode;
1510 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
1512 if (optab_handler (binoptab, wider_mode)->insn_code != CODE_FOR_nothing
1513 || (binoptab == smul_optab
1514 && GET_MODE_WIDER_MODE (wider_mode) != VOIDmode
1515 && ((optab_handler ((unsignedp ? umul_widen_optab
1516 : smul_widen_optab),
1517 GET_MODE_WIDER_MODE (wider_mode))->insn_code)
1518 != CODE_FOR_nothing)))
1520 rtx xop0 = op0, xop1 = op1;
1523 /* For certain integer operations, we need not actually extend
1524 the narrow operands, as long as we will truncate
1525 the results to the same narrowness. */
1527 if ((binoptab == ior_optab || binoptab == and_optab
1528 || binoptab == xor_optab
1529 || binoptab == add_optab || binoptab == sub_optab
1530 || binoptab == smul_optab || binoptab == ashl_optab)
1531 && class == MODE_INT)
1534 xop0 = widen_operand (xop0, wider_mode, mode, unsignedp, no_extend);
1536 /* The second operand of a shift must always be extended. */
1537 xop1 = widen_operand (xop1, wider_mode, mode, unsignedp,
1538 no_extend && binoptab != ashl_optab);
1540 temp = expand_binop (wider_mode, binoptab, xop0, xop1, NULL_RTX,
1541 unsignedp, OPTAB_DIRECT);
1544 if (class != MODE_INT
1545 || !TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode),
1546 GET_MODE_BITSIZE (wider_mode)))
1549 target = gen_reg_rtx (mode);
1550 convert_move (target, temp, 0);
1554 return gen_lowpart (mode, temp);
1557 delete_insns_since (last);
1561 /* These can be done a word at a time. */
1562 if ((binoptab == and_optab || binoptab == ior_optab || binoptab == xor_optab)
1563 && class == MODE_INT
1564 && GET_MODE_SIZE (mode) > UNITS_PER_WORD
1565 && optab_handler (binoptab, word_mode)->insn_code != CODE_FOR_nothing)
1571 /* If TARGET is the same as one of the operands, the REG_EQUAL note
1572 won't be accurate, so use a new target. */
1573 if (target == 0 || target == op0 || target == op1)
1574 target = gen_reg_rtx (mode);
1578 /* Do the actual arithmetic. */
1579 for (i = 0; i < GET_MODE_BITSIZE (mode) / BITS_PER_WORD; i++)
1581 rtx target_piece = operand_subword (target, i, 1, mode);
1582 rtx x = expand_binop (word_mode, binoptab,
1583 operand_subword_force (op0, i, mode),
1584 operand_subword_force (op1, i, mode),
1585 target_piece, unsignedp, next_methods);
1590 if (target_piece != x)
1591 emit_move_insn (target_piece, x);
1594 insns = get_insns ();
/* Only commit if every word was expanded successfully.  */
1597 if (i == GET_MODE_BITSIZE (mode) / BITS_PER_WORD)
1599 if (binoptab->code != UNKNOWN)
1601 = gen_rtx_fmt_ee (binoptab->code, mode,
1602 copy_rtx (op0), copy_rtx (op1));
1606 emit_no_conflict_block (insns, target, op0, op1, equiv_value);
1611 /* Synthesize double word shifts from single word shifts. */
1612 if ((binoptab == lshr_optab || binoptab == ashl_optab
1613 || binoptab == ashr_optab)
1614 && class == MODE_INT
1615 && (GET_CODE (op1) == CONST_INT || !optimize_size)
1616 && GET_MODE_SIZE (mode) == 2 * UNITS_PER_WORD
1617 && optab_handler (binoptab, word_mode)->insn_code != CODE_FOR_nothing
1618 && optab_handler (ashl_optab, word_mode)->insn_code != CODE_FOR_nothing
1619 && optab_handler (lshr_optab, word_mode)->insn_code != CODE_FOR_nothing)
1621 unsigned HOST_WIDE_INT shift_mask, double_shift_mask;
1622 enum machine_mode op1_mode;
1624 double_shift_mask = targetm.shift_truncation_mask (mode);
1625 shift_mask = targetm.shift_truncation_mask (word_mode);
1626 op1_mode = GET_MODE (op1) != VOIDmode ? GET_MODE (op1) : word_mode;
1628 /* Apply the truncation to constant shifts. */
1629 if (double_shift_mask > 0 && GET_CODE (op1) == CONST_INT)
1630 op1 = GEN_INT (INTVAL (op1) & double_shift_mask);
1632 if (op1 == CONST0_RTX (op1_mode))
1635 /* Make sure that this is a combination that expand_doubleword_shift
1636 can handle. See the comments there for details. */
1637 if (double_shift_mask == 0
1638 || (shift_mask == BITS_PER_WORD - 1
1639 && double_shift_mask == BITS_PER_WORD * 2 - 1))
1641 rtx insns, equiv_value;
1642 rtx into_target, outof_target;
1643 rtx into_input, outof_input;
1644 int left_shift, outof_word;
1646 /* If TARGET is the same as one of the operands, the REG_EQUAL note
1647 won't be accurate, so use a new target. */
1648 if (target == 0 || target == op0 || target == op1)
1649 target = gen_reg_rtx (mode);
1653 /* OUTOF_* is the word we are shifting bits away from, and
1654 INTO_* is the word that we are shifting bits towards, thus
1655 they differ depending on the direction of the shift and
1656 WORDS_BIG_ENDIAN. */
1658 left_shift = binoptab == ashl_optab;
1659 outof_word = left_shift ^ ! WORDS_BIG_ENDIAN;
1661 outof_target = operand_subword (target, outof_word, 1, mode);
1662 into_target = operand_subword (target, 1 - outof_word, 1, mode);
1664 outof_input = operand_subword_force (op0, outof_word, mode);
1665 into_input = operand_subword_force (op0, 1 - outof_word, mode);
1667 if (expand_doubleword_shift (op1_mode, binoptab,
1668 outof_input, into_input, op1,
1669 outof_target, into_target,
1670 unsignedp, next_methods, shift_mask))
1672 insns = get_insns ();
1675 equiv_value = gen_rtx_fmt_ee (binoptab->code, mode, op0, op1);
1676 emit_no_conflict_block (insns, target, op0, op1, equiv_value);
1683 /* Synthesize double word rotates from single word shifts. */
1684 if ((binoptab == rotl_optab || binoptab == rotr_optab)
1685 && class == MODE_INT
1686 && GET_CODE (op1) == CONST_INT
1687 && GET_MODE_SIZE (mode) == 2 * UNITS_PER_WORD
1688 && optab_handler (ashl_optab, word_mode)->insn_code != CODE_FOR_nothing
1689 && optab_handler (lshr_optab, word_mode)->insn_code != CODE_FOR_nothing)
1692 rtx into_target, outof_target;
1693 rtx into_input, outof_input;
1695 int shift_count, left_shift, outof_word;
1697 /* If TARGET is the same as one of the operands, the REG_EQUAL note
1698 won't be accurate, so use a new target. Do this also if target is not
1699 a REG, first because having a register instead may open optimization
1700 opportunities, and second because if target and op0 happen to be MEMs
1701 designating the same location, we would risk clobbering it too early
1702 in the code sequence we generate below. */
1703 if (target == 0 || target == op0 || target == op1 || ! REG_P (target))
1704 target = gen_reg_rtx (mode);
1708 shift_count = INTVAL (op1);
1710 /* OUTOF_* is the word we are shifting bits away from, and
1711 INTO_* is the word that we are shifting bits towards, thus
1712 they differ depending on the direction of the shift and
1713 WORDS_BIG_ENDIAN. */
1715 left_shift = (binoptab == rotl_optab);
1716 outof_word = left_shift ^ ! WORDS_BIG_ENDIAN;
1718 outof_target = operand_subword (target, outof_word, 1, mode);
1719 into_target = operand_subword (target, 1 - outof_word, 1, mode);
1721 outof_input = operand_subword_force (op0, outof_word, mode);
1722 into_input = operand_subword_force (op0, 1 - outof_word, mode);
1724 if (shift_count == BITS_PER_WORD)
1726 /* This is just a word swap. */
1727 emit_move_insn (outof_target, into_input);
1728 emit_move_insn (into_target, outof_input);
1733 rtx into_temp1, into_temp2, outof_temp1, outof_temp2;
1734 rtx first_shift_count, second_shift_count;
1735 optab reverse_unsigned_shift, unsigned_shift;
1737 reverse_unsigned_shift = (left_shift ^ (shift_count < BITS_PER_WORD)
1738 ? lshr_optab : ashl_optab);
1740 unsigned_shift = (left_shift ^ (shift_count < BITS_PER_WORD)
1741 ? ashl_optab : lshr_optab);
1743 if (shift_count > BITS_PER_WORD)
1745 first_shift_count = GEN_INT (shift_count - BITS_PER_WORD);
1746 second_shift_count = GEN_INT (2 * BITS_PER_WORD - shift_count);
1750 first_shift_count = GEN_INT (BITS_PER_WORD - shift_count);
1751 second_shift_count = GEN_INT (shift_count);
/* Each output word is the IOR of two shifted pieces of the inputs.  */
1754 into_temp1 = expand_binop (word_mode, unsigned_shift,
1755 outof_input, first_shift_count,
1756 NULL_RTX, unsignedp, next_methods);
1757 into_temp2 = expand_binop (word_mode, reverse_unsigned_shift,
1758 into_input, second_shift_count,
1759 NULL_RTX, unsignedp, next_methods);
1761 if (into_temp1 != 0 && into_temp2 != 0)
1762 inter = expand_binop (word_mode, ior_optab, into_temp1, into_temp2,
1763 into_target, unsignedp, next_methods);
1767 if (inter != 0 && inter != into_target)
1768 emit_move_insn (into_target, inter);
1770 outof_temp1 = expand_binop (word_mode, unsigned_shift,
1771 into_input, first_shift_count,
1772 NULL_RTX, unsignedp, next_methods);
1773 outof_temp2 = expand_binop (word_mode, reverse_unsigned_shift,
1774 outof_input, second_shift_count,
1775 NULL_RTX, unsignedp, next_methods);
1777 if (inter != 0 && outof_temp1 != 0 && outof_temp2 != 0)
1778 inter = expand_binop (word_mode, ior_optab,
1779 outof_temp1, outof_temp2,
1780 outof_target, unsignedp, next_methods);
1782 if (inter != 0 && inter != outof_target)
1783 emit_move_insn (outof_target, inter);
1786 insns = get_insns ();
1791 /* One may be tempted to wrap the insns in a REG_NO_CONFLICT
1792 block to help the register allocator a bit. But a multi-word
1793 rotate will need all the input bits when setting the output
1794 bits, so there clearly is a conflict between the input and
1795 output registers. So we can't use a no-conflict block here. */
1801 /* These can be done a word at a time by propagating carries. */
1802 if ((binoptab == add_optab || binoptab == sub_optab)
1803 && class == MODE_INT
1804 && GET_MODE_SIZE (mode) >= 2 * UNITS_PER_WORD
1805 && optab_handler (binoptab, word_mode)->insn_code != CODE_FOR_nothing)
1808 optab otheroptab = binoptab == add_optab ? sub_optab : add_optab;
1809 const unsigned int nwords = GET_MODE_BITSIZE (mode) / BITS_PER_WORD;
1810 rtx carry_in = NULL_RTX, carry_out = NULL_RTX;
1811 rtx xop0, xop1, xtarget;
1813 /* We can handle either a 1 or -1 value for the carry. If STORE_FLAG
1814 value is one of those, use it. Otherwise, use 1 since it is the
1815 one easiest to get. */
1816 #if STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1
1817 int normalizep = STORE_FLAG_VALUE;
1822 /* Prepare the operands. */
1823 xop0 = force_reg (mode, op0);
1824 xop1 = force_reg (mode, op1);
1826 xtarget = gen_reg_rtx (mode);
1828 if (target == 0 || !REG_P (target))
1831 /* Indicate for flow that the entire target reg is being set. */
1833 emit_insn (gen_rtx_CLOBBER (VOIDmode, xtarget));
1835 /* Do the actual arithmetic. */
1836 for (i = 0; i < nwords; i++)
/* Walk from the least significant word up so carries propagate.  */
1838 int index = (WORDS_BIG_ENDIAN ? nwords - i - 1 : i);
1839 rtx target_piece = operand_subword (xtarget, index, 1, mode);
1840 rtx op0_piece = operand_subword_force (xop0, index, mode);
1841 rtx op1_piece = operand_subword_force (xop1, index, mode);
1844 /* Main add/subtract of the input operands. */
1845 x = expand_binop (word_mode, binoptab,
1846 op0_piece, op1_piece,
1847 target_piece, unsignedp, next_methods);
1853 /* Store carry from main add/subtract. */
1854 carry_out = gen_reg_rtx (word_mode);
1855 carry_out = emit_store_flag_force (carry_out,
1856 (binoptab == add_optab
1859 word_mode, 1, normalizep);
1866 /* Add/subtract previous carry to main result. */
1867 newx = expand_binop (word_mode,
1868 normalizep == 1 ? binoptab : otheroptab,
1870 NULL_RTX, 1, next_methods);
1874 /* Get out carry from adding/subtracting carry in. */
1875 rtx carry_tmp = gen_reg_rtx (word_mode);
1876 carry_tmp = emit_store_flag_force (carry_tmp,
1877 (binoptab == add_optab
1880 word_mode, 1, normalizep);
1882 /* Logical-ior the two poss. carry together. */
1883 carry_out = expand_binop (word_mode, ior_optab,
1884 carry_out, carry_tmp,
1885 carry_out, 0, next_methods);
1889 emit_move_insn (target_piece, newx);
1893 if (x != target_piece)
1894 emit_move_insn (target_piece, x);
1897 carry_in = carry_out;
1900 if (i == GET_MODE_BITSIZE (mode) / (unsigned) BITS_PER_WORD)
1902 if (optab_handler (mov_optab, mode)->insn_code != CODE_FOR_nothing
1903 || ! rtx_equal_p (target, xtarget))
1905 rtx temp = emit_move_insn (target, xtarget);
1907 set_unique_reg_note (temp,
1909 gen_rtx_fmt_ee (binoptab->code, mode,
1920 delete_insns_since (last);
1923 /* Attempt to synthesize double word multiplies using a sequence of word
1924 mode multiplications. We first attempt to generate a sequence using a
1925 more efficient unsigned widening multiply, and if that fails we then
1926 try using a signed widening multiply. */
1928 if (binoptab == smul_optab
1929 && class == MODE_INT
1930 && GET_MODE_SIZE (mode) == 2 * UNITS_PER_WORD
1931 && optab_handler (smul_optab, word_mode)->insn_code != CODE_FOR_nothing
1932 && optab_handler (add_optab, word_mode)->insn_code != CODE_FOR_nothing)
1934 rtx product = NULL_RTX;
1936 if (optab_handler (umul_widen_optab, mode)->insn_code
1937 != CODE_FOR_nothing)
1939 product = expand_doubleword_mult (mode, op0, op1, target,
1942 delete_insns_since (last);
1945 if (product == NULL_RTX
1946 && optab_handler (smul_widen_optab, mode)->insn_code
1947 != CODE_FOR_nothing)
1949 product = expand_doubleword_mult (mode, op0, op1, target,
1952 delete_insns_since (last);
1955 if (product != NULL_RTX)
1957 if (optab_handler (mov_optab, mode)->insn_code != CODE_FOR_nothing)
1959 temp = emit_move_insn (target ? target : product, product);
1960 set_unique_reg_note (temp,
1962 gen_rtx_fmt_ee (MULT, mode,
1970 /* It can't be open-coded in this mode.
1971 Use a library call if one is available and caller says that's ok. */
1973 if (optab_handler (binoptab, mode)->libfunc
1974 && (methods == OPTAB_LIB || methods == OPTAB_LIB_WIDEN))
1978 enum machine_mode op1_mode = mode;
/* Shift counts use the libgcc shift-count mode, not MODE.  */
1985 op1_mode = targetm.libgcc_shift_count_mode ();
1986 /* Specify unsigned here,
1987 since negative shift counts are meaningless. */
1988 op1x = convert_to_mode (op1_mode, op1, 1);
1991 if (GET_MODE (op0) != VOIDmode
1992 && GET_MODE (op0) != mode)
1993 op0 = convert_to_mode (mode, op0, unsignedp);
1995 /* Pass 1 for NO_QUEUE so we don't lose any increments
1996 if the libcall is cse'd or moved. */
1997 value = emit_library_call_value (optab_handler (binoptab, mode)->libfunc,
1998 NULL_RTX, LCT_CONST, mode, 2,
1999 op0, mode, op1x, op1_mode);
2001 insns = get_insns ();
2004 target = gen_reg_rtx (mode);
2005 emit_libcall_block (insns, target, value,
2006 gen_rtx_fmt_ee (binoptab->code, mode, op0, op1));
2011 delete_insns_since (last);
2013 /* It can't be done in this mode. Can we do it in a wider mode? */
2015 if (! (methods == OPTAB_WIDEN || methods == OPTAB_LIB_WIDEN
2016 || methods == OPTAB_MUST_WIDEN))
2018 /* Caller says, don't even try. */
2019 delete_insns_since (entry_last);
2023 /* Compute the value of METHODS to pass to recursive calls.
2024 Don't allow widening to be tried recursively. */
2026 methods = (methods == OPTAB_LIB_WIDEN ? OPTAB_LIB : OPTAB_DIRECT);
2028 /* Look for a wider mode of the same class for which it appears we can do
2031 if (CLASS_HAS_WIDER_MODES_P (class))
2033 for (wider_mode = GET_MODE_WIDER_MODE (mode);
2034 wider_mode != VOIDmode;
2035 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
2037 if ((optab_handler (binoptab, wider_mode)->insn_code
2038 != CODE_FOR_nothing)
2039 || (methods == OPTAB_LIB
2040 && optab_handler (binoptab, wider_mode)->libfunc))
2042 rtx xop0 = op0, xop1 = op1;
2045 /* For certain integer operations, we need not actually extend
2046 the narrow operands, as long as we will truncate
2047 the results to the same narrowness. */
2049 if ((binoptab == ior_optab || binoptab == and_optab
2050 || binoptab == xor_optab
2051 || binoptab == add_optab || binoptab == sub_optab
2052 || binoptab == smul_optab || binoptab == ashl_optab)
2053 && class == MODE_INT)
2056 xop0 = widen_operand (xop0, wider_mode, mode,
2057 unsignedp, no_extend);
2059 /* The second operand of a shift must always be extended. */
2060 xop1 = widen_operand (xop1, wider_mode, mode, unsignedp,
2061 no_extend && binoptab != ashl_optab);
2063 temp = expand_binop (wider_mode, binoptab, xop0, xop1, NULL_RTX,
2064 unsignedp, methods);
2067 if (class != MODE_INT
2068 || !TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode),
2069 GET_MODE_BITSIZE (wider_mode)))
2072 target = gen_reg_rtx (mode);
2073 convert_move (target, temp, 0);
2077 return gen_lowpart (mode, temp);
2080 delete_insns_since (last);
/* Total failure: remove everything emitted since function entry.  */
2085 delete_insns_since (entry_last);
2089 /* Expand a binary operator which has both signed and unsigned forms.
2090 UOPTAB is the optab for unsigned operations, and SOPTAB is for
2093 If we widen unsigned operands, we may use a signed wider operation instead
2094 of an unsigned wider operation, since the result would be the same.
 
   NOTE(review): strategy ladder — direct, widen (signed then unsigned),
   same-width libcall, widen+libcall.  Some returns/braces are missing
   from this copy — verify against upstream optabs.c.  */
2097 sign_expand_binop (enum machine_mode mode, optab uoptab, optab soptab,
2098 rtx op0, rtx op1, rtx target, int unsignedp,
2099 enum optab_methods methods)
2102 optab direct_optab = unsignedp ? uoptab : soptab;
2103 struct optab wide_soptab;
2105 /* Do it without widening, if possible. */
2106 temp = expand_binop (mode, direct_optab, op0, op1, target,
2107 unsignedp, OPTAB_DIRECT);
2108 if (temp || methods == OPTAB_DIRECT)
2111 /* Try widening to a signed int. Make a fake signed optab that
2112 hides any signed insn for direct use. */
2113 wide_soptab = *soptab;
2114 optab_handler (&wide_soptab, mode)->insn_code = CODE_FOR_nothing;
2115 optab_handler (&wide_soptab, mode)->libfunc = 0;
2117 temp = expand_binop (mode, &wide_soptab, op0, op1, target,
2118 unsignedp, OPTAB_WIDEN);
2120 /* For unsigned operands, try widening to an unsigned int. */
2121 if (temp == 0 && unsignedp)
2122 temp = expand_binop (mode, uoptab, op0, op1, target,
2123 unsignedp, OPTAB_WIDEN);
2124 if (temp || methods == OPTAB_WIDEN)
2127 /* Use the right width lib call if that exists. */
2128 temp = expand_binop (mode, direct_optab, op0, op1, target, unsignedp, OPTAB_LIB);
2129 if (temp || methods == OPTAB_LIB)
2132 /* Must widen and use a lib call, use either signed or unsigned. */
2133 temp = expand_binop (mode, &wide_soptab, op0, op1, target,
2134 unsignedp, methods);
2138 return expand_binop (mode, uoptab, op0, op1, target,
2139 unsignedp, methods);
2143 /* Generate code to perform an operation specified by UNOPPTAB
2144 on operand OP0, with two results to TARG0 and TARG1.
2145 We assume that the order of the operands for the instruction
2146 is TARG0, TARG1, OP0.
2148 Either TARG0 or TARG1 may be zero, but what that means is that
2149 the result is not actually wanted. We will generate it into
2150 a dummy pseudo-reg and discard it. They may not both be zero.
2152 Returns 1 if this operation can be performed; 0 if not.
 
   NOTE(review): some interior lines (braces, success returns) are
   missing from this copy — verify against upstream optabs.c.  */
2155 expand_twoval_unop (optab unoptab, rtx op0, rtx targ0, rtx targ1,
2158 enum machine_mode mode = GET_MODE (targ0 ? targ0 : targ1);
2159 enum mode_class class;
2160 enum machine_mode wider_mode;
2161 rtx entry_last = get_last_insn ();
2164 class = GET_MODE_CLASS (mode);
/* Substitute dummy pseudos for whichever target was not supplied.  */
2167 targ0 = gen_reg_rtx (mode);
2169 targ1 = gen_reg_rtx (mode);
2171 /* Record where to go back to if we fail. */
2172 last = get_last_insn ();
2174 if (optab_handler (unoptab, mode)->insn_code != CODE_FOR_nothing)
2176 int icode = (int) optab_handler (unoptab, mode)->insn_code;
2177 enum machine_mode mode0 = insn_data[icode].operand[2].mode;
2181 if (GET_MODE (xop0) != VOIDmode
2182 && GET_MODE (xop0) != mode0)
2183 xop0 = convert_to_mode (mode0, xop0, unsignedp);
2185 /* Now, if insn doesn't accept these operands, put them into pseudos. */
2186 if (!insn_data[icode].operand[2].predicate (xop0, mode0))
2187 xop0 = copy_to_mode_reg (mode0, xop0);
2189 /* We could handle this, but we should always be called with a pseudo
2190 for our targets and all insns should take them as outputs. */
2191 gcc_assert (insn_data[icode].operand[0].predicate (targ0, mode));
2192 gcc_assert (insn_data[icode].operand[1].predicate (targ1, mode));
2194 pat = GEN_FCN (icode) (targ0, targ1, xop0);
2201 delete_insns_since (last);
2204 /* It can't be done in this mode. Can we do it in a wider mode? */
2206 if (CLASS_HAS_WIDER_MODES_P (class))
2208 for (wider_mode = GET_MODE_WIDER_MODE (mode);
2209 wider_mode != VOIDmode;
2210 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
2212 if (optab_handler (unoptab, wider_mode)->insn_code
2213 != CODE_FOR_nothing)
2215 rtx t0 = gen_reg_rtx (wider_mode);
2216 rtx t1 = gen_reg_rtx (wider_mode);
2217 rtx cop0 = convert_modes (wider_mode, mode, op0, unsignedp);
/* Recurse at the wider mode, then narrow both results back.  */
2219 if (expand_twoval_unop (unoptab, cop0, t0, t1, unsignedp))
2221 convert_move (targ0, t0, unsignedp);
2222 convert_move (targ1, t1, unsignedp);
2226 delete_insns_since (last);
2231 delete_insns_since (entry_last);
2235 /* Generate code to perform an operation specified by BINOPTAB
2236 on operands OP0 and OP1, with two results to TARG0 and TARG1.
2237 We assume that the order of the operands for the instruction
2238 is TARG0, OP0, OP1, TARG1, which would fit a pattern like
2239 [(set TARG0 (operate OP0 OP1)) (set TARG1 (operate ...))].
2241 Either TARG0 or TARG1 may be zero, but what that means is that
2242 the result is not actually wanted. We will generate it into
2243 a dummy pseudo-reg and discard it. They may not both be zero.
2245 Returns 1 if this operation can be performed; 0 if not. */
2248 expand_twoval_binop (optab binoptab, rtx op0, rtx op1, rtx targ0, rtx targ1,
2251 enum machine_mode mode = GET_MODE (targ0 ? targ0 : targ1);
2252 enum mode_class class;
2253 enum machine_mode wider_mode;
2254 rtx entry_last = get_last_insn ();
2257 class = GET_MODE_CLASS (mode);
2259 /* If we are inside an appropriately-short loop and we are optimizing,
2260 force expensive constants into a register. */
2261 if (CONSTANT_P (op0) && optimize
2262 && rtx_cost (op0, binoptab->code) > COSTS_N_INSNS (1))
2263 op0 = force_reg (mode, op0);
2265 if (CONSTANT_P (op1) && optimize
2266 && rtx_cost (op1, binoptab->code) > COSTS_N_INSNS (1))
2267 op1 = force_reg (mode, op1);
     /* Supply dummy pseudos for any result the caller did not want.  */
2270 targ0 = gen_reg_rtx (mode);
2272 targ1 = gen_reg_rtx (mode);
2274 /* Record where to go back to if we fail. */
2275 last = get_last_insn ();
     /* First choice: a direct insn pattern in this mode.  */
2277 if (optab_handler (binoptab, mode)->insn_code != CODE_FOR_nothing)
2279 int icode = (int) optab_handler (binoptab, mode)->insn_code;
2280 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
2281 enum machine_mode mode1 = insn_data[icode].operand[2].mode;
2283 rtx xop0 = op0, xop1 = op1;
2285 /* In case the insn wants input operands in modes different from
2286 those of the actual operands, convert the operands. It would
2287 seem that we don't need to convert CONST_INTs, but we do, so
2288 that they're properly zero-extended, sign-extended or truncated
2291 if (GET_MODE (op0) != mode0 && mode0 != VOIDmode)
2292 xop0 = convert_modes (mode0,
2293 GET_MODE (op0) != VOIDmode
2298 if (GET_MODE (op1) != mode1 && mode1 != VOIDmode)
2299 xop1 = convert_modes (mode1,
2300 GET_MODE (op1) != VOIDmode
2305 /* Now, if insn doesn't accept these operands, put them into pseudos. */
2306 if (!insn_data[icode].operand[1].predicate (xop0, mode0))
2307 xop0 = copy_to_mode_reg (mode0, xop0)
2309 if (!insn_data[icode].operand[2].predicate (xop1, mode1))
2310 xop1 = copy_to_mode_reg (mode1, xop1);
2312 /* We could handle this, but we should always be called with a pseudo
2313 for our targets and all insns should take them as outputs. */
2314 gcc_assert (insn_data[icode].operand[0].predicate (targ0, mode));
2315 gcc_assert (insn_data[icode].operand[3].predicate (targ1, mode));
2317 pat = GEN_FCN (icode) (targ0, xop0, xop1, targ1);
2324 delete_insns_since (last);
2327 /* It can't be done in this mode. Can we do it in a wider mode? */
2329 if (CLASS_HAS_WIDER_MODES_P (class))
2331 for (wider_mode = GET_MODE_WIDER_MODE (mode);
2332 wider_mode != VOIDmode;
2333 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
2335 if (optab_handler (binoptab, wider_mode)->insn_code
2336 != CODE_FOR_nothing)
     /* Recurse in the wider mode, then narrow both results back.  */
2338 rtx t0 = gen_reg_rtx (wider_mode);
2339 rtx t1 = gen_reg_rtx (wider_mode);
2340 rtx cop0 = convert_modes (wider_mode, mode, op0, unsignedp);
2341 rtx cop1 = convert_modes (wider_mode, mode, op1, unsignedp);
2343 if (expand_twoval_binop (binoptab, cop0, cop1,
2346 convert_move (targ0, t0, unsignedp);
2347 convert_move (targ1, t1, unsignedp);
2351 delete_insns_since (last);
     /* Total failure: remove everything emitted since entry.  */
2356 delete_insns_since (entry_last);
2360 /* Expand the two-valued library call indicated by BINOPTAB, but
2361 preserve only one of the values. If TARG0 is non-NULL, the first
2362 value is placed into TARG0; otherwise the second value is placed
2363 into TARG1. Exactly one of TARG0 and TARG1 must be non-NULL. The
2364 value stored into TARG0 or TARG1 is equivalent to (CODE OP0 OP1).
2365 This routine assumes that the value returned by the library call is
2366 as if the return value was of an integral mode twice as wide as the
2367 mode of OP0. Returns 1 if the call was successful. */
2370 expand_twoval_binop_libfunc (optab binoptab, rtx op0, rtx op1,
2371 rtx targ0, rtx targ1, enum rtx_code code)
2373 enum machine_mode mode;
2374 enum machine_mode libval_mode;
2378 /* Exactly one of TARG0 or TARG1 should be non-NULL. */
2379 gcc_assert (!targ0 != !targ1);
2381 mode = GET_MODE (op0);
     /* No library function registered for this mode: fail.  */
2382 if (!optab_handler (binoptab, mode)->libfunc)
2385 /* The value returned by the library function will have twice as
2386 many bits as the nominal MODE. */
2387 libval_mode = smallest_mode_for_size (2 * GET_MODE_BITSIZE (mode),
2390 libval = emit_library_call_value (optab_handler (binoptab, mode)->libfunc,
2391 NULL_RTX, LCT_CONST,
2395 /* Get the part of VAL containing the value that we want.  The first
     value lives at subreg offset 0, the second one MODE-size bytes in. */
2396 libval = simplify_gen_subreg (mode, libval, libval_mode,
2397 targ0 ? 0 : GET_MODE_SIZE (mode));
2398 insns = get_insns ();
2400 /* Move the result into the desired location. */
2401 emit_libcall_block (insns, targ0 ? targ0 : targ1, libval,
2402 gen_rtx_fmt_ee (code, mode, op0, op1));
2408 /* Wrapper around expand_unop which takes an rtx code to specify
2409 the operation to perform, not an optab pointer. All other
2410 arguments are the same. */
2412 expand_simple_unop (enum machine_mode mode, enum rtx_code code, rtx op0,
2413 rtx target, int unsignedp)
     /* Map the rtx code to its optab via the file-level table.  */
2415 optab unop = code_to_optab[(int) code];
2418 return expand_unop (mode, unop, op0, target, unsignedp);
     /* Try computing clz in a wider mode:
        clz (x:narrow) is computed as  */
2424 (clz:wide (zero_extend:wide x)) - ((width wide) - (width narrow)). */
2426 widen_clz (enum machine_mode mode, rtx op0, rtx target)
2428 enum mode_class class = GET_MODE_CLASS (mode);
2429 if (CLASS_HAS_WIDER_MODES_P (class))
2431 enum machine_mode wider_mode;
2432 for (wider_mode = GET_MODE_WIDER_MODE (mode);
2433 wider_mode != VOIDmode;
2434 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
2436 if (optab_handler (clz_optab, wider_mode)->insn_code
2437 != CODE_FOR_nothing)
2439 rtx xop0, temp, last;
2441 last = get_last_insn ();
2444 target = gen_reg_rtx (mode);
     /* Zero-extend (unsignedp == true), since leading zeros must come
        from the zero bits, then count in the wider mode.  */
2445 xop0 = widen_operand (op0, wider_mode, mode, true, false);
2446 temp = expand_unop (wider_mode, clz_optab, xop0, NULL_RTX, true);
     /* Subtract off the extra leading zeros the widening introduced.  */
2448 temp = expand_binop (wider_mode, sub_optab, temp,
2449 GEN_INT (GET_MODE_BITSIZE (wider_mode)
2450 - GET_MODE_BITSIZE (mode)),
2451 target, true, OPTAB_DIRECT);
2453 delete_insns_since (last);
     /* Try computing bswap in a wider mode:
        bswap (x:narrow) is computed as  */
2465 (lshiftrt:wide (bswap:wide x) ((width wide) - (width narrow))). */
2467 widen_bswap (enum machine_mode mode, rtx op0, rtx target)
2469 enum mode_class class = GET_MODE_CLASS (mode);
2470 enum machine_mode wider_mode;
2473 if (!CLASS_HAS_WIDER_MODES_P (class))
     /* Find the first wider mode with a bswap pattern.  */
2476 for (wider_mode = GET_MODE_WIDER_MODE (mode);
2477 wider_mode != VOIDmode;
2478 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
2479 if (optab_handler (bswap_optab, wider_mode)->insn_code != CODE_FOR_nothing)
2484 last = get_last_insn ();
2486 x = widen_operand (op0, wider_mode, mode, true, true);
2487 x = expand_unop (wider_mode, bswap_optab, x, NULL_RTX, true);
     /* The swapped narrow value ends up in the high part; shift it down.  */
2490 x = expand_shift (RSHIFT_EXPR, wider_mode, x,
2491 size_int (GET_MODE_BITSIZE (wider_mode)
2492 - GET_MODE_BITSIZE (mode)),
2498 target = gen_reg_rtx (mode);
2499 emit_move_insn (target, gen_lowpart (mode, x));
2502 delete_insns_since (last);
2507 /* Try calculating bswap as two bswaps of two word-sized operands. */
2510 expand_doubleword_bswap (enum machine_mode mode, rtx op, rtx target)
     /* Swap each word individually; the words themselves then exchange
        positions (word 0 of the result gets the bswapped word 1, etc.).  */
2514 t1 = expand_unop (word_mode, bswap_optab,
2515 operand_subword_force (op, 0, mode), NULL_RTX, true);
2516 t0 = expand_unop (word_mode, bswap_optab,
2517 operand_subword_force (op, 1, mode), NULL_RTX, true);
2520 target = gen_reg_rtx (mode);
     /* Clobber first so the word-by-word stores form a recognizable
        multi-word initialization.  */
2522 emit_insn (gen_rtx_CLOBBER (VOIDmode, target));
2523 emit_move_insn (operand_subword (target, 0, 1, mode), t0);
2524 emit_move_insn (operand_subword (target, 1, 1, mode), t1);
2529 /* Try calculating (parity x) as (and (popcount x) 1), where
2530 popcount can also be done in a wider mode. */
2532 expand_parity (enum machine_mode mode, rtx op0, rtx target)
2534 enum mode_class class = GET_MODE_CLASS (mode);
2535 if (CLASS_HAS_WIDER_MODES_P (class))
2537 enum machine_mode wider_mode;
     /* Note: the search starts at MODE itself, not the next wider mode,
        so a native popcount in MODE is used directly.  */
2538 for (wider_mode = mode; wider_mode != VOIDmode;
2539 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
2541 if (optab_handler (popcount_optab, wider_mode)->insn_code
2542 != CODE_FOR_nothing)
2544 rtx xop0, temp, last;
2546 last = get_last_insn ();
2549 target = gen_reg_rtx (mode);
     /* Zero-extend: the widening must not add set bits.  */
2550 xop0 = widen_operand (op0, wider_mode, mode, true, false);
2551 temp = expand_unop (wider_mode, popcount_optab, xop0, NULL_RTX,
     /* parity = popcount & 1.  */
2554 temp = expand_binop (wider_mode, and_optab, temp, const1_rtx,
2555 target, true, OPTAB_DIRECT);
2557 delete_insns_since (last);
2566 /* Try calculating ffs(x) using clz(x). Since the ffs builtin promises
2567 to return zero for a zero value and clz may have an undefined value
2568 in that case, only do this if we know clz returns the right thing so
2569 that we don't have to generate a test and branch. */
2571 expand_ffs (enum machine_mode mode, rtx op0, rtx target)
     /* Use the optab_handler accessor, as everywhere else in this file,
        rather than poking at clz_optab->handlers directly.  Require that
        clz (0) is defined and equals the mode bitsize, so ffs (0) == 0
        falls out without a test and branch.  */
2574 if (optab_handler (clz_optab, mode)->insn_code != CODE_FOR_nothing
2575 && CLZ_DEFINED_VALUE_AT_ZERO (mode, val) == 2
2576 && val == GET_MODE_BITSIZE (mode))
2578 rtx last = get_last_insn ();
     /* ffs (x) = bitsize - clz (x & -x); x & -x isolates the lowest set bit.  */
2581 temp = expand_unop (mode, neg_optab, op0, NULL_RTX, true);
2583 temp = expand_binop (mode, and_optab, op0, temp, NULL_RTX,
2584 true, OPTAB_DIRECT);
2586 temp = expand_unop (mode, clz_optab, temp, NULL_RTX, true);
2588 temp = expand_binop (mode, sub_optab,
2589 GEN_INT (GET_MODE_BITSIZE (mode)),
2591 target, true, OPTAB_DIRECT);
     /* On any failure, discard the partially emitted sequence.  */
2593 delete_insns_since (last);
2599 /* We can compute ctz(x) using clz(x) with a similar recipe. Here the ctz
2600 builtin has an undefined result on zero, just like clz, so we don't have
2601 to do that check. */
2603 expand_ctz (enum machine_mode mode, rtx op0, rtx target)
     /* Use the optab_handler accessor, as everywhere else in this file,
        rather than poking at clz_optab->handlers directly.  */
2605 if (optab_handler (clz_optab, mode)->insn_code != CODE_FOR_nothing)
2607 rtx last = get_last_insn ();
     /* ctz (x) = (bitsize - 1) - clz (x & -x), computed here as
        clz (x & -x) ^ (bitsize - 1) since clz of a one-bit value
        is always in [0, bitsize - 1].  */
2610 temp = expand_unop (mode, neg_optab, op0, NULL_RTX, true);
2612 temp = expand_binop (mode, and_optab, op0, temp, NULL_RTX,
2613 true, OPTAB_DIRECT);
2615 temp = expand_unop (mode, clz_optab, temp, NULL_RTX, true);
2617 temp = expand_binop (mode, xor_optab, temp,
2618 GEN_INT (GET_MODE_BITSIZE (mode) - 1),
2620 true, OPTAB_DIRECT);
     /* On any failure, discard the partially emitted sequence.  */
2622 delete_insns_since (last);
2628 /* Extract the OMODE lowpart from VAL, which has IMODE. Under certain
2629 conditions, VAL may already be a SUBREG against which we cannot generate
2630 a further SUBREG. In this case, we expect forcing the value into a
2631 register will work around the situation. */
2634 lowpart_subreg_maybe_copy (enum machine_mode omode, rtx val,
2635 enum machine_mode imode)
2638 ret = lowpart_subreg (omode, val, imode);
     /* First attempt failed (returned NULL): copy to a fresh register
        and retry; the second attempt must succeed.  */
2641 val = force_reg (imode, val);
2642 ret = lowpart_subreg (omode, val, imode);
2643 gcc_assert (ret != NULL);
2648 /* Expand a floating point absolute value or negation operation via a
2649 logical operation on the sign bit. */
2652 expand_absneg_bit (enum rtx_code code, enum machine_mode mode,
2653 rtx op0, rtx target)
2655 const struct real_format *fmt;
2656 int bitpos, word, nwords, i;
2657 enum machine_mode imode;
2658 HOST_WIDE_INT hi, lo;
2661 /* The format has to have a simple sign bit. */
2662 fmt = REAL_MODE_FORMAT (mode);
2666 bitpos = fmt->signbit_rw;
2670 /* Don't create negative zeros if the format doesn't support them. */
2671 if (code == NEG && !fmt->has_signed_zero)
     /* Single-word case: operate on the whole value in an integer mode.  */
2674 if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
2676 imode = int_mode_for_mode (mode);
2677 if (imode == BLKmode)
     /* Multi-word case: locate the word holding the sign bit.  */
2686 if (FLOAT_WORDS_BIG_ENDIAN)
2687 word = (GET_MODE_BITSIZE (mode) - bitpos) / BITS_PER_WORD;
2689 word = bitpos / BITS_PER_WORD;
2690 bitpos = bitpos % BITS_PER_WORD;
2691 nwords = (GET_MODE_BITSIZE (mode) + BITS_PER_WORD - 1) / BITS_PER_WORD;
     /* Build the sign-bit mask as a (lo, hi) double-int pair.  */
2694 if (bitpos < HOST_BITS_PER_WIDE_INT)
2697 lo = (HOST_WIDE_INT) 1 << bitpos;
2701 hi = (HOST_WIDE_INT) 1 << (bitpos - HOST_BITS_PER_WIDE_INT);
2707 if (target == 0 || target == op0)
2708 target = gen_reg_rtx (mode);
     /* Process word by word: AND with ~mask for ABS, XOR with mask for NEG
        on the sign word; other words are copied unchanged.  */
2714 for (i = 0; i < nwords; ++i)
2716 rtx targ_piece = operand_subword (target, i, 1, mode);
2717 rtx op0_piece = operand_subword_force (op0, i, mode);
2721 temp = expand_binop (imode, code == ABS ? and_optab : xor_optab,
2723 immed_double_const (lo, hi, imode),
2724 targ_piece, 1, OPTAB_LIB_WIDEN);
2725 if (temp != targ_piece)
2726 emit_move_insn (targ_piece, temp);
2729 emit_move_insn (targ_piece, op0_piece);
2732 insns = get_insns ();
     /* Wrap the word stores in a no-conflict block carrying a
        (CODE op0) REG_EQUAL equivalent.  */
2735 temp = gen_rtx_fmt_e (code, mode, copy_rtx (op0));
2736 emit_no_conflict_block (insns, target, op0, NULL_RTX, temp);
     /* Single-word path: one logical op on the integer view of the value.  */
2740 temp = expand_binop (imode, code == ABS ? and_optab : xor_optab,
2741 gen_lowpart (imode, op0),
2742 immed_double_const (lo, hi, imode),
2743 gen_lowpart (imode, target), 1, OPTAB_LIB_WIDEN);
2744 target = lowpart_subreg_maybe_copy (mode, temp, imode);
2746 set_unique_reg_note (get_last_insn (), REG_EQUAL,
2747 gen_rtx_fmt_e (code, mode, copy_rtx (op0)));
2753 /* Generate code to perform an operation specified by UNOPTAB
2754 on operand OP0, with result having machine-mode MODE.
2756 UNSIGNEDP is for the case where we have to widen the operands
2757 to perform the operation. It says to use zero-extension.
2759 If TARGET is nonzero, the value
2760 is generated there, if it is convenient to do so.
2761 In all cases an rtx is returned for the locus of the value;
2762 this may or may not be TARGET. */
2765 expand_unop (enum machine_mode mode, optab unoptab, rtx op0, rtx target,
2768 enum mode_class class;
2769 enum machine_mode wider_mode;
2771 rtx last = get_last_insn ();
2774 class = GET_MODE_CLASS (mode);
     /* Strategy 1: a direct insn pattern for this mode.  */
2776 if (optab_handler (unoptab, mode)->insn_code != CODE_FOR_nothing)
2778 int icode = (int) optab_handler (unoptab, mode)->insn_code;
2779 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
2785 temp = gen_reg_rtx (mode);
2787 if (GET_MODE (xop0) != VOIDmode
2788 && GET_MODE (xop0) != mode0)
2789 xop0 = convert_to_mode (mode0, xop0, unsignedp);
2791 /* Now, if insn doesn't accept our operand, put it into a pseudo. */
2793 if (!insn_data[icode].operand[1].predicate (xop0, mode0))
2794 xop0 = copy_to_mode_reg (mode0, xop0);
2796 if (!insn_data[icode].operand[0].predicate (temp, mode))
2797 temp = gen_reg_rtx (mode);
2799 pat = GEN_FCN (icode) (temp, xop0);
     /* Multi-insn expansion with no attachable REG_EQUAL note: retry
        without a preferred target so the note can be added.  */
2802 if (INSN_P (pat) && NEXT_INSN (pat) != NULL_RTX
2803 && ! add_equal_note (pat, temp, unoptab->code, xop0, NULL_RTX)
2805 delete_insns_since (last);
2806 return expand_unop (mode, unoptab, op0, NULL_RTX, unsignedp);
2814 delete_insns_since (last);
2817 /* It can't be done in this mode. Can we open-code it in a wider mode? */
2819 /* Widening clz needs special treatment. */
2820 if (unoptab == clz_optab)
2822 temp = widen_clz (mode, op0, target);
2829 /* Widening (or narrowing) bswap needs special treatment. */
2830 if (unoptab == bswap_optab)
2832 temp = widen_bswap (mode, op0, target);
     /* Double-word bswap can be split into two word_mode bswaps.  */
2836 if (GET_MODE_SIZE (mode) == 2 * UNITS_PER_WORD
2837 && optab_handler (unoptab, word_mode)->insn_code != CODE_FOR_nothing)
2839 temp = expand_doubleword_bswap (mode, op0, target);
     /* Strategy 2: open-code in a wider mode and truncate back.  */
2847 if (CLASS_HAS_WIDER_MODES_P (class))
2848 for (wider_mode = GET_MODE_WIDER_MODE (mode);
2849 wider_mode != VOIDmode;
2850 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
2852 if (optab_handler (unoptab, wider_mode)->insn_code != CODE_FOR_nothing)
2856 /* For certain operations, we need not actually extend
2857 the narrow operand, as long as we will truncate the
2858 results to the same narrowness. */
2860 xop0 = widen_operand (xop0, wider_mode, mode, unsignedp,
2861 (unoptab == neg_optab
2862 || unoptab == one_cmpl_optab)
2863 && class == MODE_INT);
2865 temp = expand_unop (wider_mode, unoptab, xop0, NULL_RTX,
     /* If truncation is not a no-op, move through convert_move;
        otherwise a lowpart subreg suffices.  */
2870 if (class != MODE_INT
2871 || !TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode),
2872 GET_MODE_BITSIZE (wider_mode)))
2875 target = gen_reg_rtx (mode);
2876 convert_move (target, temp, 0);
2880 return gen_lowpart (mode, temp);
2883 delete_insns_since (last);
2887 /* These can be done a word at a time. */
2888 if (unoptab == one_cmpl_optab
2889 && class == MODE_INT
2890 && GET_MODE_SIZE (mode) > UNITS_PER_WORD
2891 && optab_handler (unoptab, word_mode)->insn_code != CODE_FOR_nothing)
2896 if (target == 0 || target == op0)
2897 target = gen_reg_rtx (mode);
2901 /* Do the actual arithmetic. */
2902 for (i = 0; i < GET_MODE_BITSIZE (mode) / BITS_PER_WORD; i++)
2904 rtx target_piece = operand_subword (target, i, 1, mode);
2905 rtx x = expand_unop (word_mode, unoptab,
2906 operand_subword_force (op0, i, mode),
2907 target_piece, unsignedp);
2909 if (target_piece != x)
2910 emit_move_insn (target_piece, x);
2913 insns = get_insns ();
2916 emit_no_conflict_block (insns, target, op0, NULL_RTX,
2917 gen_rtx_fmt_e (unoptab->code, mode,
     /* Strategy 3: operation-specific fallbacks.  */
2922 if (unoptab->code == NEG)
2924 /* Try negating floating point values by flipping the sign bit. */
2925 if (SCALAR_FLOAT_MODE_P (mode))
2927 temp = expand_absneg_bit (NEG, mode, op0, target);
2932 /* If there is no negation pattern, and we have no negative zero,
2933 try subtracting from zero. */
2934 if (!HONOR_SIGNED_ZEROS (mode))
2936 temp = expand_binop (mode, (unoptab == negv_optab
2937 ? subv_optab : sub_optab),
2938 CONST0_RTX (mode), op0, target,
2939 unsignedp, OPTAB_DIRECT);
2945 /* Try calculating parity (x) as popcount (x) % 2. */
2946 if (unoptab == parity_optab)
2948 temp = expand_parity (mode, op0, target);
2953 /* Try implementing ffs (x) in terms of clz (x). */
2954 if (unoptab == ffs_optab)
2956 temp = expand_ffs (mode, op0, target);
2961 /* Try implementing ctz (x) in terms of clz (x). */
2962 if (unoptab == ctz_optab)
2964 temp = expand_ctz (mode, op0, target);
2970 /* Now try a library call in this mode. */
2971 if (optab_handler (unoptab, mode)->libfunc)
2975 enum machine_mode outmode = mode;
2977 /* All of these functions return small values. Thus we choose to
2978 have them return something that isn't a double-word. */
2979 if (unoptab == ffs_optab || unoptab == clz_optab || unoptab == ctz_optab
2980 || unoptab == popcount_optab || unoptab == parity_optab)
2982 = GET_MODE (hard_libcall_value (TYPE_MODE (integer_type_node)));
2986 /* Pass 1 for NO_QUEUE so we don't lose any increments
2987 if the libcall is cse'd or moved. */
2988 value = emit_library_call_value (optab_handler (unoptab, mode)->libfunc,
2989 NULL_RTX, LCT_CONST, outmode,
2991 insns = get_insns ();
2994 target = gen_reg_rtx (outmode);
2995 emit_libcall_block (insns, target, value,
2996 gen_rtx_fmt_e (unoptab->code, outmode, op0));
3001 /* It can't be done in this mode. Can we do it in a wider mode? */
     /* Strategy 4: like strategy 2, but a wider-mode libfunc also counts.  */
3003 if (CLASS_HAS_WIDER_MODES_P (class))
3005 for (wider_mode = GET_MODE_WIDER_MODE (mode);
3006 wider_mode != VOIDmode;
3007 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
3009 if ((optab_handler (unoptab, wider_mode)->insn_code
3010 != CODE_FOR_nothing)
3011 || optab_handler (unoptab, wider_mode)->libfunc)
3015 /* For certain operations, we need not actually extend
3016 the narrow operand, as long as we will truncate the
3017 results to the same narrowness. */
3019 xop0 = widen_operand (xop0, wider_mode, mode, unsignedp,
3020 (unoptab == neg_optab
3021 || unoptab == one_cmpl_optab)
3022 && class == MODE_INT);
3024 temp = expand_unop (wider_mode, unoptab, xop0, NULL_RTX,
3027 /* If we are generating clz using wider mode, adjust the
3029 if (unoptab == clz_optab && temp != 0)
3030 temp = expand_binop (wider_mode, sub_optab, temp,
3031 GEN_INT (GET_MODE_BITSIZE (wider_mode)
3032 - GET_MODE_BITSIZE (mode)),
3033 target, true, OPTAB_DIRECT);
3037 if (class != MODE_INT)
3040 target = gen_reg_rtx (mode);
3041 convert_move (target, temp, 0);
3045 return gen_lowpart (mode, temp);
3048 delete_insns_since (last);
3053 /* One final attempt at implementing negation via subtraction,
3054 this time allowing widening of the operand. */
3055 if (unoptab->code == NEG && !HONOR_SIGNED_ZEROS (mode))
3058 temp = expand_binop (mode,
3059 unoptab == negv_optab ? subv_optab : sub_optab,
3060 CONST0_RTX (mode), op0,
3061 target, unsignedp, OPTAB_LIB_WIDEN);
3069 /* Emit code to compute the absolute value of OP0, with result to
3070 TARGET if convenient. (TARGET may be 0.) The return value says
3071 where the result actually is to be found.
3073 MODE is the mode of the operand; the mode of the result is
3074 different but can be deduced from MODE.
3079 expand_abs_nojump (enum machine_mode mode, rtx op0, rtx target,
3080 int result_unsignedp)
3085 result_unsignedp = 1;
3087 /* First try to do it with a special abs instruction. */
3088 temp = expand_unop (mode, result_unsignedp ? abs_optab : absv_optab,
3093 /* For floating point modes, try clearing the sign bit. */
3094 if (SCALAR_FLOAT_MODE_P (mode))
3096 temp = expand_absneg_bit (ABS, mode, op0, target);
3101 /* If we have a MAX insn, we can do this as MAX (x, -x). */
3102 if (optab_handler (smax_optab, mode)->insn_code != CODE_FOR_nothing
3103 && !HONOR_SIGNED_ZEROS (mode))
3105 rtx last = get_last_insn ();
3107 temp = expand_unop (mode, neg_optab, op0, NULL_RTX, 0);
3109 temp = expand_binop (mode, smax_optab, op0, temp, target, 0,
3115 delete_insns_since (last);
3118 /* If this machine has expensive jumps, we can do integer absolute
3119 value of X as (((signed) x >> (W-1)) ^ x) - ((signed) x >> (W-1)),
3120 where W is the width of MODE. */
3122 if (GET_MODE_CLASS (mode) == MODE_INT && BRANCH_COST >= 2)
     /* EXTENDED is an all-ones or all-zeros mask from the sign bit.  */
3124 rtx extended = expand_shift (RSHIFT_EXPR, mode, op0,
3125 size_int (GET_MODE_BITSIZE (mode) - 1),
3128 temp = expand_binop (mode, xor_optab, extended, op0, target, 0,
3131 temp = expand_binop (mode, result_unsignedp ? sub_optab : subv_optab,
3132 temp, extended, target, 0, OPTAB_LIB_WIDEN);
     /* Like expand_abs_nojump, but falls back to a compare-and-branch
        sequence (conditional negate) when no jumpless form exists.
        SAFE nonzero means it is safe to reuse TARGET as scratch.  */
3142 expand_abs (enum machine_mode mode, rtx op0, rtx target,
3143 int result_unsignedp, int safe)
3148 result_unsignedp = 1;
3150 temp = expand_abs_nojump (mode, op0, target, result_unsignedp);
3154 /* If that does not win, use conditional jump and negate. */
3156 /* It is safe to use the target if it is the same
3157 as the source if this is also a pseudo register */
3158 if (op0 == target && REG_P (op0)
3159 && REGNO (op0) >= FIRST_PSEUDO_REGISTER)
3162 op1 = gen_label_rtx ();
3163 if (target == 0 || ! safe
3164 || GET_MODE (target) != mode
3165 || (MEM_P (target) && MEM_VOLATILE_P (target))
3167 && REGNO (target) < FIRST_PSEUDO_REGISTER))
3168 target = gen_reg_rtx (mode);
3170 emit_move_insn (target, op0);
     /* If op0 >= 0, skip the negation.  */
3173 do_compare_rtx_and_jump (target, CONST0_RTX (mode), GE, 0, mode,
3174 NULL_RTX, NULL_RTX, op1);
3176 op0 = expand_unop (mode, result_unsignedp ? neg_optab : negv_optab,
3179 emit_move_insn (target, op0);
3185 /* A subroutine of expand_copysign, perform the copysign operation using the
3186 abs and neg primitives advertised to exist on the target. The assumption
3187 is that we have a split register file, and leaving op0 in fp registers,
3188 and not playing with subregs so much, will help the register allocator. */
3191 expand_copysign_absneg (enum machine_mode mode, rtx op0, rtx op1, rtx target,
3192 int bitpos, bool op0_is_abs)
3194 enum machine_mode imode;
3201 /* Check if the back end provides an insn that handles signbit for the
     mode.  Use the optab_handler accessor, as everywhere else in this
     file, rather than poking at signbit_optab->handlers directly.  */
3203 icode = (int) optab_handler (signbit_optab, mode)->insn_code;
3204 if (icode != CODE_FOR_nothing)
3206 imode = insn_data[icode].operand[0].mode;
3207 sign = gen_reg_rtx (imode);
3208 emit_unop_insn (icode, sign, op1, UNKNOWN);
     /* No signbit pattern: extract the sign bit of OP1 with a mask.  */
3212 HOST_WIDE_INT hi, lo;
3214 if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
3216 imode = int_mode_for_mode (mode);
3217 if (imode == BLKmode)
3219 op1 = gen_lowpart (imode, op1);
3226 if (FLOAT_WORDS_BIG_ENDIAN)
3227 word = (GET_MODE_BITSIZE (mode) - bitpos) / BITS_PER_WORD;
3229 word = bitpos / BITS_PER_WORD;
3230 bitpos = bitpos % BITS_PER_WORD;
3231 op1 = operand_subword_force (op1, word, mode);
     /* Build the sign-bit mask as a (lo, hi) double-int pair.  */
3234 if (bitpos < HOST_BITS_PER_WIDE_INT)
3237 lo = (HOST_WIDE_INT) 1 << bitpos;
3241 hi = (HOST_WIDE_INT) 1 << (bitpos - HOST_BITS_PER_WIDE_INT);
3245 sign = gen_reg_rtx (imode);
3246 sign = expand_binop (imode, and_optab, op1,
3247 immed_double_const (lo, hi, imode),
3248 NULL_RTX, 1, OPTAB_LIB_WIDEN);
     /* Clear OP0's sign bit unless the caller proved it already clear.  */
3253 op0 = expand_unop (mode, abs_optab, op0, target, 0);
3260 if (target == NULL_RTX)
3261 target = copy_to_reg (op0);
3263 emit_move_insn (target, op0);
     /* Conditionally negate: skip the negation when SIGN is zero.  */
3266 label = gen_label_rtx ();
3267 emit_cmp_and_jump_insns (sign, const0_rtx, EQ, NULL_RTX, imode, 1, label);
3269 if (GET_CODE (op0) == CONST_DOUBLE)
3270 op0 = simplify_unary_operation (NEG, mode, op0, mode);
3272 op0 = expand_unop (mode, neg_optab, op0, target, 0);
3274 emit_move_insn (target, op0);
3282 /* A subroutine of expand_copysign, perform the entire copysign operation
3283 with integer bitmasks. BITPOS is the position of the sign bit; OP0_IS_ABS
3284 is true if op0 is known to have its sign bit clear. */
3287 expand_copysign_bit (enum machine_mode mode, rtx op0, rtx op1, rtx target,
3288 int bitpos, bool op0_is_abs)
3290 enum machine_mode imode;
3291 HOST_WIDE_INT hi, lo;
3292 int word, nwords, i;
3295 if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
3297 imode = int_mode_for_mode (mode);
3298 if (imode == BLKmode)
     /* Multi-word case: locate the word holding the sign bit.  */
3307 if (FLOAT_WORDS_BIG_ENDIAN)
3308 word = (GET_MODE_BITSIZE (mode) - bitpos) / BITS_PER_WORD;
3310 word = bitpos / BITS_PER_WORD;
3311 bitpos = bitpos % BITS_PER_WORD;
3312 nwords = (GET_MODE_BITSIZE (mode) + BITS_PER_WORD - 1) / BITS_PER_WORD;
     /* Build the sign-bit mask as a (lo, hi) double-int pair.  */
3315 if (bitpos < HOST_BITS_PER_WIDE_INT)
3318 lo = (HOST_WIDE_INT) 1 << bitpos;
3322 hi = (HOST_WIDE_INT) 1 << (bitpos - HOST_BITS_PER_WIDE_INT);
3326 if (target == 0 || target == op0 || target == op1)
3327 target = gen_reg_rtx (mode);
     /* Word-by-word: result = (op0 & ~mask) | (op1 & mask) on the sign
        word; other words are copied from op0 unchanged.  */
3333 for (i = 0; i < nwords; ++i)
3335 rtx targ_piece = operand_subword (target, i, 1, mode);
3336 rtx op0_piece = operand_subword_force (op0, i, mode);
3341 op0_piece = expand_binop (imode, and_optab, op0_piece,
3342 immed_double_const (~lo, ~hi, imode),
3343 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3345 op1 = expand_binop (imode, and_optab,
3346 operand_subword_force (op1, i, mode),
3347 immed_double_const (lo, hi, imode),
3348 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3350 temp = expand_binop (imode, ior_optab, op0_piece, op1,
3351 targ_piece, 1, OPTAB_LIB_WIDEN);
3352 if (temp != targ_piece)
3353 emit_move_insn (targ_piece, temp);
3356 emit_move_insn (targ_piece, op0_piece);
3359 insns = get_insns ();
3362 emit_no_conflict_block (insns, target, op0, op1, NULL_RTX);
     /* Single-word path: same mask algebra on the integer view.  */
3366 op1 = expand_binop (imode, and_optab, gen_lowpart (imode, op1),
3367 immed_double_const (lo, hi, imode),
3368 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3370 op0 = gen_lowpart (imode, op0);
3372 op0 = expand_binop (imode, and_optab, op0,
3373 immed_double_const (~lo, ~hi, imode),
3374 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3376 temp = expand_binop (imode, ior_optab, op0, op1,
3377 gen_lowpart (imode, target), 1, OPTAB_LIB_WIDEN);
3378 target = lowpart_subreg_maybe_copy (mode, temp, imode);
3384 /* Expand the C99 copysign operation. OP0 and OP1 must be the same
3385 scalar floating point mode. Return NULL if we do not know how to
3386 expand the operation inline. */
3389 expand_copysign (rtx op0, rtx op1, rtx target)
3391 enum machine_mode mode = GET_MODE (op0);
3392 const struct real_format *fmt;
3396 gcc_assert (SCALAR_FLOAT_MODE_P (mode));
3397 gcc_assert (GET_MODE (op1) == mode);
3399 /* First try to do it with a special instruction. */
3400 temp = expand_binop (mode, copysign_optab, op0, op1,
3401 target, 0, OPTAB_DIRECT);
     /* The bit-twiddling fallbacks need a format with signed zero.  */
3405 fmt = REAL_MODE_FORMAT (mode);
3406 if (fmt == NULL || !fmt->has_signed_zero)
     /* A constant OP0 can be normalized to its absolute value up front.  */
3410 if (GET_CODE (op0) == CONST_DOUBLE)
3412 if (real_isneg (CONST_DOUBLE_REAL_VALUE (op0)))
3413 op0 = simplify_unary_operation (ABS, mode, op0, mode);
     /* Prefer the abs/neg strategy when the target advertises both.  */
3417 if (fmt->signbit_ro >= 0
3418 && (GET_CODE (op0) == CONST_DOUBLE
3419 || (optab_handler (neg_optab, mode)->insn_code != CODE_FOR_nothing
3420 && optab_handler (abs_optab, mode)->insn_code != CODE_FOR_nothing)))
3422 temp = expand_copysign_absneg (mode, op0, op1, target,
3423 fmt->signbit_ro, op0_is_abs);
     /* No writable sign-bit position: cannot do it with masks either.  */
3428 if (fmt->signbit_rw < 0)
3430 return expand_copysign_bit (mode, op0, op1, target,
3431 fmt->signbit_rw, op0_is_abs);
3434 /* Generate an instruction whose insn-code is INSN_CODE,
3435 with two operands: an output TARGET and an input OP0.
3436 TARGET *must* be nonzero, and the output is always stored there.
3437 CODE is an rtx code such that (CODE OP0) is an rtx that describes
3438 the value that is stored into TARGET. */
3441 emit_unop_insn (int icode, rtx target, rtx op0, enum rtx_code code)
3444 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
3449 /* Now, if insn does not accept our operands, put them into pseudos. */
3451 if (!insn_data[icode].operand[1].predicate (op0, mode0))
3452 op0 = copy_to_mode_reg (mode0, op0);
3454 if (!insn_data[icode].operand[0].predicate (temp, GET_MODE (temp)))
3455 temp = gen_reg_rtx (GET_MODE (temp));
3457 pat = GEN_FCN (icode) (temp, op0);
     /* Attach a REG_EQUAL note for multi-insn expansions, unless the
        caller passed UNKNOWN to suppress it.  */
3459 if (INSN_P (pat) && NEXT_INSN (pat) != NULL_RTX && code != UNKNOWN)
3460 add_equal_note (pat, temp, code, op0, NULL_RTX);
     /* If the insn could not store directly into TARGET, copy now.  */
3465 emit_move_insn (target, temp);
/* State passed through note_stores to no_conflict_move_test:
   TARGET is the multi-word output being built; FIRST and INSN bound the
   insn range examined so far.  NOTE(review): further members (e.g. the
   must_stay flag used below) are elided from this excerpt.  */
3468 struct no_conflict_data
3470 rtx target, first, insn;
3474 /* Called via note_stores by emit_no_conflict_block and emit_libcall_block.
3475 Set P->must_stay if the currently examined clobber / store has to stay
3476 in the list of insns that constitute the actual no_conflict block /
3479 no_conflict_move_test (rtx dest, const_rtx set, void *p0)
3481 struct no_conflict_data *p= p0;
3483 /* If this insn directly contributes to setting the target, it must stay. */
3484 if (reg_overlap_mentioned_p (p->target, dest))
3485 p->must_stay = true;
3486 /* If we haven't committed to keeping any other insns in the list yet,
3487 there is nothing more to check. */
3488 else if (p->insn == p->first)
3490 /* If this insn sets / clobbers a register that feeds one of the insns
3491 already in the list, this insn has to stay too. */
3492 else if (reg_overlap_mentioned_p (dest, PATTERN (p->first))
3493 || (CALL_P (p->first) && (find_reg_fusage (p->first, USE, dest)))
3494 || reg_used_between_p (dest, p->first, p->insn)
3495 /* Likewise if this insn depends on a register set by a previous
3496 insn in the list, or if it sets a result (presumably a hard
3497 register) that is set or clobbered by a previous insn.
3498 N.B. the modified_*_p (SET_DEST...) tests applied to a MEM
3499 SET_DEST perform the former check on the address, and the latter
3500 check on the MEM. */
3501 || (GET_CODE (set) == SET
3502 && (modified_in_p (SET_SRC (set), p->first)
3503 || modified_in_p (SET_DEST (set), p->first)
3504 || modified_between_p (SET_SRC (set), p->first, p->insn)
3505 || modified_between_p (SET_DEST (set), p->first, p->insn))))
3506 p->must_stay = true;
3509 /* Encapsulate the block starting at FIRST and ending with LAST, which is
3510 logically equivalent to EQUIV, so it gets manipulated as a unit if it
3511 is possible to do so. */
3514 maybe_encapsulate_block (rtx first, rtx last, rtx equiv)
     /* Never encapsulate a block that may trap when non-call exceptions
        are enabled: the EH edges must stay visible.  */
3516 if (!flag_non_call_exceptions || !may_trap_p (equiv))
3518 /* We can't attach the REG_LIBCALL and REG_RETVAL notes when the
3519 encapsulated region would not be in one basic block, i.e. when
3520 there is a control_flow_insn_p insn between FIRST and LAST. */
3521 bool attach_libcall_retval_notes = true;
3522 rtx insn, next = NEXT_INSN (last);
3524 for (insn = first; insn != next; insn = NEXT_INSN (insn))
3525 if (control_flow_insn_p (insn))
3527 attach_libcall_retval_notes = false;
3531 if (attach_libcall_retval_notes)
     /* Bracket the region with matching REG_LIBCALL / REG_RETVAL notes
        and tag every insn in between with the same REG_LIBCALL_ID.  */
3533 REG_NOTES (first) = gen_rtx_INSN_LIST (REG_LIBCALL, last,
3535 REG_NOTES (last) = gen_rtx_INSN_LIST (REG_RETVAL, first,
3537 next = NEXT_INSN (last);
3538 for (insn = first; insn != next; insn = NEXT_INSN (insn))
3539 REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_LIBCALL_ID,
3540 GEN_INT (libcall_id),
3547 /* Emit code to perform a series of operations on a multi-word quantity, one
3550 Such a block is preceded by a CLOBBER of the output, consists of multiple
3551 insns, each setting one word of the output, and followed by a SET copying
3552 the output to itself.
3554 Each of the insns setting words of the output receives a REG_NO_CONFLICT
3555 note indicating that it doesn't conflict with the (also multi-word)
3556 inputs. The entire block is surrounded by REG_LIBCALL and REG_RETVAL
3559 INSNS is a block of code generated to perform the operation, not including
3560 the CLOBBER and final copy. All insns that compute intermediate values
3561 are first emitted, followed by the block as described above.
3563 TARGET, OP0, and OP1 are the output and inputs of the operations,
3564 respectively. OP1 may be zero for a unary operation.
3566 EQUIV, if nonzero, is an expression to be placed into a REG_EQUAL note
3569 If TARGET is not a register, INSNS is simply emitted with no special
3570 processing. Likewise if anything in INSNS is not an INSN or if
3571 there is a libcall block inside INSNS.
3573 The final insn emitted is returned. */
3576 emit_no_conflict_block (rtx insns, rtx target, rtx op0, rtx op1, rtx equiv)
3578 rtx prev, next, first, last, insn;
/* Bail out to plain emission when TARGET is not a register or while
   reloading -- the no-conflict machinery only applies to pseudo targets.  */
3580 if (!REG_P (target) || reload_in_progress)
3581 return emit_insn (insns);
/* Likewise bail if any insn is not a plain insn or already opens a
   libcall region (nesting is not allowed).  */
3583 for (insn = insns; insn; insn = NEXT_INSN (insn))
3584 if (!NONJUMP_INSN_P (insn)
3585 || find_reg_note (insn, REG_LIBCALL, NULL_RTX))
3586 return emit_insn (insns);
3588 /* First emit all insns that do not store into words of the output and remove
3589 these from the list. */
3590 for (insn = insns; insn; insn = next)
3593 struct no_conflict_data data;
3595 next = NEXT_INSN (insn);
3597 /* Some ports (cris) create a libcall regions at their own. We must
3598 avoid any potential nesting of LIBCALLs. */
3599 if ((note = find_reg_note (insn, REG_LIBCALL, NULL)) != NULL)
3600 remove_note (insn, note);
3601 if ((note = find_reg_note (insn, REG_RETVAL, NULL)) != NULL)
3602 remove_note (insn, note);
3603 if ((note = find_reg_note (insn, REG_LIBCALL_ID, NULL)) != NULL)
3604 remove_note (insn, note);
3606 data.target = target;
/* no_conflict_move_test (defined above) sets data.must_stay when this
   insn stores into TARGET or depends on earlier insns in the list.  */
3610 note_stores (PATTERN (insn), no_conflict_move_test, &data);
3611 if (! data.must_stay)
/* Unlink the movable insn from the list and emit it ahead of the block.  */
3613 if (PREV_INSN (insn))
3614 NEXT_INSN (PREV_INSN (insn)) = next;
3619 PREV_INSN (next) = PREV_INSN (insn);
3625 prev = get_last_insn ();
3627 /* Now write the CLOBBER of the output, followed by the setting of each
3628 of the words, followed by the final copy. */
3629 if (target != op0 && target != op1)
3630 emit_insn (gen_rtx_CLOBBER (VOIDmode, target));
3632 for (insn = insns; insn; insn = next)
3634 next = NEXT_INSN (insn);
/* Each word-setting insn is marked as not conflicting with the inputs.  */
3637 if (op1 && REG_P (op1))
3638 REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_NO_CONFLICT, op1,
3641 if (op0 && REG_P (op0))
3642 REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_NO_CONFLICT, op0,
/* The trailing TARGET := TARGET copy carries the REG_EQUAL note, but only
   when a move pattern exists for this mode.  */
3646 if (optab_handler (mov_optab, GET_MODE (target))->insn_code
3647 != CODE_FOR_nothing)
3649 last = emit_move_insn (target, target);
3651 set_unique_reg_note (last, REG_EQUAL, equiv);
3655 last = get_last_insn ();
3657 /* Remove any existing REG_EQUAL note from "last", or else it will
3658 be mistaken for a note referring to the full contents of the
3659 alleged libcall value when found together with the REG_RETVAL
3660 note added below. An existing note can come from an insn
3661 expansion at "last". */
3662 remove_note (last, find_reg_note (last, REG_EQUAL, NULL_RTX));
/* FIRST is the insn after PREV, or the stream head if nothing preceded.  */
3666 first = get_insns ();
3668 first = NEXT_INSN (prev);
3670 maybe_encapsulate_block (first, last, equiv);
3675 /* Emit code to make a call to a constant function or a library call.
3677 INSNS is a list containing all insns emitted in the call.
3678 These insns leave the result in RESULT. Our block is to copy RESULT
3679 to TARGET, which is logically equivalent to EQUIV.
3681 We first emit any insns that set a pseudo on the assumption that these are
3682 loading constants into registers; doing so allows them to be safely cse'ed
3683 between blocks. Then we emit all the other insns in the block, followed by
3684 an insn to move RESULT to TARGET. This last insn will have a REQ_EQUAL
3685 note with an operand of EQUIV.
3687 Moving assignments to pseudos outside of the block is done to improve
3688 the generated code, but is not required to generate correct code,
3689 hence being unable to move an assignment is not grounds for not making
3690 a libcall block. There are two reasons why it is safe to leave these
3691 insns inside the block: First, we know that these pseudos cannot be
3692 used in generated RTL outside the block since they are created for
3693 temporary purposes within the block. Second, CSE will not record the
3694 values of anything set inside a libcall block, so we know they must
3695 be dead at the end of the block.
3697 Except for the first group of insns (the ones setting pseudos), the
3698 block is delimited by REG_RETVAL and REG_LIBCALL notes. */
3700 emit_libcall_block (rtx insns, rtx target, rtx result, rtx equiv)
3702 rtx final_dest = target;
3703 rtx prev, next, first, last, insn;
3705 /* If this is a reg with REG_USERVAR_P set, then it could possibly turn
3706 into a MEM later. Protect the libcall block from this change. */
3707 if (! REG_P (target) || REG_USERVAR_P (target))
3708 target = gen_reg_rtx (GET_MODE (target));
3710 /* If we're using non-call exceptions, a libcall corresponding to an
3711 operation that may trap may also trap. */
3712 if (flag_non_call_exceptions && may_trap_p (equiv))
/* Strip non-positive REG_EH_REGION notes (i.e. "cannot throw" markers)
   from any calls, since the libcall may in fact trap.  */
3714 for (insn = insns; insn; insn = NEXT_INSN (insn))
3717 rtx note = find_reg_note (insn, REG_EH_REGION, NULL_RTX);
3719 if (note != 0 && INTVAL (XEXP (note, 0)) <= 0)
3720 remove_note (insn, note);
3724 /* look for any CALL_INSNs in this sequence, and attach a REG_EH_REGION
3725 reg note to indicate that this call cannot throw or execute a nonlocal
3726 goto (unless there is already a REG_EH_REGION note, in which case
3728 for (insn = insns; insn; insn = NEXT_INSN (insn))
3731 rtx note = find_reg_note (insn, REG_EH_REGION, NULL_RTX);
/* -1 in a REG_EH_REGION note means "this call cannot throw".  */
3734 XEXP (note, 0) = constm1_rtx;
3736 REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_EH_REGION, constm1_rtx,
3740 /* First emit all insns that set pseudos. Remove them from the list as
3741 we go. Avoid insns that set pseudos which were referenced in previous
3742 insns. These can be generated by move_by_pieces, for example,
3743 to update an address. Similarly, avoid insns that reference things
3744 set in previous insns. */
3746 for (insn = insns; insn; insn = next)
3748 rtx set = single_set (insn);
3751 /* Some ports (cris) create a libcall regions at their own. We must
3752 avoid any potential nesting of LIBCALLs. */
3753 if ((note = find_reg_note (insn, REG_LIBCALL, NULL)) != NULL)
3754 remove_note (insn, note);
3755 if ((note = find_reg_note (insn, REG_RETVAL, NULL)) != NULL)
3756 remove_note (insn, note);
3757 if ((note = find_reg_note (insn, REG_LIBCALL_ID, NULL)) != NULL)
3758 remove_note (insn, note);
3760 next = NEXT_INSN (insn);
/* Only single sets of pseudo registers are candidates for hoisting.  */
3762 if (set != 0 && REG_P (SET_DEST (set))
3763 && REGNO (SET_DEST (set)) >= FIRST_PSEUDO_REGISTER)
3765 struct no_conflict_data data;
/* const0_rtx as the "target" means nothing in the list can clash
   with it; must_stay then reflects only insn interdependencies.  */
3767 data.target = const0_rtx;
3771 note_stores (PATTERN (insn), no_conflict_move_test, &data);
3772 if (! data.must_stay)
3774 if (PREV_INSN (insn))
3775 NEXT_INSN (PREV_INSN (insn)) = next;
3780 PREV_INSN (next) = PREV_INSN (insn);
3786 /* Some ports use a loop to copy large arguments onto the stack.
3787 Don't move anything outside such a loop. */
3792 prev = get_last_insn ();
3794 /* Write the remaining insns followed by the final copy. */
3796 for (insn = insns; insn; insn = next)
3798 next = NEXT_INSN (insn);
3803 last = emit_move_insn (target, result);
3804 if (optab_handler (mov_optab, GET_MODE (target))->insn_code
3805 != CODE_FOR_nothing)
3806 set_unique_reg_note (last, REG_EQUAL, copy_rtx (equiv));
3809 /* Remove any existing REG_EQUAL note from "last", or else it will
3810 be mistaken for a note referring to the full contents of the
3811 libcall value when found together with the REG_RETVAL note added
3812 below. An existing note can come from an insn expansion at
3814 remove_note (last, find_reg_note (last, REG_EQUAL, NULL_RTX));
/* If we substituted a scratch pseudo for TARGET above, copy it back
   into the caller's requested destination.  */
3817 if (final_dest != target)
3818 emit_move_insn (final_dest, target);
3821 first = get_insns ();
3823 first = NEXT_INSN (prev);
3825 maybe_encapsulate_block (first, last, equiv);
3828 /* Nonzero if we can perform a comparison of mode MODE straightforwardly.
3829 PURPOSE describes how this comparison will be used. CODE is the rtx
3830 comparison code we will be using.
3832 ??? Actually, CODE is slightly weaker than that. A target is still
3833 required to implement all of the normal bcc operations, but not
3834 required to implement all (or any) of the unordered bcc operations. */
3837 can_compare_p (enum rtx_code code, enum machine_mode mode,
3838 enum can_compare_purpose purpose)
/* For a plain cmp pattern, the answer further depends on PURPOSE:
   branching needs a bcc generator, store-flag needs a setcc insn.  */
3842 if (optab_handler (cmp_optab, mode)->insn_code != CODE_FOR_nothing)
3844 if (purpose == ccp_jump)
3845 return bcc_gen_fctn[(int) code] != NULL;
3846 else if (purpose == ccp_store_flag)
3847 return setcc_gen_code[(int) code] != CODE_FOR_nothing;
3849 /* There's only one cmov entry point, and it's allowed to fail. */
/* Otherwise look for a combined pattern matching PURPOSE directly
   (cbranch / cmov / cstore), widening MODE until one is found.  */
3852 if (purpose == ccp_jump
3853 && optab_handler (cbranch_optab, mode)->insn_code != CODE_FOR_nothing)
3855 if (purpose == ccp_cmov
3856 && optab_handler (cmov_optab, mode)->insn_code != CODE_FOR_nothing)
3858 if (purpose == ccp_store_flag
3859 && optab_handler (cstore_optab, mode)->insn_code != CODE_FOR_nothing)
3861 mode = GET_MODE_WIDER_MODE (mode);
3863 while (mode != VOIDmode);
3868 /* This function is called when we are going to emit a compare instruction that
3869 compares the values found in *PX and *PY, using the rtl operator COMPARISON.
3871 *PMODE is the mode of the inputs (in case they are const_int).
3872 *PUNSIGNEDP nonzero says that the operands are unsigned;
3873 this matters if they need to be widened.
3875 If they have mode BLKmode, then SIZE specifies the size of both operands.
3877 This function performs all the setup necessary so that the caller only has
3878 to emit a single comparison insn. This setup can involve doing a BLKmode
3879 comparison or emitting a library call to perform the comparison if no insn
3880 is available to handle it.
3881 The values which are passed in through pointers can be modified; the caller
3882 should perform the comparison on the modified values. Constant
3883 comparisons must have already been folded. */
3886 prepare_cmp_insn (rtx *px, rtx *py, enum rtx_code *pcomparison, rtx size,
3887 enum machine_mode *pmode, int *punsignedp,
3888 enum can_compare_purpose purpose)
3890 enum machine_mode mode = *pmode;
3891 rtx x = *px, y = *py;
3892 int unsignedp = *punsignedp;
3894 /* If we are inside an appropriately-short loop and we are optimizing,
3895 force expensive constants into a register. */
3896 if (CONSTANT_P (x) && optimize
3897 && rtx_cost (x, COMPARE) > COSTS_N_INSNS (1))
3898 x = force_reg (mode, x);
3900 if (CONSTANT_P (y) && optimize
3901 && rtx_cost (y, COMPARE) > COSTS_N_INSNS (1))
3902 y = force_reg (mode, y);
3905 /* Make sure if we have a canonical comparison. The RTL
3906 documentation states that canonical comparisons are required only
3907 for targets which have cc0. */
3908 gcc_assert (!CONSTANT_P (x) || CONSTANT_P (y));
3911 /* Don't let both operands fail to indicate the mode. */
3912 if (GET_MODE (x) == VOIDmode && GET_MODE (y) == VOIDmode)
3913 x = force_reg (mode, x);
3915 /* Handle all BLKmode compares. */
3917 if (mode == BLKmode)
3919 enum machine_mode cmp_mode, result_mode;
3920 enum insn_code cmp_code;
/* opalign = minimum alignment of the two MEM operands, in bytes.  */
3925 = GEN_INT (MIN (MEM_ALIGN (x), MEM_ALIGN (y)) / BITS_PER_UNIT);
3929 /* Try to use a memory block compare insn - either cmpstr
3930 or cmpmem will do. */
3931 for (cmp_mode = GET_CLASS_NARROWEST_MODE (MODE_INT);
3932 cmp_mode != VOIDmode;
3933 cmp_mode = GET_MODE_WIDER_MODE (cmp_mode))
/* Preference order: cmpmem, then cmpstr, then cmpstrn.  */
3935 cmp_code = cmpmem_optab[cmp_mode];
3936 if (cmp_code == CODE_FOR_nothing)
3937 cmp_code = cmpstr_optab[cmp_mode];
3938 if (cmp_code == CODE_FOR_nothing)
3939 cmp_code = cmpstrn_optab[cmp_mode];
3940 if (cmp_code == CODE_FOR_nothing)
3943 /* Must make sure the size fits the insn's mode. */
3944 if ((GET_CODE (size) == CONST_INT
3945 && INTVAL (size) >= (1 << GET_MODE_BITSIZE (cmp_mode)))
3946 || (GET_MODE_BITSIZE (GET_MODE (size))
3947 > GET_MODE_BITSIZE (cmp_mode)))
3950 result_mode = insn_data[cmp_code].operand[0].mode;
3951 result = gen_reg_rtx (result_mode);
3952 size = convert_to_mode (cmp_mode, size, 1);
3953 emit_insn (GEN_FCN (cmp_code) (result, x, y, size, opalign));
3957 *pmode = result_mode;
3961 /* Otherwise call a library function, memcmp. */
3962 libfunc = memcmp_libfunc;
3963 length_type = sizetype;
3964 result_mode = TYPE_MODE (integer_type_node);
3965 cmp_mode = TYPE_MODE (length_type);
3966 size = convert_to_mode (TYPE_MODE (length_type), size,
3967 TYPE_UNSIGNED (length_type));
3969 result = emit_library_call_value (libfunc, 0, LCT_PURE_MAKE_BLOCK,
3976 *pmode = result_mode;
3980 /* Don't allow operands to the compare to trap, as that can put the
3981 compare and branch in different basic blocks. */
3982 if (flag_non_call_exceptions)
3985 x = force_reg (mode, x);
3987 y = force_reg (mode, y);
/* If the target can compare directly in MODE (for PURPOSE), nothing
   further needs to be done here.  */
3992 if (can_compare_p (*pcomparison, mode, purpose))
3995 /* Handle a lib call just for the mode we are using. */
3997 if (optab_handler (cmp_optab, mode)->libfunc && !SCALAR_FLOAT_MODE_P (mode))
3999 rtx libfunc = optab_handler (cmp_optab, mode)->libfunc;
4002 /* If we want unsigned, and this mode has a distinct unsigned
4003 comparison routine, use that. */
4004 if (unsignedp && optab_handler (ucmp_optab, mode)->libfunc)
4005 libfunc = optab_handler (ucmp_optab, mode)->libfunc;
4007 result = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST_MAKE_BLOCK,
4008 targetm.libgcc_cmp_return_mode (),
4009 2, x, mode, y, mode);
4011 /* There are two kinds of comparison routines. Biased routines
4012 return 0/1/2, and unbiased routines return -1/0/1. Other parts
4013 of gcc expect that the comparison operation is equivalent
4014 to the modified comparison. For signed comparisons compare the
4015 result against 1 in the biased case, and zero in the unbiased
4016 case. For unsigned comparisons always compare against 1 after
4017 biasing the unbiased result by adding 1. This gives us a way to
4023 if (!TARGET_LIB_INT_CMP_BIASED)
4026 *px = plus_constant (result, 1);
/* No integer libfunc either: only scalar-float modes remain, which are
   handled by the float compare libcall helper below.  */
4033 gcc_assert (SCALAR_FLOAT_MODE_P (mode));
4034 prepare_float_lib_cmp (px, py, pcomparison, pmode, punsignedp);
4037 /* Before emitting an insn with code ICODE, make sure that X, which is going
4038 to be used for operand OPNUM of the insn, is converted from mode MODE to
4039 WIDER_MODE (UNSIGNEDP determines whether it is an unsigned conversion), and
4040 that it is accepted by the operand predicate. Return the new value. */
4043 prepare_operand (int icode, rtx x, int opnum, enum machine_mode mode,
4044 enum machine_mode wider_mode, int unsignedp)
4046 if (mode != wider_mode)
4047 x = convert_modes (wider_mode, mode, x, unsignedp);
/* If the pattern's predicate rejects X, fall back to a fresh register
   copy in the operand's required mode.  */
4049 if (!insn_data[icode].operand[opnum].predicate
4050 (x, insn_data[icode].operand[opnum].mode))
/* NOTE(review): the reload_completed test's consequent is not visible
   here -- presumably no new pseudo may be created after reload; confirm
   against the full source.  */
4052 if (reload_completed)
4054 x = copy_to_mode_reg (insn_data[icode].operand[opnum].mode, x);
4060 /* Subroutine of emit_cmp_and_jump_insns; this function is called when we know
4061 we can do the comparison.
4062 The arguments are the same as for emit_cmp_and_jump_insns; but LABEL may
4063 be NULL_RTX which indicates that only a comparison is to be generated. */
4066 emit_cmp_and_jump_insn_1 (rtx x, rtx y, enum machine_mode mode,
4067 enum rtx_code comparison, int unsignedp, rtx label)
4069 rtx test = gen_rtx_fmt_ee (comparison, mode, x, y);
4070 enum mode_class class = GET_MODE_CLASS (mode);
4071 enum machine_mode wider_mode = mode;
4073 /* Try combined insns first. */
/* Widening loop: try MODE, then each wider mode of the same class,
   until some pattern accepts the comparison.  */
4076 enum insn_code icode;
4077 PUT_MODE (test, wider_mode);
/* 1) A combined compare-and-branch (cbranch) pattern.  */
4081 icode = optab_handler (cbranch_optab, wider_mode)->insn_code;
4083 if (icode != CODE_FOR_nothing
4084 && insn_data[icode].operand[0].predicate (test, wider_mode))
4086 x = prepare_operand (icode, x, 1, mode, wider_mode, unsignedp);
4087 y = prepare_operand (icode, y, 2, mode, wider_mode, unsignedp);
4088 emit_jump_insn (GEN_FCN (icode) (test, x, y, label));
4093 /* Handle some compares against zero. */
/* 2) A test-against-zero (tst) pattern plus a separate bcc branch.  */
4094 icode = (int) optab_handler (tst_optab, wider_mode)->insn_code;
4095 if (y == CONST0_RTX (mode) && icode != CODE_FOR_nothing)
4097 x = prepare_operand (icode, x, 0, mode, wider_mode, unsignedp);
4098 emit_insn (GEN_FCN (icode) (x));
4100 emit_jump_insn (bcc_gen_fctn[(int) comparison] (label));
4104 /* Handle compares for which there is a directly suitable insn. */
/* 3) A plain two-operand cmp pattern plus a separate bcc branch.  */
4106 icode = (int) optab_handler (cmp_optab, wider_mode)->insn_code;
4107 if (icode != CODE_FOR_nothing)
4109 x = prepare_operand (icode, x, 0, mode, wider_mode, unsignedp);
4110 y = prepare_operand (icode, y, 1, mode, wider_mode, unsignedp);
4111 emit_insn (GEN_FCN (icode) (x, y));
4113 emit_jump_insn (bcc_gen_fctn[(int) comparison] (label));
4117 if (!CLASS_HAS_WIDER_MODES_P (class))
4120 wider_mode = GET_MODE_WIDER_MODE (wider_mode);
4122 while (wider_mode != VOIDmode);
4127 /* Generate code to compare X with Y so that the condition codes are
4128 set and to jump to LABEL if the condition is true. If X is a
4129 constant and Y is not a constant, then the comparison is swapped to
4130 ensure that the comparison RTL has the canonical form.
4132 UNSIGNEDP nonzero says that X and Y are unsigned; this matters if they
4133 need to be widened by emit_cmp_insn. UNSIGNEDP is also used to select
4134 the proper branch condition code.
4136 If X and Y have mode BLKmode, then SIZE specifies the size of both X and Y.
4138 MODE is the mode of the inputs (in case they are const_int).
4140 COMPARISON is the rtl operator to compare with (EQ, NE, GT, etc.). It will
4141 be passed unchanged to emit_cmp_insn, then potentially converted into an
4142 unsigned variant based on UNSIGNEDP to select a proper jump instruction. */
4145 emit_cmp_and_jump_insns (rtx x, rtx y, enum rtx_code comparison, rtx size,
4146 enum machine_mode mode, int unsignedp, rtx label)
4148 rtx op0 = x, op1 = y;
4150 /* Swap operands and condition to ensure canonical RTL. */
4151 if (swap_commutative_operands_p (x, y))
4153 /* If we're not emitting a branch, callers are required to pass
4154 operands in an order conforming to canonical RTL. We relax this
4155 for commutative comparisons so callers using EQ don't need to do
4156 swapping by hand. */
/* A comparison equal to its own swap (EQ, NE) is commutative.  */
4157 gcc_assert (label || (comparison == swap_condition (comparison)));
4160 comparison = swap_condition (comparison);
4164 /* If OP0 is still a constant, then both X and Y must be constants.
4165 Force X into a register to create canonical RTL. */
4166 if (CONSTANT_P (op0))
4167 op0 = force_reg (mode, op0);
/* Map the signed comparison code to its unsigned counterpart when the
   operands are unsigned (guard condition elided here).  */
4171 comparison = unsigned_condition (comparison);
4173 prepare_cmp_insn (&op0, &op1, &comparison, size, &mode, &unsignedp,
4175 emit_cmp_and_jump_insn_1 (op0, op1, mode, comparison, unsignedp, label);
4178 /* Like emit_cmp_and_jump_insns, but generate only the comparison. */
4181 emit_cmp_insn (rtx x, rtx y, enum rtx_code comparison, rtx size,
4182 enum machine_mode mode, int unsignedp)
/* A null LABEL tells the worker to emit the compare without a branch.  */
4184 emit_cmp_and_jump_insns (x, y, comparison, size, mode, unsignedp, 0);
4187 /* Emit a library call comparison between floating point X and Y.
4188 COMPARISON is the rtl operator to compare with (EQ, NE, GT, etc.). */
4191 prepare_float_lib_cmp (rtx *px, rtx *py, enum rtx_code *pcomparison,
4192 enum machine_mode *pmode, int *punsignedp)
4194 enum rtx_code comparison = *pcomparison;
4195 enum rtx_code swapped = swap_condition (comparison);
4196 enum rtx_code reversed = reverse_condition_maybe_unordered (comparison);
4199 enum machine_mode orig_mode = GET_MODE (x);
4200 enum machine_mode mode;
4201 rtx value, target, insns, equiv;
4203 bool reversed_p = false;
/* Search widening modes for a libfunc implementing COMPARISON directly,
   its operand-swapped form, or its (maybe-unordered) reversed form.  */
4205 for (mode = orig_mode;
4207 mode = GET_MODE_WIDER_MODE (mode))
4209 if ((libfunc = optab_handler (code_to_optab[comparison], mode)->libfunc))
4212 if ((libfunc = optab_handler (code_to_optab[swapped], mode)->libfunc))
4215 tmp = x; x = y; y = tmp;
4216 comparison = swapped;
4220 if ((libfunc = optab_handler (code_to_optab[reversed], mode)->libfunc)
4221 && FLOAT_LIB_COMPARE_RETURNS_BOOL (mode, reversed))
4223 comparison = reversed;
4229 gcc_assert (mode != VOIDmode);
4231 if (mode != orig_mode)
4233 x = convert_to_mode (mode, x, 0);
4234 y = convert_to_mode (mode, y, 0);
4237 /* Attach a REG_EQUAL note describing the semantics of the libcall to
4238 the RTL. The allows the RTL optimizers to delete the libcall if the
4239 condition can be determined at compile-time. */
4240 if (comparison == UNORDERED)
/* UNORDERED (x, y) is equivalent to (x != x) || (y != y), i.e. either
   operand is a NaN.  */
4242 rtx temp = simplify_gen_relational (NE, word_mode, mode, x, x);
4243 equiv = simplify_gen_relational (NE, word_mode, mode, y, y);
4244 equiv = simplify_gen_ternary (IF_THEN_ELSE, word_mode, word_mode,
4245 temp, const_true_rtx, equiv);
4249 equiv = simplify_gen_relational (comparison, word_mode, mode, x, y);
4250 if (! FLOAT_LIB_COMPARE_RETURNS_BOOL (mode, comparison))
/* Non-boolean libfuncs: encode the comparison-specific truth values
   the library routine returns (selected by an elided switch on
   COMPARISON).  */
4252 rtx true_rtx, false_rtx;
4257 true_rtx = const0_rtx;
4258 false_rtx = const_true_rtx;
4262 true_rtx = const_true_rtx;
4263 false_rtx = const0_rtx;
4267 true_rtx = const1_rtx;
4268 false_rtx = const0_rtx;
4272 true_rtx = const0_rtx;
4273 false_rtx = constm1_rtx;
4277 true_rtx = constm1_rtx;
4278 false_rtx = const0_rtx;
4282 true_rtx = const0_rtx;
4283 false_rtx = const1_rtx;
4289 equiv = simplify_gen_ternary (IF_THEN_ELSE, word_mode, word_mode,
4290 equiv, true_rtx, false_rtx);
4295 value = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
4296 word_mode, 2, x, mode, y, mode);
4297 insns = get_insns ();
4300 target = gen_reg_rtx (word_mode);
4301 emit_libcall_block (insns, target, value, equiv);
/* The caller will compare TARGET against zero; boolean-returning (or
   reversed) libcalls reduce to NE/EQ on that value.  */
4303 if (comparison == UNORDERED
4304 || FLOAT_LIB_COMPARE_RETURNS_BOOL (mode, comparison))
4305 comparison = reversed_p ? EQ : NE;
4310 *pcomparison = comparison;
4314 /* Generate code to indirectly jump to a location given in the rtx LOC. */
4317 emit_indirect_jump (rtx loc)
/* Copy LOC into a Pmode register if the indirect_jump pattern's operand
   predicate rejects it as-is.  */
4319 if (!insn_data[(int) CODE_FOR_indirect_jump].operand[0].predicate
4321 loc = copy_to_mode_reg (Pmode, loc);
4323 emit_jump_insn (gen_indirect_jump (loc));
4327 #ifdef HAVE_conditional_move
4329 /* Emit a conditional move instruction if the machine supports one for that
4330 condition and machine mode.
4332 OP0 and OP1 are the operands that should be compared using CODE. CMODE is
4333 the mode to use should they be constants. If it is VOIDmode, they cannot
4336 OP2 should be stored in TARGET if the comparison is true, otherwise OP3
4337 should be stored there. MODE is the mode to use should they be constants.
4338 If it is VOIDmode, they cannot both be constants.
4340 The result is either TARGET (perhaps modified) or NULL_RTX if the operation
4341 is not supported. */
4344 emit_conditional_move (rtx target, enum rtx_code code, rtx op0, rtx op1,
4345 enum machine_mode cmode, rtx op2, rtx op3,
4346 enum machine_mode mode, int unsignedp)
4348 rtx tem, subtarget, comparison, insn;
4349 enum insn_code icode;
4350 enum rtx_code reversed;
4352 /* If one operand is constant, make it the second one. Only do this
4353 if the other operand is not constant as well. */
4355 if (swap_commutative_operands_p (op0, op1))
4360 code = swap_condition (code);
4363 /* get_condition will prefer to generate LT and GT even if the old
4364 comparison was against zero, so undo that canonicalization here since
4365 comparisons against zero are cheaper. */
4366 if (code == LT && op1 == const1_rtx)
4367 code = LE, op1 = const0_rtx;
4368 else if (code == GT && op1 == constm1_rtx)
4369 code = GE, op1 = const0_rtx;
4371 if (cmode == VOIDmode)
4372 cmode = GET_MODE (op0);
/* If swapping OP2/OP3 is profitable and CODE is reversible for these
   operands, swap them and use the reversed comparison.  */
4374 if (swap_commutative_operands_p (op2, op3)
4375 && ((reversed = reversed_comparison_code_parts (code, op0, op1, NULL))
4384 if (mode == VOIDmode)
4385 mode = GET_MODE (op2);
4387 icode = movcc_gen_code[mode];
/* No movcc pattern for this mode: conditional move unsupported.  */
4389 if (icode == CODE_FOR_nothing)
4393 target = gen_reg_rtx (mode);
4397 /* If the insn doesn't accept these operands, put them in pseudos. */
4399 if (!insn_data[icode].operand[0].predicate
4400 (subtarget, insn_data[icode].operand[0].mode))
4401 subtarget = gen_reg_rtx (insn_data[icode].operand[0].mode);
4403 if (!insn_data[icode].operand[2].predicate
4404 (op2, insn_data[icode].operand[2].mode))
4405 op2 = copy_to_mode_reg (insn_data[icode].operand[2].mode, op2);
4407 if (!insn_data[icode].operand[3].predicate
4408 (op3, insn_data[icode].operand[3].mode))
4409 op3 = copy_to_mode_reg (insn_data[icode].operand[3].mode, op3);
4411 /* Everything should now be in the suitable form, so emit the compare insn
4412 and then the conditional move. */
4415 = compare_from_rtx (op0, op1, code, unsignedp, cmode, NULL_RTX);
4417 /* ??? Watch for const0_rtx (nop) and const_true_rtx (unconditional)? */
4418 /* We can get const0_rtx or const_true_rtx in some circumstances. Just
4419 return NULL and let the caller figure out how best to deal with this
4421 if (GET_CODE (comparison) != code)
4424 insn = GEN_FCN (icode) (subtarget, comparison, op2, op3);
4426 /* If that failed, then give up. */
/* If the cmov wrote into a scratch SUBTARGET, move the value into the
   caller's TARGET.  */
4432 if (subtarget != target)
4433 convert_move (target, subtarget, 0);
4438 /* Return nonzero if a conditional move of mode MODE is supported.
4440 This function is for combine so it can tell whether an insn that looks
4441 like a conditional move is actually supported by the hardware. If we
4442 guess wrong we lose a bit on optimization, but that's it. */
4443 /* ??? sparc64 supports conditionally moving integers values based on fp
4444 comparisons, and vice versa. How do we handle them? */
4447 can_conditionally_move_p (enum machine_mode mode)
/* Supported exactly when a movcc pattern exists for MODE.  */
4449 if (movcc_gen_code[mode] != CODE_FOR_nothing)
4455 #endif /* HAVE_conditional_move */
4457 /* Emit a conditional addition instruction if the machine supports one for that
4458 condition and machine mode.
4460 OP0 and OP1 are the operands that should be compared using CODE. CMODE is
4461 the mode to use should they be constants. If it is VOIDmode, they cannot
4464 OP2 should be stored in TARGET if the comparison is true, otherwise OP2+OP3
4465 should be stored there. MODE is the mode to use should they be constants.
4466 If it is VOIDmode, they cannot both be constants.
4468 The result is either TARGET (perhaps modified) or NULL_RTX if the operation
4469 is not supported. */
4472 emit_conditional_add (rtx target, enum rtx_code code, rtx op0, rtx op1,
4473 enum machine_mode cmode, rtx op2, rtx op3,
4474 enum machine_mode mode, int unsignedp)
4476 rtx tem, subtarget, comparison, insn;
4477 enum insn_code icode;
4478 enum rtx_code reversed;
4480 /* If one operand is constant, make it the second one. Only do this
4481 if the other operand is not constant as well. */
4483 if (swap_commutative_operands_p (op0, op1))
4488 code = swap_condition (code);
4491 /* get_condition will prefer to generate LT and GT even if the old
4492 comparison was against zero, so undo that canonicalization here since
4493 comparisons against zero are cheaper. */
4494 if (code == LT && op1 == const1_rtx)
4495 code = LE, op1 = const0_rtx;
4496 else if (code == GT && op1 == constm1_rtx)
4497 code = GE, op1 = const0_rtx;
4499 if (cmode == VOIDmode)
4500 cmode = GET_MODE (op0);
4502 if (swap_commutative_operands_p (op2, op3)
4503 && ((reversed = reversed_comparison_code_parts (code, op0, op1, NULL))
4512 if (mode == VOIDmode)
4513 mode = GET_MODE (op2);
4515 icode = optab_handler (addcc_optab, mode)->insn_code;
4517 if (icode == CODE_FOR_nothing)
4521 target = gen_reg_rtx (mode);
4523 /* If the insn doesn't accept these operands, put them in pseudos. */
/* NOTE(review): the operand-0 predicate is applied to TARGET here,
   whereas the parallel code in emit_conditional_move applies it to
   SUBTARGET -- confirm against the full source that this asymmetry is
   intentional.  */
4525 if (!insn_data[icode].operand[0].predicate
4526 (target, insn_data[icode].operand[0].mode))
4527 subtarget = gen_reg_rtx (insn_data[icode].operand[0].mode);
4531 if (!insn_data[icode].operand[2].predicate
4532 (op2, insn_data[icode].operand[2].mode))
4533 op2 = copy_to_mode_reg (insn_data[icode].operand[2].mode, op2);
4535 if (!insn_data[icode].operand[3].predicate
4536 (op3, insn_data[icode].operand[3].mode))
4537 op3 = copy_to_mode_reg (insn_data[icode].operand[3].mode, op3);
4539 /* Everything should now be in the suitable form, so emit the compare insn
4540 and then the conditional move. */
4543 = compare_from_rtx (op0, op1, code, unsignedp, cmode, NULL_RTX);
4545 /* ??? Watch for const0_rtx (nop) and const_true_rtx (unconditional)? */
4546 /* We can get const0_rtx or const_true_rtx in some circumstances. Just
4547 return NULL and let the caller figure out how best to deal with this
4549 if (GET_CODE (comparison) != code)
4552 insn = GEN_FCN (icode) (subtarget, comparison, op2, op3);
4554 /* If that failed, then give up. */
4560 if (subtarget != target)
4561 convert_move (target, subtarget, 0);
4566 /* These functions attempt to generate an insn body, rather than
4567 emitting the insn, but if the gen function already emits them, we
4568 make no attempt to turn them back into naked patterns. */
4570 /* Generate and return an insn body to add Y to X. */
4573 gen_add2_insn (rtx x, rtx y)
4575 int icode = (int) optab_handler (add_optab, GET_MODE (x))->insn_code;
/* Unlike gen_add3_insn, operand validity is asserted rather than
   checked -- callers must have verified via have_add2_insn first.  */
4577 gcc_assert (insn_data[icode].operand[0].predicate
4578 (x, insn_data[icode].operand[0].mode));
4579 gcc_assert (insn_data[icode].operand[1].predicate
4580 (x, insn_data[icode].operand[1].mode));
4581 gcc_assert (insn_data[icode].operand[2].predicate
4582 (y, insn_data[icode].operand[2].mode));
/* Two-operand add is emitted as the three-operand form X = X + Y.  */
4584 return GEN_FCN (icode) (x, x, y);
4587 /* Generate and return an insn body to add r1 and c,
4588 storing the result in r0. */
4590 gen_add3_insn (rtx r0, rtx r1, rtx c)
4592 int icode = (int) optab_handler (add_optab, GET_MODE (r0))->insn_code;
/* Fail (rather than assert) when there is no add pattern or any operand
   is rejected by its predicate; the return in that branch is elided here.  */
4594 if (icode == CODE_FOR_nothing
4595 || !(insn_data[icode].operand[0].predicate
4596 (r0, insn_data[icode].operand[0].mode))
4597 || !(insn_data[icode].operand[1].predicate
4598 (r1, insn_data[icode].operand[1].mode))
4599 || !(insn_data[icode].operand[2].predicate
4600 (c, insn_data[icode].operand[2].mode)))
4603 return GEN_FCN (icode) (r0, r1, c);
/* Report whether the target has an add pattern accepting X, X, Y --
   the precondition for calling gen_add2_insn.  */
4607 have_add2_insn (rtx x, rtx y)
4611 gcc_assert (GET_MODE (x) != VOIDmode);
4613 icode = (int) optab_handler (add_optab, GET_MODE (x))->insn_code;
4615 if (icode == CODE_FOR_nothing)
/* All three operand predicates must accept their operands (X doubles
   as both output and first input).  */
4618 if (!(insn_data[icode].operand[0].predicate
4619 (x, insn_data[icode].operand[0].mode))
4620 || !(insn_data[icode].operand[1].predicate
4621 (x, insn_data[icode].operand[1].mode))
4622 || !(insn_data[icode].operand[2].predicate
4623 (y, insn_data[icode].operand[2].mode)))
4629 /* Generate and return an insn body to subtract Y from X. */
4632 gen_sub2_insn (rtx x, rtx y)
4634 int icode = (int) optab_handler (sub_optab, GET_MODE (x))->insn_code;
/* Operand validity is asserted; callers should check have_sub2_insn.  */
4636 gcc_assert (insn_data[icode].operand[0].predicate
4637 (x, insn_data[icode].operand[0].mode));
4638 gcc_assert (insn_data[icode].operand[1].predicate
4639 (x, insn_data[icode].operand[1].mode));
4640 gcc_assert (insn_data[icode].operand[2].predicate
4641 (y, insn_data[icode].operand[2].mode));
/* Two-operand subtract emitted as X = X - Y.  */
4643 return GEN_FCN (icode) (x, x, y);
4646 /* Generate and return an insn body to subtract r1 and c,
4647 storing the result in r0. */
4649 gen_sub3_insn (rtx r0, rtx r1, rtx c)
4651 int icode = (int) optab_handler (sub_optab, GET_MODE (r0))->insn_code;
/* Fail gracefully when no pattern exists or a predicate rejects an
   operand (mirrors gen_add3_insn).  */
4653 if (icode == CODE_FOR_nothing
4654 || !(insn_data[icode].operand[0].predicate
4655 (r0, insn_data[icode].operand[0].mode))
4656 || !(insn_data[icode].operand[1].predicate
4657 (r1, insn_data[icode].operand[1].mode))
4658 || !(insn_data[icode].operand[2].predicate
4659 (c, insn_data[icode].operand[2].mode)))
4662 return GEN_FCN (icode) (r0, r1, c);
/* Report whether the target has a subtract pattern accepting X, X, Y --
   the precondition for calling gen_sub2_insn.  */
4666 have_sub2_insn (rtx x, rtx y)
4670 gcc_assert (GET_MODE (x) != VOIDmode);
4672 icode = (int) optab_handler (sub_optab, GET_MODE (x))->insn_code;
4674 if (icode == CODE_FOR_nothing)
/* All three operand predicates must accept their operands.  */
4677 if (!(insn_data[icode].operand[0].predicate
4678 (x, insn_data[icode].operand[0].mode))
4679 || !(insn_data[icode].operand[1].predicate
4680 (x, insn_data[icode].operand[1].mode))
4681 || !(insn_data[icode].operand[2].predicate
4682 (y, insn_data[icode].operand[2].mode)))
4688 /* Generate the body of an instruction to copy Y into X.
4689 It may be a list of insns, if one insn isn't enough. */
4692 gen_move_insn (rtx x, rtx y)
/* Emits into a temporary sequence (start/end elided) and returns the
   collected insns rather than emitting into the main stream.  */
4697 emit_move_insn_1 (x, y);
4703 /* Return the insn code used to extend FROM_MODE to TO_MODE.
4704 UNSIGNEDP specifies zero-extension instead of sign-extension. If
4705 no such operation exists, CODE_FOR_nothing will be returned. */
4708 can_extend_p (enum machine_mode to_mode, enum machine_mode from_mode,
/* Targets with a ptr_extend pattern handle pointer extension specially
   (the guarding condition is elided here).  */
4712 #ifdef HAVE_ptr_extend
4714 return CODE_FOR_ptr_extend;
/* Zero-extend uses zext_optab, sign-extend uses sext_optab.  */
4717 tab = unsignedp ? zext_optab : sext_optab;
4718 return convert_optab_handler (tab, to_mode, from_mode)->insn_code;
4721 /* Generate the body of an insn to extend Y (with mode MFROM)
4722 into X (with mode MTO). Do zero-extension if UNSIGNEDP is nonzero. */
/* Caller is responsible for the extension being available: no
   CODE_FOR_nothing check is made before indexing GEN_FCN.  */
4725 gen_extend_insn (rtx x, rtx y, enum machine_mode mto,
4726 enum machine_mode mfrom, int unsignedp)
4728 enum insn_code icode = can_extend_p (mto, mfrom, unsignedp);
4729 return GEN_FCN (icode) (x, y);
4732 /* can_fix_p and can_float_p say whether the target machine
4733 can directly convert a given fixed point type to
4734 a given floating point type, or vice versa.
4735 The returned value is the CODE_FOR_... value to use,
4736 or CODE_FOR_nothing if these modes cannot be directly converted.
4738 *TRUNCP_PTR is set to 1 if it is necessary to output
4739 an explicit FTRUNC insn before the fix insn; otherwise 0. */
4741 static enum insn_code
4742 can_fix_p (enum machine_mode fixmode, enum machine_mode fltmode,
4743 int unsignedp, int *truncp_ptr)
4746 enum insn_code icode;
/* First preference: a fix-and-truncate pattern, which needs no
   separate FTRUNC (the *truncp_ptr = 0 store sits on an elided line).  */
4748 tab = unsignedp ? ufixtrunc_optab : sfixtrunc_optab;
4749 icode = convert_optab_handler (tab, fixmode, fltmode)->insn_code;
4750 if (icode != CODE_FOR_nothing)
4756 /* FIXME: This requires a port to define both FIX and FTRUNC pattern
4757 for this to work. We need to rework the fix* and ftrunc* patterns
4758 and documentation. */
/* Second preference: a plain fix pattern, usable only if the target
   also has an ftrunc pattern to round toward zero first (in which
   case *truncp_ptr is presumably set to 1 on an elided line).  */
4759 tab = unsignedp ? ufix_optab : sfix_optab;
4760 icode = convert_optab_handler (tab, fixmode, fltmode)->insn_code;
4761 if (icode != CODE_FOR_nothing
4762 && optab_handler (ftrunc_optab, fltmode)->insn_code != CODE_FOR_nothing)
/* Neither strategy is available.  */
4769 return CODE_FOR_nothing;
/* Return the insn code for converting integer mode FIXMODE to floating
   mode FLTMODE (unsigned conversion if UNSIGNEDP), or CODE_FOR_nothing.
   Counterpart of can_fix_p above.  */
4772 static enum insn_code
4773 can_float_p (enum machine_mode fltmode, enum machine_mode fixmode,
4778 tab = unsignedp ? ufloat_optab : sfloat_optab;
4779 return convert_optab_handler (tab, fltmode, fixmode)->insn_code;
4782 /* Generate code to convert FROM to floating point
4783 and store in TO. FROM must be fixed point and not VOIDmode.
4784 UNSIGNEDP nonzero means regard FROM as unsigned.
4785 Normally this is done by correcting the final value
4786 if it is negative. */
4789 expand_float (rtx to, rtx from, int unsignedp)
4791 enum insn_code icode;
4793 enum machine_mode fmode, imode;
4794 bool can_do_signed = false;
4796 /* Crash now, because we won't be able to decide which mode to use. */
4797 gcc_assert (GET_MODE (from) != VOIDmode);
4799 /* Look for an insn to do the conversion. Do it in the specified
4800 modes if possible; otherwise convert either input, output or both to
4801 wider mode. If the integer mode is wider than the mode of FROM,
4802 we can do the conversion signed even if the input is unsigned. */
4804 for (fmode = GET_MODE (to); fmode != VOIDmode;
4805 fmode = GET_MODE_WIDER_MODE (fmode))
4806 for (imode = GET_MODE (from); imode != VOIDmode;
4807 imode = GET_MODE_WIDER_MODE (imode))
/* Tracks whether the pattern we settle on is the unsigned variant;
   cleared when we fall back to a signed pattern in a wider mode.  */
4809 int doing_unsigned = unsignedp;
/* A wider float mode is only acceptable if its significand can hold
   all bits of FROM, otherwise double rounding could occur.  */
4811 if (fmode != GET_MODE (to)
4812 && significand_size (fmode) < GET_MODE_BITSIZE (GET_MODE (from)))
4815 icode = can_float_p (fmode, imode, unsignedp);
4816 if (icode == CODE_FOR_nothing && unsignedp)
4818 enum insn_code scode = can_float_p (fmode, imode, 0);
4819 if (scode != CODE_FOR_nothing)
4820 can_do_signed = true;
/* A signed pattern is safe for unsigned input only when IMODE is
   wider than FROM's mode (top bit of the widened value is zero).  */
4821 if (imode != GET_MODE (from))
4822 icode = scode, doing_unsigned = 0;
4825 if (icode != CODE_FOR_nothing)
4827 if (imode != GET_MODE (from))
4828 from = convert_to_mode (imode, from, unsignedp);
4830 if (fmode != GET_MODE (to))
4831 target = gen_reg_rtx (fmode);
4833 emit_unop_insn (icode, target, from,
4834 doing_unsigned ? UNSIGNED_FLOAT : FLOAT);
4837 convert_move (to, target, 0);
4842 /* Unsigned integer, and no way to convert directly. For binary
4843 floating point modes, convert as signed, then conditionally adjust
4845 if (unsignedp && can_do_signed && !DECIMAL_FLOAT_MODE_P (GET_MODE (to)))
4847 rtx label = gen_label_rtx ();
4849 REAL_VALUE_TYPE offset;
4851 /* Look for a usable floating mode FMODE wider than the source and at
4852 least as wide as the target. Using FMODE will avoid rounding woes
4853 with unsigned values greater than the signed maximum value. */
4855 for (fmode = GET_MODE (to); fmode != VOIDmode;
4856 fmode = GET_MODE_WIDER_MODE (fmode))
4857 if (GET_MODE_BITSIZE (GET_MODE (from)) < GET_MODE_BITSIZE (fmode)
4858 && can_float_p (fmode, GET_MODE (from), 0) != CODE_FOR_nothing)
4861 if (fmode == VOIDmode)
4863 /* There is no such mode. Pretend the target is wide enough. */
4864 fmode = GET_MODE (to);
4866 /* Avoid double-rounding when TO is narrower than FROM. */
4867 if ((significand_size (fmode) + 1)
4868 < GET_MODE_BITSIZE (GET_MODE (from)))
4871 rtx neglabel = gen_label_rtx ();
4873 /* Don't use TARGET if it isn't a register, is a hard register,
4874 or is the wrong mode. */
4876 || REGNO (target) < FIRST_PSEUDO_REGISTER
4877 || GET_MODE (target) != fmode)
4878 target = gen_reg_rtx (fmode);
4880 imode = GET_MODE (from);
4881 do_pending_stack_adjust ();
4883 /* Test whether the sign bit is set. */
4884 emit_cmp_and_jump_insns (from, const0_rtx, LT, NULL_RTX, imode,
4887 /* The sign bit is not set. Convert as signed. */
/* Recursive call: the value fits in the signed range of IMODE.  */
4888 expand_float (target, from, 0);
4889 emit_jump_insn (gen_jump (label));
4892 /* The sign bit is set.
4893 Convert to a usable (positive signed) value by shifting right
4894 one bit, while remembering if a nonzero bit was shifted
4895 out; i.e., compute (from & 1) | (from >> 1). */
4897 emit_label (neglabel);
4898 temp = expand_binop (imode, and_optab, from, const1_rtx,
4899 NULL_RTX, 1, OPTAB_LIB_WIDEN);
4901 temp1 = expand_shift (RSHIFT_EXPR, imode, from, integer_one_node,
4902 temp = expand_binop (imode, ior_optab, temp, temp1, temp, 1,
4904 expand_float (target, temp, 0);
4906 /* Multiply by 2 to undo the shift above. */
4907 temp = expand_binop (fmode, add_optab, target, target,
4908 target, 0, OPTAB_LIB_WIDEN);
4910 emit_move_insn (target, temp);
4912 do_pending_stack_adjust ();
4918 /* If we are about to do some arithmetic to correct for an
4919 unsigned operand, do it in a pseudo-register. */
4921 if (GET_MODE (to) != fmode
4922 || !REG_P (to) || REGNO (to) < FIRST_PSEUDO_REGISTER)
4923 target = gen_reg_rtx (fmode);
4925 /* Convert as signed integer to floating. */
4926 expand_float (target, from, 0);
4928 /* If FROM is negative (and therefore TO is negative),
4929 correct its value by 2**bitwidth. */
4931 do_pending_stack_adjust ();
4932 emit_cmp_and_jump_insns (from, const0_rtx, GE, NULL_RTX, GET_MODE (from),
/* Add 2**bitwidth(FROM) to undo the signed interpretation of a
   value whose top bit was set.  */
4936 real_2expN (&offset, GET_MODE_BITSIZE (GET_MODE (from)));
4937 temp = expand_binop (fmode, add_optab, target,
4938 CONST_DOUBLE_FROM_REAL_VALUE (offset, fmode),
4939 target, 0, OPTAB_LIB_WIDEN);
4941 emit_move_insn (target, temp);
4943 do_pending_stack_adjust ();
4948 /* No hardware instruction available; call a library routine. */
4953 convert_optab tab = unsignedp ? ufloat_optab : sfloat_optab;
/* The library functions only cover SImode and wider sources.  */
4955 if (GET_MODE_SIZE (GET_MODE (from)) < GET_MODE_SIZE (SImode))
4956 from = convert_to_mode (SImode, from, unsignedp);
4958 libfunc = convert_optab_handler (tab, GET_MODE (to),
4959 GET_MODE (from))->libfunc;
4960 gcc_assert (libfunc);
4964 value = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
4965 GET_MODE (to), 1, from,
4967 insns = get_insns ();
/* Annotate the call sequence with an equivalent FLOAT rtx so later
   passes can understand/CSE the libcall.  */
4970 emit_libcall_block (insns, target, value,
4971 gen_rtx_FLOAT (GET_MODE (to), from));
4976 /* Copy result to requested destination
4977 if we have been computing in a temp location. */
4981 if (GET_MODE (target) == GET_MODE (to))
4982 emit_move_insn (to, target);
4984 convert_move (to, target, 0);
4988 /* Generate code to convert FROM to fixed point and store in TO. FROM
4989 must be floating point. */
4992 expand_fix (rtx to, rtx from, int unsignedp)
4994 enum insn_code icode;
4996 enum machine_mode fmode, imode;
4999 /* We first try to find a pair of modes, one real and one integer, at
5000 least as wide as FROM and TO, respectively, in which we can open-code
5001 this conversion. If the integer mode is wider than the mode of TO,
5002 we can do the conversion either signed or unsigned. */
5004 for (fmode = GET_MODE (from); fmode != VOIDmode;
5005 fmode = GET_MODE_WIDER_MODE (fmode))
5006 for (imode = GET_MODE (to); imode != VOIDmode;
5007 imode = GET_MODE_WIDER_MODE (imode))
/* Tracks whether the chosen pattern is the unsigned variant.  */
5009 int doing_unsigned = unsignedp;
5011 icode = can_fix_p (imode, fmode, unsignedp, &must_trunc);
/* An unsigned fix into a wider integer mode can be done with a
   signed pattern, since the result fits in the signed range.  */
5012 if (icode == CODE_FOR_nothing && imode != GET_MODE (to) && unsignedp)
5013 icode = can_fix_p (imode, fmode, 0, &must_trunc), doing_unsigned = 0;
5015 if (icode != CODE_FOR_nothing)
5017 if (fmode != GET_MODE (from))
5018 from = convert_to_mode (fmode, from, 0);
/* can_fix_p said an explicit round-toward-zero is required first;
   the guarding test on must_trunc sits on an elided line.  */
5022 rtx temp = gen_reg_rtx (GET_MODE (from));
5023 from = expand_unop (GET_MODE (from), ftrunc_optab, from,
5027 if (imode != GET_MODE (to))
5028 target = gen_reg_rtx (imode);
5030 emit_unop_insn (icode, target, from,
5031 doing_unsigned ? UNSIGNED_FIX : FIX);
5033 convert_move (to, target, unsignedp);
5038 /* For an unsigned conversion, there is one more way to do it.
5039 If we have a signed conversion, we generate code that compares
5040 the real value to the largest representable positive number. If it
5041 is smaller, the conversion is done normally. Otherwise, subtract
5042 one plus the highest signed number, convert, and add it back.
5044 We only need to check all real modes, since we know we didn't find
5045 anything with a wider integer mode.
5047 This code used to extend FP value into mode wider than the destination.
5048 This is not needed. Consider, for instance conversion from SFmode
5051 The hot path through the code is dealing with inputs smaller than 2^63
5052 and doing just the conversion, so there are no bits to lose.
5054 In the other path we know the value is positive in the range 2^63..2^64-1
5055 inclusive. (as for other input overflow happens and result is undefined)
5056 So we know that the most important bit set in mantissa corresponds to
5057 2^63. The subtraction of 2^63 should not generate any rounding as it
5058 simply clears out that bit. The rest is trivial. */
5060 if (unsignedp && GET_MODE_BITSIZE (GET_MODE (to)) <= HOST_BITS_PER_WIDE_INT)
5061 for (fmode = GET_MODE (from); fmode != VOIDmode;
5062 fmode = GET_MODE_WIDER_MODE (fmode))
5063 if (CODE_FOR_nothing != can_fix_p (GET_MODE (to), fmode, 0,
5067 REAL_VALUE_TYPE offset;
5068 rtx limit, lab1, lab2, insn;
/* LIMIT is 2**(N-1), the smallest value that does not fit in the
   signed range of TO's N-bit mode.  */
5070 bitsize = GET_MODE_BITSIZE (GET_MODE (to));
5071 real_2expN (&offset, bitsize - 1);
5072 limit = CONST_DOUBLE_FROM_REAL_VALUE (offset, fmode);
5073 lab1 = gen_label_rtx ();
5074 lab2 = gen_label_rtx ();
5076 if (fmode != GET_MODE (from))
5077 from = convert_to_mode (fmode, from, 0);
5079 /* See if we need to do the subtraction. */
5080 do_pending_stack_adjust ();
5081 emit_cmp_and_jump_insns (from, limit, GE, NULL_RTX, GET_MODE (from),
5084 /* If not, do the signed "fix" and branch around fixup code. */
5085 expand_fix (to, from, 0);
5086 emit_jump_insn (gen_jump (lab2));
5089 /* Otherwise, subtract 2**(N-1), convert to signed number,
5090 then add 2**(N-1). Do the addition using XOR since this
5091 will often generate better code. */
5093 target = expand_binop (GET_MODE (from), sub_optab, from, limit,
5094 NULL_RTX, 0, OPTAB_LIB_WIDEN);
5095 expand_fix (to, target, 0);
/* XORing with 2**(N-1) sets the top bit back, equivalent to adding
   2**(N-1) because that bit is known clear after the fix above.  */
5096 target = expand_binop (GET_MODE (to), xor_optab, to,
5098 ((HOST_WIDE_INT) 1 << (bitsize - 1),
5100 to, 1, OPTAB_LIB_WIDEN);
5103 emit_move_insn (to, target);
5107 if (optab_handler (mov_optab, GET_MODE (to))->insn_code
5108 != CODE_FOR_nothing)
5110 /* Make a place for a REG_NOTE and add it. */
5111 insn = emit_move_insn (to, to);
/* Record TO as equivalent to UNSIGNED_FIX of FROM so later passes
   can treat the whole branchy sequence as one conversion.  */
5112 set_unique_reg_note (insn,
5114 gen_rtx_fmt_e (UNSIGNED_FIX,
5122 /* We can't do it with an insn, so use a library call. But first ensure
5123 that the mode of TO is at least as wide as SImode, since those are the
5124 only library calls we know about. */
5126 if (GET_MODE_SIZE (GET_MODE (to)) < GET_MODE_SIZE (SImode))
5128 target = gen_reg_rtx (SImode);
/* Recurse with an SImode destination, then narrow afterwards.  */
5130 expand_fix (target, from, unsignedp);
5138 convert_optab tab = unsignedp ? ufix_optab : sfix_optab;
5139 libfunc = convert_optab_handler (tab, GET_MODE (to),
5140 GET_MODE (from))->libfunc;
5141 gcc_assert (libfunc);
5145 value = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
5146 GET_MODE (to), 1, from,
5148 insns = get_insns ();
/* Annotate the call sequence with an equivalent FIX/UNSIGNED_FIX rtx.  */
5151 emit_libcall_block (insns, target, value,
5152 gen_rtx_fmt_e (unsignedp ? UNSIGNED_FIX : FIX,
5153 GET_MODE (to), from));
5158 if (GET_MODE (to) == GET_MODE (target))
5159 emit_move_insn (to, target);
5161 convert_move (to, target, 0);
5165 /* Generate code to convert FROM to fixed point and store in TO. FROM
5166 must be floating point, TO must be signed. Use the conversion optab
5167 TAB to do the conversion. */
/* Generic driver used for lrint/lround/lfloor/lceil-style conversions:
   same widening search as expand_fix, but the pattern comes from the
   caller-supplied convert optab and the conversion is always signed.  */
5170 expand_sfix_optab (rtx to, rtx from, convert_optab tab)
5172 enum insn_code icode;
5174 enum machine_mode fmode, imode;
5176 /* We first try to find a pair of modes, one real and one integer, at
5177 least as wide as FROM and TO, respectively, in which we can open-code
5178 this conversion. If the integer mode is wider than the mode of TO,
5179 we can do the conversion either signed or unsigned. */
5181 for (fmode = GET_MODE (from); fmode != VOIDmode;
5182 fmode = GET_MODE_WIDER_MODE (fmode))
5183 for (imode = GET_MODE (to); imode != VOIDmode;
5184 imode = GET_MODE_WIDER_MODE (imode))
5186 icode = convert_optab_handler (tab, imode, fmode)->insn_code;
5187 if (icode != CODE_FOR_nothing)
5189 if (fmode != GET_MODE (from))
5190 from = convert_to_mode (fmode, from, 0);
5192 if (imode != GET_MODE (to))
5193 target = gen_reg_rtx (imode);
5195 emit_unop_insn (icode, target, from, UNKNOWN);
5197 convert_move (to, target, 0);
5205 /* Report whether we have an instruction to perform the operation
5206 specified by CODE on operands of mode MODE. */
5208 have_insn_for (enum rtx_code code, enum machine_mode mode)
/* True iff an optab is registered for CODE (see code_to_optab above)
   and that optab has a real insn pattern for MODE.  */
5210 return (code_to_optab[(int) code] != 0
5211 && (optab_handler (code_to_optab[(int) code], mode)->insn_code
5212 != CODE_FOR_nothing));
5215 /* Create a blank optab. */
/* GC-allocate an optab and mark every machine mode as unsupported:
   no insn pattern (CODE_FOR_nothing) and no library fallback.  */
5220 optab op = ggc_alloc (sizeof (struct optab));
5221 for (i = 0; i < NUM_MACHINE_MODES; i++)
5223 optab_handler (op, i)->insn_code = CODE_FOR_nothing;
5224 optab_handler (op, i)->libfunc = 0;
/* Create a blank convert optab: like new_optab, but indexed by a
   (to-mode, from-mode) pair, so every pair starts unsupported.  */
5230 static convert_optab
5231 new_convert_optab (void)
5234 convert_optab op = ggc_alloc (sizeof (struct convert_optab));
5235 for (i = 0; i < NUM_MACHINE_MODES; i++)
5236 for (j = 0; j < NUM_MACHINE_MODES; j++)
5238 convert_optab_handler (op, i, j)->insn_code = CODE_FOR_nothing;
5239 convert_optab_handler (op, i, j)->libfunc = 0;
5244 /* Same, but fill in its code as CODE, and write it into the
5245 code_to_optab table. */
/* Used for optabs that correspond 1:1 to an rtx code, so that
   have_insn_for and friends can map the code back to the optab.  */
5247 init_optab (enum rtx_code code)
5249 optab op = new_optab ();
5251 code_to_optab[(int) code] = op;
5255 /* Same, but fill in its code as CODE, and do _not_ write it into
5256 the code_to_optab table. */
/* Used for the trapping ("v" = overflow-checking) variants, which must
   not shadow the non-trapping optab in code_to_optab.  */
5258 init_optabv (enum rtx_code code)
5260 optab op = new_optab ();
5265 /* Conversion optabs never go in the code_to_optab table. */
5266 static inline convert_optab
5267 init_convert_optab (enum rtx_code code)
5269 convert_optab op = new_convert_optab ();
5274 /* Initialize the libfunc fields of an entire group of entries in some
5275 optab. Each entry is set equal to a string consisting of a leading
5276 pair of underscores followed by a generic operation name followed by
5277 a mode name (downshifted to lowercase) followed by a single character
5278 representing the number of operands for the given operation (which is
5279 usually one of the characters '2', '3', or '4').
5281 OPTABLE is the table in which libfunc fields are to be initialized.
5282 FIRST_MODE is the first machine mode index in the given optab to
5284 LAST_MODE is the last machine mode index in the given optab to
5286 OPNAME is the generic (string) name of the operation.
5287 SUFFIX is the character which specifies the number of operands for
5288 the given generic operation.
/* Example: opname "add", mode SImode, suffix '3' yields "__addsi3".  */
5292 init_libfuncs (optab optable, int first_mode, int last_mode,
5293 const char *opname, int suffix)
5296 unsigned opname_len = strlen (opname);
5298 for (mode = first_mode; (int) mode <= (int) last_mode;
5299 mode = (enum machine_mode) ((int) mode + 1))
5301 const char *mname = GET_MODE_NAME (mode);
5302 unsigned mname_len = strlen (mname);
/* Buffer for "__" + opname + lowercase mode name + suffix + NUL.  */
5303 char *libfunc_name = alloca (2 + opname_len + mname_len + 1 + 1);
/* Copy OPNAME, then the mode name downshifted to lowercase; the
   pointer setup and "__" prefix stores sit on elided lines.  */
5310 for (q = opname; *q; )
5312 for (q = mname; *q; q++)
5313 *p++ = TOLOWER (*q);
5317 optab_handler (optable, mode)->libfunc
5318 = init_one_libfunc (ggc_alloc_string (libfunc_name, p - libfunc_name));
5322 /* Initialize the libfunc fields of an entire group of entries in some
5323 optab which correspond to all integer mode operations. The parameters
5324 have the same meaning as similarly named ones for the `init_libfuncs'
5325 routine. (See above). */
5328 init_integral_libfuncs (optab optable, const char *opname, int suffix)
/* Cover integer modes from word_mode up to the wider of double-word
   and `long long`, the widest for which libgcc provides routines.  */
5330 int maxsize = 2*BITS_PER_WORD;
5331 if (maxsize < LONG_LONG_TYPE_SIZE)
5332 maxsize = LONG_LONG_TYPE_SIZE;
5333 init_libfuncs (optable, word_mode,
5334 mode_for_size (maxsize, MODE_INT, 0),
5338 /* Initialize the libfunc fields of an entire group of entries in some
5339 optab which correspond to all real mode operations. The parameters
5340 have the same meaning as similarly named ones for the `init_libfuncs'
5341 routine. (See above). */
5344 init_floating_libfuncs (optab optable, const char *opname, int suffix)
5346 char *dec_opname = alloca (sizeof (DECIMAL_PREFIX) + strlen (opname));
5348 /* For BID support, change the name to have either a bid_ or dpd_ prefix
5349 depending on the low level floating format used. */
5350 memcpy (dec_opname, DECIMAL_PREFIX, sizeof (DECIMAL_PREFIX) - 1);
5351 strcpy (dec_opname + sizeof (DECIMAL_PREFIX) - 1, opname);
/* Binary float modes use the plain name; decimal float modes use the
   DECIMAL_PREFIX-ed name built above.  */
5353 init_libfuncs (optable, MIN_MODE_FLOAT, MAX_MODE_FLOAT, opname, suffix);
5354 init_libfuncs (optable, MIN_MODE_DECIMAL_FLOAT, MAX_MODE_DECIMAL_FLOAT,
5355 dec_opname, suffix);
5358 /* Initialize the libfunc fields of an entire group of entries of an
5359 inter-mode-class conversion optab. The string formation rules are
5360 similar to the ones for init_libfuncs, above, but instead of having
5361 a mode name and an operand count these functions have two mode names
5362 and no operand count. */
5364 init_interclass_conv_libfuncs (convert_optab tab, const char *opname,
5365 enum mode_class from_class,
5366 enum mode_class to_class)
5368 enum machine_mode first_from_mode = GET_CLASS_NARROWEST_MODE (from_class);
5369 enum machine_mode first_to_mode = GET_CLASS_NARROWEST_MODE (to_class);
5370 size_t opname_len = strlen (opname);
5371 size_t max_mname_len = 0;
5373 enum machine_mode fmode, tmode;
5374 const char *fname, *tname;
5376 char *libfunc_name, *suffix;
5377 char *nondec_name, *dec_name, *nondec_suffix, *dec_suffix;
5380 /* If this is a decimal conversion, add the current BID vs. DPD prefix that
5381 depends on which underlying decimal floating point format is used. */
5382 const size_t dec_len = sizeof (DECIMAL_PREFIX) - 1;
/* First pass: find the longest mode name in either class, to size the
   stack buffers below.  */
5384 for (fmode = first_from_mode;
5386 fmode = GET_MODE_WIDER_MODE (fmode))
5387 max_mname_len = MAX (max_mname_len, strlen (GET_MODE_NAME (fmode)));
5389 for (tmode = first_to_mode;
5391 tmode = GET_MODE_WIDER_MODE (tmode))
5392 max_mname_len = MAX (max_mname_len, strlen (GET_MODE_NAME (tmode)));
/* Two name templates are prepared up front: "__<op>..." for binary
   float / integer conversions, and "__<bid_|dpd_><op>..." for decimal
   float conversions; *_suffix points past the fixed prefix so only the
   per-mode tail is rewritten in the loop.  */
5394 nondec_name = alloca (2 + opname_len + 2*max_mname_len + 1 + 1);
5395 nondec_name[0] = '_';
5396 nondec_name[1] = '_';
5397 memcpy (&nondec_name[2], opname, opname_len);
5398 nondec_suffix = nondec_name + opname_len + 2;
5400 dec_name = alloca (2 + dec_len + opname_len + 2*max_mname_len + 1 + 1);
5403 memcpy (&dec_name[2], DECIMAL_PREFIX, dec_len);
5404 memcpy (&dec_name[2+dec_len], opname, opname_len);
5405 dec_suffix = dec_name + dec_len + opname_len + 2;
5407 for (fmode = first_from_mode; fmode != VOIDmode;
5408 fmode = GET_MODE_WIDER_MODE (fmode))
5409 for (tmode = first_to_mode; tmode != VOIDmode;
5410 tmode = GET_MODE_WIDER_MODE (tmode))
5412 fname = GET_MODE_NAME (fmode);
5413 tname = GET_MODE_NAME (tmode);
5415 if (DECIMAL_FLOAT_MODE_P(fmode) || DECIMAL_FLOAT_MODE_P(tmode))
5417 libfunc_name = dec_name;
5418 suffix = dec_suffix;
5422 libfunc_name = nondec_name;
5423 suffix = nondec_suffix;
/* Append lowercase from-mode then to-mode names after the prefix
   (the lowercasing stores sit on elided lines).  */
5427 for (q = fname; *q; p++, q++)
5429 for (q = tname; *q; p++, q++)
5434 convert_optab_handler (tab, tmode, fmode)->libfunc
5435 = init_one_libfunc (ggc_alloc_string (libfunc_name,
5440 /* Initialize the libfunc fields of an entire group of entries of an
5441 intra-mode-class conversion optab. The string formation rules are
5442 similar to the ones for init_libfunc, above. WIDENING says whether
5443 the optab goes from narrow to wide modes or vice versa. These functions
5444 have two mode names _and_ an operand count. */
5446 init_intraclass_conv_libfuncs (convert_optab tab, const char *opname,
5447 enum mode_class class, bool widening)
5449 enum machine_mode first_mode = GET_CLASS_NARROWEST_MODE (class);
5450 size_t opname_len = strlen (opname);
5451 size_t max_mname_len = 0;
5453 enum machine_mode nmode, wmode;
5454 const char *nname, *wname;
5456 char *nondec_name, *dec_name, *nondec_suffix, *dec_suffix;
5457 char *libfunc_name, *suffix;
5460 /* If this is a decimal conversion, add the current BID vs. DPD prefix that
5461 depends on which underlying decimal floating point format is used. */
5462 const size_t dec_len = sizeof (DECIMAL_PREFIX) - 1;
/* Longest mode name in the class, used to size the stack buffers.  */
5464 for (nmode = first_mode; nmode != VOIDmode;
5465 nmode = GET_MODE_WIDER_MODE (nmode))
5466 max_mname_len = MAX (max_mname_len, strlen (GET_MODE_NAME (nmode)));
/* Same dual-template scheme as init_interclass_conv_libfuncs: plain
   "__<op>" prefix and decimal "__<bid_|dpd_><op>" prefix, with the
   *_suffix pointers marking where the per-mode tail begins.  */
5468 nondec_name = alloca (2 + opname_len + 2*max_mname_len + 1 + 1);
5469 nondec_name[0] = '_';
5470 nondec_name[1] = '_';
5471 memcpy (&nondec_name[2], opname, opname_len);
5472 nondec_suffix = nondec_name + opname_len + 2;
5474 dec_name = alloca (2 + dec_len + opname_len + 2*max_mname_len + 1 + 1);
5477 memcpy (&dec_name[2], DECIMAL_PREFIX, dec_len);
5478 memcpy (&dec_name[2 + dec_len], opname, opname_len);
5479 dec_suffix = dec_name + dec_len + opname_len + 2;
/* Enumerate every narrow/wide pair within the class (WMODE strictly
   wider than NMODE).  */
5481 for (nmode = first_mode; nmode != VOIDmode;
5482 nmode = GET_MODE_WIDER_MODE (nmode))
5483 for (wmode = GET_MODE_WIDER_MODE (nmode); wmode != VOIDmode;
5484 wmode = GET_MODE_WIDER_MODE (wmode))
5486 nname = GET_MODE_NAME (nmode);
5487 wname = GET_MODE_NAME (wmode);
5489 if (DECIMAL_FLOAT_MODE_P(nmode) || DECIMAL_FLOAT_MODE_P(wmode))
5491 libfunc_name = dec_name;
5492 suffix = dec_suffix;
5496 libfunc_name = nondec_name;
5497 suffix = nondec_suffix;
/* Source mode name first, destination second: narrow->wide when
   WIDENING, wide->narrow otherwise.  */
5501 for (q = widening ? nname : wname; *q; p++, q++)
5503 for (q = widening ? wname : nname; *q; p++, q++)
5509 convert_optab_handler(tab, widening ? wmode : nmode,
5510 widening ? nmode : wmode)->libfunc
5511 = init_one_libfunc (ggc_alloc_string (libfunc_name,
/* Build and return the SYMBOL_REF rtx for the library function NAME,
   with the target's section-info flags applied.  */
5518 init_one_libfunc (const char *name)
5522 /* Create a FUNCTION_DECL that can be passed to
5523 targetm.encode_section_info. */
5524 /* ??? We don't have any type information except for this is
5525 a function. Pretend this is "int foo()". */
5526 tree decl = build_decl (FUNCTION_DECL, get_identifier (name),
5527 build_function_type (integer_type_node, NULL_TREE));
5528 DECL_ARTIFICIAL (decl) = 1;
5529 DECL_EXTERNAL (decl) = 1;
5530 TREE_PUBLIC (decl) = 1;
5532 symbol = XEXP (DECL_RTL (decl), 0);
5534 /* Zap the nonsensical SYMBOL_REF_DECL for this. What we're left with
5535 are the flags assigned by targetm.encode_section_info. */
5536 SET_SYMBOL_REF_DECL (symbol, 0);
5541 /* Call this to reset the function entry for one optab (OPTABLE) in mode
5542 MODE to NAME, which should be either 0 or a string constant. */
5544 set_optab_libfunc (optab optable, enum machine_mode mode, const char *name)
/* Non-null NAME installs a symbol; null NAME clears the entry.  The
   `if (name)` test itself sits on an elided line.  */
5547 optab_handler (optable, mode)->libfunc = init_one_libfunc (name);
5549 optab_handler (optable, mode)->libfunc = 0;
5552 /* Call this to reset the function entry for one conversion optab
5553 (OPTABLE) from mode FMODE to mode TMODE to NAME, which should be
5554 either 0 or a string constant. */
5556 set_conv_libfunc (convert_optab optable, enum machine_mode tmode,
5557 enum machine_mode fmode, const char *name)
/* Same convention as set_optab_libfunc: non-null NAME installs a
   symbol, null clears; the `if (name)` test is on an elided line.  */
5560 convert_optab_handler (optable, tmode, fmode)->libfunc
5561 = init_one_libfunc (name);
5563 convert_optab_handler (optable, tmode, fmode)->libfunc = 0;
5566 /* Call this once to initialize the contents of the optabs
5567 appropriately for the current target machine. */
5573 enum machine_mode int_mode;
5575 /* Start by initializing all tables to contain CODE_FOR_nothing. */
5577 for (i = 0; i < NUM_RTX_CODE; i++)
5578 setcc_gen_code[i] = CODE_FOR_nothing;
5580 #ifdef HAVE_conditional_move
5581 for (i = 0; i < NUM_MACHINE_MODES; i++)
5582 movcc_gen_code[i] = CODE_FOR_nothing;
5585 for (i = 0; i < NUM_MACHINE_MODES; i++)
5587 vcond_gen_code[i] = CODE_FOR_nothing;
5588 vcondu_gen_code[i] = CODE_FOR_nothing;
5591 add_optab = init_optab (PLUS);
5592 addv_optab = init_optabv (PLUS);
5593 sub_optab = init_optab (MINUS);
5594 subv_optab = init_optabv (MINUS);
5595 smul_optab = init_optab (MULT);
5596 smulv_optab = init_optabv (MULT);
5597 smul_highpart_optab = init_optab (UNKNOWN);
5598 umul_highpart_optab = init_optab (UNKNOWN);
5599 smul_widen_optab = init_optab (UNKNOWN);
5600 umul_widen_optab = init_optab (UNKNOWN);
5601 usmul_widen_optab = init_optab (UNKNOWN);
5602 smadd_widen_optab = init_optab (UNKNOWN);
5603 umadd_widen_optab = init_optab (UNKNOWN);
5604 smsub_widen_optab = init_optab (UNKNOWN);
5605 umsub_widen_optab = init_optab (UNKNOWN);
5606 sdiv_optab = init_optab (DIV);
5607 sdivv_optab = init_optabv (DIV);
5608 sdivmod_optab = init_optab (UNKNOWN);
5609 udiv_optab = init_optab (UDIV);
5610 udivmod_optab = init_optab (UNKNOWN);
5611 smod_optab = init_optab (MOD);
5612 umod_optab = init_optab (UMOD);
5613 fmod_optab = init_optab (UNKNOWN);
5614 remainder_optab = init_optab (UNKNOWN);
5615 ftrunc_optab = init_optab (UNKNOWN);
5616 and_optab = init_optab (AND);
5617 ior_optab = init_optab (IOR);
5618 xor_optab = init_optab (XOR);
5619 ashl_optab = init_optab (ASHIFT);
5620 ashr_optab = init_optab (ASHIFTRT);
5621 lshr_optab = init_optab (LSHIFTRT);
5622 rotl_optab = init_optab (ROTATE);
5623 rotr_optab = init_optab (ROTATERT);
5624 smin_optab = init_optab (SMIN);
5625 smax_optab = init_optab (SMAX);
5626 umin_optab = init_optab (UMIN);
5627 umax_optab = init_optab (UMAX);
5628 pow_optab = init_optab (UNKNOWN);
5629 atan2_optab = init_optab (UNKNOWN);
5631 /* These three have codes assigned exclusively for the sake of
5633 mov_optab = init_optab (SET);
5634 movstrict_optab = init_optab (STRICT_LOW_PART);
5635 cmp_optab = init_optab (COMPARE);
5637 storent_optab = init_optab (UNKNOWN);
5639 ucmp_optab = init_optab (UNKNOWN);
5640 tst_optab = init_optab (UNKNOWN);
5642 eq_optab = init_optab (EQ);
5643 ne_optab = init_optab (NE);
5644 gt_optab = init_optab (GT);
5645 ge_optab = init_optab (GE);
5646 lt_optab = init_optab (LT);
5647 le_optab = init_optab (LE);
5648 unord_optab = init_optab (UNORDERED);
5650 neg_optab = init_optab (NEG);
5651 negv_optab = init_optabv (NEG);
5652 abs_optab = init_optab (ABS);
5653 absv_optab = init_optabv (ABS);
5654 addcc_optab = init_optab (UNKNOWN);
5655 one_cmpl_optab = init_optab (NOT);
5656 bswap_optab = init_optab (BSWAP);
5657 ffs_optab = init_optab (FFS);
5658 clz_optab = init_optab (CLZ);
5659 ctz_optab = init_optab (CTZ);
5660 popcount_optab = init_optab (POPCOUNT);
5661 parity_optab = init_optab (PARITY);
5662 sqrt_optab = init_optab (SQRT);
5663 floor_optab = init_optab (UNKNOWN);
5664 ceil_optab = init_optab (UNKNOWN);
5665 round_optab = init_optab (UNKNOWN);
5666 btrunc_optab = init_optab (UNKNOWN);
5667 nearbyint_optab = init_optab (UNKNOWN);
5668 rint_optab = init_optab (UNKNOWN);
5669 sincos_optab = init_optab (UNKNOWN);
5670 sin_optab = init_optab (UNKNOWN);
5671 asin_optab = init_optab (UNKNOWN);
5672 cos_optab = init_optab (UNKNOWN);
5673 acos_optab = init_optab (UNKNOWN);
5674 exp_optab = init_optab (UNKNOWN);
5675 exp10_optab = init_optab (UNKNOWN);
5676 exp2_optab = init_optab (UNKNOWN);
5677 expm1_optab = init_optab (UNKNOWN);
5678 ldexp_optab = init_optab (UNKNOWN);
5679 scalb_optab = init_optab (UNKNOWN);
5680 logb_optab = init_optab (UNKNOWN);
5681 ilogb_optab = init_optab (UNKNOWN);
5682 log_optab = init_optab (UNKNOWN);
5683 log10_optab = init_optab (UNKNOWN);
5684 log2_optab = init_optab (UNKNOWN);
5685 log1p_optab = init_optab (UNKNOWN);
5686 tan_optab = init_optab (UNKNOWN);
5687 atan_optab = init_optab (UNKNOWN);
5688 copysign_optab = init_optab (UNKNOWN);
5689 signbit_optab = init_optab (UNKNOWN);
5691 isinf_optab = init_optab (UNKNOWN);
5693 strlen_optab = init_optab (UNKNOWN);
5694 cbranch_optab = init_optab (UNKNOWN);
5695 cmov_optab = init_optab (UNKNOWN);
5696 cstore_optab = init_optab (UNKNOWN);
5697 push_optab = init_optab (UNKNOWN);
5699 reduc_smax_optab = init_optab (UNKNOWN);
5700 reduc_umax_optab = init_optab (UNKNOWN);
5701 reduc_smin_optab = init_optab (UNKNOWN);
5702 reduc_umin_optab = init_optab (UNKNOWN);
5703 reduc_splus_optab = init_optab (UNKNOWN);
5704 reduc_uplus_optab = init_optab (UNKNOWN);
5706 ssum_widen_optab = init_optab (UNKNOWN);
5707 usum_widen_optab = init_optab (UNKNOWN);
5708 sdot_prod_optab = init_optab (UNKNOWN);
5709 udot_prod_optab = init_optab (UNKNOWN);
5711 vec_extract_optab = init_optab (UNKNOWN);
5712 vec_extract_even_optab = init_optab (UNKNOWN);
5713 vec_extract_odd_optab = init_optab (UNKNOWN);
5714 vec_interleave_high_optab = init_optab (UNKNOWN);
5715 vec_interleave_low_optab = init_optab (UNKNOWN);
5716 vec_set_optab = init_optab (UNKNOWN);
5717 vec_init_optab = init_optab (UNKNOWN);
5718 vec_shl_optab = init_optab (UNKNOWN);
5719 vec_shr_optab = init_optab (UNKNOWN);
5720 vec_realign_load_optab = init_optab (UNKNOWN);
5721 movmisalign_optab = init_optab (UNKNOWN);
5722 vec_widen_umult_hi_optab = init_optab (UNKNOWN);
5723 vec_widen_umult_lo_optab = init_optab (UNKNOWN);
5724 vec_widen_smult_hi_optab = init_optab (UNKNOWN);
5725 vec_widen_smult_lo_optab = init_optab (UNKNOWN);
5726 vec_unpacks_hi_optab = init_optab (UNKNOWN);
5727 vec_unpacks_lo_optab = init_optab (UNKNOWN);
5728 vec_unpacku_hi_optab = init_optab (UNKNOWN);
5729 vec_unpacku_lo_optab = init_optab (UNKNOWN);
5730 vec_unpacks_float_hi_optab = init_optab (UNKNOWN);
5731 vec_unpacks_float_lo_optab = init_optab (UNKNOWN);
5732 vec_unpacku_float_hi_optab = init_optab (UNKNOWN);
5733 vec_unpacku_float_lo_optab = init_optab (UNKNOWN);
5734 vec_pack_trunc_optab = init_optab (UNKNOWN);
5735 vec_pack_usat_optab = init_optab (UNKNOWN);
5736 vec_pack_ssat_optab = init_optab (UNKNOWN);
5737 vec_pack_ufix_trunc_optab = init_optab (UNKNOWN);
5738 vec_pack_sfix_trunc_optab = init_optab (UNKNOWN);
5740 powi_optab = init_optab (UNKNOWN);
5743 sext_optab = init_convert_optab (SIGN_EXTEND);
5744 zext_optab = init_convert_optab (ZERO_EXTEND);
5745 trunc_optab = init_convert_optab (TRUNCATE);
5746 sfix_optab = init_convert_optab (FIX);
5747 ufix_optab = init_convert_optab (UNSIGNED_FIX);
5748 sfixtrunc_optab = init_convert_optab (UNKNOWN);
5749 ufixtrunc_optab = init_convert_optab (UNKNOWN);
5750 sfloat_optab = init_convert_optab (FLOAT);
5751 ufloat_optab = init_convert_optab (UNSIGNED_FLOAT);
5752 lrint_optab = init_convert_optab (UNKNOWN);
5753 lround_optab = init_convert_optab (UNKNOWN);
5754 lfloor_optab = init_convert_optab (UNKNOWN);
5755 lceil_optab = init_convert_optab (UNKNOWN);
5757 for (i = 0; i < NUM_MACHINE_MODES; i++)
5759 movmem_optab[i] = CODE_FOR_nothing;
5760 cmpstr_optab[i] = CODE_FOR_nothing;
5761 cmpstrn_optab[i] = CODE_FOR_nothing;
5762 cmpmem_optab[i] = CODE_FOR_nothing;
5763 setmem_optab[i] = CODE_FOR_nothing;
5765 sync_add_optab[i] = CODE_FOR_nothing;
5766 sync_sub_optab[i] = CODE_FOR_nothing;
5767 sync_ior_optab[i] = CODE_FOR_nothing;
5768 sync_and_optab[i] = CODE_FOR_nothing;
5769 sync_xor_optab[i] = CODE_FOR_nothing;
5770 sync_nand_optab[i] = CODE_FOR_nothing;
5771 sync_old_add_optab[i] = CODE_FOR_nothing;
5772 sync_old_sub_optab[i] = CODE_FOR_nothing;
5773 sync_old_ior_optab[i] = CODE_FOR_nothing;
5774 sync_old_and_optab[i] = CODE_FOR_nothing;
5775 sync_old_xor_optab[i] = CODE_FOR_nothing;
5776 sync_old_nand_optab[i] = CODE_FOR_nothing;
5777 sync_new_add_optab[i] = CODE_FOR_nothing;
5778 sync_new_sub_optab[i] = CODE_FOR_nothing;
5779 sync_new_ior_optab[i] = CODE_FOR_nothing;
5780 sync_new_and_optab[i] = CODE_FOR_nothing;
5781 sync_new_xor_optab[i] = CODE_FOR_nothing;
5782 sync_new_nand_optab[i] = CODE_FOR_nothing;
5783 sync_compare_and_swap[i] = CODE_FOR_nothing;
5784 sync_compare_and_swap_cc[i] = CODE_FOR_nothing;
5785 sync_lock_test_and_set[i] = CODE_FOR_nothing;
5786 sync_lock_release[i] = CODE_FOR_nothing;
5788 reload_in_optab[i] = reload_out_optab[i] = CODE_FOR_nothing;
5791 /* Fill in the optabs with the insns we support. */
5794 /* The ffs function operates on `int'. Fall back on it if we do not
5795 have a libgcc2 function for that width. */
5796 int_mode = mode_for_size (INT_TYPE_SIZE, MODE_INT, 0);
5797 optab_handler (ffs_optab, int_mode)->libfunc = init_one_libfunc ("ffs");
5799 /* Initialize the optabs with the names of the library functions. */
5800 init_integral_libfuncs (add_optab, "add", '3');
5801 init_floating_libfuncs (add_optab, "add", '3');
5802 init_integral_libfuncs (addv_optab, "addv", '3');
5803 init_floating_libfuncs (addv_optab, "add", '3');
5804 init_integral_libfuncs (sub_optab, "sub", '3');
5805 init_floating_libfuncs (sub_optab, "sub", '3');
5806 init_integral_libfuncs (subv_optab, "subv", '3');
5807 init_floating_libfuncs (subv_optab, "sub", '3');
5808 init_integral_libfuncs (smul_optab, "mul", '3');
5809 init_floating_libfuncs (smul_optab, "mul", '3');
5810 init_integral_libfuncs (smulv_optab, "mulv", '3');
5811 init_floating_libfuncs (smulv_optab, "mul", '3');
5812 init_integral_libfuncs (sdiv_optab, "div", '3');
5813 init_floating_libfuncs (sdiv_optab, "div", '3');
5814 init_integral_libfuncs (sdivv_optab, "divv", '3');
5815 init_integral_libfuncs (udiv_optab, "udiv", '3');
5816 init_integral_libfuncs (sdivmod_optab, "divmod", '4');
5817 init_integral_libfuncs (udivmod_optab, "udivmod", '4');
5818 init_integral_libfuncs (smod_optab, "mod", '3');
5819 init_integral_libfuncs (umod_optab, "umod", '3');
5820 init_floating_libfuncs (ftrunc_optab, "ftrunc", '2');
5821 init_integral_libfuncs (and_optab, "and", '3');
5822 init_integral_libfuncs (ior_optab, "ior", '3');
5823 init_integral_libfuncs (xor_optab, "xor", '3');
5824 init_integral_libfuncs (ashl_optab, "ashl", '3');
5825 init_integral_libfuncs (ashr_optab, "ashr", '3');
5826 init_integral_libfuncs (lshr_optab, "lshr", '3');
5827 init_integral_libfuncs (smin_optab, "min", '3');
5828 init_floating_libfuncs (smin_optab, "min", '3');
5829 init_integral_libfuncs (smax_optab, "max", '3');
5830 init_floating_libfuncs (smax_optab, "max", '3');
5831 init_integral_libfuncs (umin_optab, "umin", '3');
5832 init_integral_libfuncs (umax_optab, "umax", '3');
5833 init_integral_libfuncs (neg_optab, "neg", '2');
5834 init_floating_libfuncs (neg_optab, "neg", '2');
5835 init_integral_libfuncs (negv_optab, "negv", '2');
5836 init_floating_libfuncs (negv_optab, "neg", '2');
5837 init_integral_libfuncs (one_cmpl_optab, "one_cmpl", '2');
5838 init_integral_libfuncs (ffs_optab, "ffs", '2');
5839 init_integral_libfuncs (clz_optab, "clz", '2');
5840 init_integral_libfuncs (ctz_optab, "ctz", '2');
5841 init_integral_libfuncs (popcount_optab, "popcount", '2');
5842 init_integral_libfuncs (parity_optab, "parity", '2');
5844 /* Comparison libcalls for integers MUST come in pairs,
5846 init_integral_libfuncs (cmp_optab, "cmp", '2');
5847 init_integral_libfuncs (ucmp_optab, "ucmp", '2');
5848 init_floating_libfuncs (cmp_optab, "cmp", '2');
5850 /* EQ etc are floating point only. */
5851 init_floating_libfuncs (eq_optab, "eq", '2');
5852 init_floating_libfuncs (ne_optab, "ne", '2');
5853 init_floating_libfuncs (gt_optab, "gt", '2');
5854 init_floating_libfuncs (ge_optab, "ge", '2');
5855 init_floating_libfuncs (lt_optab, "lt", '2');
5856 init_floating_libfuncs (le_optab, "le", '2');
5857 init_floating_libfuncs (unord_optab, "unord", '2');
5859 init_floating_libfuncs (powi_optab, "powi", '2');
5862 init_interclass_conv_libfuncs (sfloat_optab, "float",
5863 MODE_INT, MODE_FLOAT);
5864 init_interclass_conv_libfuncs (sfloat_optab, "float",
5865 MODE_INT, MODE_DECIMAL_FLOAT);
5866 init_interclass_conv_libfuncs (ufloat_optab, "floatun",
5867 MODE_INT, MODE_FLOAT);
5868 init_interclass_conv_libfuncs (ufloat_optab, "floatun",
5869 MODE_INT, MODE_DECIMAL_FLOAT);
5870 init_interclass_conv_libfuncs (sfix_optab, "fix",
5871 MODE_FLOAT, MODE_INT);
5872 init_interclass_conv_libfuncs (sfix_optab, "fix",
5873 MODE_DECIMAL_FLOAT, MODE_INT);
5874 init_interclass_conv_libfuncs (ufix_optab, "fixuns",
5875 MODE_FLOAT, MODE_INT);
5876 init_interclass_conv_libfuncs (ufix_optab, "fixuns",
5877 MODE_DECIMAL_FLOAT, MODE_INT);
5878 init_interclass_conv_libfuncs (ufloat_optab, "floatuns",
5879 MODE_INT, MODE_DECIMAL_FLOAT);
5880 init_interclass_conv_libfuncs (lrint_optab, "lrint",
5881 MODE_INT, MODE_FLOAT);
5882 init_interclass_conv_libfuncs (lround_optab, "lround",
5883 MODE_INT, MODE_FLOAT);
5884 init_interclass_conv_libfuncs (lfloor_optab, "lfloor",
5885 MODE_INT, MODE_FLOAT);
5886 init_interclass_conv_libfuncs (lceil_optab, "lceil",
5887 MODE_INT, MODE_FLOAT);
5889 /* sext_optab is also used for FLOAT_EXTEND. */
5890 init_intraclass_conv_libfuncs (sext_optab, "extend", MODE_FLOAT, true);
5891 init_intraclass_conv_libfuncs (sext_optab, "extend", MODE_DECIMAL_FLOAT, true);
5892 init_interclass_conv_libfuncs (sext_optab, "extend", MODE_FLOAT, MODE_DECIMAL_FLOAT);
5893 init_interclass_conv_libfuncs (sext_optab, "extend", MODE_DECIMAL_FLOAT, MODE_FLOAT);
5894 init_intraclass_conv_libfuncs (trunc_optab, "trunc", MODE_FLOAT, false);
5895 init_intraclass_conv_libfuncs (trunc_optab, "trunc", MODE_DECIMAL_FLOAT, false);
5896 init_interclass_conv_libfuncs (trunc_optab, "trunc", MODE_FLOAT, MODE_DECIMAL_FLOAT);
5897 init_interclass_conv_libfuncs (trunc_optab, "trunc", MODE_DECIMAL_FLOAT, MODE_FLOAT);
5899 /* Explicitly initialize the bswap libfuncs since we need them to be
5900 valid for things other than word_mode. */
5901 set_optab_libfunc (bswap_optab, SImode, "__bswapsi2");
5902 set_optab_libfunc (bswap_optab, DImode, "__bswapdi2");
5904 /* Use cabs for double complex abs, since systems generally have cabs.
5905 Don't define any libcall for float complex, so that cabs will be used. */
5906 if (complex_double_type_node)
5907 optab_handler (abs_optab, TYPE_MODE (complex_double_type_node))->libfunc
5908 = init_one_libfunc ("cabs");
5910 abort_libfunc = init_one_libfunc ("abort");
5911 memcpy_libfunc = init_one_libfunc ("memcpy");
5912 memmove_libfunc = init_one_libfunc ("memmove");
5913 memcmp_libfunc = init_one_libfunc ("memcmp");
5914 memset_libfunc = init_one_libfunc ("memset");
5915 setbits_libfunc = init_one_libfunc ("__setbits");
5917 #ifndef DONT_USE_BUILTIN_SETJMP
5918 setjmp_libfunc = init_one_libfunc ("__builtin_setjmp");
5919 longjmp_libfunc = init_one_libfunc ("__builtin_longjmp");
5921 setjmp_libfunc = init_one_libfunc ("setjmp");
5922 longjmp_libfunc = init_one_libfunc ("longjmp");
5924 unwind_sjlj_register_libfunc = init_one_libfunc ("_Unwind_SjLj_Register");
5925 unwind_sjlj_unregister_libfunc
5926 = init_one_libfunc ("_Unwind_SjLj_Unregister");
5928 /* For function entry/exit instrumentation. */
5929 profile_function_entry_libfunc
5930 = init_one_libfunc ("__cyg_profile_func_enter");
5931 profile_function_exit_libfunc
5932 = init_one_libfunc ("__cyg_profile_func_exit");
5934 gcov_flush_libfunc = init_one_libfunc ("__gcov_flush");
5936 if (HAVE_conditional_trap)
5937 trap_rtx = gen_rtx_fmt_ee (EQ, VOIDmode, NULL_RTX, NULL_RTX);
5939 /* Allow the target to add more libcalls or rename some, etc. */
5940 targetm.init_libfuncs ();
5945 /* Print information about the current contents of the optabs on
/* NOTE(review): this excerpt elides several lines of this function
   (its return type, opening brace, the declarations of i/j/k and o,
   and the assignment of O in the first loop); the comments below
   describe only the visible code.  */
5949 debug_optab_libfuncs (void)
5955 /* Dump the arithmetic optabs. */
5956 for (i = 0; i != (int) OTI_MAX; i++)
5957 for (j = 0; j < NUM_MACHINE_MODES; ++j)
5960 struct optab_handlers *h;
/* Look up the handler entry for this optab in machine mode J.  */
5963 h = optab_handler (o, j);
/* A registered libfunc is expected to be a SYMBOL_REF naming it.  */
5966 gcc_assert (GET_CODE (h->libfunc) == SYMBOL_REF);
5967 fprintf (stderr, "%s\t%s:\t%s\n",
5968 GET_RTX_NAME (o->code),
5970 XSTR (h->libfunc, 0));
5974 /* Dump the conversion optabs. */
5975 for (i = 0; i < (int) COI_MAX; ++i)
5976 for (j = 0; j < NUM_MACHINE_MODES; ++j)
5977 for (k = 0; k < NUM_MACHINE_MODES; ++k)
5980 struct optab_handlers *h;
5982 o = &convert_optab_table[i];
/* Conversion optabs are indexed by a pair of machine modes (J, K).  */
5983 h = convert_optab_handler(o, j, k);
5986 gcc_assert (GET_CODE (h->libfunc) == SYMBOL_REF);
5987 fprintf (stderr, "%s\t%s\t%s:\t%s\n",
5988 GET_RTX_NAME (o->code),
5991 XSTR (h->libfunc, 0));
5999 /* Generate insns to trap with code TCODE if OP1 and OP2 satisfy condition
6000 CODE. Return 0 on failure. */
/* NOTE(review): this excerpt elides the return-type line, braces,
   the bodies of the early failure checks (presumably "return 0"),
   and the sequence start/end around the emitted insns.  */
6003 gen_cond_trap (enum rtx_code code ATTRIBUTE_UNUSED, rtx op1,
6004 rtx op2 ATTRIBUTE_UNUSED, rtx tcode ATTRIBUTE_UNUSED)
6006 enum machine_mode mode = GET_MODE (op1);
6007 enum insn_code icode;
/* Fail fast if the target has no conditional-trap pattern.  */
6010 if (!HAVE_conditional_trap)
/* With no mode we cannot select a compare pattern.  */
6013 if (mode == VOIDmode)
/* We need a compare pattern for MODE to set the condition first.  */
6016 icode = optab_handler (cmp_optab, mode)->insn_code;
6017 if (icode == CODE_FOR_nothing)
/* Legitimize both operands for the compare pattern's predicates.  */
6021 op1 = prepare_operand (icode, op1, 0, mode, mode, 0);
6022 op2 = prepare_operand (icode, op2, 1, mode, mode, 0);
/* Emit the compare, then the conditional trap keyed on CODE.  */
6028 emit_insn (GEN_FCN (icode) (op1, op2));
/* trap_rtx is the shared placeholder built with code EQ at init
   time; install the caller's real condition code into it.  */
6030 PUT_CODE (trap_rtx, code);
6031 gcc_assert (HAVE_conditional_trap);
6032 insn = gen_conditional_trap (trap_rtx, tcode);
6036 insn = get_insns ();
6043 /* Return rtx code for TCODE. Use UNSIGNEDP to select signed
6044 or unsigned operation code. */
6046 static enum rtx_code
6047 get_rtx_code (enum tree_code tcode, bool unsignedp)
/* NOTE(review): the switch header and most case labels are elided in
   this excerpt; only the relational-comparison arms are visible.
   Each arm maps a tree comparison to its signed or unsigned rtx
   counterpart depending on UNSIGNEDP.  */
6059 code = unsignedp ? LTU : LT;
6062 code = unsignedp ? LEU : LE;
6065 code = unsignedp ? GTU : GT;
6068 code = unsignedp ? GEU : GE;
6071 case UNORDERED_EXPR:
6102 /* Return comparison rtx for COND. Use UNSIGNEDP to select signed or
6103 unsigned operators. Do not generate compare instruction. */
6106 vector_compare_rtx (tree cond, bool unsignedp, enum insn_code icode)
6108 enum rtx_code rcode;
6110 rtx rtx_op0, rtx_op1;
6112 /* This is unlikely. While generating VEC_COND_EXPR, auto vectorizer
6113 ensures that condition is a relational operation. */
6114 gcc_assert (COMPARISON_CLASS_P (cond));
/* Translate the tree comparison code to an rtx code, honoring
   signedness, then pull out the two compared operands.  */
6116 rcode = get_rtx_code (TREE_CODE (cond), unsignedp);
6117 t_op0 = TREE_OPERAND (cond, 0);
6118 t_op1 = TREE_OPERAND (cond, 1);
6120 /* Expand operands. */
6121 rtx_op0 = expand_expr (t_op0, NULL_RTX, TYPE_MODE (TREE_TYPE (t_op0)),
6123 rtx_op1 = expand_expr (t_op1, NULL_RTX, TYPE_MODE (TREE_TYPE (t_op1)),
/* Force each operand into a register when it does not already
   satisfy the vcond pattern's predicate (operands 4 and 5 of
   ICODE).  VOIDmode operands (constants) are left alone.  */
6126 if (!insn_data[icode].operand[4].predicate (rtx_op0, GET_MODE (rtx_op0))
6127 && GET_MODE (rtx_op0) != VOIDmode)
6128 rtx_op0 = force_reg (GET_MODE (rtx_op0), rtx_op0);
6130 if (!insn_data[icode].operand[5].predicate (rtx_op1, GET_MODE (rtx_op1))
6131 && GET_MODE (rtx_op1) != VOIDmode)
6132 rtx_op1 = force_reg (GET_MODE (rtx_op1), rtx_op1);
/* Build the bare comparison rtx; the caller emits the instruction.  */
6134 return gen_rtx_fmt_ee (rcode, VOIDmode, rtx_op0, rtx_op1);
6137 /* Return insn code for VEC_COND_EXPR EXPR. */
6139 static inline enum insn_code
6140 get_vcond_icode (tree expr, enum machine_mode mode)
6142 enum insn_code icode = CODE_FOR_nothing;
/* Select the unsigned (vcondu) or signed (vcond) pattern table
   based on the signedness of EXPR's type.  */
6144 if (TYPE_UNSIGNED (TREE_TYPE (expr)))
6145 icode = vcondu_gen_code[mode];
6147 icode = vcond_gen_code[mode];
6151 /* Return TRUE iff, appropriate vector insns are available
6152 for vector cond expr expr in VMODE mode. */
6155 expand_vec_cond_expr_p (tree expr, enum machine_mode vmode)
/* NOTE(review): the return statements are elided in this excerpt;
   only the availability check on the vcond insn code is visible.  */
6157 if (get_vcond_icode (expr, vmode) == CODE_FOR_nothing)
6162 /* Generate insns for VEC_COND_EXPR. */
6165 expand_vec_cond_expr (tree vec_cond_expr, rtx target)
6167 enum insn_code icode;
6168 rtx comparison, rtx_op1, rtx_op2, cc_op0, cc_op1;
6169 enum machine_mode mode = TYPE_MODE (TREE_TYPE (vec_cond_expr));
6170 bool unsignedp = TYPE_UNSIGNED (TREE_TYPE (vec_cond_expr));
/* Bail out (body elided here) if the target has no vcond pattern
   for this mode.  */
6172 icode = get_vcond_icode (vec_cond_expr, mode);
6173 if (icode == CODE_FOR_nothing)
/* Make sure TARGET is acceptable to the pattern's operand 0.  */
6176 if (!target || !insn_data[icode].operand[0].predicate (target, mode))
6177 target = gen_reg_rtx (mode);
6179 /* Get comparison rtx. First expand both cond expr operands. */
6180 comparison = vector_compare_rtx (TREE_OPERAND (vec_cond_expr, 0),
6182 cc_op0 = XEXP (comparison, 0);
6183 cc_op1 = XEXP (comparison, 1);
6184 /* Expand both operands and force them in reg, if required. */
6185 rtx_op1 = expand_normal (TREE_OPERAND (vec_cond_expr, 1));
6186 if (!insn_data[icode].operand[1].predicate (rtx_op1, mode)
6187 && mode != VOIDmode)
6188 rtx_op1 = force_reg (mode, rtx_op1);
6190 rtx_op2 = expand_normal (TREE_OPERAND (vec_cond_expr, 2));
6191 if (!insn_data[icode].operand[2].predicate (rtx_op2, mode)
6192 && mode != VOIDmode)
6193 rtx_op2 = force_reg (mode, rtx_op2);
6195 /* Emit instruction! */
/* Operand order matches the vcond pattern: result, the two value
   operands, the comparison rtx, and the two compared operands.  */
6196 emit_insn (GEN_FCN (icode) (target, rtx_op1, rtx_op2,
6197 comparison, cc_op0, cc_op1));
6203 /* This is an internal subroutine of the other compare_and_swap expanders.
6204 MEM, OLD_VAL and NEW_VAL are as you'd expect for a compare-and-swap
6205 operation. TARGET is an optional place to store the value result of
6206 the operation. ICODE is the particular instruction to expand. Return
6207 the result of the operation. */
6210 expand_val_compare_and_swap_1 (rtx mem, rtx old_val, rtx new_val,
6211 rtx target, enum insn_code icode)
6213 enum machine_mode mode = GET_MODE (mem);
/* Supply a fresh register when TARGET is absent or rejected by the
   pattern's operand-0 predicate.  */
6216 if (!target || !insn_data[icode].operand[0].predicate (target, mode))
6217 target = gen_reg_rtx (mode);
/* Widen/narrow OLD_VAL to MEM's mode (unsigned), then legitimize
   it for pattern operand 2.  */
6219 if (GET_MODE (old_val) != VOIDmode && GET_MODE (old_val) != mode)
6220 old_val = convert_modes (mode, GET_MODE (old_val), old_val, 1);
6221 if (!insn_data[icode].operand[2].predicate (old_val, mode))
6222 old_val = force_reg (mode, old_val);
/* Likewise for NEW_VAL and pattern operand 3.  */
6224 if (GET_MODE (new_val) != VOIDmode && GET_MODE (new_val) != mode)
6225 new_val = convert_modes (mode, GET_MODE (new_val), new_val, 1);
6226 if (!insn_data[icode].operand[3].predicate (new_val, mode))
6227 new_val = force_reg (mode, new_val);
/* Generate the compare-and-swap; a NULL result means the pattern
   refused the operands (failure bodies are elided in this excerpt).  */
6229 insn = GEN_FCN (icode) (target, mem, old_val, new_val);
6230 if (insn == NULL_RTX)
6237 /* Expand a compare-and-swap operation and return its value. */
6240 expand_val_compare_and_swap (rtx mem, rtx old_val, rtx new_val, rtx target)
6242 enum machine_mode mode = GET_MODE (mem);
6243 enum insn_code icode = sync_compare_and_swap[mode];
/* No value-returning compare-and-swap pattern for this mode; fail
   (the failure return body is elided in this excerpt).  */
6245 if (icode == CODE_FOR_nothing)
/* Delegate operand legitimization and emission to the helper.  */
6248 return expand_val_compare_and_swap_1 (mem, old_val, new_val, target, icode);
6251 /* Expand a compare-and-swap operation and store true into the result if
6252 the operation was successful and false otherwise. Return the result.
6253 Unlike other routines, TARGET is not optional. */
/* NOTE(review): this excerpt elides the function's return type,
   braces, switch header, and several statement bodies; comments
   below describe only what is visible.  */
6256 expand_bool_compare_and_swap (rtx mem, rtx old_val, rtx new_val, rtx target)
6258 enum machine_mode mode = GET_MODE (mem);
6259 enum insn_code icode;
6260 rtx subtarget, label0, label1;
6262 /* If the target supports a compare-and-swap pattern that simultaneously
6263 sets some flag for success, then use it. Otherwise use the regular
6264 compare-and-swap and follow that immediately with a compare insn. */
6265 icode = sync_compare_and_swap_cc[mode];
6269 subtarget = expand_val_compare_and_swap_1 (mem, old_val, new_val,
6271 if (subtarget != NULL_RTX)
/* No flag-setting variant: fall back to the plain pattern plus an
   explicit compare of the returned value against OLD_VAL.  */
6275 case CODE_FOR_nothing:
6276 icode = sync_compare_and_swap[mode];
6277 if (icode == CODE_FOR_nothing)
6280 /* Ensure that if old_val == mem, that we're not comparing
6281 against an old value. */
6282 if (MEM_P (old_val))
6283 old_val = force_reg (mode, old_val);
6285 subtarget = expand_val_compare_and_swap_1 (mem, old_val, new_val,
6287 if (subtarget == NULL_RTX)
6290 emit_cmp_insn (subtarget, old_val, EQ, const0_rtx, mode, true);
6293 /* If the target has a sane STORE_FLAG_VALUE, then go ahead and use a
6294 setcc instruction from the beginning. We don't work too hard here,
6295 but it's nice to not be stupid about initial code gen either. */
6296 if (STORE_FLAG_VALUE == 1)
6298 icode = setcc_gen_code[EQ];
6299 if (icode != CODE_FOR_nothing)
6301 enum machine_mode cmode = insn_data[icode].operand[0].mode;
/* setcc may want a different mode than TARGET's; use a scratch
   register and convert afterwards if necessary.  */
6305 if (!insn_data[icode].operand[0].predicate (target, cmode))
6306 subtarget = gen_reg_rtx (cmode);
6308 insn = GEN_FCN (icode) (subtarget);
6312 if (GET_MODE (target) != GET_MODE (subtarget))
/* Zero-extend the flag value into TARGET.  */
6314 convert_move (target, subtarget, 1);
6322 /* Without an appropriate setcc instruction, use a set of branches to
6323 get 1 and 0 stored into target. Presumably if the target has a
6324 STORE_FLAG_VALUE that isn't 1, then this will get cleaned up by ifcvt. */
6326 label0 = gen_label_rtx ();
6327 label1 = gen_label_rtx ();
/* target = (success ? 1 : 0) via a diamond of branches.  */
6329 emit_jump_insn (bcc_gen_fctn[EQ] (label0));
6330 emit_move_insn (target, const0_rtx);
6331 emit_jump_insn (gen_jump (label1));
6333 emit_label (label0);
6334 emit_move_insn (target, const1_rtx);
6335 emit_label (label1);
6340 /* This is a helper function for the other atomic operations. This function
6341 emits a loop that contains SEQ that iterates until a compare-and-swap
6342 operation at the end succeeds. MEM is the memory to be modified. SEQ is
6343 a set of instructions that takes a value from OLD_REG as an input and
6344 produces a value in NEW_REG as an output. Before SEQ, OLD_REG will be
6345 set to the current contents of MEM. After SEQ, a compare-and-swap will
6346 attempt to update MEM with NEW_REG. The function returns true when the
6347 loop was generated successfully. */
6350 expand_compare_and_swap_loop (rtx mem, rtx old_reg, rtx new_reg, rtx seq)
6352 enum machine_mode mode = GET_MODE (mem);
6353 enum insn_code icode;
6354 rtx label, cmp_reg, subtarget;
6356 /* The loop we want to generate looks like
6362 cmp_reg = compare-and-swap(mem, old_reg, new_reg)
6363 if (cmp_reg != old_reg)
6366 Note that we only do the plain load from memory once. Subsequent
6367 iterations use the value loaded by the compare-and-swap pattern. */
/* Initial load of MEM happens exactly once, before the loop label;
   the label and SEQ emission between these lines are partly elided
   in this excerpt.  */
6369 label = gen_label_rtx ();
6370 cmp_reg = gen_reg_rtx (mode);
6372 emit_move_insn (cmp_reg, mem);
6374 emit_move_insn (old_reg, cmp_reg);
6378 /* If the target supports a compare-and-swap pattern that simultaneously
6379 sets some flag for success, then use it. Otherwise use the regular
6380 compare-and-swap and follow that immediately with a compare insn. */
6381 icode = sync_compare_and_swap_cc[mode];
6385 subtarget = expand_val_compare_and_swap_1 (mem, old_reg, new_reg,
6387 if (subtarget != NULL_RTX)
/* The flag-setting pattern must have produced its result directly
   into CMP_REG for the loop-back compare to be valid.  */
6389 gcc_assert (subtarget == cmp_reg);
6394 case CODE_FOR_nothing:
6395 icode = sync_compare_and_swap[mode];
6396 if (icode == CODE_FOR_nothing)
6399 subtarget = expand_val_compare_and_swap_1 (mem, old_reg, new_reg,
6401 if (subtarget == NULL_RTX)
6403 if (subtarget != cmp_reg)
6404 emit_move_insn (cmp_reg, subtarget);
/* Explicit compare since the plain pattern sets no flags itself.  */
6406 emit_cmp_insn (cmp_reg, old_reg, EQ, const0_rtx, mode, true);
6409 /* ??? Mark this jump predicted not taken? */
/* Loop back while the compare-and-swap observed a different value.  */
6410 emit_jump_insn (bcc_gen_fctn[NE] (label));
6415 /* This function generates the atomic operation MEM CODE= VAL. In this
6416 case, we do not care about any resulting value. Returns NULL if we
6417 cannot generate the operation. */
/* NOTE(review): the switch header and case labels selecting among
   the sync_*_optab tables are elided in this excerpt; only the
   per-operation assignments are visible.  */
6420 expand_sync_operation (rtx mem, rtx val, enum rtx_code code)
6422 enum machine_mode mode = GET_MODE (mem);
6423 enum insn_code icode;
6426 /* Look to see if the target supports the operation directly. */
6430 icode = sync_add_optab[mode];
6433 icode = sync_ior_optab[mode];
6436 icode = sync_xor_optab[mode];
6439 icode = sync_and_optab[mode];
6442 icode = sync_nand_optab[mode];
/* Subtraction: if there is no direct sync_sub pattern, try to
   express it as an atomic add of the negated value.  */
6446 icode = sync_sub_optab[mode];
6447 if (icode == CODE_FOR_nothing)
6449 icode = sync_add_optab[mode];
6450 if (icode != CODE_FOR_nothing)
6452 val = expand_simple_unop (mode, NEG, val, NULL_RTX, 1);
6462 /* Generate the direct operation, if present. */
6463 if (icode != CODE_FOR_nothing)
6465 if (GET_MODE (val) != VOIDmode && GET_MODE (val) != mode)
6466 val = convert_modes (mode, GET_MODE (val), val, 1);
6467 if (!insn_data[icode].operand[1].predicate (val, mode))
6468 val = force_reg (mode, val);
6470 insn = GEN_FCN (icode) (mem, val);
6478 /* Failing that, generate a compare-and-swap loop in which we perform the
6479 operation with normal arithmetic instructions. */
6480 if (sync_compare_and_swap[mode] != CODE_FOR_nothing)
6482 rtx t0 = gen_reg_rtx (mode), t1;
/* NAND requires complementing the loaded value before the binop
   (the surrounding condition is elided here).  */
6489 t1 = expand_simple_unop (mode, NOT, t1, NULL_RTX, true);
6492 t1 = expand_simple_binop (mode, code, t1, val, NULL_RTX,
6493 true, OPTAB_LIB_WIDEN);
6495 insn = get_insns ();
/* Wrap the arithmetic in a compare-and-swap retry loop.  */
6498 if (t1 != NULL && expand_compare_and_swap_loop (mem, t0, t1, insn))
6505 /* This function generates the atomic operation MEM CODE= VAL. In this
6506 case, we do care about the resulting value: if AFTER is true then
6507 return the value MEM holds after the operation, if AFTER is false
6508 then return the value MEM holds before the operation. TARGET is an
6509 optional place for the result value to be stored. */
/* NOTE(review): the switch header, case labels, and several
   condition/assignment lines choosing between OLD_CODE and NEW_CODE
   are elided in this excerpt.  */
6512 expand_sync_fetch_operation (rtx mem, rtx val, enum rtx_code code,
6513 bool after, rtx target)
6515 enum machine_mode mode = GET_MODE (mem);
6516 enum insn_code old_code, new_code, icode;
6520 /* Look to see if the target supports the operation directly. */
6524 old_code = sync_old_add_optab[mode];
6525 new_code = sync_new_add_optab[mode];
6528 old_code = sync_old_ior_optab[mode];
6529 new_code = sync_new_ior_optab[mode];
6532 old_code = sync_old_xor_optab[mode];
6533 new_code = sync_new_xor_optab[mode];
6536 old_code = sync_old_and_optab[mode];
6537 new_code = sync_new_and_optab[mode];
6540 old_code = sync_old_nand_optab[mode];
6541 new_code = sync_new_nand_optab[mode];
/* Subtraction: with no direct sub patterns, fall back to the add
   patterns applied to the negated value.  */
6545 old_code = sync_old_sub_optab[mode];
6546 new_code = sync_new_sub_optab[mode];
6547 if (old_code == CODE_FOR_nothing && new_code == CODE_FOR_nothing)
6549 old_code = sync_old_add_optab[mode];
6550 new_code = sync_new_add_optab[mode];
6551 if (old_code != CODE_FOR_nothing || new_code != CODE_FOR_nothing)
6553 val = expand_simple_unop (mode, NEG, val, NULL_RTX, 1);
6563 /* If the target does supports the proper new/old operation, great. But
6564 if we only support the opposite old/new operation, check to see if we
6565 can compensate. In the case in which the old value is supported, then
6566 we can always perform the operation again with normal arithmetic. In
6567 the case in which the new value is supported, then we can only handle
6568 this in the case the operation is reversible. */
6573 if (icode == CODE_FOR_nothing)
6576 if (icode != CODE_FOR_nothing)
/* Compensating a "new"-valued pattern to yield the old value only
   works when the operation can be undone (PLUS/MINUS/XOR).  */
6583 if (icode == CODE_FOR_nothing
6584 && (code == PLUS || code == MINUS || code == XOR))
6587 if (icode != CODE_FOR_nothing)
6592 /* If we found something supported, great. */
6593 if (icode != CODE_FOR_nothing)
6595 if (!target || !insn_data[icode].operand[0].predicate (target, mode))
6596 target = gen_reg_rtx (mode);
/* Legitimize VAL for the pattern (operand 2), converting modes
   unsigned as elsewhere in this file.  */
6598 if (GET_MODE (val) != VOIDmode && GET_MODE (val) != mode)
6599 val = convert_modes (mode, GET_MODE (val), val, 1);
6600 if (!insn_data[icode].operand[2].predicate (val, mode))
6601 val = force_reg (mode, val);
6603 insn = GEN_FCN (icode) (target, mem, val);
6608 /* If we need to compensate for using an operation with the
6609 wrong return value, do so now. */
6616 else if (code == MINUS)
/* NAND compensation: redo the complement-then-binop on TARGET.  */
6621 target = expand_simple_unop (mode, NOT, target, NULL_RTX, true);
6622 target = expand_simple_binop (mode, code, target, val, NULL_RTX,
6623 true, OPTAB_LIB_WIDEN);
6630 /* Failing that, generate a compare-and-swap loop in which we perform the
6631 operation with normal arithmetic instructions. */
6632 if (sync_compare_and_swap[mode] != CODE_FOR_nothing)
6634 rtx t0 = gen_reg_rtx (mode), t1;
6636 if (!target || !register_operand (target, mode))
6637 target = gen_reg_rtx (mode);
/* !AFTER: the value before the operation is the loaded T0.  */
6642 emit_move_insn (target, t0);
6646 t1 = expand_simple_unop (mode, NOT, t1, NULL_RTX, true);
6649 t1 = expand_simple_binop (mode, code, t1, val, NULL_RTX,
6650 true, OPTAB_LIB_WIDEN);
/* AFTER: the value after the operation is the computed T1.  */
6652 emit_move_insn (target, t1);
6654 insn = get_insns ();
6657 if (t1 != NULL && expand_compare_and_swap_loop (mem, t0, t1, insn))
6664 /* This function expands a test-and-set operation. Ideally we atomically
6665 store VAL in MEM and return the previous value in MEM. Some targets
6666 may not support this operation and only support VAL with the constant 1;
6667 in this case while the return value will be 0/1, but the exact value
6668 stored in MEM is target defined. TARGET is an option place to stick
6669 the return value. */
6672 expand_sync_lock_test_and_set (rtx mem, rtx val, rtx target)
6674 enum machine_mode mode = GET_MODE (mem);
6675 enum insn_code icode;
6678 /* If the target supports the test-and-set directly, great. */
6679 icode = sync_lock_test_and_set[mode];
6680 if (icode != CODE_FOR_nothing)
6682 if (!target || !insn_data[icode].operand[0].predicate (target, mode))
6683 target = gen_reg_rtx (mode);
/* Legitimize VAL for the pattern's operand 2, converting modes
   unsigned as elsewhere in this file.  */
6685 if (GET_MODE (val) != VOIDmode && GET_MODE (val) != mode)
6686 val = convert_modes (mode, GET_MODE (val), val, 1);
6687 if (!insn_data[icode].operand[2].predicate (val, mode))
6688 val = force_reg (mode, val);
6690 insn = GEN_FCN (icode) (target, mem, val);
6698 /* Otherwise, use a compare-and-swap loop for the exchange. */
6699 if (sync_compare_and_swap[mode] != CODE_FOR_nothing)
6701 if (!target || !register_operand (target, mode))
6702 target = gen_reg_rtx (mode);
6703 if (GET_MODE (val) != VOIDmode && GET_MODE (val) != mode)
6704 val = convert_modes (mode, GET_MODE (val), val, 1);
/* TARGET doubles as the old-value register in the CAS loop, so the
   exchange result lands directly in it.  */
6705 if (expand_compare_and_swap_loop (mem, target, val, NULL_RTX))
6712 #include "gt-optabs.h"