1 /* Expand the basic unary and binary arithmetic operations, for GNU compiler.
2 Copyright (C) 1987, 1988, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
3 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007
4 Free Software Foundation, Inc.
6 This file is part of GCC.
8 GCC is free software; you can redistribute it and/or modify it under
9 the terms of the GNU General Public License as published by the Free
10 Software Foundation; either version 3, or (at your option) any later
13 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
14 WARRANTY; without even the implied warranty of MERCHANTABILITY or
15 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING3. If not see
20 <http://www.gnu.org/licenses/>. */
25 #include "coretypes.h"
29 /* Include insn-config.h before expr.h so that HAVE_conditional_move
30 is properly defined. */
31 #include "insn-config.h"
45 #include "basic-block.h"
48 /* Each optab contains info on how this target machine
49 can perform a particular operation
50 for all sizes and kinds of operands.
52 The operation to be performed is often specified
53 by passing one of these optabs as an argument.
55 See expr.h for documentation of these optabs. */
57 optab optab_table[OTI_MAX];
59 rtx libfunc_table[LTI_MAX];
61 /* Tables of patterns for converting one mode to another. */
62 convert_optab convert_optab_table[COI_MAX];
64 /* Contains the optab used for each rtx code. */
65 optab code_to_optab[NUM_RTX_CODE + 1];
67 /* Indexed by the rtx-code for a conditional (eg. EQ, LT,...)
68 gives the gen_function to make a branch to test that condition. */
70 rtxfun bcc_gen_fctn[NUM_RTX_CODE];
72 /* Indexed by the rtx-code for a conditional (eg. EQ, LT,...)
73 gives the insn code to make a store-condition insn
74 to test that condition. */
76 enum insn_code setcc_gen_code[NUM_RTX_CODE];
78 #ifdef HAVE_conditional_move
79 /* Indexed by the machine mode, gives the insn code to make a conditional
80 move insn. This is not indexed by the rtx-code like bcc_gen_fctn and
81 setcc_gen_code to cut down on the number of named patterns. Consider a day
82 when a lot more rtx codes are conditional (eg: for the ARM). */
84 enum insn_code movcc_gen_code[NUM_MACHINE_MODES];
87 /* Indexed by the machine mode, gives the insn code for vector conditional
90 enum insn_code vcond_gen_code[NUM_MACHINE_MODES];
91 enum insn_code vcondu_gen_code[NUM_MACHINE_MODES];
93 /* The insn generating function can not take an rtx_code argument.
94 TRAP_RTX is used as an rtx argument. Its code is replaced with
95 the code to be used in the trap insn and all other fields are ignored. */
96 static GTY(()) rtx trap_rtx;
98 static int add_equal_note (rtx, rtx, enum rtx_code, rtx, rtx);
99 static rtx widen_operand (rtx, enum machine_mode, enum machine_mode, int,
101 static void prepare_cmp_insn (rtx *, rtx *, enum rtx_code *, rtx,
102 enum machine_mode *, int *,
103 enum can_compare_purpose);
104 static enum insn_code can_fix_p (enum machine_mode, enum machine_mode, int,
106 static enum insn_code can_float_p (enum machine_mode, enum machine_mode, int);
107 static optab new_optab (void);
108 static convert_optab new_convert_optab (void);
109 static inline optab init_optab (enum rtx_code);
110 static inline optab init_optabv (enum rtx_code);
111 static inline convert_optab init_convert_optab (enum rtx_code);
112 static void init_libfuncs (optab, int, int, const char *, int);
113 static void init_integral_libfuncs (optab, const char *, int);
114 static void init_floating_libfuncs (optab, const char *, int);
115 static void init_interclass_conv_libfuncs (convert_optab, const char *,
116 enum mode_class, enum mode_class);
117 static void init_intraclass_conv_libfuncs (convert_optab, const char *,
118 enum mode_class, bool);
119 static void emit_cmp_and_jump_insn_1 (rtx, rtx, enum machine_mode,
120 enum rtx_code, int, rtx);
121 static void prepare_float_lib_cmp (rtx *, rtx *, enum rtx_code *,
122 enum machine_mode *, int *);
123 static rtx widen_clz (enum machine_mode, rtx, rtx);
124 static rtx expand_parity (enum machine_mode, rtx, rtx);
125 static rtx expand_ffs (enum machine_mode, rtx, rtx);
126 static rtx expand_ctz (enum machine_mode, rtx, rtx);
127 static enum rtx_code get_rtx_code (enum tree_code, bool);
128 static rtx vector_compare_rtx (tree, bool, enum insn_code);
130 /* Current libcall id. It doesn't matter what these are, as long
131 as they are unique to each libcall that is emitted. */
132 static HOST_WIDE_INT libcall_id = 0;
134 #ifndef HAVE_conditional_trap
135 #define HAVE_conditional_trap 0
136 #define gen_conditional_trap(a,b) (gcc_unreachable (), NULL_RTX)
139 /* Prefixes for the current version of decimal floating point (BID vs. DPD) */
140 #if ENABLE_DECIMAL_BID_FORMAT
141 #define DECIMAL_PREFIX "bid_"
143 #define DECIMAL_PREFIX "dpd_"
147 /* Add a REG_EQUAL note to the last insn in INSNS. TARGET is being set to
148 the result of operation CODE applied to OP0 (and OP1 if it is a binary
151 If the last insn does not set TARGET, don't do anything, but return 1.
153 If a previous insn sets TARGET and TARGET is one of OP0 or OP1,
154 don't add the REG_EQUAL note but return 0. Our caller can then try
155 again, ensuring that TARGET is not one of the operands. */
158 add_equal_note (rtx insns, rtx target, enum rtx_code code, rtx op0, rtx op1)
/* NOTE(review): this listing is elided — original source lines between the
   numbered statements (braces, returns) are missing; code kept byte-identical.  */
160 rtx last_insn, insn, set;
/* INSNS must be a real, non-empty sequence of at least two insns.  */
163 gcc_assert (insns && INSN_P (insns) && NEXT_INSN (insns));
/* Only arithmetic, comparison and unary codes get a REG_EQUAL note;
   anything else bails out (elided return).  */
165 if (GET_RTX_CLASS (code) != RTX_COMM_ARITH
166 && GET_RTX_CLASS (code) != RTX_BIN_ARITH
167 && GET_RTX_CLASS (code) != RTX_COMM_COMPARE
168 && GET_RTX_CLASS (code) != RTX_COMPARE
169 && GET_RTX_CLASS (code) != RTX_UNARY)
/* A ZERO_EXTRACT destination cannot carry a meaningful note either.  */
172 if (GET_CODE (target) == ZERO_EXTRACT)
/* Walk to the last insn of the sequence.  */
175 for (last_insn = insns;
176 NEXT_INSN (last_insn) != NULL_RTX;
177 last_insn = NEXT_INSN (last_insn))
180 set = single_set (last_insn);
/* The last insn must set TARGET (possibly wrapped in STRICT_LOW_PART);
   otherwise nothing to annotate.  */
184 if (! rtx_equal_p (SET_DEST (set), target)
185 /* For a STRICT_LOW_PART, the REG_NOTE applies to what is inside it. */
186 && (GET_CODE (SET_DEST (set)) != STRICT_LOW_PART
187 || ! rtx_equal_p (XEXP (SET_DEST (set), 0), target)))
190 /* If TARGET is in OP0 or OP1, check if anything in SEQ sets TARGET
191 besides the last insn. */
192 if (reg_overlap_mentioned_p (target, op0)
193 || (op1 && reg_overlap_mentioned_p (target, op1)))
195 insn = PREV_INSN (last_insn);
196 while (insn != NULL_RTX)
/* An earlier clobber of TARGET would make the note wrong; the elided
   branch presumably returns 0 here so the caller can retry.  */
198 if (reg_set_p (target, insn))
201 insn = PREV_INSN (insn);
/* Build the note rtx: unary codes take one operand, everything else two.  */
205 if (GET_RTX_CLASS (code) == RTX_UNARY)
206 note = gen_rtx_fmt_e (code, GET_MODE (target), copy_rtx (op0));
208 note = gen_rtx_fmt_ee (code, GET_MODE (target), copy_rtx (op0), copy_rtx (op1));
210 set_unique_reg_note (last_insn, REG_EQUAL, note);
215 /* Widen OP to MODE and return the rtx for the widened operand. UNSIGNEDP
216 says whether OP is signed or unsigned. NO_EXTEND is nonzero if we need
217 not actually do a sign-extend or zero-extend, but can leave the
218 higher-order bits of the result rtx undefined, for example, in the case
219 of logical operations, but not right shifts. */
222 widen_operand (rtx op, enum machine_mode mode, enum machine_mode oldmode,
223 int unsignedp, int no_extend)
/* NOTE(review): elided listing — declarations, returns and braces between the
   numbered lines are missing from this view; code kept byte-identical.  */
227 /* If we don't have to extend and this is a constant, return it. */
228 if (no_extend && GET_MODE (op) == VOIDmode)
231 /* If we must extend do so. If OP is a SUBREG for a promoted object, also
232 extend since it will be more efficient to do so unless the signedness of
233 a promoted object differs from our extension. */
/* (The first disjunct of this condition — presumably !no_extend — is elided.)  */
235 || (GET_CODE (op) == SUBREG && SUBREG_PROMOTED_VAR_P (op)
236 && SUBREG_PROMOTED_UNSIGNED_P (op) == unsignedp))
237 return convert_modes (mode, oldmode, op, unsignedp);
239 /* If MODE is no wider than a single word, we return a paradoxical
241 if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
242 return gen_rtx_SUBREG (mode, force_reg (GET_MODE (op), op), 0);
244 /* Otherwise, get an object of MODE, clobber it, and set the low-order
/* Multi-word case: clobber a fresh MODE register, store OP into its low part,
   and leave the high-order bits undefined (allowed when NO_EXTEND).  */
247 result = gen_reg_rtx (mode);
248 emit_insn (gen_rtx_CLOBBER (VOIDmode, result));
249 emit_move_insn (gen_lowpart (GET_MODE (op), result), op);
253 /* Return the optab used for computing the operation given by
254 the tree code, CODE. This function is not always usable (for
255 example, it cannot give complete results for multiplication
256 or division) but probably ought to be relied on more widely
257 throughout the expander. */
259 optab_for_tree_code (enum tree_code code, const_tree type)
/* NOTE(review): elided listing — the `switch (code)` header, many `case`
   labels, `break`s and the default path are missing from this view; the
   surviving `return` statements are kept byte-identical.  Signed vs. unsigned
   optab selection is driven by TYPE_UNSIGNED (type) throughout.  */
271 return one_cmpl_optab;
280 return TYPE_UNSIGNED (type) ? umod_optab : smod_optab;
288 return TYPE_UNSIGNED (type) ? udiv_optab : sdiv_optab;
294 return TYPE_UNSIGNED (type) ? lshr_optab : ashr_optab;
303 return TYPE_UNSIGNED (type) ? umax_optab : smax_optab;
306 return TYPE_UNSIGNED (type) ? umin_optab : smin_optab;
308 case REALIGN_LOAD_EXPR:
309 return vec_realign_load_optab;
312 return TYPE_UNSIGNED (type) ? usum_widen_optab : ssum_widen_optab;
315 return TYPE_UNSIGNED (type) ? udot_prod_optab : sdot_prod_optab;
318 return TYPE_UNSIGNED (type) ? reduc_umax_optab : reduc_smax_optab;
321 return TYPE_UNSIGNED (type) ? reduc_umin_optab : reduc_smin_optab;
323 case REDUC_PLUS_EXPR:
324 return TYPE_UNSIGNED (type) ? reduc_uplus_optab : reduc_splus_optab;
326 case VEC_LSHIFT_EXPR:
327 return vec_shl_optab;
329 case VEC_RSHIFT_EXPR:
330 return vec_shr_optab;
332 case VEC_WIDEN_MULT_HI_EXPR:
333 return TYPE_UNSIGNED (type) ?
334 vec_widen_umult_hi_optab : vec_widen_smult_hi_optab;
336 case VEC_WIDEN_MULT_LO_EXPR:
337 return TYPE_UNSIGNED (type) ?
338 vec_widen_umult_lo_optab : vec_widen_smult_lo_optab;
340 case VEC_UNPACK_HI_EXPR:
341 return TYPE_UNSIGNED (type) ?
342 vec_unpacku_hi_optab : vec_unpacks_hi_optab;
344 case VEC_UNPACK_LO_EXPR:
345 return TYPE_UNSIGNED (type) ?
346 vec_unpacku_lo_optab : vec_unpacks_lo_optab;
348 case VEC_UNPACK_FLOAT_HI_EXPR:
349 /* The signedness is determined from input operand. */
350 return TYPE_UNSIGNED (type) ?
351 vec_unpacku_float_hi_optab : vec_unpacks_float_hi_optab;
353 case VEC_UNPACK_FLOAT_LO_EXPR:
354 /* The signedness is determined from input operand. */
355 return TYPE_UNSIGNED (type) ?
356 vec_unpacku_float_lo_optab : vec_unpacks_float_lo_optab;
358 case VEC_PACK_TRUNC_EXPR:
359 return vec_pack_trunc_optab;
361 case VEC_PACK_SAT_EXPR:
362 return TYPE_UNSIGNED (type) ? vec_pack_usat_optab : vec_pack_ssat_optab;
364 case VEC_PACK_FIX_TRUNC_EXPR:
365 /* The signedness is determined from output operand. */
366 return TYPE_UNSIGNED (type) ?
367 vec_pack_ufix_trunc_optab : vec_pack_sfix_trunc_optab;
/* For plain arithmetic, pick the trapping ("v") variant when the type has
   trapping overflow semantics.  */
373 trapv = INTEGRAL_TYPE_P (type) && TYPE_OVERFLOW_TRAPS (type);
376 case POINTER_PLUS_EXPR:
378 return trapv ? addv_optab : add_optab;
381 return trapv ? subv_optab : sub_optab;
384 return trapv ? smulv_optab : smul_optab;
387 return trapv ? negv_optab : neg_optab;
390 return trapv ? absv_optab : abs_optab;
392 case VEC_EXTRACT_EVEN_EXPR:
393 return vec_extract_even_optab;
395 case VEC_EXTRACT_ODD_EXPR:
396 return vec_extract_odd_optab;
398 case VEC_INTERLEAVE_HIGH_EXPR:
399 return vec_interleave_high_optab;
401 case VEC_INTERLEAVE_LOW_EXPR:
402 return vec_interleave_low_optab;
410 /* Expand vector widening operations.
412 There are two different classes of operations handled here:
413 1) Operations whose result is wider than all the arguments to the operation.
414 Examples: VEC_UNPACK_HI/LO_EXPR, VEC_WIDEN_MULT_HI/LO_EXPR
415 In this case OP0 and optionally OP1 would be initialized,
416 but WIDE_OP wouldn't (not relevant for this case).
417 2) Operations whose result is of the same size as the last argument to the
418 operation, but wider than all the other arguments to the operation.
419 Examples: WIDEN_SUM_EXPR, VEC_DOT_PROD_EXPR.
420 In the case WIDE_OP, OP0 and optionally OP1 would be initialized.
422 E.g, when called to expand the following operations, this is how
423 the arguments will be initialized:
425 widening-sum 2 oprnd0 - oprnd1
426 widening-dot-product 3 oprnd0 oprnd1 oprnd2
427 widening-mult 2 oprnd0 oprnd1 -
428 type-promotion (vec-unpack) 1 oprnd0 - - */
431 expand_widen_pattern_expr (tree exp, rtx op0, rtx op1, rtx wide_op, rtx target,
/* NOTE(review): elided listing — the second parameter line, several `if`
   headers, assignments and the final emit/return are missing from this view;
   surviving code kept byte-identical.  */
434 tree oprnd0, oprnd1, oprnd2;
435 enum machine_mode wmode = 0, tmode0, tmode1 = 0;
436 optab widen_pattern_optab;
438 enum machine_mode xmode0, xmode1 = 0, wxmode = 0;
441 rtx xop0, xop1, wxop;
442 int nops = TREE_OPERAND_LENGTH (exp);
/* Look up the widening optab for this tree code on the mode of operand 0,
   then fetch the modes the chosen insn pattern wants for its operands.  */
444 oprnd0 = TREE_OPERAND (exp, 0);
445 tmode0 = TYPE_MODE (TREE_TYPE (oprnd0));
446 widen_pattern_optab =
447 optab_for_tree_code (TREE_CODE (exp), TREE_TYPE (oprnd0));
448 icode = (int) optab_handler (widen_pattern_optab, tmode0)->insn_code;
449 gcc_assert (icode != CODE_FOR_nothing);
450 xmode0 = insn_data[icode].operand[1].mode;
/* (Guard on nops elided.)  Second narrow operand, if present.  */
454 oprnd1 = TREE_OPERAND (exp, 1);
455 tmode1 = TYPE_MODE (TREE_TYPE (oprnd1));
456 xmode1 = insn_data[icode].operand[2].mode;
459 /* The last operand is of a wider mode than the rest of the operands. */
467 gcc_assert (tmode1 == tmode0);
/* WIDE_OP case: operand 2 supplies the wide accumulator.  */
469 oprnd2 = TREE_OPERAND (exp, 2);
470 wmode = TYPE_MODE (TREE_TYPE (oprnd2));
471 wxmode = insn_data[icode].operand[3].mode;
/* No wide input: the result mode is the wide mode.  */
475 wmode = wxmode = insn_data[icode].operand[0].mode;
/* Use TARGET if usable by the pattern's output predicate, else a fresh reg.  */
478 || ! (*insn_data[icode].operand[0].predicate) (target, wmode))
479 temp = gen_reg_rtx (wmode);
487 /* In case the insn wants input operands in modes different from
488 those of the actual operands, convert the operands. It would
489 seem that we don't need to convert CONST_INTs, but we do, so
490 that they're properly zero-extended, sign-extended or truncated
493 if (GET_MODE (op0) != xmode0 && xmode0 != VOIDmode)
494 xop0 = convert_modes (xmode0,
495 GET_MODE (op0) != VOIDmode
501 if (GET_MODE (op1) != xmode1 && xmode1 != VOIDmode)
502 xop1 = convert_modes (xmode1,
503 GET_MODE (op1) != VOIDmode
509 if (GET_MODE (wide_op) != wxmode && wxmode != VOIDmode)
510 wxop = convert_modes (wxmode,
511 GET_MODE (wide_op) != VOIDmode
516 /* Now, if insn's predicates don't allow our operands, put them into
519 if (! (*insn_data[icode].operand[1].predicate) (xop0, xmode0)
520 && xmode0 != VOIDmode)
521 xop0 = copy_to_mode_reg (xmode0, xop0);
525 if (! (*insn_data[icode].operand[2].predicate) (xop1, xmode1)
526 && xmode1 != VOIDmode)
527 xop1 = copy_to_mode_reg (xmode1, xop1);
531 if (! (*insn_data[icode].operand[3].predicate) (wxop, wxmode)
532 && wxmode != VOIDmode)
533 wxop = copy_to_mode_reg (wxmode, wxop);
/* Generate the insn, choosing the GEN_FCN arity by which operands exist:
   3 inputs, 2 inputs, wide+narrow, or a single input.  */
535 pat = GEN_FCN (icode) (temp, xop0, xop1, wxop);
538 pat = GEN_FCN (icode) (temp, xop0, xop1);
/* When the wide operand is the second pattern input (2-operand form).  */
544 if (! (*insn_data[icode].operand[2].predicate) (wxop, wxmode)
545 && wxmode != VOIDmode)
546 wxop = copy_to_mode_reg (wxmode, wxop);
548 pat = GEN_FCN (icode) (temp, xop0, wxop);
551 pat = GEN_FCN (icode) (temp, xop0);
558 /* Generate code to perform an operation specified by TERNARY_OPTAB
559 on operands OP0, OP1 and OP2, with result having machine-mode MODE.
561 UNSIGNEDP is for the case where we have to widen the operands
562 to perform the operation. It says to use zero-extension.
564 If TARGET is nonzero, the value
565 is generated there, if it is convenient to do so.
566 In all cases an rtx is returned for the locus of the value;
567 this may or may not be TARGET. */
570 expand_ternary_op (enum machine_mode mode, optab ternary_optab, rtx op0,
571 rtx op1, rtx op2, rtx target, int unsignedp)
/* NOTE(review): elided listing — some declarations, braces and the final
   emit/return lines are missing from this view; code kept byte-identical.  */
573 int icode = (int) optab_handler (ternary_optab, mode)->insn_code;
/* Modes the selected insn pattern wants for its three inputs.  */
574 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
575 enum machine_mode mode1 = insn_data[icode].operand[2].mode;
576 enum machine_mode mode2 = insn_data[icode].operand[3].mode;
579 rtx xop0 = op0, xop1 = op1, xop2 = op2;
/* Caller must have verified the optab supports MODE.  */
581 gcc_assert (optab_handler (ternary_optab, mode)->insn_code
582 != CODE_FOR_nothing);
/* Use TARGET if the output predicate accepts it, else a fresh register.  */
584 if (!target || !insn_data[icode].operand[0].predicate (target, mode))
585 temp = gen_reg_rtx (mode);
589 /* In case the insn wants input operands in modes different from
590 those of the actual operands, convert the operands. It would
591 seem that we don't need to convert CONST_INTs, but we do, so
592 that they're properly zero-extended, sign-extended or truncated
595 if (GET_MODE (op0) != mode0 && mode0 != VOIDmode)
596 xop0 = convert_modes (mode0,
597 GET_MODE (op0) != VOIDmode
602 if (GET_MODE (op1) != mode1 && mode1 != VOIDmode)
603 xop1 = convert_modes (mode1,
604 GET_MODE (op1) != VOIDmode
609 if (GET_MODE (op2) != mode2 && mode2 != VOIDmode)
610 xop2 = convert_modes (mode2,
611 GET_MODE (op2) != VOIDmode
616 /* Now, if insn's predicates don't allow our operands, put them into
619 if (!insn_data[icode].operand[1].predicate (xop0, mode0)
620 && mode0 != VOIDmode)
621 xop0 = copy_to_mode_reg (mode0, xop0);
623 if (!insn_data[icode].operand[2].predicate (xop1, mode1)
624 && mode1 != VOIDmode)
625 xop1 = copy_to_mode_reg (mode1, xop1);
627 if (!insn_data[icode].operand[3].predicate (xop2, mode2)
628 && mode2 != VOIDmode)
629 xop2 = copy_to_mode_reg (mode2, xop2);
631 pat = GEN_FCN (icode) (temp, xop0, xop1, xop2);
638 /* Like expand_binop, but return a constant rtx if the result can be
639 calculated at compile time. The arguments and return value are
640 otherwise the same as for expand_binop. */
643 simplify_expand_binop (enum machine_mode mode, optab binoptab,
644 rtx op0, rtx op1, rtx target, int unsignedp,
645 enum optab_methods methods)
/* Constant-fold at compile time when both operands are constant; the elided
   lines presumably return X when folding succeeded.  Otherwise expand.  */
647 if (CONSTANT_P (op0) && CONSTANT_P (op1))
649 rtx x = simplify_binary_operation (binoptab->code, mode, op0, op1);
655 return expand_binop (mode, binoptab, op0, op1, target, unsignedp, methods);
658 /* Like simplify_expand_binop, but always put the result in TARGET.
659 Return true if the expansion succeeded. */
662 force_expand_binop (enum machine_mode mode, optab binoptab,
663 rtx op0, rtx op1, rtx target, int unsignedp,
664 enum optab_methods methods)
/* Expand, then move the result into TARGET if it landed elsewhere.
   The failure check and boolean returns are elided from this view.  */
666 rtx x = simplify_expand_binop (mode, binoptab, op0, op1,
667 target, unsignedp, methods);
671 emit_move_insn (target, x);
675 /* Generate insns for VEC_LSHIFT_EXPR, VEC_RSHIFT_EXPR. */
678 expand_vec_shift_expr (tree vec_shift_expr, rtx target)
/* NOTE(review): elided listing — braces, `break`s, the default case and the
   final emit/return are missing from this view; code kept byte-identical.  */
680 enum insn_code icode;
681 rtx rtx_op1, rtx_op2;
682 enum machine_mode mode1;
683 enum machine_mode mode2;
684 enum machine_mode mode = TYPE_MODE (TREE_TYPE (vec_shift_expr));
685 tree vec_oprnd = TREE_OPERAND (vec_shift_expr, 0);
686 tree shift_oprnd = TREE_OPERAND (vec_shift_expr, 1);
/* Select the whole-vector shift optab by direction.  */
690 switch (TREE_CODE (vec_shift_expr))
692 case VEC_RSHIFT_EXPR:
693 shift_optab = vec_shr_optab;
695 case VEC_LSHIFT_EXPR:
696 shift_optab = vec_shl_optab;
702 icode = (int) optab_handler (shift_optab, mode)->insn_code;
703 gcc_assert (icode != CODE_FOR_nothing);
705 mode1 = insn_data[icode].operand[1].mode;
706 mode2 = insn_data[icode].operand[2].mode;
/* Expand operands and force them into registers if the pattern's
   predicates reject them.  */
708 rtx_op1 = expand_normal (vec_oprnd);
709 if (!(*insn_data[icode].operand[1].predicate) (rtx_op1, mode1)
710 && mode1 != VOIDmode)
711 rtx_op1 = force_reg (mode1, rtx_op1);
713 rtx_op2 = expand_normal (shift_oprnd);
714 if (!(*insn_data[icode].operand[2].predicate) (rtx_op2, mode2)
715 && mode2 != VOIDmode)
716 rtx_op2 = force_reg (mode2, rtx_op2);
/* Fresh output register if TARGET is unusable for the pattern.  */
719 || ! (*insn_data[icode].operand[0].predicate) (target, mode))
720 target = gen_reg_rtx (mode);
722 /* Emit instruction */
723 pat = GEN_FCN (icode) (target, rtx_op1, rtx_op2);
730 /* This subroutine of expand_doubleword_shift handles the cases in which
731 the effective shift value is >= BITS_PER_WORD. The arguments and return
732 value are the same as for the parent routine, except that SUPERWORD_OP1
733 is the shift count to use when shifting OUTOF_INPUT into INTO_TARGET.
734 INTO_TARGET may be null if the caller has decided to calculate it. */
737 expand_superword_shift (optab binoptab, rtx outof_input, rtx superword_op1,
738 rtx outof_target, rtx into_target,
739 int unsignedp, enum optab_methods methods)
/* NOTE(review): elided listing — the failure `return false` branches and the
   final `return true` are missing from this view; code kept byte-identical.  */
/* The into-half receives OUTOF_INPUT shifted by the superword count.  */
741 if (into_target != 0)
742 if (!force_expand_binop (word_mode, binoptab, outof_input, superword_op1,
743 into_target, unsignedp, methods))
746 if (outof_target != 0)
748 /* For a signed right shift, we must fill OUTOF_TARGET with copies
749 of the sign bit, otherwise we must fill it with zeros. */
750 if (binoptab != ashr_optab)
751 emit_move_insn (outof_target, CONST0_RTX (word_mode));
/* ashr case: replicate the sign bit by shifting right BITS_PER_WORD - 1.  */
753 if (!force_expand_binop (word_mode, binoptab,
754 outof_input, GEN_INT (BITS_PER_WORD - 1),
755 outof_target, unsignedp, methods))
761 /* This subroutine of expand_doubleword_shift handles the cases in which
762 the effective shift value is < BITS_PER_WORD. The arguments and return
763 value are the same as for the parent routine. */
766 expand_subword_shift (enum machine_mode op1_mode, optab binoptab,
767 rtx outof_input, rtx into_input, rtx op1,
768 rtx outof_target, rtx into_target,
769 int unsignedp, enum optab_methods methods,
770 unsigned HOST_WIDE_INT shift_mask)
/* NOTE(review): elided listing — declarations, braces, failure returns and
   the final `return true` are missing from this view; kept byte-identical.  */
772 optab reverse_unsigned_shift, unsigned_shift;
/* The carry bits always move in the direction opposite to BINOPTAB,
   and the into-half shift is always logical.  */
775 reverse_unsigned_shift = (binoptab == ashl_optab ? lshr_optab : ashl_optab);
776 unsigned_shift = (binoptab == ashl_optab ? ashl_optab : lshr_optab);
778 /* The low OP1 bits of INTO_TARGET come from the high bits of OUTOF_INPUT.
779 We therefore need to shift OUTOF_INPUT by (BITS_PER_WORD - OP1) bits in
780 the opposite direction to BINOPTAB. */
781 if (CONSTANT_P (op1) || shift_mask >= BITS_PER_WORD)
/* Safe to shift by BITS_PER_WORD - OP1 directly.  */
783 carries = outof_input;
784 tmp = immed_double_const (BITS_PER_WORD, 0, op1_mode);
785 tmp = simplify_expand_binop (op1_mode, sub_optab, tmp, op1,
790 /* We must avoid shifting by BITS_PER_WORD bits since that is either
791 the same as a zero shift (if shift_mask == BITS_PER_WORD - 1) or
792 has unknown behavior. Do a single shift first, then shift by the
793 remainder. It's OK to use ~OP1 as the remainder if shift counts
794 are truncated to the mode size. */
795 carries = expand_binop (word_mode, reverse_unsigned_shift,
796 outof_input, const1_rtx, 0, unsignedp, methods);
797 if (shift_mask == BITS_PER_WORD - 1)
/* Remainder is ~OP1 (== BITS_PER_WORD - 1 - OP1 after truncation).  */
799 tmp = immed_double_const (-1, -1, op1_mode);
800 tmp = simplify_expand_binop (op1_mode, xor_optab, op1, tmp,
/* Otherwise remainder is (BITS_PER_WORD - 1) - OP1.  */
805 tmp = immed_double_const (BITS_PER_WORD - 1, 0, op1_mode);
806 tmp = simplify_expand_binop (op1_mode, sub_optab, tmp, op1,
810 if (tmp == 0 || carries == 0)
812 carries = expand_binop (word_mode, reverse_unsigned_shift,
813 carries, tmp, 0, unsignedp, methods);
817 /* Shift INTO_INPUT logically by OP1. This is the last use of INTO_INPUT
818 so the result can go directly into INTO_TARGET if convenient. */
819 tmp = expand_binop (word_mode, unsigned_shift, into_input, op1,
820 into_target, unsignedp, methods);
824 /* Now OR in the bits carried over from OUTOF_INPUT. */
825 if (!force_expand_binop (word_mode, ior_optab, tmp, carries,
826 into_target, unsignedp, methods))
829 /* Use a standard word_mode shift for the out-of half. */
830 if (outof_target != 0)
831 if (!force_expand_binop (word_mode, binoptab, outof_input, op1,
832 outof_target, unsignedp, methods))
839 #ifdef HAVE_conditional_move
840 /* Try implementing expand_doubleword_shift using conditional moves.
841 The shift is by < BITS_PER_WORD if (CMP_CODE CMP1 CMP2) is true,
842 otherwise it is by >= BITS_PER_WORD. SUBWORD_OP1 and SUPERWORD_OP1
843 are the shift counts to use in the former and latter case. All other
844 arguments are the same as the parent routine. */
847 expand_doubleword_shift_condmove (enum machine_mode op1_mode, optab binoptab,
848 enum rtx_code cmp_code, rtx cmp1, rtx cmp2,
849 rtx outof_input, rtx into_input,
850 rtx subword_op1, rtx superword_op1,
851 rtx outof_target, rtx into_target,
852 int unsignedp, enum optab_methods methods,
853 unsigned HOST_WIDE_INT shift_mask)
/* NOTE(review): elided listing — braces, failure returns and the final
   `return true` are missing from this view; code kept byte-identical.  */
855 rtx outof_superword, into_superword;
857 /* Put the superword version of the output into OUTOF_SUPERWORD and
859 outof_superword = outof_target != 0 ? gen_reg_rtx (word_mode) : 0;
860 if (outof_target != 0 && subword_op1 == superword_op1)
862 /* The value INTO_TARGET >> SUBWORD_OP1, which we later store in
863 OUTOF_TARGET, is the same as the value of INTO_SUPERWORD. */
864 into_superword = outof_target;
865 if (!expand_superword_shift (binoptab, outof_input, superword_op1,
866 outof_superword, 0, unsignedp, methods))
/* General case: compute the superword into-half into its own register.  */
871 into_superword = gen_reg_rtx (word_mode);
872 if (!expand_superword_shift (binoptab, outof_input, superword_op1,
873 outof_superword, into_superword,
878 /* Put the subword version directly in OUTOF_TARGET and INTO_TARGET. */
879 if (!expand_subword_shift (op1_mode, binoptab,
880 outof_input, into_input, subword_op1,
881 outof_target, into_target,
882 unsignedp, methods, shift_mask))
885 /* Select between them. Do the INTO half first because INTO_SUPERWORD
886 might be the current value of OUTOF_TARGET. */
887 if (!emit_conditional_move (into_target, cmp_code, cmp1, cmp2, op1_mode,
888 into_target, into_superword, word_mode, false))
891 if (outof_target != 0)
892 if (!emit_conditional_move (outof_target, cmp_code, cmp1, cmp2, op1_mode,
893 outof_target, outof_superword,
901 /* Expand a doubleword shift (ashl, ashr or lshr) using word-mode shifts.
902 OUTOF_INPUT and INTO_INPUT are the two word-sized halves of the first
903 input operand; the shift moves bits in the direction OUTOF_INPUT->
904 INTO_TARGET. OUTOF_TARGET and INTO_TARGET are the equivalent words
905 of the target. OP1 is the shift count and OP1_MODE is its mode.
906 If OP1 is constant, it will have been truncated as appropriate
907 and is known to be nonzero.
909 If SHIFT_MASK is zero, the result of word shifts is undefined when the
910 shift count is outside the range [0, BITS_PER_WORD). This routine must
911 avoid generating such shifts for OP1s in the range [0, BITS_PER_WORD * 2).
913 If SHIFT_MASK is nonzero, all word-mode shift counts are effectively
914 masked by it and shifts in the range [BITS_PER_WORD, SHIFT_MASK) will
915 fill with zeros or sign bits as appropriate.
917 If SHIFT_MASK is BITS_PER_WORD - 1, this routine will synthesize
918 a doubleword shift whose equivalent mask is BITS_PER_WORD * 2 - 1.
919 Doing this preserves semantics required by SHIFT_COUNT_TRUNCATED.
920 In all other cases, shifts by values outside [0, BITS_PER_UNIT * 2)
923 BINOPTAB, UNSIGNEDP and METHODS are as for expand_binop. This function
924 may not use INTO_INPUT after modifying INTO_TARGET, and similarly for
925 OUTOF_INPUT and OUTOF_TARGET. OUTOF_TARGET can be null if the parent
926 function wants to calculate it itself.
928 Return true if the shift could be successfully synthesized. */
931 expand_doubleword_shift (enum machine_mode op1_mode, optab binoptab,
932 rtx outof_input, rtx into_input, rtx op1,
933 rtx outof_target, rtx into_target,
934 int unsignedp, enum optab_methods methods,
935 unsigned HOST_WIDE_INT shift_mask)
/* NOTE(review): elided listing — braces, some condition lines, failure
   returns and the final `return true` are missing; kept byte-identical.  */
937 rtx superword_op1, tmp, cmp1, cmp2;
938 rtx subword_label, done_label;
939 enum rtx_code cmp_code;
941 /* See if word-mode shifts by BITS_PER_WORD...BITS_PER_WORD * 2 - 1 will
942 fill the result with sign or zero bits as appropriate. If so, the value
943 of OUTOF_TARGET will always be (SHIFT OUTOF_INPUT OP1). Recursively call
944 this routine to calculate INTO_TARGET (which depends on both OUTOF_INPUT
945 and INTO_INPUT), then emit code to set up OUTOF_TARGET.
947 This isn't worthwhile for constant shifts since the optimizers will
948 cope better with in-range shift counts. */
949 if (shift_mask >= BITS_PER_WORD
951 && !CONSTANT_P (op1))
953 if (!expand_doubleword_shift (op1_mode, binoptab,
954 outof_input, into_input, op1,
956 unsignedp, methods, shift_mask))
958 if (!force_expand_binop (word_mode, binoptab, outof_input, op1,
959 outof_target, unsignedp, methods))
964 /* Set CMP_CODE, CMP1 and CMP2 so that the rtx (CMP_CODE CMP1 CMP2)
965 is true when the effective shift value is less than BITS_PER_WORD.
966 Set SUPERWORD_OP1 to the shift count that should be used to shift
967 OUTOF_INPUT into INTO_TARGET when the condition is false. */
968 tmp = immed_double_const (BITS_PER_WORD, 0, op1_mode);
969 if (!CONSTANT_P (op1) && shift_mask == BITS_PER_WORD - 1)
971 /* Set CMP1 to OP1 & BITS_PER_WORD. The result is zero iff OP1
972 is a subword shift count. */
973 cmp1 = simplify_expand_binop (op1_mode, and_optab, op1, tmp,
975 cmp2 = CONST0_RTX (op1_mode);
981 /* Set CMP1 to OP1 - BITS_PER_WORD. */
982 cmp1 = simplify_expand_binop (op1_mode, sub_optab, op1, tmp,
984 cmp2 = CONST0_RTX (op1_mode);
986 superword_op1 = cmp1;
991 /* If we can compute the condition at compile time, pick the
992 appropriate subroutine. */
993 tmp = simplify_relational_operation (cmp_code, SImode, op1_mode, cmp1, cmp2);
994 if (tmp != 0 && GET_CODE (tmp) == CONST_INT)
/* Condition known false at compile time: pure superword shift.  */
996 if (tmp == const0_rtx)
997 return expand_superword_shift (binoptab, outof_input, superword_op1,
998 outof_target, into_target,
/* Otherwise known true: pure subword shift.  */
1001 return expand_subword_shift (op1_mode, binoptab,
1002 outof_input, into_input, op1,
1003 outof_target, into_target,
1004 unsignedp, methods, shift_mask);
1007 #ifdef HAVE_conditional_move
1008 /* Try using conditional moves to generate straight-line code. */
1010 rtx start = get_last_insn ();
1011 if (expand_doubleword_shift_condmove (op1_mode, binoptab,
1012 cmp_code, cmp1, cmp2,
1013 outof_input, into_input,
1015 outof_target, into_target,
1016 unsignedp, methods, shift_mask))
/* Condmove expansion failed: roll back everything it emitted.  */
1018 delete_insns_since (start);
1022 /* As a last resort, use branches to select the correct alternative. */
1023 subword_label = gen_label_rtx ();
1024 done_label = gen_label_rtx ();
1027 do_compare_rtx_and_jump (cmp1, cmp2, cmp_code, false, op1_mode,
1028 0, 0, subword_label);
1031 if (!expand_superword_shift (binoptab, outof_input, superword_op1,
1032 outof_target, into_target,
1033 unsignedp, methods))
1036 emit_jump_insn (gen_jump (done_label));
1038 emit_label (subword_label);
1040 if (!expand_subword_shift (op1_mode, binoptab,
1041 outof_input, into_input, op1,
1042 outof_target, into_target,
1043 unsignedp, methods, shift_mask))
1046 emit_label (done_label);
1050 /* Subroutine of expand_binop. Perform a double word multiplication of
1051 operands OP0 and OP1 both of mode MODE, which is exactly twice as wide
1052 as the target's word_mode. This function return NULL_RTX if anything
1053 goes wrong, in which case it may have already emitted instructions
1054 which need to be deleted.
1056 If we want to multiply two two-word values and have normal and widening
1057 multiplies of single-word values, we can do this with three smaller
1058 multiplications. Note that we do not make a REG_NO_CONFLICT block here
1059 because we are not operating on one word at a time.
1061 The multiplication proceeds as follows:
1062 _______________________
1063 [__op0_high_|__op0_low__]
1064 _______________________
1065 * [__op1_high_|__op1_low__]
1066 _______________________________________________
1067 _______________________
1068 (1) [__op0_low__*__op1_low__]
1069 _______________________
1070 (2a) [__op0_low__*__op1_high_]
1071 _______________________
1072 (2b) [__op0_high_*__op1_low__]
1073 _______________________
1074 (3) [__op0_high_*__op1_high_]
1077 This gives a 4-word result. Since we are only interested in the
1078 lower 2 words, partial result (3) and the upper words of (2a) and
1079 (2b) don't need to be calculated. Hence (2a) and (2b) can be
1080 calculated using non-widening multiplication.
1082 (1), however, needs to be calculated with an unsigned widening
1083 multiplication. If this operation is not directly supported we
1084 try using a signed widening multiplication and adjust the result.
1085 This adjustment works as follows:
1087 If both operands are positive then no adjustment is needed.
1089 If the operands have different signs, for example op0_low < 0 and
1090 op1_low >= 0, the instruction treats the most significant bit of
1091 op0_low as a sign bit instead of a bit with significance
1092 2**(BITS_PER_WORD-1), i.e. the instruction multiplies op1_low
1093 with 2**BITS_PER_WORD - op0_low, and two's complements the
1094 result. Conclusion: We need to add op1_low * 2**BITS_PER_WORD to
1097 Similarly, if both operands are negative, we need to add
1098 (op0_low + op1_low) * 2**BITS_PER_WORD.
1100 We use a trick to adjust quickly. We logically shift op0_low right
1101 (op1_low) BITS_PER_WORD-1 steps to get 0 or 1, and add this to
1102 op0_high (op1_high) before it is used to calculate 2b (2a). If no
1103 logical shift exists, we do an arithmetic right shift and subtract
/* Synthesize a double-word (2*BITS_PER_WORD) multiply MODE = op0 * op1 from
   single-word multiplies, per the scheme described above.  UMULP says whether
   an unsigned widening multiply is available for step (1); when it is false a
   signed widening multiply is used and the sign adjustments below are emitted.
   NOTE(review): this listing has dropped lines (the embedded source line
   numbers jump, e.g. 1104-1106, 1146-1151) — the `static rtx` return type,
   opening braces, the failure checks/returns after each expand_binop call,
   and the final return are presumably among the missing text; confirm against
   the full file.  */
1107 expand_doubleword_mult (enum machine_mode mode, rtx op0, rtx op1, rtx target,
1108 bool umulp, enum optab_methods methods)
1110 int low = (WORDS_BIG_ENDIAN ? 1 : 0);
1111 int high = (WORDS_BIG_ENDIAN ? 0 : 1);
/* WORDM1 is the shift count used by the signed-multiply adjustment; it is
   not needed (NULL_RTX) when an unsigned widening multiply is used.  */
1112 rtx wordm1 = umulp ? NULL_RTX : GEN_INT (BITS_PER_WORD - 1);
1113 rtx product, adjust, product_high, temp;
1115 rtx op0_high = operand_subword_force (op0, high, mode);
1116 rtx op0_low = operand_subword_force (op0, low, mode);
1117 rtx op1_high = operand_subword_force (op1, high, mode);
1118 rtx op1_low = operand_subword_force (op1, low, mode);
1120 /* If we're using an unsigned multiply to directly compute the product
1121 of the low-order words of the operands and perform any required
1122 adjustments of the operands, we begin by trying two more multiplications
1123 and then computing the appropriate sum.
1125 We have checked above that the required addition is provided.
1126 Full-word addition will normally always succeed, especially if
1127 it is provided at all, so we don't worry about its failure. The
1128 multiplication may well fail, however, so we do handle that. */
/* Signed-multiply case: add (op0_low >> (BITS_PER_WORD-1)) into op0_high,
   i.e. add 1 when op0_low is "negative", before computing partial (2b).  */
1132 /* ??? This could be done with emit_store_flag where available. */
1133 temp = expand_binop (word_mode, lshr_optab, op0_low, wordm1,
1134 NULL_RTX, 1, methods);
1136 op0_high = expand_binop (word_mode, add_optab, op0_high, temp,
1137 NULL_RTX, 0, OPTAB_DIRECT);
/* Fallback when no logical shift exists: arithmetic shift yields 0 or -1,
   so subtract instead of add (see the comment block above).  */
1140 temp = expand_binop (word_mode, ashr_optab, op0_low, wordm1,
1141 NULL_RTX, 0, methods);
1144 op0_high = expand_binop (word_mode, sub_optab, op0_high, temp,
1145 NULL_RTX, 0, OPTAB_DIRECT);
/* Partial product (2b): non-widening multiply of the (possibly adjusted)
   high word of op0 with the low word of op1.  */
1152 adjust = expand_binop (word_mode, smul_optab, op0_high, op1_low,
1153 NULL_RTX, 0, OPTAB_DIRECT);
1157 /* OP0_HIGH should now be dead. */
/* Mirror-image adjustment of op1_high before computing partial (2a).  */
1161 /* ??? This could be done with emit_store_flag where available. */
1162 temp = expand_binop (word_mode, lshr_optab, op1_low, wordm1,
1163 NULL_RTX, 1, methods);
1165 op1_high = expand_binop (word_mode, add_optab, op1_high, temp,
1166 NULL_RTX, 0, OPTAB_DIRECT);
1169 temp = expand_binop (word_mode, ashr_optab, op1_low, wordm1,
1170 NULL_RTX, 0, methods);
1173 op1_high = expand_binop (word_mode, sub_optab, op1_high, temp,
1174 NULL_RTX, 0, OPTAB_DIRECT);
/* Partial product (2a).  */
1181 temp = expand_binop (word_mode, smul_optab, op1_high, op0_low,
1182 NULL_RTX, 0, OPTAB_DIRECT);
1186 /* OP1_HIGH should now be dead. */
/* ADJUST = (2a) + (2b); only their low words matter for the final result.  */
1188 adjust = expand_binop (word_mode, add_optab, adjust, temp,
1189 adjust, 0, OPTAB_DIRECT);
1191 if (target && !REG_P (target))
/* Partial product (1): widening multiply of the low words, unsigned when
   available, otherwise the signed form whose result the adjustments above
   correct.  */
1195 product = expand_binop (mode, umul_widen_optab, op0_low, op1_low,
1196 target, 1, OPTAB_DIRECT);
1198 product = expand_binop (mode, smul_widen_optab, op0_low, op1_low,
1199 target, 1, OPTAB_DIRECT);
/* Fold ADJUST into the high word of the widened product.  */
1204 product_high = operand_subword (product, high, 1, mode);
1205 adjust = expand_binop (word_mode, add_optab, product_high, adjust,
1206 REG_P (product_high) ? product_high : adjust,
1208 emit_move_insn (product_high, adjust);
1212 /* Wrapper around expand_binop which takes an rtx code to specify
1213 the operation to perform, not an optab pointer. All other
1214 arguments are the same. */
/* NOTE(review): the listing drops lines here (1215, 1219, 1221-1222) —
   presumably the `rtx` return type, the opening brace, and an assertion
   that the table lookup succeeded; confirm against the full file.  */
1216 expand_simple_binop (enum machine_mode mode, enum rtx_code code, rtx op0,
1217 rtx op1, rtx target, int unsignedp,
1218 enum optab_methods methods)
/* Map the rtx code to its optab via the file-level code_to_optab table
   (declared in the file header), then delegate to expand_binop.  */
1220 optab binop = code_to_optab[(int) code];
1223 return expand_binop (mode, binop, op0, op1, target, unsignedp, methods);
1226 /* Return whether OP0 and OP1 should be swapped when expanding a commutative
1227 binop. Order them according to commutative_operand_precedence and, if
1228 possible, try to put TARGET or a pseudo first. */
/* NOTE(review): lines are missing from this listing (e.g. 1236-1237,
   1239-1240) — the bodies of the first two `if`s, presumably
   `return true;` and `return false;` respectively, and the `else` before
   the final return; confirm against the full file.  */
1230 swap_commutative_operands_with_target (rtx target, rtx op0, rtx op1)
1232 int op0_prec = commutative_operand_precedence (op0);
1233 int op1_prec = commutative_operand_precedence (op1);
/* Higher precedence goes first: swap when op1 outranks op0, keep order when
   op0 outranks op1.  */
1235 if (op0_prec < op1_prec)
1238 if (op0_prec > op1_prec)
1241 /* With equal precedence, both orders are ok, but it is better if the
1242 first operand is TARGET, or if both TARGET and OP0 are pseudos. */
1243 if (target == 0 || REG_P (target))
1244 return (REG_P (op1) && !REG_P (op0)) || target == op1;
/* Otherwise prefer to put an operand equal to TARGET first.  */
1246 return rtx_equal_p (op1, target);
1249 /* Return true if BINOPTAB implements a shift operation. */
/* Dispatch on the optab's rtx code.  NOTE(review): the case labels and
   return statements (lines 1255-1267) are missing from this listing —
   presumably the shift/rotate rtx codes return true and the default
   returns false; confirm against the full file.  */
1252 shift_optab_p (optab binoptab)
1254 switch (binoptab->code)
1268 /* Return true if BINOPTAB implements a commutatative binary operation. */
/* True for any optab whose rtx code is in the commutative-arithmetic class,
   plus the widening/highpart multiply optabs, whose codes do not fall in
   that class even though the operations commute.  */
1271 commutative_optab_p (optab binoptab)
1273 return (GET_RTX_CLASS (binoptab->code) == RTX_COMM_ARITH
1274 || binoptab == smul_widen_optab
1275 || binoptab == umul_widen_optab
1276 || binoptab == smul_highpart_optab
1277 || binoptab == umul_highpart_optab);
1280 /* X is to be used in mode MODE as an operand to BINOPTAB. If we're
1281 optimizing, and if the operand is a constant that costs more than
1282 1 instruction, force the constant into a register and return that
1283 register. Return X otherwise. UNSIGNEDP says whether X is unsigned. */
/* NOTE(review): lines 1288-1290 and 1296-1299 are missing from this
   listing — presumably the opening brace, the start of the guard
   condition (optimize && CONSTANT_P (x) && ...) that line 1291 continues,
   and the final `return x;`; confirm against the full file.  */
1286 avoid_expensive_constant (enum machine_mode mode, optab binoptab,
1287 rtx x, bool unsignedp)
1291 && rtx_cost (x, binoptab->code) > COSTS_N_INSNS (1))
/* Give the constant a concrete mode first so force_reg can load it;
   VOIDmode constants (e.g. CONST_INT) must be converted to MODE.  */
1293 if (GET_MODE (x) != VOIDmode)
1294 x = convert_modes (mode, VOIDmode, x, unsignedp);
1295 x = force_reg (mode, x);
1300 /* Helper function for expand_binop: handle the case where there
1301 is an insn that directly implements the indicated operation.
1302 Returns null if this is not possible. */
/* NOTE(review): this listing has dropped many lines (the embedded line
   numbers jump, e.g. 1305, 1307-1308, 1313-1321, 1330-1335, 1414-1419,
   1421-1423) — among them the `static rtx` return type, the op0/op1
   parameters in the signature, the swap of xop0/xop1 in the commutative
   case, the emit_insn of PAT, and the success/failure returns; confirm
   against the full file.  */
1304 expand_binop_directly (enum machine_mode mode, optab binoptab,
1306 rtx target, int unsignedp, enum optab_methods methods,
/* Look up the insn and the operand modes its predicates want.  */
1309 int icode = (int) optab_handler (binoptab, mode)->insn_code;
1310 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
1311 enum machine_mode mode1 = insn_data[icode].operand[2].mode;
1312 enum machine_mode tmp_mode;
1315 rtx xop0 = op0, xop1 = op1;
1322 temp = gen_reg_rtx (mode);
1324 /* If it is a commutative operator and the modes would match
1325 if we would swap the operands, we can save the conversions. */
1326 commutative_p = commutative_optab_p (binoptab);
1328 && GET_MODE (xop0) != mode0 && GET_MODE (xop1) != mode1
1329 && GET_MODE (xop0) == mode1 && GET_MODE (xop1) == mode1
1336 /* If we are optimizing, force expensive constants into a register. */
1337 xop0 = avoid_expensive_constant (mode0, binoptab, xop0, unsignedp);
/* Shift counts are exempt: their constant form is usually what the insn
   wants, so only the non-shift second operand is forced to a register.  */
1338 if (!shift_optab_p (binoptab))
1339 xop1 = avoid_expensive_constant (mode1, binoptab, xop1, unsignedp);
1341 /* In case the insn wants input operands in modes different from
1342 those of the actual operands, convert the operands. It would
1343 seem that we don't need to convert CONST_INTs, but we do, so
1344 that they're properly zero-extended, sign-extended or truncated
1347 if (GET_MODE (xop0) != mode0 && mode0 != VOIDmode)
1348 xop0 = convert_modes (mode0,
1349 GET_MODE (xop0) != VOIDmode
1354 if (GET_MODE (xop1) != mode1 && mode1 != VOIDmode)
1355 xop1 = convert_modes (mode1,
1356 GET_MODE (xop1) != VOIDmode
1361 /* If operation is commutative,
1362 try to make the first operand a register.
1363 Even better, try to make it the same as the target.
1364 Also try to make the last operand a constant. */
1366 && swap_commutative_operands_with_target (target, xop0, xop1)
1373 /* Now, if insn's predicates don't allow our operands, put them into
1376 if (!insn_data[icode].operand[1].predicate (xop0, mode0)
1377 && mode0 != VOIDmode)
1378 xop0 = copy_to_mode_reg (mode0, xop0);
1380 if (!insn_data[icode].operand[2].predicate (xop1, mode1)
1381 && mode1 != VOIDmode)
1382 xop1 = copy_to_mode_reg (mode1, xop1);
/* Vector pack insns produce a result in a different (narrower-element,
   double-width) mode than their inputs; validate and use that mode.  */
1384 if (binoptab == vec_pack_trunc_optab
1385 || binoptab == vec_pack_usat_optab
1386 || binoptab == vec_pack_ssat_optab
1387 || binoptab == vec_pack_ufix_trunc_optab
1388 || binoptab == vec_pack_sfix_trunc_optab)
1390 /* The mode of the result is different then the mode of the
1392 tmp_mode = insn_data[icode].operand[0].mode;
1393 if (GET_MODE_NUNITS (tmp_mode) != 2 * GET_MODE_NUNITS (mode))
1399 if (!insn_data[icode].operand[0].predicate (temp, tmp_mode))
1400 temp = gen_reg_rtx (tmp_mode);
1402 pat = GEN_FCN (icode) (temp, xop0, xop1);
1405 /* If PAT is composed of more than one insn, try to add an appropriate
1406 REG_EQUAL note to it. If we can't because TEMP conflicts with an
1407 operand, call expand_binop again, this time without a target. */
1408 if (INSN_P (pat) && NEXT_INSN (pat) != NULL_RTX
1409 && ! add_equal_note (pat, temp, binoptab->code, xop0, xop1)
1411 delete_insns_since (last);
1412 return expand_binop (mode, binoptab, op0, op1, NULL_RTX,
1413 unsignedp, methods);
/* Pattern generation failed: discard everything emitted since LAST.  */
1420 delete_insns_since (last);
1424 /* Generate code to perform an operation specified by BINOPTAB
1425 on operands OP0 and OP1, with result having machine-mode MODE.
1427 UNSIGNEDP is for the case where we have to widen the operands
1428 to perform the operation. It says to use zero-extension.
1430 If TARGET is nonzero, the value
1431 is generated there, if it is convenient to do so.
1432 In all cases an rtx is returned for the locus of the value;
1433 this may or may not be TARGET. */
/* NOTE(review): this listing is missing many lines throughout (the
   embedded line numbers jump repeatedly) — braces, several success
   returns, and connective statements are not visible.  The visible
   strategy ladder is: (1) direct insn; (2) reversed rotate; (3) widening
   multiply; (4) open-code in a wider mode; (5) word-at-a-time logical
   ops; (6) synthesized double-word shifts/rotates; (7) carry-propagating
   multi-word add/sub; (8) synthesized double-word multiply; (9) library
   call; (10) recurse in a wider mode.  Confirm details against the full
   file.  */
1436 expand_binop (enum machine_mode mode, optab binoptab, rtx op0, rtx op1,
1437 rtx target, int unsignedp, enum optab_methods methods)
/* METHODS for recursive attempts: library strategies degrade to
   OPTAB_WIDEN so sub-expansions do not emit their own libcalls.  */
1439 enum optab_methods next_methods
1440 = (methods == OPTAB_LIB || methods == OPTAB_LIB_WIDEN
1441 ? OPTAB_WIDEN : methods);
1442 enum mode_class class;
1443 enum machine_mode wider_mode;
1445 rtx entry_last = get_last_insn ();
1448 class = GET_MODE_CLASS (mode);
1450 /* If subtracting an integer constant, convert this into an addition of
1451 the negated constant. */
1453 if (binoptab == sub_optab && GET_CODE (op1) == CONST_INT)
1455 op1 = negate_rtx (mode, op1);
1456 binoptab = add_optab;
1459 /* Record where to delete back to if we backtrack. */
1460 last = get_last_insn ();
1462 /* If we can do it with a three-operand insn, do so. */
1464 if (methods != OPTAB_MUST_WIDEN
1465 && optab_handler (binoptab, mode)->insn_code != CODE_FOR_nothing)
1467 temp = expand_binop_directly (mode, binoptab, op0, op1, target,
1468 unsignedp, methods, last);
1473 /* If we were trying to rotate, and that didn't work, try rotating
1474 the other direction before falling back to shifts and bitwise-or. */
1475 if (((binoptab == rotl_optab
1476 && optab_handler (rotr_optab, mode)->insn_code != CODE_FOR_nothing)
1477 || (binoptab == rotr_optab
1478 && optab_handler (rotl_optab, mode)->insn_code != CODE_FOR_nothing))
1479 && class == MODE_INT)
1481 optab otheroptab = (binoptab == rotl_optab ? rotr_optab : rotl_optab);
1483 unsigned int bits = GET_MODE_BITSIZE (mode);
/* Complement the rotate count: constant counts are folded directly;
   otherwise negate when the target truncates shift counts mod BITS,
   else compute BITS - op1 explicitly.  */
1485 if (GET_CODE (op1) == CONST_INT)
1486 newop1 = GEN_INT (bits - INTVAL (op1));
1487 else if (targetm.shift_truncation_mask (mode) == bits - 1)
1488 newop1 = negate_rtx (mode, op1);
1490 newop1 = expand_binop (mode, sub_optab,
1491 GEN_INT (bits), op1,
1492 NULL_RTX, unsignedp, OPTAB_DIRECT);
1494 temp = expand_binop_directly (mode, otheroptab, op0, newop1,
1495 target, unsignedp, methods, last);
1500 /* If this is a multiply, see if we can do a widening operation that
1501 takes operands of this mode and makes a wider mode. */
1503 if (binoptab == smul_optab
1504 && GET_MODE_WIDER_MODE (mode) != VOIDmode
1505 && ((optab_handler ((unsignedp ? umul_widen_optab : smul_widen_optab),
1506 GET_MODE_WIDER_MODE (mode))->insn_code)
1507 != CODE_FOR_nothing))
1509 temp = expand_binop (GET_MODE_WIDER_MODE (mode),
1510 unsignedp ? umul_widen_optab : smul_widen_optab,
1511 op0, op1, NULL_RTX, unsignedp, OPTAB_DIRECT);
/* The widened product's low part is the MODE result; take a lowpart
   when truncation is a no-op, otherwise convert.  */
1515 if (GET_MODE_CLASS (mode) == MODE_INT
1516 && TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode),
1517 GET_MODE_BITSIZE (GET_MODE (temp))))
1518 return gen_lowpart (mode, temp);
1520 return convert_to_mode (mode, temp, unsignedp);
1524 /* Look for a wider mode of the same class for which we think we
1525 can open-code the operation. Check for a widening multiply at the
1526 wider mode as well. */
1528 if (CLASS_HAS_WIDER_MODES_P (class)
1529 && methods != OPTAB_DIRECT && methods != OPTAB_LIB)
1530 for (wider_mode = GET_MODE_WIDER_MODE (mode);
1531 wider_mode != VOIDmode;
1532 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
1534 if (optab_handler (binoptab, wider_mode)->insn_code != CODE_FOR_nothing
1535 || (binoptab == smul_optab
1536 && GET_MODE_WIDER_MODE (wider_mode) != VOIDmode
1537 && ((optab_handler ((unsignedp ? umul_widen_optab
1538 : smul_widen_optab),
1539 GET_MODE_WIDER_MODE (wider_mode))->insn_code)
1540 != CODE_FOR_nothing)))
1542 rtx xop0 = op0, xop1 = op1;
1545 /* For certain integer operations, we need not actually extend
1546 the narrow operands, as long as we will truncate
1547 the results to the same narrowness. */
1549 if ((binoptab == ior_optab || binoptab == and_optab
1550 || binoptab == xor_optab
1551 || binoptab == add_optab || binoptab == sub_optab
1552 || binoptab == smul_optab || binoptab == ashl_optab)
1553 && class == MODE_INT)
1556 xop0 = avoid_expensive_constant (mode, binoptab,
1558 if (binoptab != ashl_optab)
1559 xop1 = avoid_expensive_constant (mode, binoptab,
1563 xop0 = widen_operand (xop0, wider_mode, mode, unsignedp, no_extend);
1565 /* The second operand of a shift must always be extended. */
1566 xop1 = widen_operand (xop1, wider_mode, mode, unsignedp,
1567 no_extend && binoptab != ashl_optab);
1569 temp = expand_binop (wider_mode, binoptab, xop0, xop1, NULL_RTX,
1570 unsignedp, OPTAB_DIRECT);
/* Non-integer (or non-noop-truncation) results must be moved back to
   MODE with an explicit conversion; integer results can be lowparted.  */
1573 if (class != MODE_INT
1574 || !TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode),
1575 GET_MODE_BITSIZE (wider_mode)))
1578 target = gen_reg_rtx (mode);
1579 convert_move (target, temp, 0);
1583 return gen_lowpart (mode, temp);
1586 delete_insns_since (last);
1590 /* If operation is commutative,
1591 try to make the first operand a register.
1592 Even better, try to make it the same as the target.
1593 Also try to make the last operand a constant. */
1594 if (commutative_optab_p (binoptab)
1595 && swap_commutative_operands_with_target (target, op0, op1)
1602 /* These can be done a word at a time. */
1603 if ((binoptab == and_optab || binoptab == ior_optab || binoptab == xor_optab)
1604 && class == MODE_INT
1605 && GET_MODE_SIZE (mode) > UNITS_PER_WORD
1606 && optab_handler (binoptab, word_mode)->insn_code != CODE_FOR_nothing)
1612 /* If TARGET is the same as one of the operands, the REG_EQUAL note
1613 won't be accurate, so use a new target. */
1614 if (target == 0 || target == op0 || target == op1)
1615 target = gen_reg_rtx (mode);
1619 /* Do the actual arithmetic. */
1620 for (i = 0; i < GET_MODE_BITSIZE (mode) / BITS_PER_WORD; i++)
1622 rtx target_piece = operand_subword (target, i, 1, mode);
1623 rtx x = expand_binop (word_mode, binoptab,
1624 operand_subword_force (op0, i, mode),
1625 operand_subword_force (op1, i, mode),
1626 target_piece, unsignedp, next_methods);
1631 if (target_piece != x)
1632 emit_move_insn (target_piece, x);
1635 insns = get_insns ();
/* Success only if every word was expanded (the loop did not abort).  */
1638 if (i == GET_MODE_BITSIZE (mode) / BITS_PER_WORD)
1640 if (binoptab->code != UNKNOWN)
1642 = gen_rtx_fmt_ee (binoptab->code, mode,
1643 copy_rtx (op0), copy_rtx (op1));
1647 emit_no_conflict_block (insns, target, op0, op1, equiv_value);
1652 /* Synthesize double word shifts from single word shifts. */
1653 if ((binoptab == lshr_optab || binoptab == ashl_optab
1654 || binoptab == ashr_optab)
1655 && class == MODE_INT
1656 && (GET_CODE (op1) == CONST_INT || !optimize_size)
1657 && GET_MODE_SIZE (mode) == 2 * UNITS_PER_WORD
1658 && optab_handler (binoptab, word_mode)->insn_code != CODE_FOR_nothing
1659 && optab_handler (ashl_optab, word_mode)->insn_code != CODE_FOR_nothing
1660 && optab_handler (lshr_optab, word_mode)->insn_code != CODE_FOR_nothing)
1662 unsigned HOST_WIDE_INT shift_mask, double_shift_mask;
1663 enum machine_mode op1_mode;
1665 double_shift_mask = targetm.shift_truncation_mask (mode);
1666 shift_mask = targetm.shift_truncation_mask (word_mode);
1667 op1_mode = GET_MODE (op1) != VOIDmode ? GET_MODE (op1) : word_mode;
1669 /* Apply the truncation to constant shifts. */
1670 if (double_shift_mask > 0 && GET_CODE (op1) == CONST_INT)
1671 op1 = GEN_INT (INTVAL (op1) & double_shift_mask);
1673 if (op1 == CONST0_RTX (op1_mode)
1676 /* Make sure that this is a combination that expand_doubleword_shift
1677 can handle. See the comments there for details. */
1678 if (double_shift_mask == 0
1679 || (shift_mask == BITS_PER_WORD - 1
1680 && double_shift_mask == BITS_PER_WORD * 2 - 1))
1682 rtx insns, equiv_value;
1683 rtx into_target, outof_target;
1684 rtx into_input, outof_input;
1685 int left_shift, outof_word;
1687 /* If TARGET is the same as one of the operands, the REG_EQUAL note
1688 won't be accurate, so use a new target. */
1689 if (target == 0 || target == op0 || target == op1)
1690 target = gen_reg_rtx (mode);
1694 /* OUTOF_* is the word we are shifting bits away from, and
1695 INTO_* is the word that we are shifting bits towards, thus
1696 they differ depending on the direction of the shift and
1697 WORDS_BIG_ENDIAN. */
1699 left_shift = binoptab == ashl_optab;
1700 outof_word = left_shift ^ ! WORDS_BIG_ENDIAN;
1702 outof_target = operand_subword (target, outof_word, 1, mode);
1703 into_target = operand_subword (target, 1 - outof_word, 1, mode);
1705 outof_input = operand_subword_force (op0, outof_word, mode);
1706 into_input = operand_subword_force (op0, 1 - outof_word, mode);
1708 if (expand_doubleword_shift (op1_mode, binoptab,
1709 outof_input, into_input, op1,
1710 outof_target, into_target,
1711 unsignedp, next_methods, shift_mask))
1713 insns = get_insns ();
1716 equiv_value = gen_rtx_fmt_ee (binoptab->code, mode, op0, op1);
1717 emit_no_conflict_block (insns, target, op0, op1, equiv_value);
1724 /* Synthesize double word rotates from single word shifts. */
1725 if ((binoptab == rotl_optab || binoptab == rotr_optab)
1726 && class == MODE_INT
1727 && GET_CODE (op1) == CONST_INT
1728 && GET_MODE_SIZE (mode) == 2 * UNITS_PER_WORD
1729 && optab_handler (ashl_optab, word_mode)->insn_code != CODE_FOR_nothing
1730 && optab_handler (lshr_optab, word_mode)->insn_code != CODE_FOR_nothing)
1733 rtx into_target, outof_target;
1734 rtx into_input, outof_input;
1736 int shift_count, left_shift, outof_word;
1738 /* If TARGET is the same as one of the operands, the REG_EQUAL note
1739 won't be accurate, so use a new target. Do this also if target is not
1740 a REG, first because having a register instead may open optimization
1741 opportunities, and second because if target and op0 happen to be MEMs
1742 designating the same location, we would risk clobbering it too early
1743 in the code sequence we generate below. */
1744 if (target == 0 || target == op0 || target == op1 || ! REG_P (target))
1745 target = gen_reg_rtx (mode);
1749 shift_count = INTVAL (op1);
1751 /* OUTOF_* is the word we are shifting bits away from, and
1752 INTO_* is the word that we are shifting bits towards, thus
1753 they differ depending on the direction of the shift and
1754 WORDS_BIG_ENDIAN. */
1756 left_shift = (binoptab == rotl_optab);
1757 outof_word = left_shift ^ ! WORDS_BIG_ENDIAN;
1759 outof_target = operand_subword (target, outof_word, 1, mode);
1760 into_target = operand_subword (target, 1 - outof_word, 1, mode);
1762 outof_input = operand_subword_force (op0, outof_word, mode);
1763 into_input = operand_subword_force (op0, 1 - outof_word, mode);
1765 if (shift_count == BITS_PER_WORD)
1767 /* This is just a word swap. */
1768 emit_move_insn (outof_target, into_input);
1769 emit_move_insn (into_target, outof_input);
1774 rtx into_temp1, into_temp2, outof_temp1, outof_temp2;
1775 rtx first_shift_count, second_shift_count;
1776 optab reverse_unsigned_shift, unsigned_shift;
/* Pick the shift directions so each result word is the IOR of two
   single-word shifts; which optab plays which role flips when the
   rotate count crosses BITS_PER_WORD.  */
1778 reverse_unsigned_shift = (left_shift ^ (shift_count < BITS_PER_WORD)
1779 ? lshr_optab : ashl_optab);
1781 unsigned_shift = (left_shift ^ (shift_count < BITS_PER_WORD)
1782 ? ashl_optab : lshr_optab);
1784 if (shift_count > BITS_PER_WORD)
1786 first_shift_count = GEN_INT (shift_count - BITS_PER_WORD);
1787 second_shift_count = GEN_INT (2 * BITS_PER_WORD - shift_count);
1791 first_shift_count = GEN_INT (BITS_PER_WORD - shift_count);
1792 second_shift_count = GEN_INT (shift_count);
1795 into_temp1 = expand_binop (word_mode, unsigned_shift,
1796 outof_input, first_shift_count,
1797 NULL_RTX, unsignedp, next_methods);
1798 into_temp2 = expand_binop (word_mode, reverse_unsigned_shift,
1799 into_input, second_shift_count,
1800 NULL_RTX, unsignedp, next_methods);
1802 if (into_temp1 != 0 && into_temp2 != 0)
1803 inter = expand_binop (word_mode, ior_optab, into_temp1, into_temp2,
1804 into_target, unsignedp, next_methods);
1808 if (inter != 0 && inter != into_target)
1809 emit_move_insn (into_target, inter);
1811 outof_temp1 = expand_binop (word_mode, unsigned_shift,
1812 into_input, first_shift_count,
1813 NULL_RTX, unsignedp, next_methods);
1814 outof_temp2 = expand_binop (word_mode, reverse_unsigned_shift,
1815 outof_input, second_shift_count,
1816 NULL_RTX, unsignedp, next_methods);
1818 if (inter != 0 && outof_temp1 != 0 && outof_temp2 != 0)
1819 inter = expand_binop (word_mode, ior_optab,
1820 outof_temp1, outof_temp2,
1821 outof_target, unsignedp, next_methods);
1823 if (inter != 0 && inter != outof_target)
1824 emit_move_insn (outof_target, inter);
1827 insns = get_insns ();
1832 /* One may be tempted to wrap the insns in a REG_NO_CONFLICT
1833 block to help the register allocator a bit. But a multi-word
1834 rotate will need all the input bits when setting the output
1835 bits, so there clearly is a conflict between the input and
1836 output registers. So we can't use a no-conflict block here. */
1842 /* These can be done a word at a time by propagating carries. */
1843 if ((binoptab == add_optab || binoptab == sub_optab)
1844 && class == MODE_INT
1845 && GET_MODE_SIZE (mode) >= 2 * UNITS_PER_WORD
1846 && optab_handler (binoptab, word_mode)->insn_code != CODE_FOR_nothing)
1849 optab otheroptab = binoptab == add_optab ? sub_optab : add_optab;
1850 const unsigned int nwords = GET_MODE_BITSIZE (mode) / BITS_PER_WORD;
1851 rtx carry_in = NULL_RTX, carry_out = NULL_RTX;
1852 rtx xop0, xop1, xtarget;
1854 /* We can handle either a 1 or -1 value for the carry. If STORE_FLAG
1855 value is one of those, use it. Otherwise, use 1 since it is the
1856 one easiest to get. */
1857 #if STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1
1858 int normalizep = STORE_FLAG_VALUE;
1863 /* Prepare the operands. */
1864 xop0 = force_reg (mode, op0);
1865 xop1 = force_reg (mode, op1);
1867 xtarget = gen_reg_rtx (mode);
1869 if (target == 0 || !REG_P (target)
1872 /* Indicate for flow that the entire target reg is being set. */
1874 emit_insn (gen_rtx_CLOBBER (VOIDmode, xtarget));
1876 /* Do the actual arithmetic. */
1877 for (i = 0; i < nwords; i++)
/* Process words least-significant first so carries propagate upward;
   INDEX maps loop order onto the endianness-dependent word layout.  */
1879 int index = (WORDS_BIG_ENDIAN ? nwords - i - 1 : i);
1880 rtx target_piece = operand_subword (xtarget, index, 1, mode);
1881 rtx op0_piece = operand_subword_force (xop0, index, mode);
1882 rtx op1_piece = operand_subword_force (xop1, index, mode);
1885 /* Main add/subtract of the input operands. */
1886 x = expand_binop (word_mode, binoptab,
1887 op0_piece, op1_piece,
1888 target_piece, unsignedp, next_methods);
1894 /* Store carry from main add/subtract. */
1895 carry_out = gen_reg_rtx (word_mode);
1896 carry_out = emit_store_flag_force (carry_out,
1897 (binoptab == add_optab
1900 word_mode, 1, normalizep);
1907 /* Add/subtract previous carry to main result. */
1908 newx = expand_binop (word_mode,
1909 normalizep == 1 ? binoptab : otheroptab,
1911 NULL_RTX, 1, next_methods);
1915 /* Get out carry from adding/subtracting carry in. */
1916 rtx carry_tmp = gen_reg_rtx (word_mode);
1917 carry_tmp = emit_store_flag_force (carry_tmp,
1918 (binoptab == add_optab
1921 word_mode, 1, normalizep);
1923 /* Logical-ior the two poss. carry together. */
1924 carry_out = expand_binop (word_mode, ior_optab,
1925 carry_out, carry_tmp,
1926 carry_out, 0, next_methods);
1930 emit_move_insn (target_piece, newx);
1934 if (x != target_piece)
1935 emit_move_insn (target_piece, x);
1938 carry_in = carry_out;
1941 if (i == GET_MODE_BITSIZE (mode) / (unsigned) BITS_PER_WORD)
1943 if (optab_handler (mov_optab, mode)->insn_code != CODE_FOR_nothing
1944 || ! rtx_equal_p (target, xtarget))
1946 rtx temp = emit_move_insn (target, xtarget);
1948 set_unique_reg_note (temp,
1950 gen_rtx_fmt_ee (binoptab->code, mode,
1961 delete_insns_since (last);
1964 /* Attempt to synthesize double word multiplies using a sequence of word
1965 mode multiplications. We first attempt to generate a sequence using a
1966 more efficient unsigned widening multiply, and if that fails we then
1967 try using a signed widening multiply. */
1969 if (binoptab == smul_optab
1970 && class == MODE_INT
1971 && GET_MODE_SIZE (mode) == 2 * UNITS_PER_WORD
1972 && optab_handler (smul_optab, word_mode)->insn_code != CODE_FOR_nothing
1973 && optab_handler (add_optab, word_mode)->insn_code != CODE_FOR_nothing)
1975 rtx product = NULL_RTX;
1977 if (optab_handler (umul_widen_optab, mode)->insn_code
1978 != CODE_FOR_nothing)
1980 product = expand_doubleword_mult (mode, op0, op1, target,
1983 delete_insns_since (last);
1986 if (product == NULL_RTX
1987 && optab_handler (smul_widen_optab, mode)->insn_code
1988 != CODE_FOR_nothing)
1990 product = expand_doubleword_mult (mode, op0, op1, target,
1993 delete_insns_since (last);
1996 if (product != NULL_RTX)
1998 if (optab_handler (mov_optab, mode)->insn_code != CODE_FOR_nothing)
2000 temp = emit_move_insn (target ? target : product, product);
2001 set_unique_reg_note (temp,
2003 gen_rtx_fmt_ee (MULT, mode,
2011 /* It can't be open-coded in this mode.
2012 Use a library call if one is available and caller says that's ok. */
2014 if (optab_handler (binoptab, mode)->libfunc
2015 && (methods == OPTAB_LIB || methods == OPTAB_LIB_WIDEN))
2019 enum machine_mode op1_mode = mode;
2024 if (shift_optab_p (binoptab))
/* libgcc shift helpers take the count in a target-specified mode.  */
2026 op1_mode = targetm.libgcc_shift_count_mode ();
2027 /* Specify unsigned here,
2028 since negative shift counts are meaningless. */
2029 op1x = convert_to_mode (op1_mode, op1, 1);
2032 if (GET_MODE (op0) != VOIDmode
2033 && GET_MODE (op0) != mode)
2034 op0 = convert_to_mode (mode, op0, unsignedp);
2036 /* Pass 1 for NO_QUEUE so we don't lose any increments
2037 if the libcall is cse'd or moved. */
2038 value = emit_library_call_value (optab_handler (binoptab, mode)->libfunc,
2039 NULL_RTX, LCT_CONST, mode, 2,
2040 op0, mode, op1x, op1_mode);
2042 insns = get_insns ();
2045 target = gen_reg_rtx (mode);
2046 emit_libcall_block (insns, target, value,
2047 gen_rtx_fmt_ee (binoptab->code, mode, op0, op1));
2052 delete_insns_since (last);
2054 /* It can't be done in this mode. Can we do it in a wider mode? */
2056 if (! (methods == OPTAB_WIDEN || methods == OPTAB_LIB_WIDEN
2057 || methods == OPTAB_MUST_WIDEN))
2059 /* Caller says, don't even try. */
2060 delete_insns_since (entry_last);
2064 /* Compute the value of METHODS to pass to recursive calls.
2065 Don't allow widening to be tried recursively. */
2067 methods = (methods == OPTAB_LIB_WIDEN ? OPTAB_LIB : OPTAB_DIRECT);
2069 /* Look for a wider mode of the same class for which it appears we can do
2072 if (CLASS_HAS_WIDER_MODES_P (class))
2074 for (wider_mode = GET_MODE_WIDER_MODE (mode);
2075 wider_mode != VOIDmode;
2076 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
2078 if ((optab_handler (binoptab, wider_mode)->insn_code
2079 != CODE_FOR_nothing)
2080 || (methods == OPTAB_LIB
2081 && optab_handler (binoptab, wider_mode)->libfunc))
2083 rtx xop0 = op0, xop1 = op1;
2086 /* For certain integer operations, we need not actually extend
2087 the narrow operands, as long as we will truncate
2088 the results to the same narrowness. */
2090 if ((binoptab == ior_optab || binoptab == and_optab
2091 || binoptab == xor_optab
2092 || binoptab == add_optab || binoptab == sub_optab
2093 || binoptab == smul_optab || binoptab == ashl_optab)
2094 && class == MODE_INT)
2097 xop0 = widen_operand (xop0, wider_mode, mode,
2098 unsignedp, no_extend);
2100 /* The second operand of a shift must always be extended. */
2101 xop1 = widen_operand (xop1, wider_mode, mode, unsignedp,
2102 no_extend && binoptab != ashl_optab);
2104 temp = expand_binop (wider_mode, binoptab, xop0, xop1, NULL_RTX,
2105 unsignedp, methods);
2108 if (class != MODE_INT
2109 || !TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode),
2110 GET_MODE_BITSIZE (wider_mode)))
2113 target = gen_reg_rtx (mode);
2114 convert_move (target, temp, 0);
2118 return gen_lowpart (mode, temp);
2121 delete_insns_since (last);
/* All strategies failed: discard everything emitted by this call.  */
2126 delete_insns_since (entry_last);
2130 /* Expand a binary operator which has both signed and unsigned forms.
2131 UOPTAB is the optab for unsigned operations, and SOPTAB is for
2134 If we widen unsigned operands, we may use a signed wider operation instead
2135 of an unsigned wider operation, since the result would be the same. */
/* NOTE(review): the listing drops lines here (2136-2137, 2141-2142,
   2145, 2150-2151, etc.) — the `rtx` return type, braces, and the
   `return temp;` statements after each early-exit condition are
   presumably among them; confirm against the full file.  The strategy
   ladder is: direct, widen (signed first), libcall, widen+libcall.  */
2138 sign_expand_binop (enum machine_mode mode, optab uoptab, optab soptab,
2139 rtx op0, rtx op1, rtx target, int unsignedp,
2140 enum optab_methods methods)
2143 optab direct_optab = unsignedp ? uoptab : soptab;
2144 struct optab wide_soptab;
2146 /* Do it without widening, if possible. */
2147 temp = expand_binop (mode, direct_optab, op0, op1, target,
2148 unsignedp, OPTAB_DIRECT);
2149 if (temp || methods == OPTAB_DIRECT)
2152 /* Try widening to a signed int. Make a fake signed optab that
2153 hides any signed insn for direct use. */
/* Copy SOPTAB by value and knock out its handler/libfunc for MODE so
   expand_binop cannot re-try the direct case we just failed.  */
2154 wide_soptab = *soptab;
2155 optab_handler (&wide_soptab, mode)->insn_code = CODE_FOR_nothing;
2156 optab_handler (&wide_soptab, mode)->libfunc = 0;
2158 temp = expand_binop (mode, &wide_soptab, op0, op1, target,
2159 unsignedp, OPTAB_WIDEN);
2161 /* For unsigned operands, try widening to an unsigned int. */
2162 if (temp == 0 && unsignedp)
2163 temp = expand_binop (mode, uoptab, op0, op1, target,
2164 unsignedp, OPTAB_WIDEN);
2165 if (temp || methods == OPTAB_WIDEN)
2168 /* Use the right width lib call if that exists. */
2169 temp = expand_binop (mode, direct_optab, op0, op1, target, unsignedp, OPTAB_LIB);
2170 if (temp || methods == OPTAB_LIB)
2173 /* Must widen and use a lib call, use either signed or unsigned. */
2174 temp = expand_binop (mode, &wide_soptab, op0, op1, target,
2175 unsignedp, methods);
2179 return expand_binop (mode, uoptab, op0, op1, target,
2180 unsignedp, methods);
2184 /* Generate code to perform an operation specified by UNOPPTAB
2185 on operand OP0, with two results to TARG0 and TARG1.
2186 We assume that the order of the operands for the instruction
2187 is TARG0, TARG1, OP0.
2189 Either TARG0 or TARG1 may be zero, but what that means is that
2190 the result is not actually wanted. We will generate it into
2191 a dummy pseudo-reg and discard it. They may not both be zero.
2193 Returns 1 if this operation can be performed; 0 if not. */
/* NOTE(review): lines missing from this listing (2194-2195, 2197-2198,
   2206-2207, 2209, 2236-2241, etc.) — presumably the `int` return type,
   the `unsignedp` parameter, `gcc_assert (targ0 || targ1);`, the
   "if (!targ0/!targ1)" guards before the gen_reg_rtx calls, the
   emit_insn of PAT with `return 1;`, and the final `return 0;`; confirm
   against the full file.  */
2196 expand_twoval_unop (optab unoptab, rtx op0, rtx targ0, rtx targ1,
2199 enum machine_mode mode = GET_MODE (targ0 ? targ0 : targ1);
2200 enum mode_class class;
2201 enum machine_mode wider_mode;
2202 rtx entry_last = get_last_insn ();
2205 class = GET_MODE_CLASS (mode);
/* Unwanted results still need somewhere to go: dummy pseudos.  */
2208 targ0 = gen_reg_rtx (mode);
2210 targ1 = gen_reg_rtx (mode);
2212 /* Record where to go back to if we fail. */
2213 last = get_last_insn ();
2215 if (optab_handler (unoptab, mode)->insn_code != CODE_FOR_nothing)
2217 int icode = (int) optab_handler (unoptab, mode)->insn_code;
/* The single input is operand 2; operands 0 and 1 are the results.  */
2218 enum machine_mode mode0 = insn_data[icode].operand[2].mode;
2222 if (GET_MODE (xop0) != VOIDmode
2223 && GET_MODE (xop0) != mode0)
2224 xop0 = convert_to_mode (mode0, xop0, unsignedp);
2226 /* Now, if insn doesn't accept these operands, put them into pseudos. */
2227 if (!insn_data[icode].operand[2].predicate (xop0, mode0))
2228 xop0 = copy_to_mode_reg (mode0, xop0);
2230 /* We could handle this, but we should always be called with a pseudo
2231 for our targets and all insns should take them as outputs. */
2232 gcc_assert (insn_data[icode].operand[0].predicate (targ0, mode));
2233 gcc_assert (insn_data[icode].operand[1].predicate (targ1, mode));
2235 pat = GEN_FCN (icode) (targ0, targ1, xop0);
2242 delete_insns_since (last);
2245 /* It can't be done in this mode. Can we do it in a wider mode? */
2247 if (CLASS_HAS_WIDER_MODES_P (class))
2249 for (wider_mode = GET_MODE_WIDER_MODE (mode);
2250 wider_mode != VOIDmode;
2251 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
2253 if (optab_handler (unoptab, wider_mode)->insn_code
2254 != CODE_FOR_nothing)
/* Recurse at the wider mode, then narrow both results back.  */
2256 rtx t0 = gen_reg_rtx (wider_mode);
2257 rtx t1 = gen_reg_rtx (wider_mode);
2258 rtx cop0 = convert_modes (wider_mode, mode, op0, unsignedp);
2260 if (expand_twoval_unop (unoptab, cop0, t0, t1, unsignedp))
2262 convert_move (targ0, t0, unsignedp);
2263 convert_move (targ1, t1, unsignedp);
2267 delete_insns_since (last);
2272 delete_insns_since (entry_last);
2276 /* Generate code to perform an operation specified by BINOPTAB
2277 on operands OP0 and OP1, with two results to TARG0 and TARG1.
2278 We assume that the order of the operands for the instruction
2279 is TARG0, OP0, OP1, TARG1, which would fit a pattern like
2280 [(set TARG0 (operate OP0 OP1)) (set TARG1 (operate ...))].
2282 Either TARG0 or TARG1 may be zero, but what that means is that
2283 the result is not actually wanted. We will generate it into
2284 a dummy pseudo-reg and discard it. They may not both be zero.
2286 Returns 1 if this operation can be performed; 0 if not. */
2289 expand_twoval_binop (optab binoptab, rtx op0, rtx op1, rtx targ0, rtx targ1,
2292 enum machine_mode mode = GET_MODE (targ0 ? targ0 : targ1);
2293 enum mode_class class;
2294 enum machine_mode wider_mode;
/* ENTRY_LAST marks the insn stream on entry; total failure deletes back
   to here.  */
2295 rtx entry_last = get_last_insn ();
2298 class = GET_MODE_CLASS (mode);
2301 targ0 = gen_reg_rtx (mode);
2303 targ1 = gen_reg_rtx (mode);
2305 /* Record where to go back to if we fail. */
2306 last = get_last_insn ();
2308 if (optab_handler (binoptab, mode)->insn_code != CODE_FOR_nothing)
2310 int icode = (int) optab_handler (binoptab, mode)->insn_code;
/* Operand layout: 0 = TARG0, 1 = OP0, 2 = OP1, 3 = TARG1.  */
2311 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
2312 enum machine_mode mode1 = insn_data[icode].operand[2].mode;
2314 rtx xop0 = op0, xop1 = op1;
2316 /* If we are optimizing, force expensive constants into a register. */
2317 xop0 = avoid_expensive_constant (mode0, binoptab, xop0, unsignedp);
2318 xop1 = avoid_expensive_constant (mode1, binoptab, xop1, unsignedp);
2320 /* In case the insn wants input operands in modes different from
2321 those of the actual operands, convert the operands. It would
2322 seem that we don't need to convert CONST_INTs, but we do, so
2323 that they're properly zero-extended, sign-extended or truncated
2326 if (GET_MODE (op0) != mode0 && mode0 != VOIDmode)
2327 xop0 = convert_modes (mode0,
2328 GET_MODE (op0) != VOIDmode
2333 if (GET_MODE (op1) != mode1 && mode1 != VOIDmode)
2334 xop1 = convert_modes (mode1,
2335 GET_MODE (op1) != VOIDmode
2340 /* Now, if insn doesn't accept these operands, put them into pseudos. */
2341 if (!insn_data[icode].operand[1].predicate (xop0, mode0))
2342 xop0 = copy_to_mode_reg (mode0, xop0);
2344 if (!insn_data[icode].operand[2].predicate (xop1, mode1))
2345 xop1 = copy_to_mode_reg (mode1, xop1);
2347 /* We could handle this, but we should always be called with a pseudo
2348 for our targets and all insns should take them as outputs. */
2349 gcc_assert (insn_data[icode].operand[0].predicate (targ0, mode));
2350 gcc_assert (insn_data[icode].operand[3].predicate (targ1, mode));
2352 pat = GEN_FCN (icode) (targ0, xop0, xop1, targ1);
2359 delete_insns_since (last);
2362 /* It can't be done in this mode. Can we do it in a wider mode? */
2364 if (CLASS_HAS_WIDER_MODES_P (class))
2366 for (wider_mode = GET_MODE_WIDER_MODE (mode);
2367 wider_mode != VOIDmode;
2368 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
2370 if (optab_handler (binoptab, wider_mode)->insn_code
2371 != CODE_FOR_nothing)
/* Recurse in the wider mode, then narrow both results back.  */
2373 rtx t0 = gen_reg_rtx (wider_mode);
2374 rtx t1 = gen_reg_rtx (wider_mode);
2375 rtx cop0 = convert_modes (wider_mode, mode, op0, unsignedp);
2376 rtx cop1 = convert_modes (wider_mode, mode, op1, unsignedp);
2378 if (expand_twoval_binop (binoptab, cop0, cop1,
2381 convert_move (targ0, t0, unsignedp);
2382 convert_move (targ1, t1, unsignedp);
2386 delete_insns_since (last);
2391 delete_insns_since (entry_last);
2395 /* Expand the two-valued library call indicated by BINOPTAB, but
2396 preserve only one of the values. If TARG0 is non-NULL, the first
2397 value is placed into TARG0; otherwise the second value is placed
2398 into TARG1. Exactly one of TARG0 and TARG1 must be non-NULL. The
2399 value stored into TARG0 or TARG1 is equivalent to (CODE OP0 OP1).
2400 This routine assumes that the value returned by the library call is
2401 as if the return value was of an integral mode twice as wide as the
2402 mode of OP0. Returns 1 if the call was successful. */
2405 expand_twoval_binop_libfunc (optab binoptab, rtx op0, rtx op1,
2406 rtx targ0, rtx targ1, enum rtx_code code)
2408 enum machine_mode mode;
2409 enum machine_mode libval_mode;
2413 /* Exactly one of TARG0 or TARG1 should be non-NULL. */
2414 gcc_assert (!targ0 != !targ1);
2416 mode = GET_MODE (op0);
2417 if (!optab_handler (binoptab, mode)->libfunc)
2420 /* The value returned by the library function will have twice as
2421 many bits as the nominal MODE. */
2422 libval_mode = smallest_mode_for_size (2 * GET_MODE_BITSIZE (mode),
2425 libval = emit_library_call_value (optab_handler (binoptab, mode)->libfunc,
2426 NULL_RTX, LCT_CONST,
2430 /* Get the part of VAL containing the value that we want. */
/* Subreg byte offset 0 selects the first half of the return value;
   GET_MODE_SIZE (mode) selects the second half.  */
2431 libval = simplify_gen_subreg (mode, libval, libval_mode,
2432 targ0 ? 0 : GET_MODE_SIZE (mode));
2433 insns = get_insns ();
2435 /* Move the result into the desired location. */
2436 emit_libcall_block (insns, targ0 ? targ0 : targ1, libval,
2437 gen_rtx_fmt_ee (code, mode, op0, op1));
2443 /* Wrapper around expand_unop which takes an rtx code to specify
2444 the operation to perform, not an optab pointer. All other
2445 arguments are the same. */
2447 expand_simple_unop (enum machine_mode mode, enum rtx_code code, rtx op0,
2448 rtx target, int unsignedp)
/* Map the rtx code to its optab via the global code_to_optab table.  */
2450 optab unop = code_to_optab[(int) code];
2453 return expand_unop (mode, unop, op0, target, unsignedp);
2459 (clz:wide (zero_extend:wide x)) - ((width wide) - (width narrow)). */
2461 widen_clz (enum machine_mode mode, rtx op0, rtx target)
2463 enum mode_class class = GET_MODE_CLASS (mode);
2464 if (CLASS_HAS_WIDER_MODES_P (class))
2466 enum machine_mode wider_mode;
2467 for (wider_mode = GET_MODE_WIDER_MODE (mode);
2468 wider_mode != VOIDmode;
2469 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
2471 if (optab_handler (clz_optab, wider_mode)->insn_code
2472 != CODE_FOR_nothing)
2474 rtx xop0, temp, last;
2476 last = get_last_insn ();
2479 target = gen_reg_rtx (mode);
/* Zero-extend: the extra high bits are zero, so the wide clz
   over-counts by exactly the width difference.  */
2480 xop0 = widen_operand (op0, wider_mode, mode, true, false);
2481 temp = expand_unop (wider_mode, clz_optab, xop0, NULL_RTX, true);
/* Subtract the over-count to recover clz in the narrow mode.  */
2483 temp = expand_binop (wider_mode, sub_optab, temp,
2484 GEN_INT (GET_MODE_BITSIZE (wider_mode)
2485 - GET_MODE_BITSIZE (mode)),
2486 target, true, OPTAB_DIRECT);
2488 delete_insns_since (last);
2500 (lshiftrt:wide (bswap:wide x) ((width wide) - (width narrow))). */
2502 widen_bswap (enum machine_mode mode, rtx op0, rtx target)
2504 enum mode_class class = GET_MODE_CLASS (mode);
2505 enum machine_mode wider_mode;
2508 if (!CLASS_HAS_WIDER_MODES_P (class))
2511 for (wider_mode = GET_MODE_WIDER_MODE (mode);
2512 wider_mode != VOIDmode;
2513 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
2514 if (optab_handler (bswap_optab, wider_mode)->insn_code != CODE_FOR_nothing)
2519 last = get_last_insn ();
2521 x = widen_operand (op0, wider_mode, mode, true, true);
2522 x = expand_unop (wider_mode, bswap_optab, x, NULL_RTX, true);
/* The wide bswap leaves the narrow result in the high part; shift it
   down into the low part before taking the lowpart.  */
2525 x = expand_shift (RSHIFT_EXPR, wider_mode, x,
2526 size_int (GET_MODE_BITSIZE (wider_mode)
2527 - GET_MODE_BITSIZE (mode)),
2533 target = gen_reg_rtx (mode);
2534 emit_move_insn (target, gen_lowpart (mode, x));
2537 delete_insns_since (last);
2542 /* Try calculating bswap as two bswaps of two word-sized operands. */
2545 expand_doubleword_bswap (enum machine_mode mode, rtx op, rtx target)
/* Byte-swap each word, and also exchange the two words: input word 0
   becomes the high half (T1), input word 1 the low half (T0).  */
2549 t1 = expand_unop (word_mode, bswap_optab,
2550 operand_subword_force (op, 0, mode), NULL_RTX, true);
2551 t0 = expand_unop (word_mode, bswap_optab,
2552 operand_subword_force (op, 1, mode), NULL_RTX, true);
2555 target = gen_reg_rtx (mode);
/* Clobber the target first so the two word-sized stores form a
   recognizable multi-word write sequence.  */
2557 emit_insn (gen_rtx_CLOBBER (VOIDmode, target));
2558 emit_move_insn (operand_subword (target, 0, 1, mode), t0);
2559 emit_move_insn (operand_subword (target, 1, 1, mode), t1);
2564 /* Try calculating (parity x) as (and (popcount x) 1), where
2565 popcount can also be done in a wider mode. */
2567 expand_parity (enum machine_mode mode, rtx op0, rtx target)
2569 enum mode_class class = GET_MODE_CLASS (mode);
2570 if (CLASS_HAS_WIDER_MODES_P (class))
2572 enum machine_mode wider_mode;
/* Unlike widen_clz, this search starts at MODE itself: popcount in the
   same mode is just as good.  */
2573 for (wider_mode = mode; wider_mode != VOIDmode;
2574 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
2576 if (optab_handler (popcount_optab, wider_mode)->insn_code
2577 != CODE_FOR_nothing)
2579 rtx xop0, temp, last;
2581 last = get_last_insn ();
2584 target = gen_reg_rtx (mode);
2585 xop0 = widen_operand (op0, wider_mode, mode, true, false);
2586 temp = expand_unop (wider_mode, popcount_optab, xop0, NULL_RTX,
/* parity = popcount & 1.  */
2589 temp = expand_binop (wider_mode, and_optab, temp, const1_rtx,
2590 target, true, OPTAB_DIRECT);
2592 delete_insns_since (last);
2601 /* Try calculating ffs(x) using clz(x). Since the ffs builtin promises
2602 to return zero for a zero value and clz may have an undefined value
2603 in that case, only do this if we know clz returns the right thing so
2604 that we don't have to generate a test and branch. */
2606 expand_ffs (enum machine_mode mode, rtx op0, rtx target)
/* Require clz (0) to be defined as the mode bitsize, so that
   ffs (0) == 0 falls out of the formula below without a branch.  */
2609 if (clz_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing
2610 && CLZ_DEFINED_VALUE_AT_ZERO (mode, val) == 2
2611 && val == GET_MODE_BITSIZE (mode))
2613 rtx last = get_last_insn ();
/* Isolate the lowest set bit: x & -x.  */
2616 temp = expand_unop (mode, neg_optab, op0, NULL_RTX, true);
2618 temp = expand_binop (mode, and_optab, op0, temp, NULL_RTX,
2619 true, OPTAB_DIRECT);
2621 temp = expand_unop (mode, clz_optab, temp, NULL_RTX, true);
/* ffs (x) = bitsize - clz (x & -x).  */
2623 temp = expand_binop (mode, sub_optab,
2624 GEN_INT (GET_MODE_BITSIZE (mode)),
2626 target, true, OPTAB_DIRECT);
2628 delete_insns_since (last);
2634 /* We can compute ctz(x) using clz(x) with a similar recipe. Here the ctz
2635 builtin has an undefined result on zero, just like clz, so we don't have
2636 to do that check. */
2638 expand_ctz (enum machine_mode mode, rtx op0, rtx target)
2640 if (clz_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
2642 rtx last = get_last_insn ();
/* Isolate the lowest set bit: x & -x.  */
2645 temp = expand_unop (mode, neg_optab, op0, NULL_RTX, true);
2647 temp = expand_binop (mode, and_optab, op0, temp, NULL_RTX,
2648 true, OPTAB_DIRECT);
2650 temp = expand_unop (mode, clz_optab, temp, NULL_RTX, true);
/* ctz (x) = (bitsize - 1) - clz (x & -x); the XOR with bitsize-1 is
   equivalent to that subtraction for power-of-two bitsizes, since the
   clz of a single set bit is in [0, bitsize - 1].  */
2652 temp = expand_binop (mode, xor_optab, temp,
2653 GEN_INT (GET_MODE_BITSIZE (mode) - 1),
2655 true, OPTAB_DIRECT);
2657 delete_insns_since (last);
2663 /* Extract the OMODE lowpart from VAL, which has IMODE. Under certain
2664 conditions, VAL may already be a SUBREG against which we cannot generate
2665 a further SUBREG. In this case, we expect forcing the value into a
2666 register will work around the situation. */
2669 lowpart_subreg_maybe_copy (enum machine_mode omode, rtx val,
2670 enum machine_mode imode)
2673 ret = lowpart_subreg (omode, val, imode);
/* The nested SUBREG could not be formed: copy VAL to a fresh register
   and retry, which must succeed on a plain REG (hence the assert).  */
2676 val = force_reg (imode, val);
2677 ret = lowpart_subreg (omode, val, imode);
2678 gcc_assert (ret != NULL);
2683 /* Expand a floating point absolute value or negation operation via a
2684 logical operation on the sign bit. */
2687 expand_absneg_bit (enum rtx_code code, enum machine_mode mode,
2688 rtx op0, rtx target)
2690 const struct real_format *fmt;
2691 int bitpos, word, nwords, i;
2692 enum machine_mode imode;
2693 HOST_WIDE_INT hi, lo;
2696 /* The format has to have a simple sign bit. */
2697 fmt = REAL_MODE_FORMAT (mode);
2701 bitpos = fmt->signbit_rw;
2705 /* Don't create negative zeros if the format doesn't support them. */
2706 if (code == NEG && !fmt->has_signed_zero)
2709 if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
/* Single-word case: operate in an integer mode of the same size.  */
2711 imode = int_mode_for_mode (mode);
2712 if (imode == BLKmode)
2721 if (FLOAT_WORDS_BIG_ENDIAN)
2722 word = (GET_MODE_BITSIZE (mode) - bitpos) / BITS_PER_WORD;
2724 word = bitpos / BITS_PER_WORD;
2725 bitpos = bitpos % BITS_PER_WORD;
2726 nwords = (GET_MODE_BITSIZE (mode) + BITS_PER_WORD - 1) / BITS_PER_WORD;
/* Build the sign-bit mask in (LO, HI).  NOTE(review): for ABS the mask
   presumably gets complemented (AND clears the sign bit) in lines
   elided from this view — confirm against the full source.  */
2729 if (bitpos < HOST_BITS_PER_WIDE_INT)
2732 lo = (HOST_WIDE_INT) 1 << bitpos;
2736 hi = (HOST_WIDE_INT) 1 << (bitpos - HOST_BITS_PER_WIDE_INT);
2742 if (target == 0 || target == op0)
2743 target = gen_reg_rtx (mode);
/* Multi-word case: apply the mask only to the word holding the sign
   bit; all other words are copied through unchanged.  */
2749 for (i = 0; i < nwords; ++i)
2751 rtx targ_piece = operand_subword (target, i, 1, mode);
2752 rtx op0_piece = operand_subword_force (op0, i, mode);
2756 temp = expand_binop (imode, code == ABS ? and_optab : xor_optab,
2758 immed_double_const (lo, hi, imode),
2759 targ_piece, 1, OPTAB_LIB_WIDEN);
2760 if (temp != targ_piece)
2761 emit_move_insn (targ_piece, temp);
2764 emit_move_insn (targ_piece, op0_piece);
2767 insns = get_insns ();
2770 temp = gen_rtx_fmt_e (code, mode, copy_rtx (op0));
2771 emit_no_conflict_block (insns, target, op0, NULL_RTX, temp);
/* Single-word case: one AND/XOR on the whole value, then a note giving
   the equivalent (ABS/NEG op0) expression for later passes.  */
2775 temp = expand_binop (imode, code == ABS ? and_optab : xor_optab,
2776 gen_lowpart (imode, op0),
2777 immed_double_const (lo, hi, imode),
2778 gen_lowpart (imode, target), 1, OPTAB_LIB_WIDEN);
2779 target = lowpart_subreg_maybe_copy (mode, temp, imode);
2781 set_unique_reg_note (get_last_insn (), REG_EQUAL,
2782 gen_rtx_fmt_e (code, mode, copy_rtx (op0)));
2788 /* Generate code to perform an operation specified by UNOPTAB
2789 on operand OP0, with result having machine-mode MODE.
2791 UNSIGNEDP is for the case where we have to widen the operands
2792 to perform the operation. It says to use zero-extension.
2794 If TARGET is nonzero, the value
2795 is generated there, if it is convenient to do so.
2796 In all cases an rtx is returned for the locus of the value;
2797 this may or may not be TARGET. */
2800 expand_unop (enum machine_mode mode, optab unoptab, rtx op0, rtx target,
2803 enum mode_class class;
2804 enum machine_mode wider_mode;
2806 rtx last = get_last_insn ();
2809 class = GET_MODE_CLASS (mode);
/* Strategy 1: a direct insn in this mode.  */
2811 if (optab_handler (unoptab, mode)->insn_code != CODE_FOR_nothing)
2813 int icode = (int) optab_handler (unoptab, mode)->insn_code;
2814 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
2820 temp = gen_reg_rtx (mode);
2822 if (GET_MODE (xop0) != VOIDmode
2823 && GET_MODE (xop0) != mode0)
2824 xop0 = convert_to_mode (mode0, xop0, unsignedp);
2826 /* Now, if insn doesn't accept our operand, put it into a pseudo. */
2828 if (!insn_data[icode].operand[1].predicate (xop0, mode0))
2829 xop0 = copy_to_mode_reg (mode0, xop0);
2831 if (!insn_data[icode].operand[0].predicate (temp, mode))
2832 temp = gen_reg_rtx (mode);
2834 pat = GEN_FCN (icode) (temp, xop0);
/* If the pattern expanded to multiple insns and a REG_EQUAL note could
   not be attached, retry from scratch without a suggested target.  */
2837 if (INSN_P (pat) && NEXT_INSN (pat) != NULL_RTX
2838 && ! add_equal_note (pat, temp, unoptab->code, xop0, NULL_RTX)
2840 delete_insns_since (last);
2841 return expand_unop (mode, unoptab, op0, NULL_RTX, unsignedp);
2849 delete_insns_since (last);
2852 /* It can't be done in this mode. Can we open-code it in a wider mode? */
2854 /* Widening clz needs special treatment. */
2855 if (unoptab == clz_optab)
2857 temp = widen_clz (mode, op0, target);
2864 /* Widening (or narrowing) bswap needs special treatment. */
2865 if (unoptab == bswap_optab)
2867 temp = widen_bswap (mode, op0, target);
2871 if (GET_MODE_SIZE (mode) == 2 * UNITS_PER_WORD
2872 && optab_handler (unoptab, word_mode)->insn_code != CODE_FOR_nothing)
2874 temp = expand_doubleword_bswap (mode, op0, target);
/* Strategy 2: open-code in a wider mode, then truncate back.  */
2882 if (CLASS_HAS_WIDER_MODES_P (class))
2883 for (wider_mode = GET_MODE_WIDER_MODE (mode);
2884 wider_mode != VOIDmode;
2885 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
2887 if (optab_handler (unoptab, wider_mode)->insn_code != CODE_FOR_nothing)
2891 /* For certain operations, we need not actually extend
2892 the narrow operand, as long as we will truncate the
2893 results to the same narrowness. */
2895 xop0 = widen_operand (xop0, wider_mode, mode, unsignedp,
2896 (unoptab == neg_optab
2897 || unoptab == one_cmpl_optab)
2898 && class == MODE_INT);
2900 temp = expand_unop (wider_mode, unoptab, xop0, NULL_RTX,
2905 if (class != MODE_INT
2906 || !TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode),
2907 GET_MODE_BITSIZE (wider_mode)))
2910 target = gen_reg_rtx (mode);
2911 convert_move (target, temp, 0);
2915 return gen_lowpart (mode, temp);
2918 delete_insns_since (last);
2922 /* These can be done a word at a time. */
2923 if (unoptab == one_cmpl_optab
2924 && class == MODE_INT
2925 && GET_MODE_SIZE (mode) > UNITS_PER_WORD
2926 && optab_handler (unoptab, word_mode)->insn_code != CODE_FOR_nothing)
2931 if (target == 0 || target == op0)
2932 target = gen_reg_rtx (mode);
2936 /* Do the actual arithmetic. */
2937 for (i = 0; i < GET_MODE_BITSIZE (mode) / BITS_PER_WORD; i++)
2939 rtx target_piece = operand_subword (target, i, 1, mode);
2940 rtx x = expand_unop (word_mode, unoptab,
2941 operand_subword_force (op0, i, mode),
2942 target_piece, unsignedp);
2944 if (target_piece != x)
2945 emit_move_insn (target_piece, x);
2948 insns = get_insns ();
2951 emit_no_conflict_block (insns, target, op0, NULL_RTX,
2952 gen_rtx_fmt_e (unoptab->code, mode,
2957 if (unoptab->code == NEG)
2959 /* Try negating floating point values by flipping the sign bit. */
2960 if (SCALAR_FLOAT_MODE_P (mode))
2962 temp = expand_absneg_bit (NEG, mode, op0, target);
2967 /* If there is no negation pattern, and we have no negative zero,
2968 try subtracting from zero. */
2969 if (!HONOR_SIGNED_ZEROS (mode))
2971 temp = expand_binop (mode, (unoptab == negv_optab
2972 ? subv_optab : sub_optab),
2973 CONST0_RTX (mode), op0, target,
2974 unsignedp, OPTAB_DIRECT);
2980 /* Try calculating parity (x) as popcount (x) % 2. */
2981 if (unoptab == parity_optab)
2983 temp = expand_parity (mode, op0, target);
2988 /* Try implementing ffs (x) in terms of clz (x). */
2989 if (unoptab == ffs_optab)
2991 temp = expand_ffs (mode, op0, target);
2996 /* Try implementing ctz (x) in terms of clz (x). */
2997 if (unoptab == ctz_optab)
2999 temp = expand_ctz (mode, op0, target);
3005 /* Now try a library call in this mode. */
3006 if (optab_handler (unoptab, mode)->libfunc)
3010 enum machine_mode outmode = mode;
3012 /* All of these functions return small values. Thus we choose to
3013 have them return something that isn't a double-word. */
3014 if (unoptab == ffs_optab || unoptab == clz_optab || unoptab == ctz_optab
3015 || unoptab == popcount_optab || unoptab == parity_optab)
3017 = GET_MODE (hard_libcall_value (TYPE_MODE (integer_type_node)));
3021 /* Pass 1 for NO_QUEUE so we don't lose any increments
3022 if the libcall is cse'd or moved. */
3023 value = emit_library_call_value (optab_handler (unoptab, mode)->libfunc,
3024 NULL_RTX, LCT_CONST, outmode,
3026 insns = get_insns ();
3029 target = gen_reg_rtx (outmode);
3030 emit_libcall_block (insns, target, value,
3031 gen_rtx_fmt_e (unoptab->code, outmode, op0));
3036 /* It can't be done in this mode. Can we do it in a wider mode? */
/* Final widening pass: this time a library call in the wider mode is
   also acceptable.  */
3038 if (CLASS_HAS_WIDER_MODES_P (class))
3040 for (wider_mode = GET_MODE_WIDER_MODE (mode);
3041 wider_mode != VOIDmode;
3042 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
3044 if ((optab_handler (unoptab, wider_mode)->insn_code
3045 != CODE_FOR_nothing)
3046 || optab_handler (unoptab, wider_mode)->libfunc)
3050 /* For certain operations, we need not actually extend
3051 the narrow operand, as long as we will truncate the
3052 results to the same narrowness. */
3054 xop0 = widen_operand (xop0, wider_mode, mode, unsignedp,
3055 (unoptab == neg_optab
3056 || unoptab == one_cmpl_optab)
3057 && class == MODE_INT);
3059 temp = expand_unop (wider_mode, unoptab, xop0, NULL_RTX,
3062 /* If we are generating clz using wider mode, adjust the
3064 if (unoptab == clz_optab && temp != 0)
3065 temp = expand_binop (wider_mode, sub_optab, temp,
3066 GEN_INT (GET_MODE_BITSIZE (wider_mode)
3067 - GET_MODE_BITSIZE (mode)),
3068 target, true, OPTAB_DIRECT);
3072 if (class != MODE_INT)
3075 target = gen_reg_rtx (mode);
3076 convert_move (target, temp, 0);
3080 return gen_lowpart (mode, temp);
3083 delete_insns_since (last);
3088 /* One final attempt at implementing negation via subtraction,
3089 this time allowing widening of the operand. */
3090 if (unoptab->code == NEG && !HONOR_SIGNED_ZEROS (mode))
3093 temp = expand_binop (mode,
3094 unoptab == negv_optab ? subv_optab : sub_optab,
3095 CONST0_RTX (mode), op0,
3096 target, unsignedp, OPTAB_LIB_WIDEN);
3104 /* Emit code to compute the absolute value of OP0, with result to
3105 TARGET if convenient. (TARGET may be 0.) The return value says
3106 where the result actually is to be found.
3108 MODE is the mode of the operand; the mode of the result is
3109 different but can be deduced from MODE.
3114 expand_abs_nojump (enum machine_mode mode, rtx op0, rtx target,
3115 int result_unsignedp)
3120 result_unsignedp = 1;
3122 /* First try to do it with a special abs instruction. */
3123 temp = expand_unop (mode, result_unsignedp ? abs_optab : absv_optab,
3128 /* For floating point modes, try clearing the sign bit. */
3129 if (SCALAR_FLOAT_MODE_P (mode))
3131 temp = expand_absneg_bit (ABS, mode, op0, target);
3136 /* If we have a MAX insn, we can do this as MAX (x, -x). */
3137 if (optab_handler (smax_optab, mode)->insn_code != CODE_FOR_nothing
3138 && !HONOR_SIGNED_ZEROS (mode))
3140 rtx last = get_last_insn ();
3142 temp = expand_unop (mode, neg_optab, op0, NULL_RTX, 0);
3144 temp = expand_binop (mode, smax_optab, op0, temp, target, 0,
3150 delete_insns_since (last);
3153 /* If this machine has expensive jumps, we can do integer absolute
3154 value of X as (((signed) x >> (W-1)) ^ x) - ((signed) x >> (W-1)),
3155 where W is the width of MODE. */
3157 if (GET_MODE_CLASS (mode) == MODE_INT && BRANCH_COST >= 2)
/* EXTENDED is the arithmetic-shift sign mask: all-ones when OP0 is
   negative, zero otherwise.  */
3159 rtx extended = expand_shift (RSHIFT_EXPR, mode, op0,
3160 size_int (GET_MODE_BITSIZE (mode) - 1),
3163 temp = expand_binop (mode, xor_optab, extended, op0, target, 0,
3166 temp = expand_binop (mode, result_unsignedp ? sub_optab : subv_optab,
3167 temp, extended, target, 0, OPTAB_LIB_WIDEN);
3177 expand_abs (enum machine_mode mode, rtx op0, rtx target,
3178 int result_unsignedp, int safe)
3183 result_unsignedp = 1;
/* First try all the branch-free strategies.  */
3185 temp = expand_abs_nojump (mode, op0, target, result_unsignedp);
3189 /* If that does not win, use conditional jump and negate. */
3191 /* It is safe to use the target if it is the same
3192 as the source if this is also a pseudo register */
3193 if (op0 == target && REG_P (op0)
3194 && REGNO (op0) >= FIRST_PSEUDO_REGISTER)
3197 op1 = gen_label_rtx ();
3198 if (target == 0 || ! safe
3199 || GET_MODE (target) != mode
3200 || (MEM_P (target) && MEM_VOLATILE_P (target))
3202 && REGNO (target) < FIRST_PSEUDO_REGISTER))
3203 target = gen_reg_rtx (mode);
3205 emit_move_insn (target, op0);
/* Branch over the negation when the value is already >= 0.  */
3208 do_compare_rtx_and_jump (target, CONST0_RTX (mode), GE, 0, mode,
3209 NULL_RTX, NULL_RTX, op1);
3211 op0 = expand_unop (mode, result_unsignedp ? neg_optab : negv_optab,
3214 emit_move_insn (target, op0);
3220 /* A subroutine of expand_copysign, perform the copysign operation using the
3221 abs and neg primitives advertised to exist on the target. The assumption
3222 is that we have a split register file, and leaving op0 in fp registers,
3223 and not playing with subregs so much, will help the register allocator. */
3226 expand_copysign_absneg (enum machine_mode mode, rtx op0, rtx op1, rtx target,
3227 int bitpos, bool op0_is_abs)
3229 enum machine_mode imode;
3236 /* Check if the back end provides an insn that handles signbit for the
3238 icode = (int) signbit_optab->handlers [(int) mode].insn_code;
3239 if (icode != CODE_FOR_nothing)
3241 imode = insn_data[icode].operand[0].mode;
3242 sign = gen_reg_rtx (imode);
3243 emit_unop_insn (icode, sign, op1, UNKNOWN);
/* No signbit insn: extract OP1's sign bit by masking its bit pattern
   in an integer mode of the appropriate size.  */
3247 HOST_WIDE_INT hi, lo;
3249 if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
3251 imode = int_mode_for_mode (mode);
3252 if (imode == BLKmode)
3254 op1 = gen_lowpart (imode, op1);
3261 if (FLOAT_WORDS_BIG_ENDIAN)
3262 word = (GET_MODE_BITSIZE (mode) - bitpos) / BITS_PER_WORD;
3264 word = bitpos / BITS_PER_WORD;
3265 bitpos = bitpos % BITS_PER_WORD;
3266 op1 = operand_subword_force (op1, word, mode);
3269 if (bitpos < HOST_BITS_PER_WIDE_INT)
3272 lo = (HOST_WIDE_INT) 1 << bitpos;
3276 hi = (HOST_WIDE_INT) 1 << (bitpos - HOST_BITS_PER_WIDE_INT);
3280 sign = gen_reg_rtx (imode);
3281 sign = expand_binop (imode, and_optab, op1,
3282 immed_double_const (lo, hi, imode),
3283 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3288 op0 = expand_unop (mode, abs_optab, op0, target, 0);
3295 if (target == NULL_RTX)
3296 target = copy_to_reg (op0);
3298 emit_move_insn (target, op0);
/* Negate the (absolute) value only when OP1's sign bit is set.  */
3301 label = gen_label_rtx ();
3302 emit_cmp_and_jump_insns (sign, const0_rtx, EQ, NULL_RTX, imode, 1, label);
3304 if (GET_CODE (op0) == CONST_DOUBLE)
3305 op0 = simplify_unary_operation (NEG, mode, op0, mode);
3307 op0 = expand_unop (mode, neg_optab, op0, target, 0);
3309 emit_move_insn (target, op0);
3317 /* A subroutine of expand_copysign, perform the entire copysign operation
3318 with integer bitmasks. BITPOS is the position of the sign bit; OP0_IS_ABS
3319 is true if op0 is known to have its sign bit clear. */
3322 expand_copysign_bit (enum machine_mode mode, rtx op0, rtx op1, rtx target,
3323 int bitpos, bool op0_is_abs)
3325 enum machine_mode imode;
3326 HOST_WIDE_INT hi, lo;
3327 int word, nwords, i;
3330 if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
3332 imode = int_mode_for_mode (mode);
3333 if (imode == BLKmode)
3342 if (FLOAT_WORDS_BIG_ENDIAN)
3343 word = (GET_MODE_BITSIZE (mode) - bitpos) / BITS_PER_WORD;
3345 word = bitpos / BITS_PER_WORD;
3346 bitpos = bitpos % BITS_PER_WORD;
3347 nwords = (GET_MODE_BITSIZE (mode) + BITS_PER_WORD - 1) / BITS_PER_WORD;
/* (LO, HI) is the sign-bit mask within the word that holds it.  */
3350 if (bitpos < HOST_BITS_PER_WIDE_INT)
3353 lo = (HOST_WIDE_INT) 1 << bitpos;
3357 hi = (HOST_WIDE_INT) 1 << (bitpos - HOST_BITS_PER_WIDE_INT);
3361 if (target == 0 || target == op0 || target == op1)
3362 target = gen_reg_rtx (mode);
/* Multi-word case: merge (op0 & ~mask) | (op1 & mask) in the word
   holding the sign bit; other words are copied from OP0 unchanged.  */
3368 for (i = 0; i < nwords; ++i)
3370 rtx targ_piece = operand_subword (target, i, 1, mode);
3371 rtx op0_piece = operand_subword_force (op0, i, mode);
3376 op0_piece = expand_binop (imode, and_optab, op0_piece,
3377 immed_double_const (~lo, ~hi, imode),
3378 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3380 op1 = expand_binop (imode, and_optab,
3381 operand_subword_force (op1, i, mode),
3382 immed_double_const (lo, hi, imode),
3383 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3385 temp = expand_binop (imode, ior_optab, op0_piece, op1,
3386 targ_piece, 1, OPTAB_LIB_WIDEN);
3387 if (temp != targ_piece)
3388 emit_move_insn (targ_piece, temp);
3391 emit_move_insn (targ_piece, op0_piece);
3394 insns = get_insns ();
3397 emit_no_conflict_block (insns, target, op0, op1, NULL_RTX);
/* Single-word case: the same mask-and-merge on the whole value.  */
3401 op1 = expand_binop (imode, and_optab, gen_lowpart (imode, op1),
3402 immed_double_const (lo, hi, imode),
3403 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3405 op0 = gen_lowpart (imode, op0);
3407 op0 = expand_binop (imode, and_optab, op0,
3408 immed_double_const (~lo, ~hi, imode),
3409 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3411 temp = expand_binop (imode, ior_optab, op0, op1,
3412 gen_lowpart (imode, target), 1, OPTAB_LIB_WIDEN);
3413 target = lowpart_subreg_maybe_copy (mode, temp, imode);
3419 /* Expand the C99 copysign operation. OP0 and OP1 must be the same
3420 scalar floating point mode. Return NULL if we do not know how to
3421 expand the operation inline. */
3424 expand_copysign (rtx op0, rtx op1, rtx target)
3426 enum machine_mode mode = GET_MODE (op0);
3427 const struct real_format *fmt;
3431 gcc_assert (SCALAR_FLOAT_MODE_P (mode));
3432 gcc_assert (GET_MODE (op1) == mode);
3434 /* First try to do it with a special instruction. */
3435 temp = expand_binop (mode, copysign_optab, op0, op1,
3436 target, 0, OPTAB_DIRECT);
/* The fallback strategies require a format with a sign bit and
   signed-zero support.  */
3440 fmt = REAL_MODE_FORMAT (mode);
3441 if (fmt == NULL || !fmt->has_signed_zero)
3445 if (GET_CODE (op0) == CONST_DOUBLE)
/* Fold away the magnitude's sign at compile time when possible.  */
3447 if (real_isneg (CONST_DOUBLE_REAL_VALUE (op0)))
3448 op0 = simplify_unary_operation (ABS, mode, op0, mode);
3452 if (fmt->signbit_ro >= 0
3453 && (GET_CODE (op0) == CONST_DOUBLE
3454 || (optab_handler (neg_optab, mode)->insn_code != CODE_FOR_nothing
3455 && optab_handler (abs_optab, mode)->insn_code != CODE_FOR_nothing)))
3457 temp = expand_copysign_absneg (mode, op0, op1, target,
3458 fmt->signbit_ro, op0_is_abs);
3463 if (fmt->signbit_rw < 0)
3465 return expand_copysign_bit (mode, op0, op1, target,
3466 fmt->signbit_rw, op0_is_abs);
3469 /* Generate an instruction whose insn-code is INSN_CODE,
3470 with two operands: an output TARGET and an input OP0.
3471 TARGET *must* be nonzero, and the output is always stored there.
3472 CODE is an rtx code such that (CODE OP0) is an rtx that describes
3473 the value that is stored into TARGET. */
3476 emit_unop_insn (int icode, rtx target, rtx op0, enum rtx_code code)
3479 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
3484 /* Now, if insn does not accept our operands, put them into pseudos. */
3486 if (!insn_data[icode].operand[1].predicate (op0, mode0))
3487 op0 = copy_to_mode_reg (mode0, op0);
3489 if (!insn_data[icode].operand[0].predicate (temp, GET_MODE (temp)))
3490 temp = gen_reg_rtx (GET_MODE (temp));
3492 pat = GEN_FCN (icode) (temp, op0);
/* Attach a REG_EQUAL note only when the caller supplied a meaningful
   CODE and the pattern expanded to more than one insn.  */
3494 if (INSN_P (pat) && NEXT_INSN (pat) != NULL_RTX && code != UNKNOWN)
3495 add_equal_note (pat, temp, code, op0, NULL_RTX);
3500 emit_move_insn (target, temp);
3503 struct no_conflict_data
3505 rtx target, first, insn;
3509 /* Called via note_stores by emit_no_conflict_block and emit_libcall_block.
3510 Set P->must_stay if the currently examined clobber / store has to stay
3511 in the list of insns that constitute the actual no_conflict block /
3514 no_conflict_move_test (rtx dest, const_rtx set, void *p0)
3516 struct no_conflict_data *p= p0;
3518 /* If this insn directly contributes to setting the target, it must stay. */
3519 if (reg_overlap_mentioned_p (p->target, dest))
3520 p->must_stay = true;
3521 /* If we haven't committed to keeping any other insns in the list yet,
3522 there is nothing more to check. */
3523 else if (p->insn == p->first)
3525 /* If this insn sets / clobbers a register that feeds one of the insns
3526 already in the list, this insn has to stay too. */
3527 else if (reg_overlap_mentioned_p (dest, PATTERN (p->first))
3528 || (CALL_P (p->first) && (find_reg_fusage (p->first, USE, dest)))
3529 || reg_used_between_p (dest, p->first, p->insn)
3530 /* Likewise if this insn depends on a register set by a previous
3531 insn in the list, or if it sets a result (presumably a hard
3532 register) that is set or clobbered by a previous insn.
3533 N.B. the modified_*_p (SET_DEST...) tests applied to a MEM
3534 SET_DEST perform the former check on the address, and the latter
3535 check on the MEM. */
3536 || (GET_CODE (set) == SET
3537 && (modified_in_p (SET_SRC (set), p->first)
3538 || modified_in_p (SET_DEST (set), p->first)
3539 || modified_between_p (SET_SRC (set), p->first, p->insn)
3540 || modified_between_p (SET_DEST (set), p->first, p->insn))))
3541 p->must_stay = true;
3544 /* Encapsulate the block starting at FIRST and ending with LAST, which is
3545 logically equivalent to EQUIV, so it gets manipulated as a unit if it
3546 is possible to do so. */
3549 maybe_encapsulate_block (rtx first, rtx last, rtx equiv)
3551 if (!flag_non_call_exceptions || !may_trap_p (equiv))
3553 /* We can't attach the REG_LIBCALL and REG_RETVAL notes when the
3554 encapsulated region would not be in one basic block, i.e. when
3555 there is a control_flow_insn_p insn between FIRST and LAST. */
3556 bool attach_libcall_retval_notes = true;
3557 rtx insn, next = NEXT_INSN (last);
3559 for (insn = first; insn != next; insn = NEXT_INSN (insn))
3560 if (control_flow_insn_p (insn))
3562 attach_libcall_retval_notes = false;
3566 if (attach_libcall_retval_notes)
/* Bracket the region: REG_LIBCALL on FIRST points at LAST, REG_RETVAL
   on LAST points back at FIRST.  */
3568 REG_NOTES (first) = gen_rtx_INSN_LIST (REG_LIBCALL, last,
3570 REG_NOTES (last) = gen_rtx_INSN_LIST (REG_RETVAL, first,
3572 next = NEXT_INSN (last);
/* Tag every insn in the region with the common libcall id.  */
3573 for (insn = first; insn != next; insn = NEXT_INSN (insn))
3574 REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_LIBCALL_ID,
3575 GEN_INT (libcall_id),
3582 /* Emit code to perform a series of operations on a multi-word quantity, one
3585 Such a block is preceded by a CLOBBER of the output, consists of multiple
3586 insns, each setting one word of the output, and followed by a SET copying
3587 the output to itself.
3589 Each of the insns setting words of the output receives a REG_NO_CONFLICT
3590 note indicating that it doesn't conflict with the (also multi-word)
3591 inputs. The entire block is surrounded by REG_LIBCALL and REG_RETVAL
3594 INSNS is a block of code generated to perform the operation, not including
3595 the CLOBBER and final copy. All insns that compute intermediate values
3596 are first emitted, followed by the block as described above.
3598 TARGET, OP0, and OP1 are the output and inputs of the operations,
3599 respectively. OP1 may be zero for a unary operation.
3601 EQUIV, if nonzero, is an expression to be placed into a REG_EQUAL note
3604 If TARGET is not a register, INSNS is simply emitted with no special
3605 processing. Likewise if anything in INSNS is not an INSN or if
3606 there is a libcall block inside INSNS.
3608 The final insn emitted is returned. */
/* NOTE(review): listing is non-contiguous; several statements (e.g. the
   emit of each kept insn, some note operands, returns) are missing from
   view.  Comments below cover only the visible lines. */
3611 emit_no_conflict_block (rtx insns, rtx target, rtx op0, rtx op1, rtx equiv)
3613 rtx prev, next, first, last, insn;
/* Bail out to a plain emit when no special block can be built. */
3615 if (!REG_P (target) || reload_in_progress)
3616 return emit_insn (insns);
3618 for (insn = insns; insn; insn = NEXT_INSN (insn))
3619 if (!NONJUMP_INSN_P (insn)
3620 || find_reg_note (insn, REG_LIBCALL, NULL_RTX))
3621 return emit_insn (insns);
3623 /* First emit all insns that do not store into words of the output and remove
3624 these from the list. */
3625 for (insn = insns; insn; insn = next)
3628 struct no_conflict_data data;
3630 next = NEXT_INSN (insn);
3632 /* Some ports (cris) create a libcall regions at their own. We must
3633 avoid any potential nesting of LIBCALLs. */
3634 if ((note = find_reg_note (insn, REG_LIBCALL, NULL)) != NULL)
3635 remove_note (insn, note);
3636 if ((note = find_reg_note (insn, REG_RETVAL, NULL)) != NULL)
3637 remove_note (insn, note);
3638 if ((note = find_reg_note (insn, REG_LIBCALL_ID, NULL)) != NULL)
3639 remove_note (insn, note);
/* Let no_conflict_move_test decide (via data.must_stay) whether this
   insn may be hoisted out of the block. */
3641 data.target = target;
3645 note_stores (PATTERN (insn), no_conflict_move_test, &data);
3646 if (! data.must_stay)
/* Unlink INSN from the doubly linked list before re-emitting it. */
3648 if (PREV_INSN (insn))
3649 NEXT_INSN (PREV_INSN (insn)) = next;
3654 PREV_INSN (next) = PREV_INSN (insn);
3660 prev = get_last_insn ();
3662 /* Now write the CLOBBER of the output, followed by the setting of each
3663 of the words, followed by the final copy. */
3664 if (target != op0 && target != op1)
3665 emit_insn (gen_rtx_CLOBBER (VOIDmode, target));
3667 for (insn = insns; insn; insn = next)
3669 next = NEXT_INSN (insn);
/* Attach REG_NO_CONFLICT notes for whichever inputs are registers
   (note list tails not visible in this listing). */
3672 if (op1 && REG_P (op1))
3673 REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_NO_CONFLICT, op1,
3676 if (op0 && REG_P (op0))
3677 REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_NO_CONFLICT, op0,
/* Final self-copy of TARGET delimits the block when a move pattern
   exists for its mode. */
3681 if (optab_handler (mov_optab, GET_MODE (target))->insn_code
3682 != CODE_FOR_nothing)
3684 last = emit_move_insn (target, target);
3686 set_unique_reg_note (last, REG_EQUAL, equiv);
3690 last = get_last_insn ();
3692 /* Remove any existing REG_EQUAL note from "last", or else it will
3693 be mistaken for a note referring to the full contents of the
3694 alleged libcall value when found together with the REG_RETVAL
3695 note added below. An existing note can come from an insn
3696 expansion at "last". */
3697 remove_note (last, find_reg_note (last, REG_EQUAL, NULL_RTX));
3701 first = get_insns ();
3703 first = NEXT_INSN (prev);
3705 maybe_encapsulate_block (first, last, equiv);
3710 /* Emit code to make a call to a constant function or a library call.
3712 INSNS is a list containing all insns emitted in the call.
3713 These insns leave the result in RESULT. Our block is to copy RESULT
3714 to TARGET, which is logically equivalent to EQUIV.
3716 We first emit any insns that set a pseudo on the assumption that these are
3717 loading constants into registers; doing so allows them to be safely cse'ed
3718 between blocks. Then we emit all the other insns in the block, followed by
3719 an insn to move RESULT to TARGET. This last insn will have a REQ_EQUAL
3720 note with an operand of EQUIV.
3722 Moving assignments to pseudos outside of the block is done to improve
3723 the generated code, but is not required to generate correct code,
3724 hence being unable to move an assignment is not grounds for not making
3725 a libcall block. There are two reasons why it is safe to leave these
3726 insns inside the block: First, we know that these pseudos cannot be
3727 used in generated RTL outside the block since they are created for
3728 temporary purposes within the block. Second, CSE will not record the
3729 values of anything set inside a libcall block, so we know they must
3730 be dead at the end of the block.
3732 Except for the first group of insns (the ones setting pseudos), the
3733 block is delimited by REG_RETVAL and REG_LIBCALL notes. */
/* NOTE(review): non-contiguous listing; some statements (the CALL_P test
   guarding the EH-note loop, re-emits of moved insns, final returns) are
   not visible.  Comments describe only the visible lines. */
3735 emit_libcall_block (rtx insns, rtx target, rtx result, rtx equiv)
3737 rtx final_dest = target;
3738 rtx prev, next, first, last, insn;
3740 /* If this is a reg with REG_USERVAR_P set, then it could possibly turn
3741 into a MEM later. Protect the libcall block from this change. */
3742 if (! REG_P (target) || REG_USERVAR_P (target))
3743 target = gen_reg_rtx (GET_MODE (target));
3745 /* If we're using non-call exceptions, a libcall corresponding to an
3746 operation that may trap may also trap. */
3747 if (flag_non_call_exceptions && may_trap_p (equiv))
3749 for (insn = insns; insn; insn = NEXT_INSN (insn))
3752 rtx note = find_reg_note (insn, REG_EH_REGION, NULL_RTX);
/* Drop no-throw markers (region <= 0) so the call can throw. */
3754 if (note != 0 && INTVAL (XEXP (note, 0)) <= 0)
3755 remove_note (insn, note);
3759 /* look for any CALL_INSNs in this sequence, and attach a REG_EH_REGION
3760 reg note to indicate that this call cannot throw or execute a nonlocal
3761 goto (unless there is already a REG_EH_REGION note, in which case
3763 for (insn = insns; insn; insn = NEXT_INSN (insn))
3766 rtx note = find_reg_note (insn, REG_EH_REGION, NULL_RTX);
3769 XEXP (note, 0) = constm1_rtx;
3771 REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_EH_REGION, constm1_rtx,
3775 /* First emit all insns that set pseudos. Remove them from the list as
3776 we go. Avoid insns that set pseudos which were referenced in previous
3777 insns. These can be generated by move_by_pieces, for example,
3778 to update an address. Similarly, avoid insns that reference things
3779 set in previous insns. */
3781 for (insn = insns; insn; insn = next)
3783 rtx set = single_set (insn);
3786 /* Some ports (cris) create a libcall regions at their own. We must
3787 avoid any potential nesting of LIBCALLs. */
3788 if ((note = find_reg_note (insn, REG_LIBCALL, NULL)) != NULL)
3789 remove_note (insn, note);
3790 if ((note = find_reg_note (insn, REG_RETVAL, NULL)) != NULL)
3791 remove_note (insn, note);
3792 if ((note = find_reg_note (insn, REG_LIBCALL_ID, NULL)) != NULL)
3793 remove_note (insn, note);
3795 next = NEXT_INSN (insn);
/* Only single sets of pseudo registers are candidates for hoisting. */
3797 if (set != 0 && REG_P (SET_DEST (set))
3798 && REGNO (SET_DEST (set)) >= FIRST_PSEUDO_REGISTER)
3800 struct no_conflict_data data;
/* const0_rtx target: no destination conflicts; must_stay is decided
   purely by dependences on neighbouring insns. */
3802 data.target = const0_rtx;
3806 note_stores (PATTERN (insn), no_conflict_move_test, &data);
3807 if (! data.must_stay)
/* Unlink the hoistable insn from the list. */
3809 if (PREV_INSN (insn))
3810 NEXT_INSN (PREV_INSN (insn)) = next;
3815 PREV_INSN (next) = PREV_INSN (insn);
3821 /* Some ports use a loop to copy large arguments onto the stack.
3822 Don't move anything outside such a loop. */
3827 prev = get_last_insn ();
3829 /* Write the remaining insns followed by the final copy. */
3831 for (insn = insns; insn; insn = next)
3833 next = NEXT_INSN (insn);
3838 last = emit_move_insn (target, result);
3839 if (optab_handler (mov_optab, GET_MODE (target))->insn_code
3840 != CODE_FOR_nothing)
3841 set_unique_reg_note (last, REG_EQUAL, copy_rtx (equiv));
3844 /* Remove any existing REG_EQUAL note from "last", or else it will
3845 be mistaken for a note referring to the full contents of the
3846 libcall value when found together with the REG_RETVAL note added
3847 below. An existing note can come from an insn expansion at
3849 remove_note (last, find_reg_note (last, REG_EQUAL, NULL_RTX));
/* If TARGET was replaced by a fresh pseudo above, copy back to the
   caller-visible destination. */
3852 if (final_dest != target)
3853 emit_move_insn (final_dest, target);
3856 first = get_insns ();
3858 first = NEXT_INSN (prev);
3860 maybe_encapsulate_block (first, last, equiv);
3863 /* Nonzero if we can perform a comparison of mode MODE straightforwardly.
3864 PURPOSE describes how this comparison will be used. CODE is the rtx
3865 comparison code we will be using.
3867 ??? Actually, CODE is slightly weaker than that. A target is still
3868 required to implement all of the normal bcc operations, but not
3869 required to implement all (or any) of the unordered bcc operations. */
/* NOTE(review): non-contiguous listing; the do/while loop head, return
   statements and braces are not all visible.  The visible logic walks
   MODE through successively wider modes looking for a usable pattern. */
3872 can_compare_p (enum rtx_code code, enum machine_mode mode,
3873 enum can_compare_purpose purpose)
/* A plain compare pattern suffices if the follow-up (branch or
   store-flag) generator also exists for CODE. */
3877 if (optab_handler (cmp_optab, mode)->insn_code != CODE_FOR_nothing)
3879 if (purpose == ccp_jump)
3880 return bcc_gen_fctn[(int) code] != NULL;
3881 else if (purpose == ccp_store_flag)
3882 return setcc_gen_code[(int) code] != CODE_FOR_nothing;
3884 /* There's only one cmov entry point, and it's allowed to fail. */
/* Otherwise look for a purpose-specific combined pattern. */
3887 if (purpose == ccp_jump
3888 && optab_handler (cbranch_optab, mode)->insn_code != CODE_FOR_nothing)
3890 if (purpose == ccp_cmov
3891 && optab_handler (cmov_optab, mode)->insn_code != CODE_FOR_nothing)
3893 if (purpose == ccp_store_flag
3894 && optab_handler (cstore_optab, mode)->insn_code != CODE_FOR_nothing)
3896 mode = GET_MODE_WIDER_MODE (mode);
3898 while (mode != VOIDmode);
3903 /* This function is called when we are going to emit a compare instruction that
3904 compares the values found in *PX and *PY, using the rtl operator COMPARISON.
3906 *PMODE is the mode of the inputs (in case they are const_int).
3907 *PUNSIGNEDP nonzero says that the operands are unsigned;
3908 this matters if they need to be widened.
3910 If they have mode BLKmode, then SIZE specifies the size of both operands.
3912 This function performs all the setup necessary so that the caller only has
3913 to emit a single comparison insn. This setup can involve doing a BLKmode
3914 comparison or emitting a library call to perform the comparison if no insn
3915 is available to handle it.
3916 The values which are passed in through pointers can be modified; the caller
3917 should perform the comparison on the modified values. Constant
3918 comparisons must have already been folded. */
/* NOTE(review): non-contiguous listing; several declarations (result,
   opalign, libfunc, length_type), returns, and brace lines are missing
   from view.  Comments describe only visible code. */
3921 prepare_cmp_insn (rtx *px, rtx *py, enum rtx_code *pcomparison, rtx size,
3922 enum machine_mode *pmode, int *punsignedp,
3923 enum can_compare_purpose purpose)
3925 enum machine_mode mode = *pmode;
3926 rtx x = *px, y = *py;
3927 int unsignedp = *punsignedp;
3929 /* If we are inside an appropriately-short loop and we are optimizing,
3930 force expensive constants into a register. */
3931 if (CONSTANT_P (x) && optimize
3932 && rtx_cost (x, COMPARE) > COSTS_N_INSNS (1))
3933 x = force_reg (mode, x);
3935 if (CONSTANT_P (y) && optimize
3936 && rtx_cost (y, COMPARE) > COSTS_N_INSNS (1))
3937 y = force_reg (mode, y);
3940 /* Make sure if we have a canonical comparison. The RTL
3941 documentation states that canonical comparisons are required only
3942 for targets which have cc0. */
3943 gcc_assert (!CONSTANT_P (x) || CONSTANT_P (y));
3946 /* Don't let both operands fail to indicate the mode. */
3947 if (GET_MODE (x) == VOIDmode && GET_MODE (y) == VOIDmode)
3948 x = force_reg (mode, x);
3950 /* Handle all BLKmode compares. */
3952 if (mode == BLKmode)
3954 enum machine_mode cmp_mode, result_mode;
3955 enum insn_code cmp_code;
/* opalign = the guaranteed common alignment of both operands. */
3960 = GEN_INT (MIN (MEM_ALIGN (x), MEM_ALIGN (y)) / BITS_PER_UNIT);
3964 /* Try to use a memory block compare insn - either cmpstr
3965 or cmpmem will do. */
3966 for (cmp_mode = GET_CLASS_NARROWEST_MODE (MODE_INT);
3967 cmp_mode != VOIDmode;
3968 cmp_mode = GET_MODE_WIDER_MODE (cmp_mode))
/* Preference order: cmpmem, then cmpstr, then cmpstrn. */
3970 cmp_code = cmpmem_optab[cmp_mode];
3971 if (cmp_code == CODE_FOR_nothing)
3972 cmp_code = cmpstr_optab[cmp_mode];
3973 if (cmp_code == CODE_FOR_nothing)
3974 cmp_code = cmpstrn_optab[cmp_mode];
3975 if (cmp_code == CODE_FOR_nothing)
3978 /* Must make sure the size fits the insn's mode. */
3979 if ((GET_CODE (size) == CONST_INT
3980 && INTVAL (size) >= (1 << GET_MODE_BITSIZE (cmp_mode)))
3981 || (GET_MODE_BITSIZE (GET_MODE (size))
3982 > GET_MODE_BITSIZE (cmp_mode)))
3985 result_mode = insn_data[cmp_code].operand[0].mode;
3986 result = gen_reg_rtx (result_mode);
3987 size = convert_to_mode (cmp_mode, size, 1);
3988 emit_insn (GEN_FCN (cmp_code) (result, x, y, size, opalign));
/* Caller compares RESULT against zero in RESULT_MODE. */
3992 *pmode = result_mode;
3996 /* Otherwise call a library function, memcmp. */
3997 libfunc = memcmp_libfunc;
3998 length_type = sizetype;
3999 result_mode = TYPE_MODE (integer_type_node);
4000 cmp_mode = TYPE_MODE (length_type);
4001 size = convert_to_mode (TYPE_MODE (length_type), size,
4002 TYPE_UNSIGNED (length_type));
4004 result = emit_library_call_value (libfunc, 0, LCT_PURE_MAKE_BLOCK,
4011 *pmode = result_mode;
4015 /* Don't allow operands to the compare to trap, as that can put the
4016 compare and branch in different basic blocks. */
4017 if (flag_non_call_exceptions)
4020 x = force_reg (mode, x);
4022 y = force_reg (mode, y);
4027 if (can_compare_p (*pcomparison, mode, purpose))
4030 /* Handle a lib call just for the mode we are using. */
4032 if (optab_handler (cmp_optab, mode)->libfunc && !SCALAR_FLOAT_MODE_P (mode))
4034 rtx libfunc = optab_handler (cmp_optab, mode)->libfunc;
4037 /* If we want unsigned, and this mode has a distinct unsigned
4038 comparison routine, use that. */
4039 if (unsignedp && optab_handler (ucmp_optab, mode)->libfunc)
4040 libfunc = optab_handler (ucmp_optab, mode)->libfunc;
4042 result = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST_MAKE_BLOCK,
4043 targetm.libgcc_cmp_return_mode (),
4044 2, x, mode, y, mode);
4046 /* There are two kinds of comparison routines. Biased routines
4047 return 0/1/2, and unbiased routines return -1/0/1. Other parts
4048 of gcc expect that the comparison operation is equivalent
4049 to the modified comparison. For signed comparisons compare the
4050 result against 1 in the biased case, and zero in the unbiased
4051 case. For unsigned comparisons always compare against 1 after
4052 biasing the unbiased result by adding 1. This gives us a way to
4058 if (!TARGET_LIB_INT_CMP_BIASED)
/* Bias the -1/0/1 result into 0/1/2 form for the caller. */
4061 *px = plus_constant (result, 1);
/* Fall through to the float path: only scalar float modes remain. */
4068 gcc_assert (SCALAR_FLOAT_MODE_P (mode));
4069 prepare_float_lib_cmp (px, py, pcomparison, pmode, punsignedp);
4072 /* Before emitting an insn with code ICODE, make sure that X, which is going
4073 to be used for operand OPNUM of the insn, is converted from mode MODE to
4074 WIDER_MODE (UNSIGNEDP determines whether it is an unsigned conversion), and
4075 that it is accepted by the operand predicate. Return the new value. */
/* NOTE(review): non-contiguous listing; the return-zero path after the
   reload_completed test and the final return are not visible. */
4078 prepare_operand (int icode, rtx x, int opnum, enum machine_mode mode,
4079 enum machine_mode wider_mode, int unsignedp)
4081 if (mode != wider_mode)
4082 x = convert_modes (wider_mode, mode, x, unsignedp);
/* If the predicate rejects X, copy it into a register of the operand's
   mode -- but that is not possible once reload has completed. */
4084 if (!insn_data[icode].operand[opnum].predicate
4085 (x, insn_data[icode].operand[opnum].mode))
4087 if (reload_completed)
4089 x = copy_to_mode_reg (insn_data[icode].operand[opnum].mode, x);
4095 /* Subroutine of emit_cmp_and_jump_insns; this function is called when we know
4096 we can do the comparison.
4097 The arguments are the same as for emit_cmp_and_jump_insns; but LABEL may
4098 be NULL_RTX which indicates that only a comparison is to be generated. */
/* NOTE(review): non-contiguous listing; the do-loop head, returns and
   some guards (e.g. the LABEL null checks before the jump emissions)
   are not visible.  Visible logic tries, per widening mode: the
   combined cbranch pattern, then tst+bcc for compares against zero,
   then cmp+bcc. */
4101 emit_cmp_and_jump_insn_1 (rtx x, rtx y, enum machine_mode mode,
4102 enum rtx_code comparison, int unsignedp, rtx label)
4104 rtx test = gen_rtx_fmt_ee (comparison, mode, x, y);
4105 enum mode_class class = GET_MODE_CLASS (mode);
4106 enum machine_mode wider_mode = mode;
4108 /* Try combined insns first. */
4111 enum insn_code icode;
4112 PUT_MODE (test, wider_mode);
4116 icode = optab_handler (cbranch_optab, wider_mode)->insn_code;
4118 if (icode != CODE_FOR_nothing
4119 && insn_data[icode].operand[0].predicate (test, wider_mode))
4121 x = prepare_operand (icode, x, 1, mode, wider_mode, unsignedp);
4122 y = prepare_operand (icode, y, 2, mode, wider_mode, unsignedp);
4123 emit_jump_insn (GEN_FCN (icode) (test, x, y, label));
4128 /* Handle some compares against zero. */
4129 icode = (int) optab_handler (tst_optab, wider_mode)->insn_code;
4130 if (y == CONST0_RTX (mode) && icode != CODE_FOR_nothing)
4132 x = prepare_operand (icode, x, 0, mode, wider_mode, unsignedp);
4133 emit_insn (GEN_FCN (icode) (x));
4135 emit_jump_insn (bcc_gen_fctn[(int) comparison] (label));
4139 /* Handle compares for which there is a directly suitable insn. */
4141 icode = (int) optab_handler (cmp_optab, wider_mode)->insn_code;
4142 if (icode != CODE_FOR_nothing)
4144 x = prepare_operand (icode, x, 0, mode, wider_mode, unsignedp);
4145 y = prepare_operand (icode, y, 1, mode, wider_mode, unsignedp);
4146 emit_insn (GEN_FCN (icode) (x, y));
4148 emit_jump_insn (bcc_gen_fctn[(int) comparison] (label));
/* No pattern at this width: widen and retry, if the class allows. */
4152 if (!CLASS_HAS_WIDER_MODES_P (class))
4155 wider_mode = GET_MODE_WIDER_MODE (wider_mode);
4157 while (wider_mode != VOIDmode);
4162 /* Generate code to compare X with Y so that the condition codes are
4163 set and to jump to LABEL if the condition is true. If X is a
4164 constant and Y is not a constant, then the comparison is swapped to
4165 ensure that the comparison RTL has the canonical form.
4167 UNSIGNEDP nonzero says that X and Y are unsigned; this matters if they
4168 need to be widened by emit_cmp_insn. UNSIGNEDP is also used to select
4169 the proper branch condition code.
4171 If X and Y have mode BLKmode, then SIZE specifies the size of both X and Y.
4173 MODE is the mode of the inputs (in case they are const_int).
4175 COMPARISON is the rtl operator to compare with (EQ, NE, GT, etc.). It will
4176 be passed unchanged to emit_cmp_insn, then potentially converted into an
4177 unsigned variant based on UNSIGNEDP to select a proper jump instruction. */
/* NOTE(review): non-contiguous listing; the actual operand swap
   (op0/op1 exchange) and the unsignedp guard before
   unsigned_condition are not visible here. */
4180 emit_cmp_and_jump_insns (rtx x, rtx y, enum rtx_code comparison, rtx size,
4181 enum machine_mode mode, int unsignedp, rtx label)
4183 rtx op0 = x, op1 = y;
4185 /* Swap operands and condition to ensure canonical RTL. */
4186 if (swap_commutative_operands_p (x, y))
4188 /* If we're not emitting a branch, callers are required to pass
4189 operands in an order conforming to canonical RTL. We relax this
4190 for commutative comparisons so callers using EQ don't need to do
4191 swapping by hand. */
4192 gcc_assert (label || (comparison == swap_condition (comparison)));
4195 comparison = swap_condition (comparison);
4199 /* If OP0 is still a constant, then both X and Y must be constants.
4200 Force X into a register to create canonical RTL. */
4201 if (CONSTANT_P (op0))
4202 op0 = force_reg (mode, op0);
4206 comparison = unsigned_condition (comparison);
/* Delegate setup (widening, BLKmode, libcalls) then emit the insn. */
4208 prepare_cmp_insn (&op0, &op1, &comparison, size, &mode, &unsignedp,
4210 emit_cmp_and_jump_insn_1 (op0, op1, mode, comparison, unsignedp, label);
4213 /* Like emit_cmp_and_jump_insns, but generate only the comparison. */
/* Passing a null (0) label suppresses the jump emission. */
4216 emit_cmp_insn (rtx x, rtx y, enum rtx_code comparison, rtx size,
4217 enum machine_mode mode, int unsignedp)
4219 emit_cmp_and_jump_insns (x, y, comparison, size, mode, unsignedp, 0);
4222 /* Emit a library call comparison between floating point X and Y.
4223 COMPARISON is the rtl operator to compare with (EQ, NE, GT, etc.). */
/* NOTE(review): non-contiguous listing; x/y declarations, the
   reversed_p assignment, switch labels in the true/false table, and the
   writes back through px/py/pmode are not all visible. */
4226 prepare_float_lib_cmp (rtx *px, rtx *py, enum rtx_code *pcomparison,
4227 enum machine_mode *pmode, int *punsignedp)
4229 enum rtx_code comparison = *pcomparison;
4230 enum rtx_code swapped = swap_condition (comparison);
4231 enum rtx_code reversed = reverse_condition_maybe_unordered (comparison);
4234 enum machine_mode orig_mode = GET_MODE (x);
4235 enum machine_mode mode;
4236 rtx value, target, insns, equiv;
4238 bool reversed_p = false;
/* Find, widening as needed, a libfunc for the comparison as given,
   swapped, or (if boolean-returning) reversed. */
4240 for (mode = orig_mode;
4242 mode = GET_MODE_WIDER_MODE (mode))
4244 if ((libfunc = optab_handler (code_to_optab[comparison], mode)->libfunc))
4247 if ((libfunc = optab_handler (code_to_optab[swapped], mode)->libfunc))
4250 tmp = x; x = y; y = tmp;
4251 comparison = swapped;
4255 if ((libfunc = optab_handler (code_to_optab[reversed], mode)->libfunc)
4256 && FLOAT_LIB_COMPARE_RETURNS_BOOL (mode, reversed))
4258 comparison = reversed;
4264 gcc_assert (mode != VOIDmode);
4266 if (mode != orig_mode)
4268 x = convert_to_mode (mode, x, 0);
4269 y = convert_to_mode (mode, y, 0);
4272 /* Attach a REG_EQUAL note describing the semantics of the libcall to
4273 the RTL. The allows the RTL optimizers to delete the libcall if the
4274 condition can be determined at compile-time. */
4275 if (comparison == UNORDERED)
/* UNORDERED(x,y) == (x != x) || (y != y), expressed as IF_THEN_ELSE. */
4277 rtx temp = simplify_gen_relational (NE, word_mode, mode, x, x);
4278 equiv = simplify_gen_relational (NE, word_mode, mode, y, y);
4279 equiv = simplify_gen_ternary (IF_THEN_ELSE, word_mode, word_mode,
4280 temp, const_true_rtx, equiv);
4284 equiv = simplify_gen_relational (comparison, word_mode, mode, x, y);
4285 if (! FLOAT_LIB_COMPARE_RETURNS_BOOL (mode, comparison))
/* Map the comparison onto the libfunc's return-value convention
   (switch labels selecting among these pairs are not visible). */
4287 rtx true_rtx, false_rtx;
4292 true_rtx = const0_rtx;
4293 false_rtx = const_true_rtx;
4297 true_rtx = const_true_rtx;
4298 false_rtx = const0_rtx;
4302 true_rtx = const1_rtx;
4303 false_rtx = const0_rtx;
4307 true_rtx = const0_rtx;
4308 false_rtx = constm1_rtx;
4312 true_rtx = constm1_rtx;
4313 false_rtx = const0_rtx;
4317 true_rtx = const0_rtx;
4318 false_rtx = const1_rtx;
4324 equiv = simplify_gen_ternary (IF_THEN_ELSE, word_mode, word_mode,
4325 equiv, true_rtx, false_rtx);
4330 value = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
4331 word_mode, 2, x, mode, y, mode);
4332 insns = get_insns ();
4335 target = gen_reg_rtx (word_mode);
4336 emit_libcall_block (insns, target, value, equiv);
4338 if (comparison == UNORDERED
4339 || FLOAT_LIB_COMPARE_RETURNS_BOOL (mode, comparison))
4340 comparison = reversed_p ? EQ : NE;
4345 *pcomparison = comparison;
4349 /* Generate code to indirectly jump to a location given in the rtx LOC. */
/* If the indirect_jump pattern's predicate rejects LOC, copy it into a
   Pmode register first. */
4352 emit_indirect_jump (rtx loc)
4354 if (!insn_data[(int) CODE_FOR_indirect_jump].operand[0].predicate
4356 loc = copy_to_mode_reg (Pmode, loc);
4358 emit_jump_insn (gen_indirect_jump (loc));
4362 #ifdef HAVE_conditional_move
4364 /* Emit a conditional move instruction if the machine supports one for that
4365 condition and machine mode.
4367 OP0 and OP1 are the operands that should be compared using CODE. CMODE is
4368 the mode to use should they be constants. If it is VOIDmode, they cannot
4371 OP2 should be stored in TARGET if the comparison is true, otherwise OP3
4372 should be stored there. MODE is the mode to use should they be constants.
4373 If it is VOIDmode, they cannot both be constants.
4375 The result is either TARGET (perhaps modified) or NULL_RTX if the operation
4376 is not supported. */
/* NOTE(review): non-contiguous listing; the op0/op1 and op2/op3 swap
   bodies, the subtarget initialization, sequence start/end, and return
   statements are not visible.  Comments describe only visible code. */
4379 emit_conditional_move (rtx target, enum rtx_code code, rtx op0, rtx op1,
4380 enum machine_mode cmode, rtx op2, rtx op3,
4381 enum machine_mode mode, int unsignedp)
4383 rtx tem, subtarget, comparison, insn;
4384 enum insn_code icode;
4385 enum rtx_code reversed;
4387 /* If one operand is constant, make it the second one. Only do this
4388 if the other operand is not constant as well. */
4390 if (swap_commutative_operands_p (op0, op1))
4395 code = swap_condition (code);
4398 /* get_condition will prefer to generate LT and GT even if the old
4399 comparison was against zero, so undo that canonicalization here since
4400 comparisons against zero are cheaper. */
4401 if (code == LT && op1 == const1_rtx)
4402 code = LE, op1 = const0_rtx;
4403 else if (code == GT && op1 == constm1_rtx)
4404 code = GE, op1 = const0_rtx;
4406 if (cmode == VOIDmode)
4407 cmode = GET_MODE (op0);
/* Swapping OP2/OP3 requires reversing the comparison; only legal when
   the reversed code is computable. */
4409 if (swap_commutative_operands_p (op2, op3)
4410 && ((reversed = reversed_comparison_code_parts (code, op0, op1, NULL))
4419 if (mode == VOIDmode)
4420 mode = GET_MODE (op2);
4422 icode = movcc_gen_code[mode];
4424 if (icode == CODE_FOR_nothing)
4428 target = gen_reg_rtx (mode);
4432 /* If the insn doesn't accept these operands, put them in pseudos. */
4434 if (!insn_data[icode].operand[0].predicate
4435 (subtarget, insn_data[icode].operand[0].mode))
4436 subtarget = gen_reg_rtx (insn_data[icode].operand[0].mode);
4438 if (!insn_data[icode].operand[2].predicate
4439 (op2, insn_data[icode].operand[2].mode))
4440 op2 = copy_to_mode_reg (insn_data[icode].operand[2].mode, op2);
4442 if (!insn_data[icode].operand[3].predicate
4443 (op3, insn_data[icode].operand[3].mode))
4444 op3 = copy_to_mode_reg (insn_data[icode].operand[3].mode, op3);
4446 /* Everything should now be in the suitable form, so emit the compare insn
4447 and then the conditional move. */
4450 = compare_from_rtx (op0, op1, code, unsignedp, cmode, NULL_RTX);
4452 /* ??? Watch for const0_rtx (nop) and const_true_rtx (unconditional)? */
4453 /* We can get const0_rtx or const_true_rtx in some circumstances. Just
4454 return NULL and let the caller figure out how best to deal with this
4456 if (GET_CODE (comparison) != code)
4459 insn = GEN_FCN (icode) (subtarget, comparison, op2, op3);
4461 /* If that failed, then give up. */
4467 if (subtarget != target)
4468 convert_move (target, subtarget, 0);
4473 /* Return nonzero if a conditional move of mode MODE is supported.
4475 This function is for combine so it can tell whether an insn that looks
4476 like a conditional move is actually supported by the hardware. If we
4477 guess wrong we lose a bit on optimization, but that's it. */
4478 /* ??? sparc64 supports conditionally moving integers values based on fp
4479 comparisons, and vice versa. How do we handle them? */
/* Simply checks the movcc pattern table for MODE (return lines not
   visible in this listing). */
4482 can_conditionally_move_p (enum machine_mode mode)
4484 if (movcc_gen_code[mode] != CODE_FOR_nothing)
4490 #endif /* HAVE_conditional_move */
4492 /* Emit a conditional addition instruction if the machine supports one for that
4493 condition and machine mode.
4495 OP0 and OP1 are the operands that should be compared using CODE. CMODE is
4496 the mode to use should they be constants. If it is VOIDmode, they cannot
4499 OP2 should be stored in TARGET if the comparison is true, otherwise OP2+OP3
4500 should be stored there. MODE is the mode to use should they be constants.
4501 If it is VOIDmode, they cannot both be constants.
4503 The result is either TARGET (perhaps modified) or NULL_RTX if the operation
4504 is not supported. */
/* NOTE(review): structurally parallel to emit_conditional_move above,
   but uses addcc_optab.  As with that function, swap bodies, subtarget
   setup, and returns are missing from this listing. */
4507 emit_conditional_add (rtx target, enum rtx_code code, rtx op0, rtx op1,
4508 enum machine_mode cmode, rtx op2, rtx op3,
4509 enum machine_mode mode, int unsignedp)
4511 rtx tem, subtarget, comparison, insn;
4512 enum insn_code icode;
4513 enum rtx_code reversed;
4515 /* If one operand is constant, make it the second one. Only do this
4516 if the other operand is not constant as well. */
4518 if (swap_commutative_operands_p (op0, op1))
4523 code = swap_condition (code);
4526 /* get_condition will prefer to generate LT and GT even if the old
4527 comparison was against zero, so undo that canonicalization here since
4528 comparisons against zero are cheaper. */
4529 if (code == LT && op1 == const1_rtx)
4530 code = LE, op1 = const0_rtx;
4531 else if (code == GT && op1 == constm1_rtx)
4532 code = GE, op1 = const0_rtx;
4534 if (cmode == VOIDmode)
4535 cmode = GET_MODE (op0);
4537 if (swap_commutative_operands_p (op2, op3)
4538 && ((reversed = reversed_comparison_code_parts (code, op0, op1, NULL))
4547 if (mode == VOIDmode)
4548 mode = GET_MODE (op2);
4550 icode = optab_handler (addcc_optab, mode)->insn_code;
4552 if (icode == CODE_FOR_nothing)
4556 target = gen_reg_rtx (mode);
4558 /* If the insn doesn't accept these operands, put them in pseudos. */
4560 if (!insn_data[icode].operand[0].predicate
4561 (target, insn_data[icode].operand[0].mode))
4562 subtarget = gen_reg_rtx (insn_data[icode].operand[0].mode);
4566 if (!insn_data[icode].operand[2].predicate
4567 (op2, insn_data[icode].operand[2].mode))
4568 op2 = copy_to_mode_reg (insn_data[icode].operand[2].mode, op2);
4570 if (!insn_data[icode].operand[3].predicate
4571 (op3, insn_data[icode].operand[3].mode))
4572 op3 = copy_to_mode_reg (insn_data[icode].operand[3].mode, op3);
4574 /* Everything should now be in the suitable form, so emit the compare insn
4575 and then the conditional move. */
4578 = compare_from_rtx (op0, op1, code, unsignedp, cmode, NULL_RTX);
4580 /* ??? Watch for const0_rtx (nop) and const_true_rtx (unconditional)? */
4581 /* We can get const0_rtx or const_true_rtx in some circumstances. Just
4582 return NULL and let the caller figure out how best to deal with this
4584 if (GET_CODE (comparison) != code)
4587 insn = GEN_FCN (icode) (subtarget, comparison, op2, op3);
4589 /* If that failed, then give up. */
4595 if (subtarget != target)
4596 convert_move (target, subtarget, 0);
4601 /* These functions attempt to generate an insn body, rather than
4602 emitting the insn, but if the gen function already emits them, we
4603 make no attempt to turn them back into naked patterns. */
4605 /* Generate and return an insn body to add Y to X. */
/* Asserts (rather than checks) that the add pattern accepts X and Y;
   callers are expected to have verified via have_add2_insn. */
4608 gen_add2_insn (rtx x, rtx y)
4610 int icode = (int) optab_handler (add_optab, GET_MODE (x))->insn_code;
4612 gcc_assert (insn_data[icode].operand[0].predicate
4613 (x, insn_data[icode].operand[0].mode));
4614 gcc_assert (insn_data[icode].operand[1].predicate
4615 (x, insn_data[icode].operand[1].mode));
4616 gcc_assert (insn_data[icode].operand[2].predicate
4617 (y, insn_data[icode].operand[2].mode));
4619 return GEN_FCN (icode) (x, x, y);
4622 /* Generate and return an insn body to add r1 and c,
4623 storing the result in r0. */
/* Unlike gen_add2_insn, failure is reported (return before the GEN_FCN
   line, not visible here) instead of asserted. */
4625 gen_add3_insn (rtx r0, rtx r1, rtx c)
4627 int icode = (int) optab_handler (add_optab, GET_MODE (r0))->insn_code;
4629 if (icode == CODE_FOR_nothing
4630 || !(insn_data[icode].operand[0].predicate
4631 (r0, insn_data[icode].operand[0].mode))
4632 || !(insn_data[icode].operand[1].predicate
4633 (r1, insn_data[icode].operand[1].mode))
4634 || !(insn_data[icode].operand[2].predicate
4635 (c, insn_data[icode].operand[2].mode)))
4638 return GEN_FCN (icode) (r0, r1, c);
/* Predicate: can the target add Y to X in X's mode with a single insn?
   (Return statements are not visible in this listing.) */
4642 have_add2_insn (rtx x, rtx y)
4646 gcc_assert (GET_MODE (x) != VOIDmode);
4648 icode = (int) optab_handler (add_optab, GET_MODE (x))->insn_code;
4650 if (icode == CODE_FOR_nothing)
4653 if (!(insn_data[icode].operand[0].predicate
4654 (x, insn_data[icode].operand[0].mode))
4655 || !(insn_data[icode].operand[1].predicate
4656 (x, insn_data[icode].operand[1].mode))
4657 || !(insn_data[icode].operand[2].predicate
4658 (y, insn_data[icode].operand[2].mode)))
4664 /* Generate and return an insn body to subtract Y from X. */
/* Mirror of gen_add2_insn using sub_optab; operand validity is
   asserted, not checked. */
4667 gen_sub2_insn (rtx x, rtx y)
4669 int icode = (int) optab_handler (sub_optab, GET_MODE (x))->insn_code;
4671 gcc_assert (insn_data[icode].operand[0].predicate
4672 (x, insn_data[icode].operand[0].mode));
4673 gcc_assert (insn_data[icode].operand[1].predicate
4674 (x, insn_data[icode].operand[1].mode));
4675 gcc_assert (insn_data[icode].operand[2].predicate
4676 (y, insn_data[icode].operand[2].mode));
4678 return GEN_FCN (icode) (x, x, y);
4681 /* Generate and return an insn body to subtract r1 and c,
4682 storing the result in r0. */
/* Mirror of gen_add3_insn using sub_optab; failure path (before the
   GEN_FCN line) is not visible in this listing. */
4684 gen_sub3_insn (rtx r0, rtx r1, rtx c)
4686 int icode = (int) optab_handler (sub_optab, GET_MODE (r0))->insn_code;
4688 if (icode == CODE_FOR_nothing
4689 || !(insn_data[icode].operand[0].predicate
4690 (r0, insn_data[icode].operand[0].mode))
4691 || !(insn_data[icode].operand[1].predicate
4692 (r1, insn_data[icode].operand[1].mode))
4693 || !(insn_data[icode].operand[2].predicate
4694 (c, insn_data[icode].operand[2].mode)))
4697 return GEN_FCN (icode) (r0, r1, c);
/* Predicate: can the target subtract Y from X in X's mode with a
   single insn?  Mirror of have_add2_insn (returns not visible). */
4701 have_sub2_insn (rtx x, rtx y)
4705 gcc_assert (GET_MODE (x) != VOIDmode);
4707 icode = (int) optab_handler (sub_optab, GET_MODE (x))->insn_code;
4709 if (icode == CODE_FOR_nothing)
4712 if (!(insn_data[icode].operand[0].predicate
4713 (x, insn_data[icode].operand[0].mode))
4714 || !(insn_data[icode].operand[1].predicate
4715 (x, insn_data[icode].operand[1].mode))
4716 || !(insn_data[icode].operand[2].predicate
4717 (y, insn_data[icode].operand[2].mode)))
4723 /* Generate the body of an instruction to copy Y into X.
4724 It may be a list of insns, if one insn isn't enough. */
4727 gen_move_insn (rtx x, rtx y)
4732 emit_move_insn_1 (x, y);
4738 /* Return the insn code used to extend FROM_MODE to TO_MODE.
4739 UNSIGNEDP specifies zero-extension instead of sign-extension. If
4740 no such operation exists, CODE_FOR_nothing will be returned. */
4743 can_extend_p (enum machine_mode to_mode, enum machine_mode from_mode,
4747 #ifdef HAVE_ptr_extend
4749 return CODE_FOR_ptr_extend;
4752 tab = unsignedp ? zext_optab : sext_optab;
4753 return convert_optab_handler (tab, to_mode, from_mode)->insn_code;
4756 /* Generate the body of an insn to extend Y (with mode MFROM)
4757 into X (with mode MTO). Do zero-extension if UNSIGNEDP is nonzero. */
4760 gen_extend_insn (rtx x, rtx y, enum machine_mode mto,
4761 enum machine_mode mfrom, int unsignedp)
4763 enum insn_code icode = can_extend_p (mto, mfrom, unsignedp);
4764 return GEN_FCN (icode) (x, y);
4767 /* can_fix_p and can_float_p say whether the target machine
4768 can directly convert a given fixed point type to
4769 a given floating point type, or vice versa.
4770 The returned value is the CODE_FOR_... value to use,
4771 or CODE_FOR_nothing if these modes cannot be directly converted.
4773 *TRUNCP_PTR is set to 1 if it is necessary to output
4774 an explicit FTRUNC insn before the fix insn; otherwise 0. */
4776 static enum insn_code
4777 can_fix_p (enum machine_mode fixmode, enum machine_mode fltmode,
4778 int unsignedp, int *truncp_ptr)
4781 enum insn_code icode;
4783 tab = unsignedp ? ufixtrunc_optab : sfixtrunc_optab;
4784 icode = convert_optab_handler (tab, fixmode, fltmode)->insn_code;
4785 if (icode != CODE_FOR_nothing)
4791 /* FIXME: This requires a port to define both FIX and FTRUNC pattern
4792 for this to work. We need to rework the fix* and ftrunc* patterns
4793 and documentation. */
4794 tab = unsignedp ? ufix_optab : sfix_optab;
4795 icode = convert_optab_handler (tab, fixmode, fltmode)->insn_code;
4796 if (icode != CODE_FOR_nothing
4797 && optab_handler (ftrunc_optab, fltmode)->insn_code != CODE_FOR_nothing)
4804 return CODE_FOR_nothing;
4807 static enum insn_code
4808 can_float_p (enum machine_mode fltmode, enum machine_mode fixmode,
4813 tab = unsignedp ? ufloat_optab : sfloat_optab;
4814 return convert_optab_handler (tab, fltmode, fixmode)->insn_code;
4817 /* Generate code to convert FROM to floating point
4818 and store in TO. FROM must be fixed point and not VOIDmode.
4819 UNSIGNEDP nonzero means regard FROM as unsigned.
4820 Normally this is done by correcting the final value
4821 if it is negative. */
4824 expand_float (rtx to, rtx from, int unsignedp)
4826 enum insn_code icode;
4828 enum machine_mode fmode, imode;
4829 bool can_do_signed = false;
4831 /* Crash now, because we won't be able to decide which mode to use. */
4832 gcc_assert (GET_MODE (from) != VOIDmode);
4834 /* Look for an insn to do the conversion. Do it in the specified
4835 modes if possible; otherwise convert either input, output or both to
4836 wider mode. If the integer mode is wider than the mode of FROM,
4837 we can do the conversion signed even if the input is unsigned. */
4839 for (fmode = GET_MODE (to); fmode != VOIDmode;
4840 fmode = GET_MODE_WIDER_MODE (fmode))
4841 for (imode = GET_MODE (from); imode != VOIDmode;
4842 imode = GET_MODE_WIDER_MODE (imode))
4844 int doing_unsigned = unsignedp;
4846 if (fmode != GET_MODE (to)
4847 && significand_size (fmode) < GET_MODE_BITSIZE (GET_MODE (from)))
4850 icode = can_float_p (fmode, imode, unsignedp);
4851 if (icode == CODE_FOR_nothing && unsignedp)
4853 enum insn_code scode = can_float_p (fmode, imode, 0);
4854 if (scode != CODE_FOR_nothing)
4855 can_do_signed = true;
4856 if (imode != GET_MODE (from))
4857 icode = scode, doing_unsigned = 0;
4860 if (icode != CODE_FOR_nothing)
4862 if (imode != GET_MODE (from))
4863 from = convert_to_mode (imode, from, unsignedp);
4865 if (fmode != GET_MODE (to))
4866 target = gen_reg_rtx (fmode);
4868 emit_unop_insn (icode, target, from,
4869 doing_unsigned ? UNSIGNED_FLOAT : FLOAT);
4872 convert_move (to, target, 0);
4877 /* Unsigned integer, and no way to convert directly. For binary
4878 floating point modes, convert as signed, then conditionally adjust
4880 if (unsignedp && can_do_signed && !DECIMAL_FLOAT_MODE_P (GET_MODE (to)))
4882 rtx label = gen_label_rtx ();
4884 REAL_VALUE_TYPE offset;
4886 /* Look for a usable floating mode FMODE wider than the source and at
4887 least as wide as the target. Using FMODE will avoid rounding woes
4888 with unsigned values greater than the signed maximum value. */
4890 for (fmode = GET_MODE (to); fmode != VOIDmode;
4891 fmode = GET_MODE_WIDER_MODE (fmode))
4892 if (GET_MODE_BITSIZE (GET_MODE (from)) < GET_MODE_BITSIZE (fmode)
4893 && can_float_p (fmode, GET_MODE (from), 0) != CODE_FOR_nothing)
4896 if (fmode == VOIDmode)
4898 /* There is no such mode. Pretend the target is wide enough. */
4899 fmode = GET_MODE (to);
4901 /* Avoid double-rounding when TO is narrower than FROM. */
4902 if ((significand_size (fmode) + 1)
4903 < GET_MODE_BITSIZE (GET_MODE (from)))
4906 rtx neglabel = gen_label_rtx ();
4908 /* Don't use TARGET if it isn't a register, is a hard register,
4909 or is the wrong mode. */
4911 || REGNO (target) < FIRST_PSEUDO_REGISTER
4912 || GET_MODE (target) != fmode)
4913 target = gen_reg_rtx (fmode);
4915 imode = GET_MODE (from);
4916 do_pending_stack_adjust ();
4918 /* Test whether the sign bit is set. */
4919 emit_cmp_and_jump_insns (from, const0_rtx, LT, NULL_RTX, imode,
4922 /* The sign bit is not set. Convert as signed. */
4923 expand_float (target, from, 0);
4924 emit_jump_insn (gen_jump (label));
4927 /* The sign bit is set.
4928 Convert to a usable (positive signed) value by shifting right
4929 one bit, while remembering if a nonzero bit was shifted
4930 out; i.e., compute (from & 1) | (from >> 1). */
4932 emit_label (neglabel);
4933 temp = expand_binop (imode, and_optab, from, const1_rtx,
4934 NULL_RTX, 1, OPTAB_LIB_WIDEN);
4935 temp1 = expand_shift (RSHIFT_EXPR, imode, from, integer_one_node,
4937 temp = expand_binop (imode, ior_optab, temp, temp1, temp, 1,
4939 expand_float (target, temp, 0);
4941 /* Multiply by 2 to undo the shift above. */
4942 temp = expand_binop (fmode, add_optab, target, target,
4943 target, 0, OPTAB_LIB_WIDEN);
4945 emit_move_insn (target, temp);
4947 do_pending_stack_adjust ();
4953 /* If we are about to do some arithmetic to correct for an
4954 unsigned operand, do it in a pseudo-register. */
4956 if (GET_MODE (to) != fmode
4957 || !REG_P (to) || REGNO (to) < FIRST_PSEUDO_REGISTER)
4958 target = gen_reg_rtx (fmode);
4960 /* Convert as signed integer to floating. */
4961 expand_float (target, from, 0);
4963 /* If FROM is negative (and therefore TO is negative),
4964 correct its value by 2**bitwidth. */
4966 do_pending_stack_adjust ();
4967 emit_cmp_and_jump_insns (from, const0_rtx, GE, NULL_RTX, GET_MODE (from),
4971 real_2expN (&offset, GET_MODE_BITSIZE (GET_MODE (from)));
4972 temp = expand_binop (fmode, add_optab, target,
4973 CONST_DOUBLE_FROM_REAL_VALUE (offset, fmode),
4974 target, 0, OPTAB_LIB_WIDEN);
4976 emit_move_insn (target, temp);
4978 do_pending_stack_adjust ();
4983 /* No hardware instruction available; call a library routine. */
4988 convert_optab tab = unsignedp ? ufloat_optab : sfloat_optab;
4990 if (GET_MODE_SIZE (GET_MODE (from)) < GET_MODE_SIZE (SImode))
4991 from = convert_to_mode (SImode, from, unsignedp);
4993 libfunc = convert_optab_handler (tab, GET_MODE (to),
4994 GET_MODE (from))->libfunc;
4995 gcc_assert (libfunc);
4999 value = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
5000 GET_MODE (to), 1, from,
5002 insns = get_insns ();
5005 emit_libcall_block (insns, target, value,
5006 gen_rtx_FLOAT (GET_MODE (to), from));
5011 /* Copy result to requested destination
5012 if we have been computing in a temp location. */
5016 if (GET_MODE (target) == GET_MODE (to))
5017 emit_move_insn (to, target);
5019 convert_move (to, target, 0);
5023 /* Generate code to convert FROM to fixed point and store in TO. FROM
5024 must be floating point. */
5027 expand_fix (rtx to, rtx from, int unsignedp)
5029 enum insn_code icode;
5031 enum machine_mode fmode, imode;
5034 /* We first try to find a pair of modes, one real and one integer, at
5035 least as wide as FROM and TO, respectively, in which we can open-code
5036 this conversion. If the integer mode is wider than the mode of TO,
5037 we can do the conversion either signed or unsigned. */
5039 for (fmode = GET_MODE (from); fmode != VOIDmode;
5040 fmode = GET_MODE_WIDER_MODE (fmode))
5041 for (imode = GET_MODE (to); imode != VOIDmode;
5042 imode = GET_MODE_WIDER_MODE (imode))
5044 int doing_unsigned = unsignedp;
5046 icode = can_fix_p (imode, fmode, unsignedp, &must_trunc);
5047 if (icode == CODE_FOR_nothing && imode != GET_MODE (to) && unsignedp)
5048 icode = can_fix_p (imode, fmode, 0, &must_trunc), doing_unsigned = 0;
5050 if (icode != CODE_FOR_nothing)
5052 if (fmode != GET_MODE (from))
5053 from = convert_to_mode (fmode, from, 0);
5057 rtx temp = gen_reg_rtx (GET_MODE (from));
5058 from = expand_unop (GET_MODE (from), ftrunc_optab, from,
5062 if (imode != GET_MODE (to))
5063 target = gen_reg_rtx (imode);
5065 emit_unop_insn (icode, target, from,
5066 doing_unsigned ? UNSIGNED_FIX : FIX);
5068 convert_move (to, target, unsignedp);
5073 /* For an unsigned conversion, there is one more way to do it.
5074 If we have a signed conversion, we generate code that compares
5075 the real value to the largest representable positive number. If if
5076 is smaller, the conversion is done normally. Otherwise, subtract
5077 one plus the highest signed number, convert, and add it back.
5079 We only need to check all real modes, since we know we didn't find
5080 anything with a wider integer mode.
5082 This code used to extend FP value into mode wider than the destination.
5083 This is not needed. Consider, for instance conversion from SFmode
5086 The hot path through the code is dealing with inputs smaller than 2^63
5087 and doing just the conversion, so there is no bits to lose.
5089 In the other path we know the value is positive in the range 2^63..2^64-1
5090 inclusive. (as for other imput overflow happens and result is undefined)
5091 So we know that the most important bit set in mantissa corresponds to
5092 2^63. The subtraction of 2^63 should not generate any rounding as it
5093 simply clears out that bit. The rest is trivial. */
5095 if (unsignedp && GET_MODE_BITSIZE (GET_MODE (to)) <= HOST_BITS_PER_WIDE_INT)
5096 for (fmode = GET_MODE (from); fmode != VOIDmode;
5097 fmode = GET_MODE_WIDER_MODE (fmode))
5098 if (CODE_FOR_nothing != can_fix_p (GET_MODE (to), fmode, 0,
5102 REAL_VALUE_TYPE offset;
5103 rtx limit, lab1, lab2, insn;
5105 bitsize = GET_MODE_BITSIZE (GET_MODE (to));
5106 real_2expN (&offset, bitsize - 1);
5107 limit = CONST_DOUBLE_FROM_REAL_VALUE (offset, fmode);
5108 lab1 = gen_label_rtx ();
5109 lab2 = gen_label_rtx ();
5111 if (fmode != GET_MODE (from))
5112 from = convert_to_mode (fmode, from, 0);
5114 /* See if we need to do the subtraction. */
5115 do_pending_stack_adjust ();
5116 emit_cmp_and_jump_insns (from, limit, GE, NULL_RTX, GET_MODE (from),
5119 /* If not, do the signed "fix" and branch around fixup code. */
5120 expand_fix (to, from, 0);
5121 emit_jump_insn (gen_jump (lab2));
5124 /* Otherwise, subtract 2**(N-1), convert to signed number,
5125 then add 2**(N-1). Do the addition using XOR since this
5126 will often generate better code. */
5128 target = expand_binop (GET_MODE (from), sub_optab, from, limit,
5129 NULL_RTX, 0, OPTAB_LIB_WIDEN);
5130 expand_fix (to, target, 0);
5131 target = expand_binop (GET_MODE (to), xor_optab, to,
5133 ((HOST_WIDE_INT) 1 << (bitsize - 1),
5135 to, 1, OPTAB_LIB_WIDEN);
5138 emit_move_insn (to, target);
5142 if (optab_handler (mov_optab, GET_MODE (to))->insn_code
5143 != CODE_FOR_nothing)
5145 /* Make a place for a REG_NOTE and add it. */
5146 insn = emit_move_insn (to, to);
5147 set_unique_reg_note (insn,
5149 gen_rtx_fmt_e (UNSIGNED_FIX,
5157 /* We can't do it with an insn, so use a library call. But first ensure
5158 that the mode of TO is at least as wide as SImode, since those are the
5159 only library calls we know about. */
5161 if (GET_MODE_SIZE (GET_MODE (to)) < GET_MODE_SIZE (SImode))
5163 target = gen_reg_rtx (SImode);
5165 expand_fix (target, from, unsignedp);
5173 convert_optab tab = unsignedp ? ufix_optab : sfix_optab;
5174 libfunc = convert_optab_handler (tab, GET_MODE (to),
5175 GET_MODE (from))->libfunc;
5176 gcc_assert (libfunc);
5180 value = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
5181 GET_MODE (to), 1, from,
5183 insns = get_insns ();
5186 emit_libcall_block (insns, target, value,
5187 gen_rtx_fmt_e (unsignedp ? UNSIGNED_FIX : FIX,
5188 GET_MODE (to), from));
5193 if (GET_MODE (to) == GET_MODE (target))
5194 emit_move_insn (to, target);
5196 convert_move (to, target, 0);
5200 /* Generate code to convert FROM to fixed point and store in TO. FROM
5201 must be floating point, TO must be signed. Use the conversion optab
5202 TAB to do the conversion. */
5205 expand_sfix_optab (rtx to, rtx from, convert_optab tab)
5207 enum insn_code icode;
5209 enum machine_mode fmode, imode;
5211 /* We first try to find a pair of modes, one real and one integer, at
5212 least as wide as FROM and TO, respectively, in which we can open-code
5213 this conversion. If the integer mode is wider than the mode of TO,
5214 we can do the conversion either signed or unsigned. */
5216 for (fmode = GET_MODE (from); fmode != VOIDmode;
5217 fmode = GET_MODE_WIDER_MODE (fmode))
5218 for (imode = GET_MODE (to); imode != VOIDmode;
5219 imode = GET_MODE_WIDER_MODE (imode))
5221 icode = convert_optab_handler (tab, imode, fmode)->insn_code;
5222 if (icode != CODE_FOR_nothing)
5224 if (fmode != GET_MODE (from))
5225 from = convert_to_mode (fmode, from, 0);
5227 if (imode != GET_MODE (to))
5228 target = gen_reg_rtx (imode);
5230 emit_unop_insn (icode, target, from, UNKNOWN);
5232 convert_move (to, target, 0);
5240 /* Report whether we have an instruction to perform the operation
5241 specified by CODE on operands of mode MODE. */
5243 have_insn_for (enum rtx_code code, enum machine_mode mode)
5245 return (code_to_optab[(int) code] != 0
5246 && (optab_handler (code_to_optab[(int) code], mode)->insn_code
5247 != CODE_FOR_nothing));
5250 /* Create a blank optab. */
5255 optab op = ggc_alloc (sizeof (struct optab));
5256 for (i = 0; i < NUM_MACHINE_MODES; i++)
5258 optab_handler (op, i)->insn_code = CODE_FOR_nothing;
5259 optab_handler (op, i)->libfunc = 0;
5265 static convert_optab
5266 new_convert_optab (void)
5269 convert_optab op = ggc_alloc (sizeof (struct convert_optab));
5270 for (i = 0; i < NUM_MACHINE_MODES; i++)
5271 for (j = 0; j < NUM_MACHINE_MODES; j++)
5273 convert_optab_handler (op, i, j)->insn_code = CODE_FOR_nothing;
5274 convert_optab_handler (op, i, j)->libfunc = 0;
5279 /* Same, but fill in its code as CODE, and write it into the
5280 code_to_optab table. */
5282 init_optab (enum rtx_code code)
5284 optab op = new_optab ();
5286 code_to_optab[(int) code] = op;
5290 /* Same, but fill in its code as CODE, and do _not_ write it into
5291 the code_to_optab table. */
5293 init_optabv (enum rtx_code code)
5295 optab op = new_optab ();
5300 /* Conversion optabs never go in the code_to_optab table. */
5301 static inline convert_optab
5302 init_convert_optab (enum rtx_code code)
5304 convert_optab op = new_convert_optab ();
5309 /* Initialize the libfunc fields of an entire group of entries in some
5310 optab. Each entry is set equal to a string consisting of a leading
5311 pair of underscores followed by a generic operation name followed by
5312 a mode name (downshifted to lowercase) followed by a single character
5313 representing the number of operands for the given operation (which is
5314 usually one of the characters '2', '3', or '4').
5316 OPTABLE is the table in which libfunc fields are to be initialized.
5317 FIRST_MODE is the first machine mode index in the given optab to
5319 LAST_MODE is the last machine mode index in the given optab to
5321 OPNAME is the generic (string) name of the operation.
5322 SUFFIX is the character which specifies the number of operands for
5323 the given generic operation.
5327 init_libfuncs (optab optable, int first_mode, int last_mode,
5328 const char *opname, int suffix)
5331 unsigned opname_len = strlen (opname);
5333 for (mode = first_mode; (int) mode <= (int) last_mode;
5334 mode = (enum machine_mode) ((int) mode + 1))
5336 const char *mname = GET_MODE_NAME (mode);
5337 unsigned mname_len = strlen (mname);
5338 char *libfunc_name = alloca (2 + opname_len + mname_len + 1 + 1);
5345 for (q = opname; *q; )
5347 for (q = mname; *q; q++)
5348 *p++ = TOLOWER (*q);
5352 optab_handler (optable, mode)->libfunc
5353 = init_one_libfunc (ggc_alloc_string (libfunc_name, p - libfunc_name));
5357 /* Initialize the libfunc fields of an entire group of entries in some
5358 optab which correspond to all integer mode operations. The parameters
5359 have the same meaning as similarly named ones for the `init_libfuncs'
5360 routine. (See above). */
5363 init_integral_libfuncs (optab optable, const char *opname, int suffix)
5365 int maxsize = 2*BITS_PER_WORD;
5366 if (maxsize < LONG_LONG_TYPE_SIZE)
5367 maxsize = LONG_LONG_TYPE_SIZE;
5368 init_libfuncs (optable, word_mode,
5369 mode_for_size (maxsize, MODE_INT, 0),
5373 /* Initialize the libfunc fields of an entire group of entries in some
5374 optab which correspond to all real mode operations. The parameters
5375 have the same meaning as similarly named ones for the `init_libfuncs'
5376 routine. (See above). */
5379 init_floating_libfuncs (optab optable, const char *opname, int suffix)
5381 char *dec_opname = alloca (sizeof (DECIMAL_PREFIX) + strlen (opname));
5383 /* For BID support, change the name to have either a bid_ or dpd_ prefix
5384 depending on the low level floating format used. */
5385 memcpy (dec_opname, DECIMAL_PREFIX, sizeof (DECIMAL_PREFIX) - 1);
5386 strcpy (dec_opname + sizeof (DECIMAL_PREFIX) - 1, opname);
5388 init_libfuncs (optable, MIN_MODE_FLOAT, MAX_MODE_FLOAT, opname, suffix);
5389 init_libfuncs (optable, MIN_MODE_DECIMAL_FLOAT, MAX_MODE_DECIMAL_FLOAT,
5390 dec_opname, suffix);
5393 /* Initialize the libfunc fields of an entire group of entries of an
5394 inter-mode-class conversion optab. The string formation rules are
5395 similar to the ones for init_libfuncs, above, but instead of having
5396 a mode name and an operand count these functions have two mode names
5397 and no operand count. */
5399 init_interclass_conv_libfuncs (convert_optab tab, const char *opname,
5400 enum mode_class from_class,
5401 enum mode_class to_class)
5403 enum machine_mode first_from_mode = GET_CLASS_NARROWEST_MODE (from_class);
5404 enum machine_mode first_to_mode = GET_CLASS_NARROWEST_MODE (to_class);
5405 size_t opname_len = strlen (opname);
5406 size_t max_mname_len = 0;
5408 enum machine_mode fmode, tmode;
5409 const char *fname, *tname;
5411 char *libfunc_name, *suffix;
5412 char *nondec_name, *dec_name, *nondec_suffix, *dec_suffix;
5415 /* If this is a decimal conversion, add the current BID vs. DPD prefix that
5416 depends on which underlying decimal floating point format is used. */
5417 const size_t dec_len = sizeof (DECIMAL_PREFIX) - 1;
5419 for (fmode = first_from_mode;
5421 fmode = GET_MODE_WIDER_MODE (fmode))
5422 max_mname_len = MAX (max_mname_len, strlen (GET_MODE_NAME (fmode)));
5424 for (tmode = first_to_mode;
5426 tmode = GET_MODE_WIDER_MODE (tmode))
5427 max_mname_len = MAX (max_mname_len, strlen (GET_MODE_NAME (tmode)));
5429 nondec_name = alloca (2 + opname_len + 2*max_mname_len + 1 + 1);
5430 nondec_name[0] = '_';
5431 nondec_name[1] = '_';
5432 memcpy (&nondec_name[2], opname, opname_len);
5433 nondec_suffix = nondec_name + opname_len + 2;
5435 dec_name = alloca (2 + dec_len + opname_len + 2*max_mname_len + 1 + 1);
5438 memcpy (&dec_name[2], DECIMAL_PREFIX, dec_len);
5439 memcpy (&dec_name[2+dec_len], opname, opname_len);
5440 dec_suffix = dec_name + dec_len + opname_len + 2;
5442 for (fmode = first_from_mode; fmode != VOIDmode;
5443 fmode = GET_MODE_WIDER_MODE (fmode))
5444 for (tmode = first_to_mode; tmode != VOIDmode;
5445 tmode = GET_MODE_WIDER_MODE (tmode))
5447 fname = GET_MODE_NAME (fmode);
5448 tname = GET_MODE_NAME (tmode);
5450 if (DECIMAL_FLOAT_MODE_P(fmode) || DECIMAL_FLOAT_MODE_P(tmode))
5452 libfunc_name = dec_name;
5453 suffix = dec_suffix;
5457 libfunc_name = nondec_name;
5458 suffix = nondec_suffix;
5462 for (q = fname; *q; p++, q++)
5464 for (q = tname; *q; p++, q++)
5469 convert_optab_handler (tab, tmode, fmode)->libfunc
5470 = init_one_libfunc (ggc_alloc_string (libfunc_name,
5475 /* Initialize the libfunc fields of an entire group of entries of an
5476 intra-mode-class conversion optab. The string formation rules are
5477 similar to the ones for init_libfunc, above. WIDENING says whether
5478 the optab goes from narrow to wide modes or vice versa. These functions
5479 have two mode names _and_ an operand count. */
5481 init_intraclass_conv_libfuncs (convert_optab tab, const char *opname,
5482 enum mode_class class, bool widening)
5484 enum machine_mode first_mode = GET_CLASS_NARROWEST_MODE (class);
5485 size_t opname_len = strlen (opname);
5486 size_t max_mname_len = 0;
5488 enum machine_mode nmode, wmode;
5489 const char *nname, *wname;
5491 char *nondec_name, *dec_name, *nondec_suffix, *dec_suffix;
5492 char *libfunc_name, *suffix;
5495 /* If this is a decimal conversion, add the current BID vs. DPD prefix that
5496 depends on which underlying decimal floating point format is used. */
5497 const size_t dec_len = sizeof (DECIMAL_PREFIX) - 1;
5499 for (nmode = first_mode; nmode != VOIDmode;
5500 nmode = GET_MODE_WIDER_MODE (nmode))
5501 max_mname_len = MAX (max_mname_len, strlen (GET_MODE_NAME (nmode)));
5503 nondec_name = alloca (2 + opname_len + 2*max_mname_len + 1 + 1);
5504 nondec_name[0] = '_';
5505 nondec_name[1] = '_';
5506 memcpy (&nondec_name[2], opname, opname_len);
5507 nondec_suffix = nondec_name + opname_len + 2;
5509 dec_name = alloca (2 + dec_len + opname_len + 2*max_mname_len + 1 + 1);
5512 memcpy (&dec_name[2], DECIMAL_PREFIX, dec_len);
5513 memcpy (&dec_name[2 + dec_len], opname, opname_len);
5514 dec_suffix = dec_name + dec_len + opname_len + 2;
5516 for (nmode = first_mode; nmode != VOIDmode;
5517 nmode = GET_MODE_WIDER_MODE (nmode))
5518 for (wmode = GET_MODE_WIDER_MODE (nmode); wmode != VOIDmode;
5519 wmode = GET_MODE_WIDER_MODE (wmode))
5521 nname = GET_MODE_NAME (nmode);
5522 wname = GET_MODE_NAME (wmode);
5524 if (DECIMAL_FLOAT_MODE_P(nmode) || DECIMAL_FLOAT_MODE_P(wmode))
5526 libfunc_name = dec_name;
5527 suffix = dec_suffix;
5531 libfunc_name = nondec_name;
5532 suffix = nondec_suffix;
5536 for (q = widening ? nname : wname; *q; p++, q++)
5538 for (q = widening ? wname : nname; *q; p++, q++)
5544 convert_optab_handler(tab, widening ? wmode : nmode,
5545 widening ? nmode : wmode)->libfunc
5546 = init_one_libfunc (ggc_alloc_string (libfunc_name,
5553 init_one_libfunc (const char *name)
5557 /* Create a FUNCTION_DECL that can be passed to
5558 targetm.encode_section_info. */
5559 /* ??? We don't have any type information except for this is
5560 a function. Pretend this is "int foo()". */
5561 tree decl = build_decl (FUNCTION_DECL, get_identifier (name),
5562 build_function_type (integer_type_node, NULL_TREE));
5563 DECL_ARTIFICIAL (decl) = 1;
5564 DECL_EXTERNAL (decl) = 1;
5565 TREE_PUBLIC (decl) = 1;
5567 symbol = XEXP (DECL_RTL (decl), 0);
5569 /* Zap the nonsensical SYMBOL_REF_DECL for this. What we're left with
5570 are the flags assigned by targetm.encode_section_info. */
5571 SET_SYMBOL_REF_DECL (symbol, 0);
5576 /* Call this to reset the function entry for one optab (OPTABLE) in mode
5577 MODE to NAME, which should be either 0 or a string constant. */
5579 set_optab_libfunc (optab optable, enum machine_mode mode, const char *name)
5582 optab_handler (optable, mode)->libfunc = init_one_libfunc (name);
5584 optab_handler (optable, mode)->libfunc = 0;
5587 /* Call this to reset the function entry for one conversion optab
5588 (OPTABLE) from mode FMODE to mode TMODE to NAME, which should be
5589 either 0 or a string constant. */
5591 set_conv_libfunc (convert_optab optable, enum machine_mode tmode,
5592 enum machine_mode fmode, const char *name)
5595 convert_optab_handler (optable, tmode, fmode)->libfunc
5596 = init_one_libfunc (name);
5598 convert_optab_handler (optable, tmode, fmode)->libfunc = 0;
5601 /* Call this to initialize the contents of the optabs
5602 appropriately for the current target machine. */
5608 enum machine_mode int_mode;
5610 /* Start by initializing all tables to contain CODE_FOR_nothing. */
5612 for (i = 0; i < NUM_RTX_CODE; i++)
5613 setcc_gen_code[i] = CODE_FOR_nothing;
5615 #ifdef HAVE_conditional_move
5616 for (i = 0; i < NUM_MACHINE_MODES; i++)
5617 movcc_gen_code[i] = CODE_FOR_nothing;
5620 for (i = 0; i < NUM_MACHINE_MODES; i++)
5622 vcond_gen_code[i] = CODE_FOR_nothing;
5623 vcondu_gen_code[i] = CODE_FOR_nothing;
5626 add_optab = init_optab (PLUS);
5627 addv_optab = init_optabv (PLUS);
5628 sub_optab = init_optab (MINUS);
5629 subv_optab = init_optabv (MINUS);
5630 smul_optab = init_optab (MULT);
5631 smulv_optab = init_optabv (MULT);
5632 smul_highpart_optab = init_optab (UNKNOWN);
5633 umul_highpart_optab = init_optab (UNKNOWN);
5634 smul_widen_optab = init_optab (UNKNOWN);
5635 umul_widen_optab = init_optab (UNKNOWN);
5636 usmul_widen_optab = init_optab (UNKNOWN);
5637 smadd_widen_optab = init_optab (UNKNOWN);
5638 umadd_widen_optab = init_optab (UNKNOWN);
5639 smsub_widen_optab = init_optab (UNKNOWN);
5640 umsub_widen_optab = init_optab (UNKNOWN);
5641 sdiv_optab = init_optab (DIV);
5642 sdivv_optab = init_optabv (DIV);
5643 sdivmod_optab = init_optab (UNKNOWN);
5644 udiv_optab = init_optab (UDIV);
5645 udivmod_optab = init_optab (UNKNOWN);
5646 smod_optab = init_optab (MOD);
5647 umod_optab = init_optab (UMOD);
5648 fmod_optab = init_optab (UNKNOWN);
5649 remainder_optab = init_optab (UNKNOWN);
5650 ftrunc_optab = init_optab (UNKNOWN);
5651 and_optab = init_optab (AND);
5652 ior_optab = init_optab (IOR);
5653 xor_optab = init_optab (XOR);
5654 ashl_optab = init_optab (ASHIFT);
5655 ashr_optab = init_optab (ASHIFTRT);
5656 lshr_optab = init_optab (LSHIFTRT);
5657 rotl_optab = init_optab (ROTATE);
5658 rotr_optab = init_optab (ROTATERT);
5659 smin_optab = init_optab (SMIN);
5660 smax_optab = init_optab (SMAX);
5661 umin_optab = init_optab (UMIN);
5662 umax_optab = init_optab (UMAX);
5663 pow_optab = init_optab (UNKNOWN);
5664 atan2_optab = init_optab (UNKNOWN);
5666 /* These three have codes assigned exclusively for the sake of
5668 mov_optab = init_optab (SET);
5669 movstrict_optab = init_optab (STRICT_LOW_PART);
5670 cmp_optab = init_optab (COMPARE);
5672 storent_optab = init_optab (UNKNOWN);
5674 ucmp_optab = init_optab (UNKNOWN);
5675 tst_optab = init_optab (UNKNOWN);
5677 eq_optab = init_optab (EQ);
5678 ne_optab = init_optab (NE);
5679 gt_optab = init_optab (GT);
5680 ge_optab = init_optab (GE);
5681 lt_optab = init_optab (LT);
5682 le_optab = init_optab (LE);
5683 unord_optab = init_optab (UNORDERED);
5685 neg_optab = init_optab (NEG);
5686 negv_optab = init_optabv (NEG);
5687 abs_optab = init_optab (ABS);
5688 absv_optab = init_optabv (ABS);
5689 addcc_optab = init_optab (UNKNOWN);
5690 one_cmpl_optab = init_optab (NOT);
5691 bswap_optab = init_optab (BSWAP);
5692 ffs_optab = init_optab (FFS);
5693 clz_optab = init_optab (CLZ);
5694 ctz_optab = init_optab (CTZ);
5695 popcount_optab = init_optab (POPCOUNT);
5696 parity_optab = init_optab (PARITY);
5697 sqrt_optab = init_optab (SQRT);
5698 floor_optab = init_optab (UNKNOWN);
5699 ceil_optab = init_optab (UNKNOWN);
5700 round_optab = init_optab (UNKNOWN);
5701 btrunc_optab = init_optab (UNKNOWN);
5702 nearbyint_optab = init_optab (UNKNOWN);
5703 rint_optab = init_optab (UNKNOWN);
5704 sincos_optab = init_optab (UNKNOWN);
5705 sin_optab = init_optab (UNKNOWN);
5706 asin_optab = init_optab (UNKNOWN);
5707 cos_optab = init_optab (UNKNOWN);
5708 acos_optab = init_optab (UNKNOWN);
5709 exp_optab = init_optab (UNKNOWN);
5710 exp10_optab = init_optab (UNKNOWN);
5711 exp2_optab = init_optab (UNKNOWN);
5712 expm1_optab = init_optab (UNKNOWN);
5713 ldexp_optab = init_optab (UNKNOWN);
5714 scalb_optab = init_optab (UNKNOWN);
5715 logb_optab = init_optab (UNKNOWN);
5716 ilogb_optab = init_optab (UNKNOWN);
5717 log_optab = init_optab (UNKNOWN);
5718 log10_optab = init_optab (UNKNOWN);
5719 log2_optab = init_optab (UNKNOWN);
5720 log1p_optab = init_optab (UNKNOWN);
5721 tan_optab = init_optab (UNKNOWN);
5722 atan_optab = init_optab (UNKNOWN);
5723 copysign_optab = init_optab (UNKNOWN);
5724 signbit_optab = init_optab (UNKNOWN);
5726 isinf_optab = init_optab (UNKNOWN);
5728 strlen_optab = init_optab (UNKNOWN);
5729 cbranch_optab = init_optab (UNKNOWN);
5730 cmov_optab = init_optab (UNKNOWN);
5731 cstore_optab = init_optab (UNKNOWN);
5732 push_optab = init_optab (UNKNOWN);
5734 reduc_smax_optab = init_optab (UNKNOWN);
5735 reduc_umax_optab = init_optab (UNKNOWN);
5736 reduc_smin_optab = init_optab (UNKNOWN);
5737 reduc_umin_optab = init_optab (UNKNOWN);
5738 reduc_splus_optab = init_optab (UNKNOWN);
5739 reduc_uplus_optab = init_optab (UNKNOWN);
5741 ssum_widen_optab = init_optab (UNKNOWN);
5742 usum_widen_optab = init_optab (UNKNOWN);
5743 sdot_prod_optab = init_optab (UNKNOWN);
5744 udot_prod_optab = init_optab (UNKNOWN);
5746 vec_extract_optab = init_optab (UNKNOWN);
5747 vec_extract_even_optab = init_optab (UNKNOWN);
5748 vec_extract_odd_optab = init_optab (UNKNOWN);
5749 vec_interleave_high_optab = init_optab (UNKNOWN);
5750 vec_interleave_low_optab = init_optab (UNKNOWN);
5751 vec_set_optab = init_optab (UNKNOWN);
5752 vec_init_optab = init_optab (UNKNOWN);
5753 vec_shl_optab = init_optab (UNKNOWN);
5754 vec_shr_optab = init_optab (UNKNOWN);
5755 vec_realign_load_optab = init_optab (UNKNOWN);
5756 movmisalign_optab = init_optab (UNKNOWN);
5757 vec_widen_umult_hi_optab = init_optab (UNKNOWN);
5758 vec_widen_umult_lo_optab = init_optab (UNKNOWN);
5759 vec_widen_smult_hi_optab = init_optab (UNKNOWN);
5760 vec_widen_smult_lo_optab = init_optab (UNKNOWN);
5761 vec_unpacks_hi_optab = init_optab (UNKNOWN);
5762 vec_unpacks_lo_optab = init_optab (UNKNOWN);
5763 vec_unpacku_hi_optab = init_optab (UNKNOWN);
5764 vec_unpacku_lo_optab = init_optab (UNKNOWN);
5765 vec_unpacks_float_hi_optab = init_optab (UNKNOWN);
5766 vec_unpacks_float_lo_optab = init_optab (UNKNOWN);
5767 vec_unpacku_float_hi_optab = init_optab (UNKNOWN);
5768 vec_unpacku_float_lo_optab = init_optab (UNKNOWN);
5769 vec_pack_trunc_optab = init_optab (UNKNOWN);
5770 vec_pack_usat_optab = init_optab (UNKNOWN);
5771 vec_pack_ssat_optab = init_optab (UNKNOWN);
5772 vec_pack_ufix_trunc_optab = init_optab (UNKNOWN);
5773 vec_pack_sfix_trunc_optab = init_optab (UNKNOWN);
5775 powi_optab = init_optab (UNKNOWN);
5778 sext_optab = init_convert_optab (SIGN_EXTEND);
5779 zext_optab = init_convert_optab (ZERO_EXTEND);
5780 trunc_optab = init_convert_optab (TRUNCATE);
5781 sfix_optab = init_convert_optab (FIX);
5782 ufix_optab = init_convert_optab (UNSIGNED_FIX);
5783 sfixtrunc_optab = init_convert_optab (UNKNOWN);
5784 ufixtrunc_optab = init_convert_optab (UNKNOWN);
5785 sfloat_optab = init_convert_optab (FLOAT);
5786 ufloat_optab = init_convert_optab (UNSIGNED_FLOAT);
5787 lrint_optab = init_convert_optab (UNKNOWN);
5788 lround_optab = init_convert_optab (UNKNOWN);
5789 lfloor_optab = init_convert_optab (UNKNOWN);
5790 lceil_optab = init_convert_optab (UNKNOWN);
5792 for (i = 0; i < NUM_MACHINE_MODES; i++)
5794 movmem_optab[i] = CODE_FOR_nothing;
5795 cmpstr_optab[i] = CODE_FOR_nothing;
5796 cmpstrn_optab[i] = CODE_FOR_nothing;
5797 cmpmem_optab[i] = CODE_FOR_nothing;
5798 setmem_optab[i] = CODE_FOR_nothing;
5800 sync_add_optab[i] = CODE_FOR_nothing;
5801 sync_sub_optab[i] = CODE_FOR_nothing;
5802 sync_ior_optab[i] = CODE_FOR_nothing;
5803 sync_and_optab[i] = CODE_FOR_nothing;
5804 sync_xor_optab[i] = CODE_FOR_nothing;
5805 sync_nand_optab[i] = CODE_FOR_nothing;
5806 sync_old_add_optab[i] = CODE_FOR_nothing;
5807 sync_old_sub_optab[i] = CODE_FOR_nothing;
5808 sync_old_ior_optab[i] = CODE_FOR_nothing;
5809 sync_old_and_optab[i] = CODE_FOR_nothing;
5810 sync_old_xor_optab[i] = CODE_FOR_nothing;
5811 sync_old_nand_optab[i] = CODE_FOR_nothing;
5812 sync_new_add_optab[i] = CODE_FOR_nothing;
5813 sync_new_sub_optab[i] = CODE_FOR_nothing;
5814 sync_new_ior_optab[i] = CODE_FOR_nothing;
5815 sync_new_and_optab[i] = CODE_FOR_nothing;
5816 sync_new_xor_optab[i] = CODE_FOR_nothing;
5817 sync_new_nand_optab[i] = CODE_FOR_nothing;
5818 sync_compare_and_swap[i] = CODE_FOR_nothing;
5819 sync_compare_and_swap_cc[i] = CODE_FOR_nothing;
5820 sync_lock_test_and_set[i] = CODE_FOR_nothing;
5821 sync_lock_release[i] = CODE_FOR_nothing;
5823 reload_in_optab[i] = reload_out_optab[i] = CODE_FOR_nothing;
5826 /* Fill in the optabs with the insns we support. */
5829 /* The ffs function operates on `int'. Fall back on it if we do not
5830 have a libgcc2 function for that width. */
5831 int_mode = mode_for_size (INT_TYPE_SIZE, MODE_INT, 0);
5832 optab_handler (ffs_optab, int_mode)->libfunc = init_one_libfunc ("ffs");
5834 /* Initialize the optabs with the names of the library functions. */
5835 init_integral_libfuncs (add_optab, "add", '3');
5836 init_floating_libfuncs (add_optab, "add", '3');
5837 init_integral_libfuncs (addv_optab, "addv", '3');
5838 init_floating_libfuncs (addv_optab, "add", '3');
5839 init_integral_libfuncs (sub_optab, "sub", '3');
5840 init_floating_libfuncs (sub_optab, "sub", '3');
5841 init_integral_libfuncs (subv_optab, "subv", '3');
5842 init_floating_libfuncs (subv_optab, "sub", '3');
5843 init_integral_libfuncs (smul_optab, "mul", '3');
5844 init_floating_libfuncs (smul_optab, "mul", '3');
5845 init_integral_libfuncs (smulv_optab, "mulv", '3');
5846 init_floating_libfuncs (smulv_optab, "mul", '3');
5847 init_integral_libfuncs (sdiv_optab, "div", '3');
5848 init_floating_libfuncs (sdiv_optab, "div", '3');
5849 init_integral_libfuncs (sdivv_optab, "divv", '3');
5850 init_integral_libfuncs (udiv_optab, "udiv", '3');
5851 init_integral_libfuncs (sdivmod_optab, "divmod", '4');
5852 init_integral_libfuncs (udivmod_optab, "udivmod", '4');
5853 init_integral_libfuncs (smod_optab, "mod", '3');
5854 init_integral_libfuncs (umod_optab, "umod", '3');
5855 init_floating_libfuncs (ftrunc_optab, "ftrunc", '2');
5856 init_integral_libfuncs (and_optab, "and", '3');
5857 init_integral_libfuncs (ior_optab, "ior", '3');
5858 init_integral_libfuncs (xor_optab, "xor", '3');
5859 init_integral_libfuncs (ashl_optab, "ashl", '3');
5860 init_integral_libfuncs (ashr_optab, "ashr", '3');
5861 init_integral_libfuncs (lshr_optab, "lshr", '3');
5862 init_integral_libfuncs (smin_optab, "min", '3');
5863 init_floating_libfuncs (smin_optab, "min", '3');
5864 init_integral_libfuncs (smax_optab, "max", '3');
5865 init_floating_libfuncs (smax_optab, "max", '3');
5866 init_integral_libfuncs (umin_optab, "umin", '3');
5867 init_integral_libfuncs (umax_optab, "umax", '3');
5868 init_integral_libfuncs (neg_optab, "neg", '2');
5869 init_floating_libfuncs (neg_optab, "neg", '2');
5870 init_integral_libfuncs (negv_optab, "negv", '2');
5871 init_floating_libfuncs (negv_optab, "neg", '2');
5872 init_integral_libfuncs (one_cmpl_optab, "one_cmpl", '2');
5873 init_integral_libfuncs (ffs_optab, "ffs", '2');
5874 init_integral_libfuncs (clz_optab, "clz", '2');
5875 init_integral_libfuncs (ctz_optab, "ctz", '2');
5876 init_integral_libfuncs (popcount_optab, "popcount", '2');
5877 init_integral_libfuncs (parity_optab, "parity", '2');
5879 /* Comparison libcalls for integers MUST come in pairs,
5881 init_integral_libfuncs (cmp_optab, "cmp", '2');
5882 init_integral_libfuncs (ucmp_optab, "ucmp", '2');
5883 init_floating_libfuncs (cmp_optab, "cmp", '2');
5885 /* EQ etc are floating point only. */
5886 init_floating_libfuncs (eq_optab, "eq", '2');
5887 init_floating_libfuncs (ne_optab, "ne", '2');
5888 init_floating_libfuncs (gt_optab, "gt", '2');
5889 init_floating_libfuncs (ge_optab, "ge", '2');
5890 init_floating_libfuncs (lt_optab, "lt", '2');
5891 init_floating_libfuncs (le_optab, "le", '2');
5892 init_floating_libfuncs (unord_optab, "unord", '2');
5894 init_floating_libfuncs (powi_optab, "powi", '2');
5897 init_interclass_conv_libfuncs (sfloat_optab, "float",
5898 MODE_INT, MODE_FLOAT);
5899 init_interclass_conv_libfuncs (sfloat_optab, "float",
5900 MODE_INT, MODE_DECIMAL_FLOAT);
5901 init_interclass_conv_libfuncs (ufloat_optab, "floatun",
5902 MODE_INT, MODE_FLOAT);
5903 init_interclass_conv_libfuncs (ufloat_optab, "floatun",
5904 MODE_INT, MODE_DECIMAL_FLOAT);
5905 init_interclass_conv_libfuncs (sfix_optab, "fix",
5906 MODE_FLOAT, MODE_INT);
5907 init_interclass_conv_libfuncs (sfix_optab, "fix",
5908 MODE_DECIMAL_FLOAT, MODE_INT);
5909 init_interclass_conv_libfuncs (ufix_optab, "fixuns",
5910 MODE_FLOAT, MODE_INT);
5911 init_interclass_conv_libfuncs (ufix_optab, "fixuns",
5912 MODE_DECIMAL_FLOAT, MODE_INT);
5913 init_interclass_conv_libfuncs (ufloat_optab, "floatuns",
5914 MODE_INT, MODE_DECIMAL_FLOAT);
5915 init_interclass_conv_libfuncs (lrint_optab, "lrint",
5916 MODE_INT, MODE_FLOAT);
5917 init_interclass_conv_libfuncs (lround_optab, "lround",
5918 MODE_INT, MODE_FLOAT);
5919 init_interclass_conv_libfuncs (lfloor_optab, "lfloor",
5920 MODE_INT, MODE_FLOAT);
5921 init_interclass_conv_libfuncs (lceil_optab, "lceil",
5922 MODE_INT, MODE_FLOAT);
5924 /* sext_optab is also used for FLOAT_EXTEND. */
5925 init_intraclass_conv_libfuncs (sext_optab, "extend", MODE_FLOAT, true);
5926 init_intraclass_conv_libfuncs (sext_optab, "extend", MODE_DECIMAL_FLOAT, true);
5927 init_interclass_conv_libfuncs (sext_optab, "extend", MODE_FLOAT, MODE_DECIMAL_FLOAT);
5928 init_interclass_conv_libfuncs (sext_optab, "extend", MODE_DECIMAL_FLOAT, MODE_FLOAT);
5929 init_intraclass_conv_libfuncs (trunc_optab, "trunc", MODE_FLOAT, false);
5930 init_intraclass_conv_libfuncs (trunc_optab, "trunc", MODE_DECIMAL_FLOAT, false);
5931 init_interclass_conv_libfuncs (trunc_optab, "trunc", MODE_FLOAT, MODE_DECIMAL_FLOAT);
5932 init_interclass_conv_libfuncs (trunc_optab, "trunc", MODE_DECIMAL_FLOAT, MODE_FLOAT);
5934 /* Explicitly initialize the bswap libfuncs since we need them to be
5935 valid for things other than word_mode. */
5936 set_optab_libfunc (bswap_optab, SImode, "__bswapsi2");
5937 set_optab_libfunc (bswap_optab, DImode, "__bswapdi2");
5939 /* Use cabs for double complex abs, since systems generally have cabs.
5940 Don't define any libcall for float complex, so that cabs will be used. */
5941 if (complex_double_type_node)
5942 optab_handler (abs_optab, TYPE_MODE (complex_double_type_node))->libfunc
5943 = init_one_libfunc ("cabs");
5945 abort_libfunc = init_one_libfunc ("abort");
5946 memcpy_libfunc = init_one_libfunc ("memcpy");
5947 memmove_libfunc = init_one_libfunc ("memmove");
5948 memcmp_libfunc = init_one_libfunc ("memcmp");
5949 memset_libfunc = init_one_libfunc ("memset");
5950 setbits_libfunc = init_one_libfunc ("__setbits");
5952 #ifndef DONT_USE_BUILTIN_SETJMP
5953 setjmp_libfunc = init_one_libfunc ("__builtin_setjmp");
5954 longjmp_libfunc = init_one_libfunc ("__builtin_longjmp");
5956 setjmp_libfunc = init_one_libfunc ("setjmp");
5957 longjmp_libfunc = init_one_libfunc ("longjmp");
5959 unwind_sjlj_register_libfunc = init_one_libfunc ("_Unwind_SjLj_Register");
5960 unwind_sjlj_unregister_libfunc
5961 = init_one_libfunc ("_Unwind_SjLj_Unregister");
5963 /* For function entry/exit instrumentation. */
5964 profile_function_entry_libfunc
5965 = init_one_libfunc ("__cyg_profile_func_enter");
5966 profile_function_exit_libfunc
5967 = init_one_libfunc ("__cyg_profile_func_exit");
5969 gcov_flush_libfunc = init_one_libfunc ("__gcov_flush");
5971 if (HAVE_conditional_trap)
5972 trap_rtx = gen_rtx_fmt_ee (EQ, VOIDmode, NULL_RTX, NULL_RTX);
5974 /* Allow the target to add more libcalls or rename some, etc. */
5975 targetm.init_libfuncs ();
5980 /* Print information about the current contents of the optabs on
/* Debugger helper: dump, to stderr, the name of every library function
   currently registered in the arithmetic optab table and the conversion
   optab table.  One line per (optab, mode) or (optab, mode, mode) entry.
   NOTE(review): several original lines (declarations of i/j/k/o, braces,
   the null-libfunc guard) are elided in this excerpt.  */
5984 debug_optab_libfuncs (void)
5990 /* Dump the arithmetic optabs. */
/* First pass: every single-mode optab, all machine modes.  */
5991 for (i = 0; i != (int) OTI_MAX; i++)
5992 for (j = 0; j < NUM_MACHINE_MODES; ++j)
5995 struct optab_handlers *h;
5998 h = optab_handler (o, j);
/* A registered libfunc must be a SYMBOL_REF; its name is in XSTR.  */
6001 gcc_assert (GET_CODE (h->libfunc) == SYMBOL_REF);
6002 fprintf (stderr, "%s\t%s:\t%s\n",
6003 GET_RTX_NAME (o->code),
6005 XSTR (h->libfunc, 0));
6009 /* Dump the conversion optabs. */
/* Second pass: two-mode conversion optabs (from-mode x to-mode).  */
6010 for (i = 0; i < (int) COI_MAX; ++i)
6011 for (j = 0; j < NUM_MACHINE_MODES; ++j)
6012 for (k = 0; k < NUM_MACHINE_MODES; ++k)
6015 struct optab_handlers *h;
6017 o = &convert_optab_table[i];
6018 h = convert_optab_handler(o, j, k);
6021 gcc_assert (GET_CODE (h->libfunc) == SYMBOL_REF);
6022 fprintf (stderr, "%s\t%s\t%s:\t%s\n",
6023 GET_RTX_NAME (o->code),
6026 XSTR (h->libfunc, 0));
6034 /* Generate insns to trap with code TCODE if OP1 and OP2 satisfy condition
6035 CODE. Return 0 on failure. */
/* Bails out (presumably returning 0 — the return lines are elided in this
   excerpt) when the target lacks a conditional trap pattern, when OP1 has
   no mode, or when no compare pattern exists for that mode.  Otherwise
   emits a compare of OP1/OP2 followed by a conditional trap insn.  */
6038 gen_cond_trap (enum rtx_code code ATTRIBUTE_UNUSED, rtx op1,
6039 rtx op2 ATTRIBUTE_UNUSED, rtx tcode ATTRIBUTE_UNUSED)
6041 enum machine_mode mode = GET_MODE (op1);
6042 enum insn_code icode;
6045 if (!HAVE_conditional_trap)
6048 if (mode == VOIDmode)
/* Need a compare pattern for MODE to set the condition codes first.  */
6051 icode = optab_handler (cmp_optab, mode)->insn_code;
6052 if (icode == CODE_FOR_nothing)
6056 op1 = prepare_operand (icode, op1, 0, mode, mode, 0);
6057 op2 = prepare_operand (icode, op2, 1, mode, mode, 0);
6063 emit_insn (GEN_FCN (icode) (op1, op2));
/* trap_rtx is a shared template (built in init_optabs when
   HAVE_conditional_trap); stamp the requested condition code onto it.  */
6065 PUT_CODE (trap_rtx, code);
6066 gcc_assert (HAVE_conditional_trap);
6067 insn = gen_conditional_trap (trap_rtx, tcode);
6071 insn = get_insns ();
6078 /* Return rtx code for TCODE. Use UNSIGNEDP to select signed
6079 or unsigned operation code. */
/* Maps a tree comparison code to the corresponding RTL comparison code,
   picking the unsigned variant (LTU, LEU, GTU, GEU) when UNSIGNEDP.
   NOTE(review): the switch scaffolding and the EQ/NE/UNORDERED arms are
   elided in this excerpt.  */
6081 static enum rtx_code
6082 get_rtx_code (enum tree_code tcode, bool unsignedp)
6094 code = unsignedp ? LTU : LT;
6097 code = unsignedp ? LEU : LE;
6100 code = unsignedp ? GTU : GT;
6103 code = unsignedp ? GEU : GE;
6106 case UNORDERED_EXPR:
6137 /* Return comparison rtx for COND. Use UNSIGNEDP to select signed or
6138 unsigned operators. Do not generate compare instruction. */
/* Builds the bare comparison rtx (code, op0, op1) used as the predicate
   operand of a vcond pattern.  Operands are expanded here and forced into
   registers when they fail the pattern's operand predicates (operands 4
   and 5 of ICODE), but no compare insn is emitted.  */
6141 vector_compare_rtx (tree cond, bool unsignedp, enum insn_code icode)
6143 enum rtx_code rcode;
6145 rtx rtx_op0, rtx_op1;
6147 /* This is unlikely. While generating VEC_COND_EXPR, auto vectorizer
6148 ensures that condition is a relational operation. */
6149 gcc_assert (COMPARISON_CLASS_P (cond));
6151 rcode = get_rtx_code (TREE_CODE (cond), unsignedp);
6152 t_op0 = TREE_OPERAND (cond, 0);
6153 t_op1 = TREE_OPERAND (cond, 1);
6155 /* Expand operands. */
6156 rtx_op0 = expand_expr (t_op0, NULL_RTX, TYPE_MODE (TREE_TYPE (t_op0)),
6158 rtx_op1 = expand_expr (t_op1, NULL_RTX, TYPE_MODE (TREE_TYPE (t_op1)),
/* Legitimize each operand against the vcond pattern's predicate; a
   VOIDmode value (e.g. a constant) cannot be force_reg'd as-is.  */
6161 if (!insn_data[icode].operand[4].predicate (rtx_op0, GET_MODE (rtx_op0))
6162 && GET_MODE (rtx_op0) != VOIDmode)
6163 rtx_op0 = force_reg (GET_MODE (rtx_op0), rtx_op0);
6165 if (!insn_data[icode].operand[5].predicate (rtx_op1, GET_MODE (rtx_op1))
6166 && GET_MODE (rtx_op1) != VOIDmode)
6167 rtx_op1 = force_reg (GET_MODE (rtx_op1), rtx_op1);
/* VOIDmode: the comparison itself carries no machine mode.  */
6169 return gen_rtx_fmt_ee (rcode, VOIDmode, rtx_op0, rtx_op1);
6172 /* Return insn code for VEC_COND_EXPR EXPR. */
/* Chooses between the unsigned (vcondu) and signed (vcond) pattern
   tables based on the signedness of EXPR's type; CODE_FOR_nothing when
   the target has no such pattern for MODE.  */
6174 static inline enum insn_code
6175 get_vcond_icode (tree expr, enum machine_mode mode)
6177 enum insn_code icode = CODE_FOR_nothing;
6179 if (TYPE_UNSIGNED (TREE_TYPE (expr)))
6180 icode = vcondu_gen_code[mode];
6182 icode = vcond_gen_code[mode];
6186 /* Return TRUE iff, appropriate vector insns are available
6187 for vector cond expr expr in VMODE mode. */
/* Thin predicate wrapper over get_vcond_icode.  */
6190 expand_vec_cond_expr_p (tree expr, enum machine_mode vmode)
6192 if (get_vcond_icode (expr, vmode) == CODE_FOR_nothing)
6197 /* Generate insns for VEC_COND_EXPR. */
/* Expands a VEC_COND_EXPR (cond ? op1 : op2 on vectors) via the target's
   vcond/vcondu pattern.  Returns the result rtx; bails out (return line
   elided in this excerpt) when no pattern exists for the mode.  */
6200 expand_vec_cond_expr (tree vec_cond_expr, rtx target)
6202 enum insn_code icode;
6203 rtx comparison, rtx_op1, rtx_op2, cc_op0, cc_op1;
6204 enum machine_mode mode = TYPE_MODE (TREE_TYPE (vec_cond_expr));
6205 bool unsignedp = TYPE_UNSIGNED (TREE_TYPE (vec_cond_expr));
6207 icode = get_vcond_icode (vec_cond_expr, mode);
6208 if (icode == CODE_FOR_nothing)
6211 if (!target || !insn_data[icode].operand[0].predicate (target, mode))
6212 target = gen_reg_rtx (mode);
6214 /* Get comparison rtx. First expand both cond expr operands. */
6215 comparison = vector_compare_rtx (TREE_OPERAND (vec_cond_expr, 0),
/* The pattern wants the comparison rtx plus its two operands
   separately (operands 3, 4, 5 of the vcond pattern).  */
6217 cc_op0 = XEXP (comparison, 0);
6218 cc_op1 = XEXP (comparison, 1);
6219 /* Expand both operands and force them in reg, if required. */
6220 rtx_op1 = expand_normal (TREE_OPERAND (vec_cond_expr, 1));
6221 if (!insn_data[icode].operand[1].predicate (rtx_op1, mode)
6222 && mode != VOIDmode)
6223 rtx_op1 = force_reg (mode, rtx_op1);
6225 rtx_op2 = expand_normal (TREE_OPERAND (vec_cond_expr, 2));
6226 if (!insn_data[icode].operand[2].predicate (rtx_op2, mode)
6227 && mode != VOIDmode)
6228 rtx_op2 = force_reg (mode, rtx_op2);
6230 /* Emit instruction! */
6231 emit_insn (GEN_FCN (icode) (target, rtx_op1, rtx_op2,
6232 comparison, cc_op0, cc_op1));
6238 /* This is an internal subroutine of the other compare_and_swap expanders.
6239 MEM, OLD_VAL and NEW_VAL are as you'd expect for a compare-and-swap
6240 operation. TARGET is an optional place to store the value result of
6241 the operation. ICODE is the particular instruction to expand. Return
6242 the result of the operation. */
/* Legitimizes each operand (mode conversion, then force_reg if the
   pattern predicate rejects it) and emits the ICODE insn.  Returns
   NULL_RTX when the generator refuses the operands (tail elided in this
   excerpt).  */
6245 expand_val_compare_and_swap_1 (rtx mem, rtx old_val, rtx new_val,
6246 rtx target, enum insn_code icode)
6248 enum machine_mode mode = GET_MODE (mem);
6251 if (!target || !insn_data[icode].operand[0].predicate (target, mode))
6252 target = gen_reg_rtx (mode);
/* Widen/narrow OLD_VAL to the memory mode; unsignedp=1 conversion.  */
6254 if (GET_MODE (old_val) != VOIDmode && GET_MODE (old_val) != mode)
6255 old_val = convert_modes (mode, GET_MODE (old_val), old_val, 1);
6256 if (!insn_data[icode].operand[2].predicate (old_val, mode))
6257 old_val = force_reg (mode, old_val);
6259 if (GET_MODE (new_val) != VOIDmode && GET_MODE (new_val) != mode)
6260 new_val = convert_modes (mode, GET_MODE (new_val), new_val, 1);
6261 if (!insn_data[icode].operand[3].predicate (new_val, mode))
6262 new_val = force_reg (mode, new_val);
6264 insn = GEN_FCN (icode) (target, mem, old_val, new_val);
6265 if (insn == NULL_RTX)
6272 /* Expand a compare-and-swap operation and return its value. */
/* Public entry point: looks up the target's sync_compare_and_swap
   pattern for MEM's mode and delegates to the _1 worker; fails (return
   elided in this excerpt) when no pattern exists.  */
6275 expand_val_compare_and_swap (rtx mem, rtx old_val, rtx new_val, rtx target)
6277 enum machine_mode mode = GET_MODE (mem);
6278 enum insn_code icode = sync_compare_and_swap[mode];
6280 if (icode == CODE_FOR_nothing)
6283 return expand_val_compare_and_swap_1 (mem, old_val, new_val, target, icode);
6286 /* Expand a compare-and-swap operation and store true into the result if
6287 the operation was successful and false otherwise. Return the result.
6288 Unlike other routines, TARGET is not optional. */
/* Strategy, in order: (1) a CAS pattern that also sets the condition
   flags; (2) plain CAS followed by an explicit compare; then turn the
   flags into a 0/1 value via a setcc insn when STORE_FLAG_VALUE == 1,
   else via a two-label branch sequence.  NOTE(review): the switch
   scaffolding and several returns are elided in this excerpt.  */
6291 expand_bool_compare_and_swap (rtx mem, rtx old_val, rtx new_val, rtx target)
6293 enum machine_mode mode = GET_MODE (mem);
6294 enum insn_code icode;
6295 rtx subtarget, label0, label1;
6297 /* If the target supports a compare-and-swap pattern that simultaneously
6298 sets some flag for success, then use it. Otherwise use the regular
6299 compare-and-swap and follow that immediately with a compare insn. */
6300 icode = sync_compare_and_swap_cc[mode];
6304 subtarget = expand_val_compare_and_swap_1 (mem, old_val, new_val,
6306 if (subtarget != NULL_RTX)
6310 case CODE_FOR_nothing:
6311 icode = sync_compare_and_swap[mode];
6312 if (icode == CODE_FOR_nothing)
6315 /* Ensure that if old_val == mem, that we're not comparing
6316 against an old value. */
6317 if (MEM_P (old_val))
6318 old_val = force_reg (mode, old_val);
6320 subtarget = expand_val_compare_and_swap_1 (mem, old_val, new_val,
6322 if (subtarget == NULL_RTX)
/* Set the flags explicitly: result == old_val iff the CAS succeeded.  */
6325 emit_cmp_insn (subtarget, old_val, EQ, const0_rtx, mode, true)
6328 /* If the target has a sane STORE_FLAG_VALUE, then go ahead and use a
6329 setcc instruction from the beginning. We don't work too hard here,
6330 but it's nice to not be stupid about initial code gen either. */
6331 if (STORE_FLAG_VALUE == 1)
6333 icode = setcc_gen_code[EQ];
6334 if (icode != CODE_FOR_nothing)
6336 enum machine_mode cmode = insn_data[icode].operand[0].mode;
6340 if (!insn_data[icode].operand[0].predicate (target, cmode))
6341 subtarget = gen_reg_rtx (cmode);
6343 insn = GEN_FCN (icode) (subtarget)
6347 if (GET_MODE (target) != GET_MODE (subtarget))
/* Zero-extend the setcc result into TARGET's mode.  */
6349 convert_move (target, subtarget, 1);
6357 /* Without an appropriate setcc instruction, use a set of branches to
6358 get 1 and 0 stored into target. Presumably if the target has a
6359 STORE_FLAG_VALUE that isn't 1, then this will get cleaned up by ifcvt. */
6361 label0 = gen_label_rtx ();
6362 label1 = gen_label_rtx ();
6364 emit_jump_insn (bcc_gen_fctn[EQ] (label0));
6365 emit_move_insn (target, const0_rtx);
6366 emit_jump_insn (gen_jump (label1));
6368 emit_label (label0);
6369 emit_move_insn (target, const1_rtx);
6370 emit_label (label1);
6375 /* This is a helper function for the other atomic operations. This function
6376 emits a loop that contains SEQ that iterates until a compare-and-swap
6377 operation at the end succeeds. MEM is the memory to be modified. SEQ is
6378 a set of instructions that takes a value from OLD_REG as an input and
6379 produces a value in NEW_REG as an output. Before SEQ, OLD_REG will be
6380 set to the current contents of MEM. After SEQ, a compare-and-swap will
6381 attempt to update MEM with NEW_REG. The function returns true when the
6382 loop was generated successfully. */
6385 expand_compare_and_swap_loop (rtx mem, rtx old_reg, rtx new_reg, rtx seq)
6387 enum machine_mode mode = GET_MODE (mem);
6388 enum insn_code icode;
6389 rtx label, cmp_reg, subtarget;
6391 /* The loop we want to generate looks like
6397 cmp_reg = compare-and-swap(mem, old_reg, new_reg)
6398 if (cmp_reg != old_reg)
6401 Note that we only do the plain load from memory once. Subsequent
6402 iterations use the value loaded by the compare-and-swap pattern. */
6404 label = gen_label_rtx ();
6405 cmp_reg = gen_reg_rtx (mode);
/* Initial plain load; later iterations reuse the CAS result.  */
6407 emit_move_insn (cmp_reg, mem);
6409 emit_move_insn (old_reg, cmp_reg);
6413 /* If the target supports a compare-and-swap pattern that simultaneously
6414 sets some flag for success, then use it. Otherwise use the regular
6415 compare-and-swap and follow that immediately with a compare insn. */
6416 icode = sync_compare_and_swap_cc[mode];
6420 subtarget = expand_val_compare_and_swap_1 (mem, old_reg, new_reg,
/* With the _cc variant we asked for the result directly in cmp_reg.  */
6422 if (subtarget != NULL_RTX)
6424 gcc_assert (subtarget == cmp_reg);
6429 case CODE_FOR_nothing:
6430 icode = sync_compare_and_swap[mode];
6431 if (icode == CODE_FOR_nothing)
6434 subtarget = expand_val_compare_and_swap_1 (mem, old_reg, new_reg,
6436 if (subtarget == NULL_RTX)
6438 if (subtarget != cmp_reg)
6439 emit_move_insn (cmp_reg, subtarget);
/* Explicit compare to set flags when no _cc CAS pattern exists.  */
6441 emit_cmp_insn (cmp_reg, old_reg, EQ, const0_rtx, mode, true);
6444 /* ??? Mark this jump predicted not taken? */
6445 emit_jump_insn (bcc_gen_fctn[NE] (label));
6450 /* This function generates the atomic operation MEM CODE= VAL. In this
6451 case, we do not care about any resulting value. Returns NULL if we
6452 cannot generate the operation. */
/* Result value is discarded, so only the no-output sync_<op> patterns
   are considered; SUB can be rewritten as ADD of the negation.  Falls
   back to a compare-and-swap loop built from ordinary arithmetic.
   NOTE(review): switch scaffolding and case labels are elided in this
   excerpt.  */
6455 expand_sync_operation (rtx mem, rtx val, enum rtx_code code)
6457 enum machine_mode mode = GET_MODE (mem);
6458 enum insn_code icode;
6461 /* Look to see if the target supports the operation directly. */
6465 icode = sync_add_optab[mode];
6468 icode = sync_ior_optab[mode];
6471 icode = sync_xor_optab[mode];
6474 icode = sync_and_optab[mode];
6477 icode = sync_nand_optab[mode];
6481 icode = sync_sub_optab[mode];
6482 if (icode == CODE_FOR_nothing)
/* No atomic SUB: try atomic ADD of -val instead.  */
6484 icode = sync_add_optab[mode];
6485 if (icode != CODE_FOR_nothing)
6487 val = expand_simple_unop (mode, NEG, val, NULL_RTX, 1);
6497 /* Generate the direct operation, if present. */
6498 if (icode != CODE_FOR_nothing)
6500 if (GET_MODE (val) != VOIDmode && GET_MODE (val) != mode)
6501 val = convert_modes (mode, GET_MODE (val), val, 1);
6502 if (!insn_data[icode].operand[1].predicate (val, mode))
6503 val = force_reg (mode, val);
6505 insn = GEN_FCN (icode) (mem, val);
6513 /* Failing that, generate a compare-and-swap loop in which we perform the
6514 operation with normal arithmetic instructions. */
6515 if (sync_compare_and_swap[mode] != CODE_FOR_nothing)
6517 rtx t0 = gen_reg_rtx (mode), t1;
/* NAND is op then complement, so an extra NOT is needed here.  */
6524 t1 = expand_simple_unop (mode, NOT, t1, NULL_RTX, true);
6527 t1 = expand_simple_binop (mode, code, t1, val, NULL_RTX,
6528 true, OPTAB_LIB_WIDEN);
6530 insn = get_insns ();
6533 if (t1 != NULL && expand_compare_and_swap_loop (mem, t0, t1, insn))
6540 /* This function generates the atomic operation MEM CODE= VAL. In this
6541 case, we do care about the resulting value: if AFTER is true then
6542 return the value MEM holds after the operation, if AFTER is false
6543 then return the value MEM holds before the operation. TARGET is an
6544 optional place for the result value to be stored. */
/* Tries sync_old_<op> / sync_new_<op> patterns first (compensating
   arithmetically when only the "wrong" variant exists), then falls back
   to a compare-and-swap loop.  NOTE(review): switch scaffolding, the
   icode selection between old_code/new_code, and several compensation
   statements are elided in this excerpt.  */
6547 expand_sync_fetch_operation (rtx mem, rtx val, enum rtx_code code,
6548 bool after, rtx target)
6550 enum machine_mode mode = GET_MODE (mem);
6551 enum insn_code old_code, new_code, icode;
6555 /* Look to see if the target supports the operation directly. */
6559 old_code = sync_old_add_optab[mode];
6560 new_code = sync_new_add_optab[mode];
6563 old_code = sync_old_ior_optab[mode];
6564 new_code = sync_new_ior_optab[mode];
6567 old_code = sync_old_xor_optab[mode];
6568 new_code = sync_new_xor_optab[mode];
6571 old_code = sync_old_and_optab[mode];
6572 new_code = sync_new_and_optab[mode];
6575 old_code = sync_old_nand_optab[mode];
6576 new_code = sync_new_nand_optab[mode];
6580 old_code = sync_old_sub_optab[mode];
6581 new_code = sync_new_sub_optab[mode];
6582 if (old_code == CODE_FOR_nothing && new_code == CODE_FOR_nothing)
/* No atomic SUB variants: try atomic ADD of -val instead.  */
6584 old_code = sync_old_add_optab[mode];
6585 new_code = sync_new_add_optab[mode];
6586 if (old_code != CODE_FOR_nothing || new_code != CODE_FOR_nothing)
6588 val = expand_simple_unop (mode, NEG, val, NULL_RTX, 1);
6598 /* If the target does supports the proper new/old operation, great. But
6599 if we only support the opposite old/new operation, check to see if we
6600 can compensate. In the case in which the old value is supported, then
6601 we can always perform the operation again with normal arithmetic. In
6602 the case in which the new value is supported, then we can only handle
6603 this in the case the operation is reversible. */
6608 if (icode == CODE_FOR_nothing)
6611 if (icode != CODE_FOR_nothing)
/* new-value pattern with old value wanted: only reversible ops.  */
6618 if (icode == CODE_FOR_nothing
6619 && (code == PLUS || code == MINUS || code == XOR))
6622 if (icode != CODE_FOR_nothing)
6627 /* If we found something supported, great. */
6628 if (icode != CODE_FOR_nothing)
6630 if (!target || !insn_data[icode].operand[0].predicate (target, mode))
6631 target = gen_reg_rtx (mode);
6633 if (GET_MODE (val) != VOIDmode && GET_MODE (val) != mode)
6634 val = convert_modes (mode, GET_MODE (val), val, 1);
6635 if (!insn_data[icode].operand[2].predicate (val, mode))
6636 val = force_reg (mode, val);
6638 insn = GEN_FCN (icode) (target, mem, val);
6643 /* If we need to compensate for using an operation with the
6644 wrong return value, do so now. */
6651 else if (code == MINUS)
/* NAND compensation: complement, then redo the base operation.  */
6656 target = expand_simple_unop (mode, NOT, target, NULL_RTX, true);
6657 target = expand_simple_binop (mode, code, target, val, NULL_RTX,
6658 true, OPTAB_LIB_WIDEN);
6665 /* Failing that, generate a compare-and-swap loop in which we perform the
6666 operation with normal arithmetic instructions. */
6667 if (sync_compare_and_swap[mode] != CODE_FOR_nothing)
6669 rtx t0 = gen_reg_rtx (mode), t1;
6671 if (!target || !register_operand (target, mode))
6672 target = gen_reg_rtx (mode);
/* !AFTER: the pre-op value (t0) is the result.  */
6677 emit_move_insn (target, t0);
6681 t1 = expand_simple_unop (mode, NOT, t1, NULL_RTX, true);
6684 t1 = expand_simple_binop (mode, code, t1, val, NULL_RTX,
6685 true, OPTAB_LIB_WIDEN);
/* AFTER: the post-op value (t1) is the result.  */
6687 emit_move_insn (target, t1);
6689 insn = get_insns ();
6692 if (t1 != NULL && expand_compare_and_swap_loop (mem, t0, t1, insn))
6699 /* This function expands a test-and-set operation. Ideally we atomically
6700 store VAL in MEM and return the previous value in MEM. Some targets
6701 may not support this operation and only support VAL with the constant 1;
6702 in this case while the return value will be 0/1, but the exact value
6703 stored in MEM is target defined. TARGET is an option place to stick
6704 the return value. */
/* Direct sync_lock_test_and_set pattern first; otherwise emulate the
   exchange with a compare-and-swap loop (returns elided in this
   excerpt).  */
6707 expand_sync_lock_test_and_set (rtx mem, rtx val, rtx target)
6709 enum machine_mode mode = GET_MODE (mem);
6710 enum insn_code icode;
6713 /* If the target supports the test-and-set directly, great. */
6714 icode = sync_lock_test_and_set[mode];
6715 if (icode != CODE_FOR_nothing)
6717 if (!target || !insn_data[icode].operand[0].predicate (target, mode))
6718 target = gen_reg_rtx (mode);
6720 if (GET_MODE (val) != VOIDmode && GET_MODE (val) != mode)
6721 val = convert_modes (mode, GET_MODE (val), val, 1);
6722 if (!insn_data[icode].operand[2].predicate (val, mode))
6723 val = force_reg (mode, val);
6725 insn = GEN_FCN (icode) (target, mem, val);
6733 /* Otherwise, use a compare-and-swap loop for the exchange. */
6734 if (sync_compare_and_swap[mode] != CODE_FOR_nothing)
6736 if (!target || !register_operand (target, mode))
6737 target = gen_reg_rtx (mode);
6738 if (GET_MODE (val) != VOIDmode && GET_MODE (val) != mode)
6739 val = convert_modes (mode, GET_MODE (val), val, 1);
/* Loop body is empty: new value is just VAL (a pure exchange).  */
6740 if (expand_compare_and_swap_loop (mem, target, val, NULL_RTX))
6747 #include "gt-optabs.h"