1 /* Expand the basic unary and binary arithmetic operations, for GNU compiler.
2 Copyright (C) 1987, 1988, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
3 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010
4 Free Software Foundation, Inc.
6 This file is part of GCC.
8 GCC is free software; you can redistribute it and/or modify it under
9 the terms of the GNU General Public License as published by the Free
10 Software Foundation; either version 3, or (at your option) any later
13 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
14 WARRANTY; without even the implied warranty of MERCHANTABILITY or
15 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING3. If not see
20 <http://www.gnu.org/licenses/>. */
25 #include "coretypes.h"
29 /* Include insn-config.h before expr.h so that HAVE_conditional_move
30 is properly defined. */
31 #include "insn-config.h"
44 #include "basic-block.h"
47 /* Each optab contains info on how this target machine
48 can perform a particular operation
49 for all sizes and kinds of operands.
51 The operation to be performed is often specified
52 by passing one of these optabs as an argument.
54 See expr.h for documentation of these optabs. */
56 struct optab_d optab_table[OTI_MAX];
58 rtx libfunc_table[LTI_MAX];
60 /* Tables of patterns for converting one mode to another. */
61 struct convert_optab_d convert_optab_table[COI_MAX];
63 /* Contains the optab used for each rtx code. */
64 optab code_to_optab[NUM_RTX_CODE + 1];
66 #ifdef HAVE_conditional_move
67 /* Indexed by the machine mode, gives the insn code to make a conditional
68 move insn. This is not indexed by the rtx-code like bcc_gen_fctn and
69 setcc_gen_code to cut down on the number of named patterns. Consider a day
70 when a lot more rtx codes are conditional (eg: for the ARM). */
72 enum insn_code movcc_gen_code[NUM_MACHINE_MODES];
75 /* Indexed by the machine mode, gives the insn code for vector conditional
78 enum insn_code vcond_gen_code[NUM_MACHINE_MODES];
79 enum insn_code vcondu_gen_code[NUM_MACHINE_MODES];
81 static void prepare_float_lib_cmp (rtx, rtx, enum rtx_code, rtx *,
83 static rtx expand_unop_direct (enum machine_mode, optab, rtx, rtx, int);
85 /* Debug facility for use in GDB. */
86 void debug_optab_libfuncs (void);
88 /* Prefixes for the current version of decimal floating point (BID vs. DPD) */
89 #if ENABLE_DECIMAL_BID_FORMAT
90 #define DECIMAL_PREFIX "bid_"
92 #define DECIMAL_PREFIX "dpd_"
96 /* Info about libfunc. We use same hashtable for normal optabs and conversion
97 optab. In the first case mode2 is unused. */
98 struct GTY(()) libfunc_entry {
100 enum machine_mode mode1, mode2;
104 /* Hash table used to convert declarations into nodes. */
105 static GTY((param_is (struct libfunc_entry))) htab_t libfunc_hash;
107 /* Used for attribute_hash. */
110 hash_libfunc (const void *p)
112 const struct libfunc_entry *const e = (const struct libfunc_entry *) p;
114 return (((int) e->mode1 + (int) e->mode2 * NUM_MACHINE_MODES)
118 /* Used for optab_hash. */
121 eq_libfunc (const void *p, const void *q)
123 const struct libfunc_entry *const e1 = (const struct libfunc_entry *) p;
124 const struct libfunc_entry *const e2 = (const struct libfunc_entry *) q;
126 return (e1->optab == e2->optab
127 && e1->mode1 == e2->mode1
128 && e1->mode2 == e2->mode2);
131 /* Return libfunc corresponding operation defined by OPTAB converting
132 from MODE2 to MODE1. Trigger lazy initialization if needed, return NULL
133 if no libfunc is available. */
135 convert_optab_libfunc (convert_optab optab, enum machine_mode mode1,
136 enum machine_mode mode2)
138 struct libfunc_entry e;
139 struct libfunc_entry **slot;
141 e.optab = (size_t) (optab - &convert_optab_table[0]);
144 slot = (struct libfunc_entry **) htab_find_slot (libfunc_hash, &e, NO_INSERT);
147 if (optab->libcall_gen)
149 optab->libcall_gen (optab, optab->libcall_basename, mode1, mode2);
150 slot = (struct libfunc_entry **) htab_find_slot (libfunc_hash, &e, NO_INSERT);
152 return (*slot)->libfunc;
158 return (*slot)->libfunc;
161 /* Return libfunc corresponding operation defined by OPTAB in MODE.
162 Trigger lazy initialization if needed, return NULL if no libfunc is
165 optab_libfunc (optab optab, enum machine_mode mode)
167 struct libfunc_entry e;
168 struct libfunc_entry **slot;
170 e.optab = (size_t) (optab - &optab_table[0]);
173 slot = (struct libfunc_entry **) htab_find_slot (libfunc_hash, &e, NO_INSERT);
176 if (optab->libcall_gen)
178 optab->libcall_gen (optab, optab->libcall_basename,
179 optab->libcall_suffix, mode);
180 slot = (struct libfunc_entry **) htab_find_slot (libfunc_hash,
183 return (*slot)->libfunc;
189 return (*slot)->libfunc;
193 /* Add a REG_EQUAL note to the last insn in INSNS. TARGET is being set to
194 the result of operation CODE applied to OP0 (and OP1 if it is a binary
197 If the last insn does not set TARGET, don't do anything, but return 1.
199 If a previous insn sets TARGET and TARGET is one of OP0 or OP1,
200 don't add the REG_EQUAL note but return 0. Our caller can then try
201 again, ensuring that TARGET is not one of the operands. */
204 add_equal_note (rtx insns, rtx target, enum rtx_code code, rtx op0, rtx op1)
206 rtx last_insn, insn, set;
209 gcc_assert (insns && INSN_P (insns) && NEXT_INSN (insns));
211 if (GET_RTX_CLASS (code) != RTX_COMM_ARITH
212 && GET_RTX_CLASS (code) != RTX_BIN_ARITH
213 && GET_RTX_CLASS (code) != RTX_COMM_COMPARE
214 && GET_RTX_CLASS (code) != RTX_COMPARE
215 && GET_RTX_CLASS (code) != RTX_UNARY)
218 if (GET_CODE (target) == ZERO_EXTRACT)
221 for (last_insn = insns;
222 NEXT_INSN (last_insn) != NULL_RTX;
223 last_insn = NEXT_INSN (last_insn))
226 set = single_set (last_insn);
230 if (! rtx_equal_p (SET_DEST (set), target)
231 /* For a STRICT_LOW_PART, the REG_NOTE applies to what is inside it. */
232 && (GET_CODE (SET_DEST (set)) != STRICT_LOW_PART
233 || ! rtx_equal_p (XEXP (SET_DEST (set), 0), target)))
236 /* If TARGET is in OP0 or OP1, check if anything in SEQ sets TARGET
237 besides the last insn. */
238 if (reg_overlap_mentioned_p (target, op0)
239 || (op1 && reg_overlap_mentioned_p (target, op1)))
241 insn = PREV_INSN (last_insn);
242 while (insn != NULL_RTX)
244 if (reg_set_p (target, insn))
247 insn = PREV_INSN (insn);
251 if (GET_RTX_CLASS (code) == RTX_UNARY)
252 note = gen_rtx_fmt_e (code, GET_MODE (target), copy_rtx (op0));
254 note = gen_rtx_fmt_ee (code, GET_MODE (target), copy_rtx (op0), copy_rtx (op1));
256 set_unique_reg_note (last_insn, REG_EQUAL, note);
261 /* Widen OP to MODE and return the rtx for the widened operand. UNSIGNEDP
262 says whether OP is signed or unsigned. NO_EXTEND is nonzero if we need
263 not actually do a sign-extend or zero-extend, but can leave the
264 higher-order bits of the result rtx undefined, for example, in the case
265 of logical operations, but not right shifts. */
268 widen_operand (rtx op, enum machine_mode mode, enum machine_mode oldmode,
269 int unsignedp, int no_extend)
273 /* If we don't have to extend and this is a constant, return it. */
274 if (no_extend && GET_MODE (op) == VOIDmode)
277 /* If we must extend do so. If OP is a SUBREG for a promoted object, also
278 extend since it will be more efficient to do so unless the signedness of
279 a promoted object differs from our extension. */
281 || (GET_CODE (op) == SUBREG && SUBREG_PROMOTED_VAR_P (op)
282 && SUBREG_PROMOTED_UNSIGNED_P (op) == unsignedp))
283 return convert_modes (mode, oldmode, op, unsignedp);
285 /* If MODE is no wider than a single word, we return a paradoxical
287 if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
288 return gen_rtx_SUBREG (mode, force_reg (GET_MODE (op), op), 0);
290 /* Otherwise, get an object of MODE, clobber it, and set the low-order
293 result = gen_reg_rtx (mode);
294 emit_clobber (result);
295 emit_move_insn (gen_lowpart (GET_MODE (op), result), op);
299 /* Return the optab used for computing the operation given by the tree code,
300 CODE and the tree EXP. This function is not always usable (for example, it
301 cannot give complete results for multiplication or division) but probably
302 ought to be relied on more widely throughout the expander. */
304 optab_for_tree_code (enum tree_code code, const_tree type,
305 enum optab_subtype subtype)
317 return one_cmpl_optab;
326 return TYPE_UNSIGNED (type) ? umod_optab : smod_optab;
334 if (TYPE_SATURATING(type))
335 return TYPE_UNSIGNED(type) ? usdiv_optab : ssdiv_optab;
336 return TYPE_UNSIGNED (type) ? udiv_optab : sdiv_optab;
339 if (VECTOR_MODE_P (TYPE_MODE (type)))
341 if (subtype == optab_vector)
342 return TYPE_SATURATING (type) ? NULL : vashl_optab;
344 gcc_assert (subtype == optab_scalar);
346 if (TYPE_SATURATING(type))
347 return TYPE_UNSIGNED(type) ? usashl_optab : ssashl_optab;
351 if (VECTOR_MODE_P (TYPE_MODE (type)))
353 if (subtype == optab_vector)
354 return TYPE_UNSIGNED (type) ? vlshr_optab : vashr_optab;
356 gcc_assert (subtype == optab_scalar);
358 return TYPE_UNSIGNED (type) ? lshr_optab : ashr_optab;
361 if (VECTOR_MODE_P (TYPE_MODE (type)))
363 if (subtype == optab_vector)
366 gcc_assert (subtype == optab_scalar);
371 if (VECTOR_MODE_P (TYPE_MODE (type)))
373 if (subtype == optab_vector)
376 gcc_assert (subtype == optab_scalar);
381 return TYPE_UNSIGNED (type) ? umax_optab : smax_optab;
384 return TYPE_UNSIGNED (type) ? umin_optab : smin_optab;
386 case REALIGN_LOAD_EXPR:
387 return vec_realign_load_optab;
390 return TYPE_UNSIGNED (type) ? usum_widen_optab : ssum_widen_optab;
393 return TYPE_UNSIGNED (type) ? udot_prod_optab : sdot_prod_optab;
395 case WIDEN_MULT_PLUS_EXPR:
396 return (TYPE_UNSIGNED (type)
397 ? (TYPE_SATURATING (type)
398 ? usmadd_widen_optab : umadd_widen_optab)
399 : (TYPE_SATURATING (type)
400 ? ssmadd_widen_optab : smadd_widen_optab));
402 case WIDEN_MULT_MINUS_EXPR:
403 return (TYPE_UNSIGNED (type)
404 ? (TYPE_SATURATING (type)
405 ? usmsub_widen_optab : umsub_widen_optab)
406 : (TYPE_SATURATING (type)
407 ? ssmsub_widen_optab : smsub_widen_optab));
410 return TYPE_UNSIGNED (type) ? reduc_umax_optab : reduc_smax_optab;
413 return TYPE_UNSIGNED (type) ? reduc_umin_optab : reduc_smin_optab;
415 case REDUC_PLUS_EXPR:
416 return TYPE_UNSIGNED (type) ? reduc_uplus_optab : reduc_splus_optab;
418 case VEC_LSHIFT_EXPR:
419 return vec_shl_optab;
421 case VEC_RSHIFT_EXPR:
422 return vec_shr_optab;
424 case VEC_WIDEN_MULT_HI_EXPR:
425 return TYPE_UNSIGNED (type) ?
426 vec_widen_umult_hi_optab : vec_widen_smult_hi_optab;
428 case VEC_WIDEN_MULT_LO_EXPR:
429 return TYPE_UNSIGNED (type) ?
430 vec_widen_umult_lo_optab : vec_widen_smult_lo_optab;
432 case VEC_UNPACK_HI_EXPR:
433 return TYPE_UNSIGNED (type) ?
434 vec_unpacku_hi_optab : vec_unpacks_hi_optab;
436 case VEC_UNPACK_LO_EXPR:
437 return TYPE_UNSIGNED (type) ?
438 vec_unpacku_lo_optab : vec_unpacks_lo_optab;
440 case VEC_UNPACK_FLOAT_HI_EXPR:
441 /* The signedness is determined from input operand. */
442 return TYPE_UNSIGNED (type) ?
443 vec_unpacku_float_hi_optab : vec_unpacks_float_hi_optab;
445 case VEC_UNPACK_FLOAT_LO_EXPR:
446 /* The signedness is determined from input operand. */
447 return TYPE_UNSIGNED (type) ?
448 vec_unpacku_float_lo_optab : vec_unpacks_float_lo_optab;
450 case VEC_PACK_TRUNC_EXPR:
451 return vec_pack_trunc_optab;
453 case VEC_PACK_SAT_EXPR:
454 return TYPE_UNSIGNED (type) ? vec_pack_usat_optab : vec_pack_ssat_optab;
456 case VEC_PACK_FIX_TRUNC_EXPR:
457 /* The signedness is determined from output operand. */
458 return TYPE_UNSIGNED (type) ?
459 vec_pack_ufix_trunc_optab : vec_pack_sfix_trunc_optab;
465 trapv = INTEGRAL_TYPE_P (type) && TYPE_OVERFLOW_TRAPS (type);
468 case POINTER_PLUS_EXPR:
470 if (TYPE_SATURATING(type))
471 return TYPE_UNSIGNED(type) ? usadd_optab : ssadd_optab;
472 return trapv ? addv_optab : add_optab;
475 if (TYPE_SATURATING(type))
476 return TYPE_UNSIGNED(type) ? ussub_optab : sssub_optab;
477 return trapv ? subv_optab : sub_optab;
480 if (TYPE_SATURATING(type))
481 return TYPE_UNSIGNED(type) ? usmul_optab : ssmul_optab;
482 return trapv ? smulv_optab : smul_optab;
485 if (TYPE_SATURATING(type))
486 return TYPE_UNSIGNED(type) ? usneg_optab : ssneg_optab;
487 return trapv ? negv_optab : neg_optab;
490 return trapv ? absv_optab : abs_optab;
492 case VEC_EXTRACT_EVEN_EXPR:
493 return vec_extract_even_optab;
495 case VEC_EXTRACT_ODD_EXPR:
496 return vec_extract_odd_optab;
498 case VEC_INTERLEAVE_HIGH_EXPR:
499 return vec_interleave_high_optab;
501 case VEC_INTERLEAVE_LOW_EXPR:
502 return vec_interleave_low_optab;
510 /* Expand vector widening operations.
512 There are two different classes of operations handled here:
513 1) Operations whose result is wider than all the arguments to the operation.
514 Examples: VEC_UNPACK_HI/LO_EXPR, VEC_WIDEN_MULT_HI/LO_EXPR
515 In this case OP0 and optionally OP1 would be initialized,
516 but WIDE_OP wouldn't (not relevant for this case).
517 2) Operations whose result is of the same size as the last argument to the
518 operation, but wider than all the other arguments to the operation.
519 Examples: WIDEN_SUM_EXPR, VEC_DOT_PROD_EXPR.
520 In the case WIDE_OP, OP0 and optionally OP1 would be initialized.
522 E.g, when called to expand the following operations, this is how
523 the arguments will be initialized:
525 widening-sum 2 oprnd0 - oprnd1
526 widening-dot-product 3 oprnd0 oprnd1 oprnd2
527 widening-mult 2 oprnd0 oprnd1 -
528 type-promotion (vec-unpack) 1 oprnd0 - - */
531 expand_widen_pattern_expr (sepops ops, rtx op0, rtx op1, rtx wide_op,
532 rtx target, int unsignedp)
534 tree oprnd0, oprnd1, oprnd2;
535 enum machine_mode wmode = VOIDmode, tmode0, tmode1 = VOIDmode;
536 optab widen_pattern_optab;
538 enum machine_mode xmode0, xmode1 = VOIDmode, wxmode = VOIDmode;
541 rtx xop0, xop1, wxop;
542 int nops = TREE_CODE_LENGTH (ops->code);
545 tmode0 = TYPE_MODE (TREE_TYPE (oprnd0));
546 widen_pattern_optab =
547 optab_for_tree_code (ops->code, TREE_TYPE (oprnd0), optab_default);
548 if (ops->code == WIDEN_MULT_PLUS_EXPR
549 || ops->code == WIDEN_MULT_MINUS_EXPR)
550 icode = (int) optab_handler (widen_pattern_optab,
551 TYPE_MODE (TREE_TYPE (ops->op2)));
553 icode = (int) optab_handler (widen_pattern_optab, tmode0);
554 gcc_assert (icode != CODE_FOR_nothing);
555 xmode0 = insn_data[icode].operand[1].mode;
560 tmode1 = TYPE_MODE (TREE_TYPE (oprnd1));
561 xmode1 = insn_data[icode].operand[2].mode;
564 /* The last operand is of a wider mode than the rest of the operands. */
572 gcc_assert (tmode1 == tmode0);
575 wmode = TYPE_MODE (TREE_TYPE (oprnd2));
576 wxmode = insn_data[icode].operand[3].mode;
580 wmode = wxmode = insn_data[icode].operand[0].mode;
583 || ! (*insn_data[icode].operand[0].predicate) (target, wmode))
584 temp = gen_reg_rtx (wmode);
592 /* In case the insn wants input operands in modes different from
593 those of the actual operands, convert the operands. It would
594 seem that we don't need to convert CONST_INTs, but we do, so
595 that they're properly zero-extended, sign-extended or truncated
598 if (GET_MODE (op0) != xmode0 && xmode0 != VOIDmode)
599 xop0 = convert_modes (xmode0,
600 GET_MODE (op0) != VOIDmode
606 if (GET_MODE (op1) != xmode1 && xmode1 != VOIDmode)
607 xop1 = convert_modes (xmode1,
608 GET_MODE (op1) != VOIDmode
614 if (GET_MODE (wide_op) != wxmode && wxmode != VOIDmode)
615 wxop = convert_modes (wxmode,
616 GET_MODE (wide_op) != VOIDmode
621 /* Now, if insn's predicates don't allow our operands, put them into
624 if (! (*insn_data[icode].operand[1].predicate) (xop0, xmode0)
625 && xmode0 != VOIDmode)
626 xop0 = copy_to_mode_reg (xmode0, xop0);
630 if (! (*insn_data[icode].operand[2].predicate) (xop1, xmode1)
631 && xmode1 != VOIDmode)
632 xop1 = copy_to_mode_reg (xmode1, xop1);
636 if (! (*insn_data[icode].operand[3].predicate) (wxop, wxmode)
637 && wxmode != VOIDmode)
638 wxop = copy_to_mode_reg (wxmode, wxop);
640 pat = GEN_FCN (icode) (temp, xop0, xop1, wxop);
643 pat = GEN_FCN (icode) (temp, xop0, xop1);
649 if (! (*insn_data[icode].operand[2].predicate) (wxop, wxmode)
650 && wxmode != VOIDmode)
651 wxop = copy_to_mode_reg (wxmode, wxop);
653 pat = GEN_FCN (icode) (temp, xop0, wxop);
656 pat = GEN_FCN (icode) (temp, xop0);
663 /* Generate code to perform an operation specified by TERNARY_OPTAB
664 on operands OP0, OP1 and OP2, with result having machine-mode MODE.
666 UNSIGNEDP is for the case where we have to widen the operands
667 to perform the operation. It says to use zero-extension.
669 If TARGET is nonzero, the value
670 is generated there, if it is convenient to do so.
671 In all cases an rtx is returned for the locus of the value;
672 this may or may not be TARGET. */
675 expand_ternary_op (enum machine_mode mode, optab ternary_optab, rtx op0,
676 rtx op1, rtx op2, rtx target, int unsignedp)
678 int icode = (int) optab_handler (ternary_optab, mode);
679 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
680 enum machine_mode mode1 = insn_data[icode].operand[2].mode;
681 enum machine_mode mode2 = insn_data[icode].operand[3].mode;
684 rtx xop0 = op0, xop1 = op1, xop2 = op2;
686 gcc_assert (optab_handler (ternary_optab, mode) != CODE_FOR_nothing);
688 if (!target || !insn_data[icode].operand[0].predicate (target, mode))
689 temp = gen_reg_rtx (mode);
693 /* In case the insn wants input operands in modes different from
694 those of the actual operands, convert the operands. It would
695 seem that we don't need to convert CONST_INTs, but we do, so
696 that they're properly zero-extended, sign-extended or truncated
699 if (GET_MODE (op0) != mode0 && mode0 != VOIDmode)
700 xop0 = convert_modes (mode0,
701 GET_MODE (op0) != VOIDmode
706 if (GET_MODE (op1) != mode1 && mode1 != VOIDmode)
707 xop1 = convert_modes (mode1,
708 GET_MODE (op1) != VOIDmode
713 if (GET_MODE (op2) != mode2 && mode2 != VOIDmode)
714 xop2 = convert_modes (mode2,
715 GET_MODE (op2) != VOIDmode
720 /* Now, if insn's predicates don't allow our operands, put them into
723 if (!insn_data[icode].operand[1].predicate (xop0, mode0)
724 && mode0 != VOIDmode)
725 xop0 = copy_to_mode_reg (mode0, xop0);
727 if (!insn_data[icode].operand[2].predicate (xop1, mode1)
728 && mode1 != VOIDmode)
729 xop1 = copy_to_mode_reg (mode1, xop1);
731 if (!insn_data[icode].operand[3].predicate (xop2, mode2)
732 && mode2 != VOIDmode)
733 xop2 = copy_to_mode_reg (mode2, xop2);
735 pat = GEN_FCN (icode) (temp, xop0, xop1, xop2);
742 /* Like expand_binop, but return a constant rtx if the result can be
743 calculated at compile time. The arguments and return value are
744 otherwise the same as for expand_binop. */
747 simplify_expand_binop (enum machine_mode mode, optab binoptab,
748 rtx op0, rtx op1, rtx target, int unsignedp,
749 enum optab_methods methods)
751 if (CONSTANT_P (op0) && CONSTANT_P (op1))
753 rtx x = simplify_binary_operation (binoptab->code, mode, op0, op1);
759 return expand_binop (mode, binoptab, op0, op1, target, unsignedp, methods);
762 /* Like simplify_expand_binop, but always put the result in TARGET.
763 Return true if the expansion succeeded. */
766 force_expand_binop (enum machine_mode mode, optab binoptab,
767 rtx op0, rtx op1, rtx target, int unsignedp,
768 enum optab_methods methods)
770 rtx x = simplify_expand_binop (mode, binoptab, op0, op1,
771 target, unsignedp, methods);
775 emit_move_insn (target, x);
779 /* Generate insns for VEC_LSHIFT_EXPR, VEC_RSHIFT_EXPR. */
782 expand_vec_shift_expr (sepops ops, rtx target)
784 enum insn_code icode;
785 rtx rtx_op1, rtx_op2;
786 enum machine_mode mode1;
787 enum machine_mode mode2;
788 enum machine_mode mode = TYPE_MODE (ops->type);
789 tree vec_oprnd = ops->op0;
790 tree shift_oprnd = ops->op1;
796 case VEC_RSHIFT_EXPR:
797 shift_optab = vec_shr_optab;
799 case VEC_LSHIFT_EXPR:
800 shift_optab = vec_shl_optab;
806 icode = optab_handler (shift_optab, mode);
807 gcc_assert (icode != CODE_FOR_nothing);
809 mode1 = insn_data[icode].operand[1].mode;
810 mode2 = insn_data[icode].operand[2].mode;
812 rtx_op1 = expand_normal (vec_oprnd);
813 if (!(*insn_data[icode].operand[1].predicate) (rtx_op1, mode1)
814 && mode1 != VOIDmode)
815 rtx_op1 = force_reg (mode1, rtx_op1);
817 rtx_op2 = expand_normal (shift_oprnd);
818 if (!(*insn_data[icode].operand[2].predicate) (rtx_op2, mode2)
819 && mode2 != VOIDmode)
820 rtx_op2 = force_reg (mode2, rtx_op2);
823 || ! (*insn_data[icode].operand[0].predicate) (target, mode))
824 target = gen_reg_rtx (mode);
826 /* Emit instruction */
827 pat = GEN_FCN (icode) (target, rtx_op1, rtx_op2);
834 /* This subroutine of expand_doubleword_shift handles the cases in which
835 the effective shift value is >= BITS_PER_WORD. The arguments and return
836 value are the same as for the parent routine, except that SUPERWORD_OP1
837 is the shift count to use when shifting OUTOF_INPUT into INTO_TARGET.
838 INTO_TARGET may be null if the caller has decided to calculate it. */
841 expand_superword_shift (optab binoptab, rtx outof_input, rtx superword_op1,
842 rtx outof_target, rtx into_target,
843 int unsignedp, enum optab_methods methods)
845 if (into_target != 0)
846 if (!force_expand_binop (word_mode, binoptab, outof_input, superword_op1,
847 into_target, unsignedp, methods))
850 if (outof_target != 0)
852 /* For a signed right shift, we must fill OUTOF_TARGET with copies
853 of the sign bit, otherwise we must fill it with zeros. */
854 if (binoptab != ashr_optab)
855 emit_move_insn (outof_target, CONST0_RTX (word_mode));
857 if (!force_expand_binop (word_mode, binoptab,
858 outof_input, GEN_INT (BITS_PER_WORD - 1),
859 outof_target, unsignedp, methods))
865 /* This subroutine of expand_doubleword_shift handles the cases in which
866 the effective shift value is < BITS_PER_WORD. The arguments and return
867 value are the same as for the parent routine. */
870 expand_subword_shift (enum machine_mode op1_mode, optab binoptab,
871 rtx outof_input, rtx into_input, rtx op1,
872 rtx outof_target, rtx into_target,
873 int unsignedp, enum optab_methods methods,
874 unsigned HOST_WIDE_INT shift_mask)
876 optab reverse_unsigned_shift, unsigned_shift;
879 reverse_unsigned_shift = (binoptab == ashl_optab ? lshr_optab : ashl_optab);
880 unsigned_shift = (binoptab == ashl_optab ? ashl_optab : lshr_optab);
882 /* The low OP1 bits of INTO_TARGET come from the high bits of OUTOF_INPUT.
883 We therefore need to shift OUTOF_INPUT by (BITS_PER_WORD - OP1) bits in
884 the opposite direction to BINOPTAB. */
885 if (CONSTANT_P (op1) || shift_mask >= BITS_PER_WORD)
887 carries = outof_input;
888 tmp = immed_double_const (BITS_PER_WORD, 0, op1_mode);
889 tmp = simplify_expand_binop (op1_mode, sub_optab, tmp, op1,
894 /* We must avoid shifting by BITS_PER_WORD bits since that is either
895 the same as a zero shift (if shift_mask == BITS_PER_WORD - 1) or
896 has unknown behavior. Do a single shift first, then shift by the
897 remainder. It's OK to use ~OP1 as the remainder if shift counts
898 are truncated to the mode size. */
899 carries = expand_binop (word_mode, reverse_unsigned_shift,
900 outof_input, const1_rtx, 0, unsignedp, methods);
901 if (shift_mask == BITS_PER_WORD - 1)
903 tmp = immed_double_const (-1, -1, op1_mode);
904 tmp = simplify_expand_binop (op1_mode, xor_optab, op1, tmp,
909 tmp = immed_double_const (BITS_PER_WORD - 1, 0, op1_mode);
910 tmp = simplify_expand_binop (op1_mode, sub_optab, tmp, op1,
914 if (tmp == 0 || carries == 0)
916 carries = expand_binop (word_mode, reverse_unsigned_shift,
917 carries, tmp, 0, unsignedp, methods);
921 /* Shift INTO_INPUT logically by OP1. This is the last use of INTO_INPUT
922 so the result can go directly into INTO_TARGET if convenient. */
923 tmp = expand_binop (word_mode, unsigned_shift, into_input, op1,
924 into_target, unsignedp, methods);
928 /* Now OR in the bits carried over from OUTOF_INPUT. */
929 if (!force_expand_binop (word_mode, ior_optab, tmp, carries,
930 into_target, unsignedp, methods))
933 /* Use a standard word_mode shift for the out-of half. */
934 if (outof_target != 0)
935 if (!force_expand_binop (word_mode, binoptab, outof_input, op1,
936 outof_target, unsignedp, methods))
943 #ifdef HAVE_conditional_move
944 /* Try implementing expand_doubleword_shift using conditional moves.
945 The shift is by < BITS_PER_WORD if (CMP_CODE CMP1 CMP2) is true,
946 otherwise it is by >= BITS_PER_WORD. SUBWORD_OP1 and SUPERWORD_OP1
947 are the shift counts to use in the former and latter case. All other
948 arguments are the same as the parent routine. */
951 expand_doubleword_shift_condmove (enum machine_mode op1_mode, optab binoptab,
952 enum rtx_code cmp_code, rtx cmp1, rtx cmp2,
953 rtx outof_input, rtx into_input,
954 rtx subword_op1, rtx superword_op1,
955 rtx outof_target, rtx into_target,
956 int unsignedp, enum optab_methods methods,
957 unsigned HOST_WIDE_INT shift_mask)
959 rtx outof_superword, into_superword;
961 /* Put the superword version of the output into OUTOF_SUPERWORD and
963 outof_superword = outof_target != 0 ? gen_reg_rtx (word_mode) : 0;
964 if (outof_target != 0 && subword_op1 == superword_op1)
966 /* The value INTO_TARGET >> SUBWORD_OP1, which we later store in
967 OUTOF_TARGET, is the same as the value of INTO_SUPERWORD. */
968 into_superword = outof_target;
969 if (!expand_superword_shift (binoptab, outof_input, superword_op1,
970 outof_superword, 0, unsignedp, methods))
975 into_superword = gen_reg_rtx (word_mode);
976 if (!expand_superword_shift (binoptab, outof_input, superword_op1,
977 outof_superword, into_superword,
982 /* Put the subword version directly in OUTOF_TARGET and INTO_TARGET. */
983 if (!expand_subword_shift (op1_mode, binoptab,
984 outof_input, into_input, subword_op1,
985 outof_target, into_target,
986 unsignedp, methods, shift_mask))
989 /* Select between them. Do the INTO half first because INTO_SUPERWORD
990 might be the current value of OUTOF_TARGET. */
991 if (!emit_conditional_move (into_target, cmp_code, cmp1, cmp2, op1_mode,
992 into_target, into_superword, word_mode, false))
995 if (outof_target != 0)
996 if (!emit_conditional_move (outof_target, cmp_code, cmp1, cmp2, op1_mode,
997 outof_target, outof_superword,
1005 /* Expand a doubleword shift (ashl, ashr or lshr) using word-mode shifts.
1006 OUTOF_INPUT and INTO_INPUT are the two word-sized halves of the first
1007 input operand; the shift moves bits in the direction OUTOF_INPUT->
1008 INTO_TARGET. OUTOF_TARGET and INTO_TARGET are the equivalent words
1009 of the target. OP1 is the shift count and OP1_MODE is its mode.
1010 If OP1 is constant, it will have been truncated as appropriate
1011 and is known to be nonzero.
1013 If SHIFT_MASK is zero, the result of word shifts is undefined when the
1014 shift count is outside the range [0, BITS_PER_WORD). This routine must
1015 avoid generating such shifts for OP1s in the range [0, BITS_PER_WORD * 2).
1017 If SHIFT_MASK is nonzero, all word-mode shift counts are effectively
1018 masked by it and shifts in the range [BITS_PER_WORD, SHIFT_MASK) will
1019 fill with zeros or sign bits as appropriate.
1021 If SHIFT_MASK is BITS_PER_WORD - 1, this routine will synthesize
1022 a doubleword shift whose equivalent mask is BITS_PER_WORD * 2 - 1.
1023 Doing this preserves semantics required by SHIFT_COUNT_TRUNCATED.
1024 In all other cases, shifts by values outside [0, BITS_PER_UNIT * 2)
1027 BINOPTAB, UNSIGNEDP and METHODS are as for expand_binop. This function
1028 may not use INTO_INPUT after modifying INTO_TARGET, and similarly for
1029 OUTOF_INPUT and OUTOF_TARGET. OUTOF_TARGET can be null if the parent
1030 function wants to calculate it itself.
1032 Return true if the shift could be successfully synthesized. */
1035 expand_doubleword_shift (enum machine_mode op1_mode, optab binoptab,
1036 rtx outof_input, rtx into_input, rtx op1,
1037 rtx outof_target, rtx into_target,
1038 int unsignedp, enum optab_methods methods,
1039 unsigned HOST_WIDE_INT shift_mask)
1041 rtx superword_op1, tmp, cmp1, cmp2;
1042 rtx subword_label, done_label;
1043 enum rtx_code cmp_code;
1045 /* See if word-mode shifts by BITS_PER_WORD...BITS_PER_WORD * 2 - 1 will
1046 fill the result with sign or zero bits as appropriate. If so, the value
1047 of OUTOF_TARGET will always be (SHIFT OUTOF_INPUT OP1). Recursively call
1048 this routine to calculate INTO_TARGET (which depends on both OUTOF_INPUT
1049 and INTO_INPUT), then emit code to set up OUTOF_TARGET.
1051 This isn't worthwhile for constant shifts since the optimizers will
1052 cope better with in-range shift counts. */
1053 if (shift_mask >= BITS_PER_WORD
1054 && outof_target != 0
1055 && !CONSTANT_P (op1))
1057 if (!expand_doubleword_shift (op1_mode, binoptab,
1058 outof_input, into_input, op1,
1060 unsignedp, methods, shift_mask))
1062 if (!force_expand_binop (word_mode, binoptab, outof_input, op1,
1063 outof_target, unsignedp, methods))
1068 /* Set CMP_CODE, CMP1 and CMP2 so that the rtx (CMP_CODE CMP1 CMP2)
1069 is true when the effective shift value is less than BITS_PER_WORD.
1070 Set SUPERWORD_OP1 to the shift count that should be used to shift
1071 OUTOF_INPUT into INTO_TARGET when the condition is false. */
1072 tmp = immed_double_const (BITS_PER_WORD, 0, op1_mode);
1073 if (!CONSTANT_P (op1) && shift_mask == BITS_PER_WORD - 1)
1075 /* Set CMP1 to OP1 & BITS_PER_WORD. The result is zero iff OP1
1076 is a subword shift count. */
1077 cmp1 = simplify_expand_binop (op1_mode, and_optab, op1, tmp,
1079 cmp2 = CONST0_RTX (op1_mode);
1081 superword_op1 = op1;
1085 /* Set CMP1 to OP1 - BITS_PER_WORD. */
1086 cmp1 = simplify_expand_binop (op1_mode, sub_optab, op1, tmp,
1088 cmp2 = CONST0_RTX (op1_mode);
1090 superword_op1 = cmp1;
1095 /* If we can compute the condition at compile time, pick the
1096 appropriate subroutine. */
1097 tmp = simplify_relational_operation (cmp_code, SImode, op1_mode, cmp1, cmp2);
1098 if (tmp != 0 && CONST_INT_P (tmp))
1100 if (tmp == const0_rtx)
1101 return expand_superword_shift (binoptab, outof_input, superword_op1,
1102 outof_target, into_target,
1103 unsignedp, methods);
1105 return expand_subword_shift (op1_mode, binoptab,
1106 outof_input, into_input, op1,
1107 outof_target, into_target,
1108 unsignedp, methods, shift_mask);
1111 #ifdef HAVE_conditional_move
1112 /* Try using conditional moves to generate straight-line code. */
1114 rtx start = get_last_insn ();
1115 if (expand_doubleword_shift_condmove (op1_mode, binoptab,
1116 cmp_code, cmp1, cmp2,
1117 outof_input, into_input,
1119 outof_target, into_target,
1120 unsignedp, methods, shift_mask))
1122 delete_insns_since (start);
1126 /* As a last resort, use branches to select the correct alternative. */
1127 subword_label = gen_label_rtx ();
1128 done_label = gen_label_rtx ();
1131 do_compare_rtx_and_jump (cmp1, cmp2, cmp_code, false, op1_mode,
1132 0, 0, subword_label, -1);
1135 if (!expand_superword_shift (binoptab, outof_input, superword_op1,
1136 outof_target, into_target,
1137 unsignedp, methods))
1140 emit_jump_insn (gen_jump (done_label));
1142 emit_label (subword_label);
1144 if (!expand_subword_shift (op1_mode, binoptab,
1145 outof_input, into_input, op1,
1146 outof_target, into_target,
1147 unsignedp, methods, shift_mask))
1150 emit_label (done_label);
1154 /* Subroutine of expand_binop. Perform a double word multiplication of
1155 operands OP0 and OP1 both of mode MODE, which is exactly twice as wide
1156 as the target's word_mode. This function return NULL_RTX if anything
1157 goes wrong, in which case it may have already emitted instructions
1158 which need to be deleted.
1160 If we want to multiply two two-word values and have normal and widening
1161 multiplies of single-word values, we can do this with three smaller
1164 The multiplication proceeds as follows:
1165 _______________________
1166 [__op0_high_|__op0_low__]
1167 _______________________
1168 * [__op1_high_|__op1_low__]
1169 _______________________________________________
1170 _______________________
1171 (1) [__op0_low__*__op1_low__]
1172 _______________________
1173 (2a) [__op0_low__*__op1_high_]
1174 _______________________
1175 (2b) [__op0_high_*__op1_low__]
1176 _______________________
1177 (3) [__op0_high_*__op1_high_]
1180 This gives a 4-word result. Since we are only interested in the
1181 lower 2 words, partial result (3) and the upper words of (2a) and
1182 (2b) don't need to be calculated. Hence (2a) and (2b) can be
1183 calculated using non-widening multiplication.
1185 (1), however, needs to be calculated with an unsigned widening
1186 multiplication. If this operation is not directly supported we
1187 try using a signed widening multiplication and adjust the result.
1188 This adjustment works as follows:
1190 If both operands are positive then no adjustment is needed.
1192 If the operands have different signs, for example op0_low < 0 and
1193 op1_low >= 0, the instruction treats the most significant bit of
1194 op0_low as a sign bit instead of a bit with significance
1195 2**(BITS_PER_WORD-1), i.e. the instruction multiplies op1_low
1196 with 2**BITS_PER_WORD - op0_low, and two's complements the
1197 result. Conclusion: We need to add op1_low * 2**BITS_PER_WORD to
1200 Similarly, if both operands are negative, we need to add
1201 (op0_low + op1_low) * 2**BITS_PER_WORD.
1203 We use a trick to adjust quickly. We logically shift op0_low right
1204 (op1_low) BITS_PER_WORD-1 steps to get 0 or 1, and add this to
1205 op0_high (op1_high) before it is used to calculate 2b (2a). If no
1206 logical shift exists, we do an arithmetic right shift and subtract
1210 expand_doubleword_mult (enum machine_mode mode, rtx op0, rtx op1, rtx target,
1211 bool umulp, enum optab_methods methods)
1213 int low = (WORDS_BIG_ENDIAN ? 1 : 0);
1214 int high = (WORDS_BIG_ENDIAN ? 0 : 1);
1215 rtx wordm1 = umulp ? NULL_RTX : GEN_INT (BITS_PER_WORD - 1);
1216 rtx product, adjust, product_high, temp;
1218 rtx op0_high = operand_subword_force (op0, high, mode);
1219 rtx op0_low = operand_subword_force (op0, low, mode);
1220 rtx op1_high = operand_subword_force (op1, high, mode);
1221 rtx op1_low = operand_subword_force (op1, low, mode);
1223 /* If we're using an unsigned multiply to directly compute the product
1224 of the low-order words of the operands and perform any required
1225 adjustments of the operands, we begin by trying two more multiplications
1226 and then computing the appropriate sum.
1228 We have checked above that the required addition is provided.
1229 Full-word addition will normally always succeed, especially if
1230 it is provided at all, so we don't worry about its failure. The
1231 multiplication may well fail, however, so we do handle that. */
1235 /* ??? This could be done with emit_store_flag where available. */
1236 temp = expand_binop (word_mode, lshr_optab, op0_low, wordm1,
1237 NULL_RTX, 1, methods);
1239 op0_high = expand_binop (word_mode, add_optab, op0_high, temp,
1240 NULL_RTX, 0, OPTAB_DIRECT);
1243 temp = expand_binop (word_mode, ashr_optab, op0_low, wordm1,
1244 NULL_RTX, 0, methods);
1247 op0_high = expand_binop (word_mode, sub_optab, op0_high, temp,
1248 NULL_RTX, 0, OPTAB_DIRECT);
1255 adjust = expand_binop (word_mode, smul_optab, op0_high, op1_low,
1256 NULL_RTX, 0, OPTAB_DIRECT);
1260 /* OP0_HIGH should now be dead. */
1264 /* ??? This could be done with emit_store_flag where available. */
1265 temp = expand_binop (word_mode, lshr_optab, op1_low, wordm1,
1266 NULL_RTX, 1, methods);
1268 op1_high = expand_binop (word_mode, add_optab, op1_high, temp,
1269 NULL_RTX, 0, OPTAB_DIRECT);
1272 temp = expand_binop (word_mode, ashr_optab, op1_low, wordm1,
1273 NULL_RTX, 0, methods);
1276 op1_high = expand_binop (word_mode, sub_optab, op1_high, temp,
1277 NULL_RTX, 0, OPTAB_DIRECT);
1284 temp = expand_binop (word_mode, smul_optab, op1_high, op0_low,
1285 NULL_RTX, 0, OPTAB_DIRECT);
1289 /* OP1_HIGH should now be dead. */
1291 adjust = expand_binop (word_mode, add_optab, adjust, temp,
1292 adjust, 0, OPTAB_DIRECT);
1294 if (target && !REG_P (target))
1298 product = expand_binop (mode, umul_widen_optab, op0_low, op1_low,
1299 target, 1, OPTAB_DIRECT);
1301 product = expand_binop (mode, smul_widen_optab, op0_low, op1_low,
1302 target, 1, OPTAB_DIRECT);
1307 product_high = operand_subword (product, high, 1, mode);
1308 adjust = expand_binop (word_mode, add_optab, product_high, adjust,
1309 REG_P (product_high) ? product_high : adjust,
1311 emit_move_insn (product_high, adjust);
1315 /* Wrapper around expand_binop which takes an rtx code to specify
1316 the operation to perform, not an optab pointer. All other
1317 arguments are the same. */
1319 expand_simple_binop (enum machine_mode mode, enum rtx_code code, rtx op0,
1320 rtx op1, rtx target, int unsignedp,
1321 enum optab_methods methods)
1323 optab binop = code_to_optab[(int) code];
1326 return expand_binop (mode, binop, op0, op1, target, unsignedp, methods);
1329 /* Return whether OP0 and OP1 should be swapped when expanding a commutative
1330 binop. Order them according to commutative_operand_precedence and, if
1331 possible, try to put TARGET or a pseudo first. */
1333 swap_commutative_operands_with_target (rtx target, rtx op0, rtx op1)
1335 int op0_prec = commutative_operand_precedence (op0);
1336 int op1_prec = commutative_operand_precedence (op1);
1338 if (op0_prec < op1_prec)
1341 if (op0_prec > op1_prec)
1344 /* With equal precedence, both orders are ok, but it is better if the
1345 first operand is TARGET, or if both TARGET and OP0 are pseudos. */
1346 if (target == 0 || REG_P (target))
1347 return (REG_P (op1) && !REG_P (op0)) || target == op1;
1349 return rtx_equal_p (op1, target);
1352 /* Return true if BINOPTAB implements a shift operation. */
1355 shift_optab_p (optab binoptab)
1357 switch (binoptab->code)
1373 /* Return true if BINOPTAB implements a commutative binary operation. */
1376 commutative_optab_p (optab binoptab)
1378 return (GET_RTX_CLASS (binoptab->code) == RTX_COMM_ARITH
1379 || binoptab == smul_widen_optab
1380 || binoptab == umul_widen_optab
1381 || binoptab == smul_highpart_optab
1382 || binoptab == umul_highpart_optab);
1385 /* X is to be used in mode MODE as an operand to BINOPTAB. If we're
1386 optimizing, and if the operand is a constant that costs more than
1387 1 instruction, force the constant into a register and return that
1388 register. Return X otherwise. UNSIGNEDP says whether X is unsigned. */
1391 avoid_expensive_constant (enum machine_mode mode, optab binoptab,
1392 rtx x, bool unsignedp)
1394 bool speed = optimize_insn_for_speed_p ();
1396 if (mode != VOIDmode
1399 && rtx_cost (x, binoptab->code, speed) > rtx_cost (x, SET, speed))
1401 if (CONST_INT_P (x))
1403 HOST_WIDE_INT intval = trunc_int_for_mode (INTVAL (x), mode);
1404 if (intval != INTVAL (x))
1405 x = GEN_INT (intval);
1408 x = convert_modes (mode, VOIDmode, x, unsignedp);
1409 x = force_reg (mode, x);
1414 /* Helper function for expand_binop: handle the case where there
1415 is an insn that directly implements the indicated operation.
1416 Returns null if this is not possible. */
1418 expand_binop_directly (enum machine_mode mode, optab binoptab,
1420 rtx target, int unsignedp, enum optab_methods methods,
1423 int icode = (int) optab_handler (binoptab, mode);
1424 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
1425 enum machine_mode mode1 = insn_data[icode].operand[2].mode;
1426 enum machine_mode tmp_mode;
1429 rtx xop0 = op0, xop1 = op1;
1436 temp = gen_reg_rtx (mode);
1438 /* If it is a commutative operator and the modes would match
1439 if we would swap the operands, we can save the conversions. */
1440 commutative_p = commutative_optab_p (binoptab);
1442 && GET_MODE (xop0) != mode0 && GET_MODE (xop1) != mode1
1443 && GET_MODE (xop0) == mode1 && GET_MODE (xop1) == mode1)
1450 /* If we are optimizing, force expensive constants into a register. */
1451 xop0 = avoid_expensive_constant (mode0, binoptab, xop0, unsignedp);
1452 if (!shift_optab_p (binoptab))
1453 xop1 = avoid_expensive_constant (mode1, binoptab, xop1, unsignedp);
1455 /* In case the insn wants input operands in modes different from
1456 those of the actual operands, convert the operands. It would
1457 seem that we don't need to convert CONST_INTs, but we do, so
1458 that they're properly zero-extended, sign-extended or truncated
1461 if (GET_MODE (xop0) != mode0 && mode0 != VOIDmode)
1462 xop0 = convert_modes (mode0,
1463 GET_MODE (xop0) != VOIDmode
1468 if (GET_MODE (xop1) != mode1 && mode1 != VOIDmode)
1469 xop1 = convert_modes (mode1,
1470 GET_MODE (xop1) != VOIDmode
1475 /* If operation is commutative,
1476 try to make the first operand a register.
1477 Even better, try to make it the same as the target.
1478 Also try to make the last operand a constant. */
1480 && swap_commutative_operands_with_target (target, xop0, xop1))
1487 /* Now, if insn's predicates don't allow our operands, put them into
1490 if (!insn_data[icode].operand[1].predicate (xop0, mode0)
1491 && mode0 != VOIDmode)
1492 xop0 = copy_to_mode_reg (mode0, xop0);
1494 if (!insn_data[icode].operand[2].predicate (xop1, mode1)
1495 && mode1 != VOIDmode)
1496 xop1 = copy_to_mode_reg (mode1, xop1);
1498 if (binoptab == vec_pack_trunc_optab
1499 || binoptab == vec_pack_usat_optab
1500 || binoptab == vec_pack_ssat_optab
1501 || binoptab == vec_pack_ufix_trunc_optab
1502 || binoptab == vec_pack_sfix_trunc_optab)
1504 /* The mode of the result is different then the mode of the
1506 tmp_mode = insn_data[icode].operand[0].mode;
1507 if (GET_MODE_NUNITS (tmp_mode) != 2 * GET_MODE_NUNITS (mode))
1513 if (!insn_data[icode].operand[0].predicate (temp, tmp_mode))
1514 temp = gen_reg_rtx (tmp_mode);
1516 pat = GEN_FCN (icode) (temp, xop0, xop1);
1519 /* If PAT is composed of more than one insn, try to add an appropriate
1520 REG_EQUAL note to it. If we can't because TEMP conflicts with an
1521 operand, call expand_binop again, this time without a target. */
1522 if (INSN_P (pat) && NEXT_INSN (pat) != NULL_RTX
1523 && ! add_equal_note (pat, temp, binoptab->code, xop0, xop1))
1525 delete_insns_since (last);
1526 return expand_binop (mode, binoptab, op0, op1, NULL_RTX,
1527 unsignedp, methods);
1534 delete_insns_since (last);
1538 /* Generate code to perform an operation specified by BINOPTAB
1539 on operands OP0 and OP1, with result having machine-mode MODE.
1541 UNSIGNEDP is for the case where we have to widen the operands
1542 to perform the operation. It says to use zero-extension.
1544 If TARGET is nonzero, the value
1545 is generated there, if it is convenient to do so.
1546 In all cases an rtx is returned for the locus of the value;
1547 this may or may not be TARGET. */
1550 expand_binop (enum machine_mode mode, optab binoptab, rtx op0, rtx op1,
1551 rtx target, int unsignedp, enum optab_methods methods)
1553 enum optab_methods next_methods
1554 = (methods == OPTAB_LIB || methods == OPTAB_LIB_WIDEN
1555 ? OPTAB_WIDEN : methods);
1556 enum mode_class mclass;
1557 enum machine_mode wider_mode;
1560 rtx entry_last = get_last_insn ();
1563 mclass = GET_MODE_CLASS (mode);
1565 /* If subtracting an integer constant, convert this into an addition of
1566 the negated constant. */
1568 if (binoptab == sub_optab && CONST_INT_P (op1))
1570 op1 = negate_rtx (mode, op1);
1571 binoptab = add_optab;
1574 /* Record where to delete back to if we backtrack. */
1575 last = get_last_insn ();
1577 /* If we can do it with a three-operand insn, do so. */
1579 if (methods != OPTAB_MUST_WIDEN
1580 && optab_handler (binoptab, mode) != CODE_FOR_nothing)
1582 temp = expand_binop_directly (mode, binoptab, op0, op1, target,
1583 unsignedp, methods, last);
1588 /* If we were trying to rotate, and that didn't work, try rotating
1589 the other direction before falling back to shifts and bitwise-or. */
1590 if (((binoptab == rotl_optab
1591 && optab_handler (rotr_optab, mode) != CODE_FOR_nothing)
1592 || (binoptab == rotr_optab
1593 && optab_handler (rotl_optab, mode) != CODE_FOR_nothing))
1594 && mclass == MODE_INT)
1596 optab otheroptab = (binoptab == rotl_optab ? rotr_optab : rotl_optab);
1598 unsigned int bits = GET_MODE_BITSIZE (mode);
1600 if (CONST_INT_P (op1))
1601 newop1 = GEN_INT (bits - INTVAL (op1));
1602 else if (targetm.shift_truncation_mask (mode) == bits - 1)
1603 newop1 = negate_rtx (GET_MODE (op1), op1);
1605 newop1 = expand_binop (GET_MODE (op1), sub_optab,
1606 GEN_INT (bits), op1,
1607 NULL_RTX, unsignedp, OPTAB_DIRECT);
1609 temp = expand_binop_directly (mode, otheroptab, op0, newop1,
1610 target, unsignedp, methods, last);
1615 /* If this is a multiply, see if we can do a widening operation that
1616 takes operands of this mode and makes a wider mode. */
1618 if (binoptab == smul_optab
1619 && GET_MODE_WIDER_MODE (mode) != VOIDmode
1620 && (optab_handler ((unsignedp ? umul_widen_optab : smul_widen_optab),
1621 GET_MODE_WIDER_MODE (mode))
1622 != CODE_FOR_nothing))
1624 temp = expand_binop (GET_MODE_WIDER_MODE (mode),
1625 unsignedp ? umul_widen_optab : smul_widen_optab,
1626 op0, op1, NULL_RTX, unsignedp, OPTAB_DIRECT);
1630 if (GET_MODE_CLASS (mode) == MODE_INT
1631 && TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode),
1632 GET_MODE_BITSIZE (GET_MODE (temp))))
1633 return gen_lowpart (mode, temp);
1635 return convert_to_mode (mode, temp, unsignedp);
1639 /* Look for a wider mode of the same class for which we think we
1640 can open-code the operation. Check for a widening multiply at the
1641 wider mode as well. */
1643 if (CLASS_HAS_WIDER_MODES_P (mclass)
1644 && methods != OPTAB_DIRECT && methods != OPTAB_LIB)
1645 for (wider_mode = GET_MODE_WIDER_MODE (mode);
1646 wider_mode != VOIDmode;
1647 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
1649 if (optab_handler (binoptab, wider_mode) != CODE_FOR_nothing
1650 || (binoptab == smul_optab
1651 && GET_MODE_WIDER_MODE (wider_mode) != VOIDmode
1652 && (optab_handler ((unsignedp ? umul_widen_optab
1653 : smul_widen_optab),
1654 GET_MODE_WIDER_MODE (wider_mode))
1655 != CODE_FOR_nothing)))
1657 rtx xop0 = op0, xop1 = op1;
1660 /* For certain integer operations, we need not actually extend
1661 the narrow operands, as long as we will truncate
1662 the results to the same narrowness. */
1664 if ((binoptab == ior_optab || binoptab == and_optab
1665 || binoptab == xor_optab
1666 || binoptab == add_optab || binoptab == sub_optab
1667 || binoptab == smul_optab || binoptab == ashl_optab)
1668 && mclass == MODE_INT)
1671 xop0 = avoid_expensive_constant (mode, binoptab,
1673 if (binoptab != ashl_optab)
1674 xop1 = avoid_expensive_constant (mode, binoptab,
1678 xop0 = widen_operand (xop0, wider_mode, mode, unsignedp, no_extend);
1680 /* The second operand of a shift must always be extended. */
1681 xop1 = widen_operand (xop1, wider_mode, mode, unsignedp,
1682 no_extend && binoptab != ashl_optab);
1684 temp = expand_binop (wider_mode, binoptab, xop0, xop1, NULL_RTX,
1685 unsignedp, OPTAB_DIRECT);
1688 if (mclass != MODE_INT
1689 || !TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode),
1690 GET_MODE_BITSIZE (wider_mode)))
1693 target = gen_reg_rtx (mode);
1694 convert_move (target, temp, 0);
1698 return gen_lowpart (mode, temp);
1701 delete_insns_since (last);
1705 /* If operation is commutative,
1706 try to make the first operand a register.
1707 Even better, try to make it the same as the target.
1708 Also try to make the last operand a constant. */
1709 if (commutative_optab_p (binoptab)
1710 && swap_commutative_operands_with_target (target, op0, op1))
1717 /* These can be done a word at a time. */
1718 if ((binoptab == and_optab || binoptab == ior_optab || binoptab == xor_optab)
1719 && mclass == MODE_INT
1720 && GET_MODE_SIZE (mode) > UNITS_PER_WORD
1721 && optab_handler (binoptab, word_mode) != CODE_FOR_nothing)
1726 /* If TARGET is the same as one of the operands, the REG_EQUAL note
1727 won't be accurate, so use a new target. */
1728 if (target == 0 || target == op0 || target == op1)
1729 target = gen_reg_rtx (mode);
1733 /* Do the actual arithmetic. */
1734 for (i = 0; i < GET_MODE_BITSIZE (mode) / BITS_PER_WORD; i++)
1736 rtx target_piece = operand_subword (target, i, 1, mode);
1737 rtx x = expand_binop (word_mode, binoptab,
1738 operand_subword_force (op0, i, mode),
1739 operand_subword_force (op1, i, mode),
1740 target_piece, unsignedp, next_methods);
1745 if (target_piece != x)
1746 emit_move_insn (target_piece, x);
1749 insns = get_insns ();
1752 if (i == GET_MODE_BITSIZE (mode) / BITS_PER_WORD)
1759 /* Synthesize double word shifts from single word shifts. */
1760 if ((binoptab == lshr_optab || binoptab == ashl_optab
1761 || binoptab == ashr_optab)
1762 && mclass == MODE_INT
1763 && (CONST_INT_P (op1) || optimize_insn_for_speed_p ())
1764 && GET_MODE_SIZE (mode) == 2 * UNITS_PER_WORD
1765 && optab_handler (binoptab, word_mode) != CODE_FOR_nothing
1766 && optab_handler (ashl_optab, word_mode) != CODE_FOR_nothing
1767 && optab_handler (lshr_optab, word_mode) != CODE_FOR_nothing)
1769 unsigned HOST_WIDE_INT shift_mask, double_shift_mask;
1770 enum machine_mode op1_mode;
1772 double_shift_mask = targetm.shift_truncation_mask (mode);
1773 shift_mask = targetm.shift_truncation_mask (word_mode);
1774 op1_mode = GET_MODE (op1) != VOIDmode ? GET_MODE (op1) : word_mode;
1776 /* Apply the truncation to constant shifts. */
1777 if (double_shift_mask > 0 && CONST_INT_P (op1))
1778 op1 = GEN_INT (INTVAL (op1) & double_shift_mask);
1780 if (op1 == CONST0_RTX (op1_mode))
1783 /* Make sure that this is a combination that expand_doubleword_shift
1784 can handle. See the comments there for details. */
1785 if (double_shift_mask == 0
1786 || (shift_mask == BITS_PER_WORD - 1
1787 && double_shift_mask == BITS_PER_WORD * 2 - 1))
1790 rtx into_target, outof_target;
1791 rtx into_input, outof_input;
1792 int left_shift, outof_word;
1794 /* If TARGET is the same as one of the operands, the REG_EQUAL note
1795 won't be accurate, so use a new target. */
1796 if (target == 0 || target == op0 || target == op1)
1797 target = gen_reg_rtx (mode);
1801 /* OUTOF_* is the word we are shifting bits away from, and
1802 INTO_* is the word that we are shifting bits towards, thus
1803 they differ depending on the direction of the shift and
1804 WORDS_BIG_ENDIAN. */
1806 left_shift = binoptab == ashl_optab;
1807 outof_word = left_shift ^ ! WORDS_BIG_ENDIAN;
1809 outof_target = operand_subword (target, outof_word, 1, mode);
1810 into_target = operand_subword (target, 1 - outof_word, 1, mode);
1812 outof_input = operand_subword_force (op0, outof_word, mode);
1813 into_input = operand_subword_force (op0, 1 - outof_word, mode);
1815 if (expand_doubleword_shift (op1_mode, binoptab,
1816 outof_input, into_input, op1,
1817 outof_target, into_target,
1818 unsignedp, next_methods, shift_mask))
1820 insns = get_insns ();
1830 /* Synthesize double word rotates from single word shifts. */
1831 if ((binoptab == rotl_optab || binoptab == rotr_optab)
1832 && mclass == MODE_INT
1833 && CONST_INT_P (op1)
1834 && GET_MODE_SIZE (mode) == 2 * UNITS_PER_WORD
1835 && optab_handler (ashl_optab, word_mode) != CODE_FOR_nothing
1836 && optab_handler (lshr_optab, word_mode) != CODE_FOR_nothing)
1839 rtx into_target, outof_target;
1840 rtx into_input, outof_input;
1842 int shift_count, left_shift, outof_word;
1844 /* If TARGET is the same as one of the operands, the REG_EQUAL note
1845 won't be accurate, so use a new target. Do this also if target is not
1846 a REG, first because having a register instead may open optimization
1847 opportunities, and second because if target and op0 happen to be MEMs
1848 designating the same location, we would risk clobbering it too early
1849 in the code sequence we generate below. */
1850 if (target == 0 || target == op0 || target == op1 || ! REG_P (target))
1851 target = gen_reg_rtx (mode);
1855 shift_count = INTVAL (op1);
1857 /* OUTOF_* is the word we are shifting bits away from, and
1858 INTO_* is the word that we are shifting bits towards, thus
1859 they differ depending on the direction of the shift and
1860 WORDS_BIG_ENDIAN. */
1862 left_shift = (binoptab == rotl_optab);
1863 outof_word = left_shift ^ ! WORDS_BIG_ENDIAN;
1865 outof_target = operand_subword (target, outof_word, 1, mode);
1866 into_target = operand_subword (target, 1 - outof_word, 1, mode);
1868 outof_input = operand_subword_force (op0, outof_word, mode);
1869 into_input = operand_subword_force (op0, 1 - outof_word, mode);
1871 if (shift_count == BITS_PER_WORD)
1873 /* This is just a word swap. */
1874 emit_move_insn (outof_target, into_input);
1875 emit_move_insn (into_target, outof_input);
1880 rtx into_temp1, into_temp2, outof_temp1, outof_temp2;
1881 rtx first_shift_count, second_shift_count;
1882 optab reverse_unsigned_shift, unsigned_shift;
1884 reverse_unsigned_shift = (left_shift ^ (shift_count < BITS_PER_WORD)
1885 ? lshr_optab : ashl_optab);
1887 unsigned_shift = (left_shift ^ (shift_count < BITS_PER_WORD)
1888 ? ashl_optab : lshr_optab);
1890 if (shift_count > BITS_PER_WORD)
1892 first_shift_count = GEN_INT (shift_count - BITS_PER_WORD);
1893 second_shift_count = GEN_INT (2 * BITS_PER_WORD - shift_count);
1897 first_shift_count = GEN_INT (BITS_PER_WORD - shift_count);
1898 second_shift_count = GEN_INT (shift_count);
1901 into_temp1 = expand_binop (word_mode, unsigned_shift,
1902 outof_input, first_shift_count,
1903 NULL_RTX, unsignedp, next_methods);
1904 into_temp2 = expand_binop (word_mode, reverse_unsigned_shift,
1905 into_input, second_shift_count,
1906 NULL_RTX, unsignedp, next_methods);
1908 if (into_temp1 != 0 && into_temp2 != 0)
1909 inter = expand_binop (word_mode, ior_optab, into_temp1, into_temp2,
1910 into_target, unsignedp, next_methods);
1914 if (inter != 0 && inter != into_target)
1915 emit_move_insn (into_target, inter);
1917 outof_temp1 = expand_binop (word_mode, unsigned_shift,
1918 into_input, first_shift_count,
1919 NULL_RTX, unsignedp, next_methods);
1920 outof_temp2 = expand_binop (word_mode, reverse_unsigned_shift,
1921 outof_input, second_shift_count,
1922 NULL_RTX, unsignedp, next_methods);
1924 if (inter != 0 && outof_temp1 != 0 && outof_temp2 != 0)
1925 inter = expand_binop (word_mode, ior_optab,
1926 outof_temp1, outof_temp2,
1927 outof_target, unsignedp, next_methods);
1929 if (inter != 0 && inter != outof_target)
1930 emit_move_insn (outof_target, inter);
1933 insns = get_insns ();
1943 /* These can be done a word at a time by propagating carries. */
1944 if ((binoptab == add_optab || binoptab == sub_optab)
1945 && mclass == MODE_INT
1946 && GET_MODE_SIZE (mode) >= 2 * UNITS_PER_WORD
1947 && optab_handler (binoptab, word_mode) != CODE_FOR_nothing)
1950 optab otheroptab = binoptab == add_optab ? sub_optab : add_optab;
1951 const unsigned int nwords = GET_MODE_BITSIZE (mode) / BITS_PER_WORD;
1952 rtx carry_in = NULL_RTX, carry_out = NULL_RTX;
1953 rtx xop0, xop1, xtarget;
1955 /* We can handle either a 1 or -1 value for the carry. If STORE_FLAG
1956 value is one of those, use it. Otherwise, use 1 since it is the
1957 one easiest to get. */
1958 #if STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1
1959 int normalizep = STORE_FLAG_VALUE;
1964 /* Prepare the operands. */
1965 xop0 = force_reg (mode, op0);
1966 xop1 = force_reg (mode, op1);
1968 xtarget = gen_reg_rtx (mode);
1970 if (target == 0 || !REG_P (target))
1973 /* Indicate for flow that the entire target reg is being set. */
1975 emit_clobber (xtarget);
1977 /* Do the actual arithmetic. */
1978 for (i = 0; i < nwords; i++)
1980 int index = (WORDS_BIG_ENDIAN ? nwords - i - 1 : i);
1981 rtx target_piece = operand_subword (xtarget, index, 1, mode);
1982 rtx op0_piece = operand_subword_force (xop0, index, mode);
1983 rtx op1_piece = operand_subword_force (xop1, index, mode);
1986 /* Main add/subtract of the input operands. */
1987 x = expand_binop (word_mode, binoptab,
1988 op0_piece, op1_piece,
1989 target_piece, unsignedp, next_methods);
1995 /* Store carry from main add/subtract. */
1996 carry_out = gen_reg_rtx (word_mode);
1997 carry_out = emit_store_flag_force (carry_out,
1998 (binoptab == add_optab
2001 word_mode, 1, normalizep);
2008 /* Add/subtract previous carry to main result. */
2009 newx = expand_binop (word_mode,
2010 normalizep == 1 ? binoptab : otheroptab,
2012 NULL_RTX, 1, next_methods);
2016 /* Get out carry from adding/subtracting carry in. */
2017 rtx carry_tmp = gen_reg_rtx (word_mode);
2018 carry_tmp = emit_store_flag_force (carry_tmp,
2019 (binoptab == add_optab
2022 word_mode, 1, normalizep);
2024 /* Logical-ior the two poss. carry together. */
2025 carry_out = expand_binop (word_mode, ior_optab,
2026 carry_out, carry_tmp,
2027 carry_out, 0, next_methods);
2031 emit_move_insn (target_piece, newx);
2035 if (x != target_piece)
2036 emit_move_insn (target_piece, x);
2039 carry_in = carry_out;
2042 if (i == GET_MODE_BITSIZE (mode) / (unsigned) BITS_PER_WORD)
2044 if (optab_handler (mov_optab, mode) != CODE_FOR_nothing
2045 || ! rtx_equal_p (target, xtarget))
2047 rtx temp = emit_move_insn (target, xtarget);
2049 set_unique_reg_note (temp,
2051 gen_rtx_fmt_ee (binoptab->code, mode,
2062 delete_insns_since (last);
2065 /* Attempt to synthesize double word multiplies using a sequence of word
2066 mode multiplications. We first attempt to generate a sequence using a
2067 more efficient unsigned widening multiply, and if that fails we then
2068 try using a signed widening multiply. */
2070 if (binoptab == smul_optab
2071 && mclass == MODE_INT
2072 && GET_MODE_SIZE (mode) == 2 * UNITS_PER_WORD
2073 && optab_handler (smul_optab, word_mode) != CODE_FOR_nothing
2074 && optab_handler (add_optab, word_mode) != CODE_FOR_nothing)
2076 rtx product = NULL_RTX;
2078 if (optab_handler (umul_widen_optab, mode) != CODE_FOR_nothing)
2080 product = expand_doubleword_mult (mode, op0, op1, target,
2083 delete_insns_since (last);
2086 if (product == NULL_RTX
2087 && optab_handler (smul_widen_optab, mode) != CODE_FOR_nothing)
2089 product = expand_doubleword_mult (mode, op0, op1, target,
2092 delete_insns_since (last);
2095 if (product != NULL_RTX)
2097 if (optab_handler (mov_optab, mode) != CODE_FOR_nothing)
2099 temp = emit_move_insn (target ? target : product, product);
2100 set_unique_reg_note (temp,
2102 gen_rtx_fmt_ee (MULT, mode,
2110 /* It can't be open-coded in this mode.
2111 Use a library call if one is available and caller says that's ok. */
2113 libfunc = optab_libfunc (binoptab, mode);
2115 && (methods == OPTAB_LIB || methods == OPTAB_LIB_WIDEN))
2119 enum machine_mode op1_mode = mode;
2124 if (shift_optab_p (binoptab))
2126 op1_mode = targetm.libgcc_shift_count_mode ();
2127 /* Specify unsigned here,
2128 since negative shift counts are meaningless. */
2129 op1x = convert_to_mode (op1_mode, op1, 1);
2132 if (GET_MODE (op0) != VOIDmode
2133 && GET_MODE (op0) != mode)
2134 op0 = convert_to_mode (mode, op0, unsignedp);
2136 /* Pass 1 for NO_QUEUE so we don't lose any increments
2137 if the libcall is cse'd or moved. */
2138 value = emit_library_call_value (libfunc,
2139 NULL_RTX, LCT_CONST, mode, 2,
2140 op0, mode, op1x, op1_mode);
2142 insns = get_insns ();
2145 target = gen_reg_rtx (mode);
2146 emit_libcall_block (insns, target, value,
2147 gen_rtx_fmt_ee (binoptab->code, mode, op0, op1));
2152 delete_insns_since (last);
2154 /* It can't be done in this mode. Can we do it in a wider mode? */
2156 if (! (methods == OPTAB_WIDEN || methods == OPTAB_LIB_WIDEN
2157 || methods == OPTAB_MUST_WIDEN))
2159 /* Caller says, don't even try. */
2160 delete_insns_since (entry_last);
2164 /* Compute the value of METHODS to pass to recursive calls.
2165 Don't allow widening to be tried recursively. */
2167 methods = (methods == OPTAB_LIB_WIDEN ? OPTAB_LIB : OPTAB_DIRECT);
2169 /* Look for a wider mode of the same class for which it appears we can do
2172 if (CLASS_HAS_WIDER_MODES_P (mclass))
2174 for (wider_mode = GET_MODE_WIDER_MODE (mode);
2175 wider_mode != VOIDmode;
2176 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
2178 if (optab_handler (binoptab, wider_mode) != CODE_FOR_nothing
2179 || (methods == OPTAB_LIB
2180 && optab_libfunc (binoptab, wider_mode)))
2182 rtx xop0 = op0, xop1 = op1;
2185 /* For certain integer operations, we need not actually extend
2186 the narrow operands, as long as we will truncate
2187 the results to the same narrowness. */
2189 if ((binoptab == ior_optab || binoptab == and_optab
2190 || binoptab == xor_optab
2191 || binoptab == add_optab || binoptab == sub_optab
2192 || binoptab == smul_optab || binoptab == ashl_optab)
2193 && mclass == MODE_INT)
2196 xop0 = widen_operand (xop0, wider_mode, mode,
2197 unsignedp, no_extend);
2199 /* The second operand of a shift must always be extended. */
2200 xop1 = widen_operand (xop1, wider_mode, mode, unsignedp,
2201 no_extend && binoptab != ashl_optab);
2203 temp = expand_binop (wider_mode, binoptab, xop0, xop1, NULL_RTX,
2204 unsignedp, methods);
2207 if (mclass != MODE_INT
2208 || !TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode),
2209 GET_MODE_BITSIZE (wider_mode)))
2212 target = gen_reg_rtx (mode);
2213 convert_move (target, temp, 0);
2217 return gen_lowpart (mode, temp);
2220 delete_insns_since (last);
2225 delete_insns_since (entry_last);
2229 /* Expand a binary operator which has both signed and unsigned forms.
2230 UOPTAB is the optab for unsigned operations, and SOPTAB is for
2233 If we widen unsigned operands, we may use a signed wider operation instead
2234 of an unsigned wider operation, since the result would be the same. */
2237 sign_expand_binop (enum machine_mode mode, optab uoptab, optab soptab,
2238 rtx op0, rtx op1, rtx target, int unsignedp,
2239 enum optab_methods methods)
2242 optab direct_optab = unsignedp ? uoptab : soptab;
2243 struct optab_d wide_soptab;
2245 /* Do it without widening, if possible. */
2246 temp = expand_binop (mode, direct_optab, op0, op1, target,
2247 unsignedp, OPTAB_DIRECT);
2248 if (temp || methods == OPTAB_DIRECT)
2251 /* Try widening to a signed int. Make a fake signed optab that
2252 hides any signed insn for direct use. */
2253 wide_soptab = *soptab;
2254 set_optab_handler (&wide_soptab, mode, CODE_FOR_nothing);
2255 /* We don't want to generate new hash table entries from this fake
2257 wide_soptab.libcall_gen = NULL;
2259 temp = expand_binop (mode, &wide_soptab, op0, op1, target,
2260 unsignedp, OPTAB_WIDEN);
2262 /* For unsigned operands, try widening to an unsigned int. */
2263 if (temp == 0 && unsignedp)
2264 temp = expand_binop (mode, uoptab, op0, op1, target,
2265 unsignedp, OPTAB_WIDEN);
2266 if (temp || methods == OPTAB_WIDEN)
2269 /* Use the right width libcall if that exists. */
2270 temp = expand_binop (mode, direct_optab, op0, op1, target, unsignedp, OPTAB_LIB);
2271 if (temp || methods == OPTAB_LIB)
2274 /* Must widen and use a libcall, use either signed or unsigned. */
2275 temp = expand_binop (mode, &wide_soptab, op0, op1, target,
2276 unsignedp, methods);
2280 return expand_binop (mode, uoptab, op0, op1, target,
2281 unsignedp, methods);
2285 /* Generate code to perform an operation specified by UNOPPTAB
2286 on operand OP0, with two results to TARG0 and TARG1.
2287 We assume that the order of the operands for the instruction
2288 is TARG0, TARG1, OP0.
2290 Either TARG0 or TARG1 may be zero, but what that means is that
2291 the result is not actually wanted. We will generate it into
2292 a dummy pseudo-reg and discard it. They may not both be zero.
2294 Returns 1 if this operation can be performed; 0 if not. */
2297 expand_twoval_unop (optab unoptab, rtx op0, rtx targ0, rtx targ1,
2300 enum machine_mode mode = GET_MODE (targ0 ? targ0 : targ1);
2301 enum mode_class mclass;
2302 enum machine_mode wider_mode;
2303 rtx entry_last = get_last_insn ();
2306 mclass = GET_MODE_CLASS (mode);
2309 targ0 = gen_reg_rtx (mode);
2311 targ1 = gen_reg_rtx (mode);
2313 /* Record where to go back to if we fail. */
2314 last = get_last_insn ();
2316 if (optab_handler (unoptab, mode) != CODE_FOR_nothing)
2318 int icode = (int) optab_handler (unoptab, mode);
2319 enum machine_mode mode0 = insn_data[icode].operand[2].mode;
2323 if (GET_MODE (xop0) != VOIDmode
2324 && GET_MODE (xop0) != mode0)
2325 xop0 = convert_to_mode (mode0, xop0, unsignedp);
2327 /* Now, if insn doesn't accept these operands, put them into pseudos. */
2328 if (!insn_data[icode].operand[2].predicate (xop0, mode0))
2329 xop0 = copy_to_mode_reg (mode0, xop0);
2331 /* We could handle this, but we should always be called with a pseudo
2332 for our targets and all insns should take them as outputs. */
2333 gcc_assert (insn_data[icode].operand[0].predicate (targ0, mode));
2334 gcc_assert (insn_data[icode].operand[1].predicate (targ1, mode));
2336 pat = GEN_FCN (icode) (targ0, targ1, xop0);
2343 delete_insns_since (last);
2346 /* It can't be done in this mode. Can we do it in a wider mode? */
2348 if (CLASS_HAS_WIDER_MODES_P (mclass))
2350 for (wider_mode = GET_MODE_WIDER_MODE (mode);
2351 wider_mode != VOIDmode;
2352 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
2354 if (optab_handler (unoptab, wider_mode) != CODE_FOR_nothing)
2356 rtx t0 = gen_reg_rtx (wider_mode);
2357 rtx t1 = gen_reg_rtx (wider_mode);
2358 rtx cop0 = convert_modes (wider_mode, mode, op0, unsignedp);
2360 if (expand_twoval_unop (unoptab, cop0, t0, t1, unsignedp))
2362 convert_move (targ0, t0, unsignedp);
2363 convert_move (targ1, t1, unsignedp);
2367 delete_insns_since (last);
2372 delete_insns_since (entry_last);
2376 /* Generate code to perform an operation specified by BINOPTAB
2377 on operands OP0 and OP1, with two results to TARG1 and TARG2.
2378 We assume that the order of the operands for the instruction
2379 is TARG0, OP0, OP1, TARG1, which would fit a pattern like
2380 [(set TARG0 (operate OP0 OP1)) (set TARG1 (operate ...))].
2382 Either TARG0 or TARG1 may be zero, but what that means is that
2383 the result is not actually wanted. We will generate it into
2384 a dummy pseudo-reg and discard it. They may not both be zero.
2386 Returns 1 if this operation can be performed; 0 if not. */
2389 expand_twoval_binop (optab binoptab, rtx op0, rtx op1, rtx targ0, rtx targ1,
2392 enum machine_mode mode = GET_MODE (targ0 ? targ0 : targ1);
2393 enum mode_class mclass;
2394 enum machine_mode wider_mode;
2395 rtx entry_last = get_last_insn ();
2398 mclass = GET_MODE_CLASS (mode);
2401 targ0 = gen_reg_rtx (mode);
2403 targ1 = gen_reg_rtx (mode);
2405 /* Record where to go back to if we fail. */
2406 last = get_last_insn ();
2408 if (optab_handler (binoptab, mode) != CODE_FOR_nothing)
2410 int icode = (int) optab_handler (binoptab, mode);
2411 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
2412 enum machine_mode mode1 = insn_data[icode].operand[2].mode;
2414 rtx xop0 = op0, xop1 = op1;
2416 /* If we are optimizing, force expensive constants into a register. */
2417 xop0 = avoid_expensive_constant (mode0, binoptab, xop0, unsignedp);
2418 xop1 = avoid_expensive_constant (mode1, binoptab, xop1, unsignedp);
2420 /* In case the insn wants input operands in modes different from
2421 those of the actual operands, convert the operands. It would
2422 seem that we don't need to convert CONST_INTs, but we do, so
2423 that they're properly zero-extended, sign-extended or truncated
2426 if (GET_MODE (op0) != mode0 && mode0 != VOIDmode)
2427 xop0 = convert_modes (mode0,
2428 GET_MODE (op0) != VOIDmode
2433 if (GET_MODE (op1) != mode1 && mode1 != VOIDmode)
2434 xop1 = convert_modes (mode1,
2435 GET_MODE (op1) != VOIDmode
2440 /* Now, if insn doesn't accept these operands, put them into pseudos. */
2441 if (!insn_data[icode].operand[1].predicate (xop0, mode0))
2442 xop0 = copy_to_mode_reg (mode0, xop0);
2444 if (!insn_data[icode].operand[2].predicate (xop1, mode1))
2445 xop1 = copy_to_mode_reg (mode1, xop1);
2447 /* We could handle this, but we should always be called with a pseudo
2448 for our targets and all insns should take them as outputs. */
2449 gcc_assert (insn_data[icode].operand[0].predicate (targ0, mode));
2450 gcc_assert (insn_data[icode].operand[3].predicate (targ1, mode));
2452 pat = GEN_FCN (icode) (targ0, xop0, xop1, targ1);
2459 delete_insns_since (last);
2462 /* It can't be done in this mode. Can we do it in a wider mode? */
2464 if (CLASS_HAS_WIDER_MODES_P (mclass))
2466 for (wider_mode = GET_MODE_WIDER_MODE (mode);
2467 wider_mode != VOIDmode;
2468 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
2470 if (optab_handler (binoptab, wider_mode) != CODE_FOR_nothing)
2472 rtx t0 = gen_reg_rtx (wider_mode);
2473 rtx t1 = gen_reg_rtx (wider_mode);
2474 rtx cop0 = convert_modes (wider_mode, mode, op0, unsignedp);
2475 rtx cop1 = convert_modes (wider_mode, mode, op1, unsignedp);
2477 if (expand_twoval_binop (binoptab, cop0, cop1,
2480 convert_move (targ0, t0, unsignedp);
2481 convert_move (targ1, t1, unsignedp);
2485 delete_insns_since (last);
2490 delete_insns_since (entry_last);
2494 /* Expand the two-valued library call indicated by BINOPTAB, but
2495 preserve only one of the values. If TARG0 is non-NULL, the first
2496 value is placed into TARG0; otherwise the second value is placed
2497 into TARG1. Exactly one of TARG0 and TARG1 must be non-NULL. The
2498 value stored into TARG0 or TARG1 is equivalent to (CODE OP0 OP1).
2499 This routine assumes that the value returned by the library call is
2500 as if the return value was of an integral mode twice as wide as the
2501 mode of OP0. Returns 1 if the call was successful. */
2504 expand_twoval_binop_libfunc (optab binoptab, rtx op0, rtx op1,
2505 rtx targ0, rtx targ1, enum rtx_code code)
2507 enum machine_mode mode;
2508 enum machine_mode libval_mode;
2513 /* Exactly one of TARG0 or TARG1 should be non-NULL. */
2514 gcc_assert (!targ0 != !targ1);
2516 mode = GET_MODE (op0);
2517 libfunc = optab_libfunc (binoptab, mode);
2521 /* The value returned by the library function will have twice as
2522 many bits as the nominal MODE. */
2523 libval_mode = smallest_mode_for_size (2 * GET_MODE_BITSIZE (mode),
2526 libval = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
2530 /* Get the part of VAL containing the value that we want. */
2531 libval = simplify_gen_subreg (mode, libval, libval_mode,
2532 targ0 ? 0 : GET_MODE_SIZE (mode));
2533 insns = get_insns ();
2535 /* Move the into the desired location. */
2536 emit_libcall_block (insns, targ0 ? targ0 : targ1, libval,
2537 gen_rtx_fmt_ee (code, mode, op0, op1));
2543 /* Wrapper around expand_unop which takes an rtx code to specify
2544 the operation to perform, not an optab pointer. All other
2545 arguments are the same. */
2547 expand_simple_unop (enum machine_mode mode, enum rtx_code code, rtx op0,
2548 rtx target, int unsignedp)
2550 optab unop = code_to_optab[(int) code];
2553 return expand_unop (mode, unop, op0, target, unsignedp);
2559 (clz:wide (zero_extend:wide x)) - ((width wide) - (width narrow)). */
2561 widen_clz (enum machine_mode mode, rtx op0, rtx target)
2563 enum mode_class mclass = GET_MODE_CLASS (mode);
2564 if (CLASS_HAS_WIDER_MODES_P (mclass))
2566 enum machine_mode wider_mode;
2567 for (wider_mode = GET_MODE_WIDER_MODE (mode);
2568 wider_mode != VOIDmode;
2569 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
2571 if (optab_handler (clz_optab, wider_mode) != CODE_FOR_nothing)
2573 rtx xop0, temp, last;
2575 last = get_last_insn ();
2578 target = gen_reg_rtx (mode);
2579 xop0 = widen_operand (op0, wider_mode, mode, true, false);
2580 temp = expand_unop (wider_mode, clz_optab, xop0, NULL_RTX, true);
2582 temp = expand_binop (wider_mode, sub_optab, temp,
2583 GEN_INT (GET_MODE_BITSIZE (wider_mode)
2584 - GET_MODE_BITSIZE (mode)),
2585 target, true, OPTAB_DIRECT);
2587 delete_insns_since (last);
2596 /* Try calculating clz of a double-word quantity as two clz's of word-sized
2597 quantities, choosing which based on whether the high word is nonzero. */
2599 expand_doubleword_clz (enum machine_mode mode, rtx op0, rtx target)
2601 rtx xop0 = force_reg (mode, op0);
2602 rtx subhi = gen_highpart (word_mode, xop0);
2603 rtx sublo = gen_lowpart (word_mode, xop0);
2604 rtx hi0_label = gen_label_rtx ();
2605 rtx after_label = gen_label_rtx ();
2606 rtx seq, temp, result;
2608 /* If we were not given a target, use a word_mode register, not a
2609 'mode' register. The result will fit, and nobody is expecting
2610 anything bigger (the return type of __builtin_clz* is int). */
2612 target = gen_reg_rtx (word_mode);
2614 /* In any case, write to a word_mode scratch in both branches of the
2615 conditional, so we can ensure there is a single move insn setting
2616 'target' to tag a REG_EQUAL note on. */
2617 result = gen_reg_rtx (word_mode);
2621 /* If the high word is not equal to zero,
2622 then clz of the full value is clz of the high word. */
2623 emit_cmp_and_jump_insns (subhi, CONST0_RTX (word_mode), EQ, 0,
2624 word_mode, true, hi0_label);
2626 temp = expand_unop_direct (word_mode, clz_optab, subhi, result, true);
2631 convert_move (result, temp, true);
2633 emit_jump_insn (gen_jump (after_label));
2636 /* Else clz of the full value is clz of the low word plus the number
2637 of bits in the high word. */
2638 emit_label (hi0_label);
2640 temp = expand_unop_direct (word_mode, clz_optab, sublo, 0, true);
2643 temp = expand_binop (word_mode, add_optab, temp,
2644 GEN_INT (GET_MODE_BITSIZE (word_mode)),
2645 result, true, OPTAB_DIRECT);
2649 convert_move (result, temp, true);
2651 emit_label (after_label);
2652 convert_move (target, result, true);
2657 add_equal_note (seq, target, CLZ, xop0, 0);
2669 (lshiftrt:wide (bswap:wide x) ((width wide) - (width narrow))). */
2671 widen_bswap (enum machine_mode mode, rtx op0, rtx target)
2673 enum mode_class mclass = GET_MODE_CLASS (mode);
2674 enum machine_mode wider_mode;
2677 if (!CLASS_HAS_WIDER_MODES_P (mclass))
2680 for (wider_mode = GET_MODE_WIDER_MODE (mode);
2681 wider_mode != VOIDmode;
2682 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
2683 if (optab_handler (bswap_optab, wider_mode) != CODE_FOR_nothing)
2688 last = get_last_insn ();
2690 x = widen_operand (op0, wider_mode, mode, true, true);
2691 x = expand_unop (wider_mode, bswap_optab, x, NULL_RTX, true);
2694 x = expand_shift (RSHIFT_EXPR, wider_mode, x,
2695 size_int (GET_MODE_BITSIZE (wider_mode)
2696 - GET_MODE_BITSIZE (mode)),
2702 target = gen_reg_rtx (mode);
2703 emit_move_insn (target, gen_lowpart (mode, x));
2706 delete_insns_since (last);
2711 /* Try calculating bswap as two bswaps of two word-sized operands. */
2714 expand_doubleword_bswap (enum machine_mode mode, rtx op, rtx target)
2718 t1 = expand_unop (word_mode, bswap_optab,
2719 operand_subword_force (op, 0, mode), NULL_RTX, true);
2720 t0 = expand_unop (word_mode, bswap_optab,
2721 operand_subword_force (op, 1, mode), NULL_RTX, true);
2724 target = gen_reg_rtx (mode);
2726 emit_clobber (target);
2727 emit_move_insn (operand_subword (target, 0, 1, mode), t0);
2728 emit_move_insn (operand_subword (target, 1, 1, mode), t1);
2733 /* Try calculating (parity x) as (and (popcount x) 1), where
2734 popcount can also be done in a wider mode. */
2736 expand_parity (enum machine_mode mode, rtx op0, rtx target)
2738 enum mode_class mclass = GET_MODE_CLASS (mode);
2739 if (CLASS_HAS_WIDER_MODES_P (mclass))
2741 enum machine_mode wider_mode;
2742 for (wider_mode = mode; wider_mode != VOIDmode;
2743 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
2745 if (optab_handler (popcount_optab, wider_mode) != CODE_FOR_nothing)
2747 rtx xop0, temp, last;
2749 last = get_last_insn ();
2752 target = gen_reg_rtx (mode);
2753 xop0 = widen_operand (op0, wider_mode, mode, true, false);
2754 temp = expand_unop (wider_mode, popcount_optab, xop0, NULL_RTX,
2757 temp = expand_binop (wider_mode, and_optab, temp, const1_rtx,
2758 target, true, OPTAB_DIRECT);
2760 delete_insns_since (last);
2769 /* Try calculating ctz(x) as K - clz(x & -x) ,
2770 where K is GET_MODE_BITSIZE(mode) - 1.
2772 Both __builtin_ctz and __builtin_clz are undefined at zero, so we
2773 don't have to worry about what the hardware does in that case. (If
2774 the clz instruction produces the usual value at 0, which is K, the
2775 result of this code sequence will be -1; expand_ffs, below, relies
2776 on this. It might be nice to have it be K instead, for consistency
2777 with the (very few) processors that provide a ctz with a defined
2778 value, but that would take one more instruction, and it would be
2779 less convenient for expand_ffs anyway. */
2782 expand_ctz (enum machine_mode mode, rtx op0, rtx target)
2786 if (optab_handler (clz_optab, mode) == CODE_FOR_nothing)
2791 temp = expand_unop_direct (mode, neg_optab, op0, NULL_RTX, true);
2793 temp = expand_binop (mode, and_optab, op0, temp, NULL_RTX,
2794 true, OPTAB_DIRECT);
2796 temp = expand_unop_direct (mode, clz_optab, temp, NULL_RTX, true);
2798 temp = expand_binop (mode, sub_optab, GEN_INT (GET_MODE_BITSIZE (mode) - 1),
2800 true, OPTAB_DIRECT);
2810 add_equal_note (seq, temp, CTZ, op0, 0);
2816 /* Try calculating ffs(x) using ctz(x) if we have that instruction, or
2817 else with the sequence used by expand_clz.
2819 The ffs builtin promises to return zero for a zero value and ctz/clz
2820 may have an undefined value in that case. If they do not give us a
2821 convenient value, we have to generate a test and branch. */
2823 expand_ffs (enum machine_mode mode, rtx op0, rtx target)
2825 HOST_WIDE_INT val = 0;
2826 bool defined_at_zero = false;
2829 if (optab_handler (ctz_optab, mode) != CODE_FOR_nothing)
2833 temp = expand_unop_direct (mode, ctz_optab, op0, 0, true);
2837 defined_at_zero = (CTZ_DEFINED_VALUE_AT_ZERO (mode, val) == 2);
2839 else if (optab_handler (clz_optab, mode) != CODE_FOR_nothing)
2842 temp = expand_ctz (mode, op0, 0);
2846 if (CLZ_DEFINED_VALUE_AT_ZERO (mode, val) == 2)
2848 defined_at_zero = true;
2849 val = (GET_MODE_BITSIZE (mode) - 1) - val;
2855 if (defined_at_zero && val == -1)
2856 /* No correction needed at zero. */;
2859 /* We don't try to do anything clever with the situation found
2860 on some processors (eg Alpha) where ctz(0:mode) ==
2861 bitsize(mode). If someone can think of a way to send N to -1
2862 and leave alone all values in the range 0..N-1 (where N is a
2863 power of two), cheaper than this test-and-branch, please add it.
2865 The test-and-branch is done after the operation itself, in case
2866 the operation sets condition codes that can be recycled for this.
2867 (This is true on i386, for instance.) */
2869 rtx nonzero_label = gen_label_rtx ();
2870 emit_cmp_and_jump_insns (op0, CONST0_RTX (mode), NE, 0,
2871 mode, true, nonzero_label);
2873 convert_move (temp, GEN_INT (-1), false);
2874 emit_label (nonzero_label);
2877 /* temp now has a value in the range -1..bitsize-1. ffs is supposed
2878 to produce a value in the range 0..bitsize. */
2879 temp = expand_binop (mode, add_optab, temp, GEN_INT (1),
2880 target, false, OPTAB_DIRECT);
2887 add_equal_note (seq, temp, FFS, op0, 0);
2896 /* Extract the OMODE lowpart from VAL, which has IMODE. Under certain
2897 conditions, VAL may already be a SUBREG against which we cannot generate
2898 a further SUBREG. In this case, we expect forcing the value into a
2899 register will work around the situation. */
2902 lowpart_subreg_maybe_copy (enum machine_mode omode, rtx val,
2903 enum machine_mode imode)
2906 ret = lowpart_subreg (omode, val, imode);
2909 val = force_reg (imode, val);
2910 ret = lowpart_subreg (omode, val, imode);
2911 gcc_assert (ret != NULL);
2916 /* Expand a floating point absolute value or negation operation via a
2917 logical operation on the sign bit. */
2920 expand_absneg_bit (enum rtx_code code, enum machine_mode mode,
2921 rtx op0, rtx target)
2923 const struct real_format *fmt;
2924 int bitpos, word, nwords, i;
2925 enum machine_mode imode;
2929 /* The format has to have a simple sign bit. */
2930 fmt = REAL_MODE_FORMAT (mode);
2934 bitpos = fmt->signbit_rw;
2938 /* Don't create negative zeros if the format doesn't support them. */
2939 if (code == NEG && !fmt->has_signed_zero)
2942 if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
2944 imode = int_mode_for_mode (mode);
2945 if (imode == BLKmode)
2954 if (FLOAT_WORDS_BIG_ENDIAN)
2955 word = (GET_MODE_BITSIZE (mode) - bitpos) / BITS_PER_WORD;
2957 word = bitpos / BITS_PER_WORD;
2958 bitpos = bitpos % BITS_PER_WORD;
2959 nwords = (GET_MODE_BITSIZE (mode) + BITS_PER_WORD - 1) / BITS_PER_WORD;
2962 mask = double_int_setbit (double_int_zero, bitpos);
2964 mask = double_int_not (mask);
2966 if (target == 0 || target == op0)
2967 target = gen_reg_rtx (mode);
2973 for (i = 0; i < nwords; ++i)
2975 rtx targ_piece = operand_subword (target, i, 1, mode);
2976 rtx op0_piece = operand_subword_force (op0, i, mode);
2980 temp = expand_binop (imode, code == ABS ? and_optab : xor_optab,
2982 immed_double_int_const (mask, imode),
2983 targ_piece, 1, OPTAB_LIB_WIDEN);
2984 if (temp != targ_piece)
2985 emit_move_insn (targ_piece, temp);
2988 emit_move_insn (targ_piece, op0_piece);
2991 insns = get_insns ();
2998 temp = expand_binop (imode, code == ABS ? and_optab : xor_optab,
2999 gen_lowpart (imode, op0),
3000 immed_double_int_const (mask, imode),
3001 gen_lowpart (imode, target), 1, OPTAB_LIB_WIDEN);
3002 target = lowpart_subreg_maybe_copy (mode, temp, imode);
3004 set_unique_reg_note (get_last_insn (), REG_EQUAL,
3005 gen_rtx_fmt_e (code, mode, copy_rtx (op0)));
3011 /* As expand_unop, but will fail rather than attempt the operation in a
3012 different mode or with a libcall. */
3014 expand_unop_direct (enum machine_mode mode, optab unoptab, rtx op0, rtx target,
3017 if (optab_handler (unoptab, mode) != CODE_FOR_nothing)
3019 int icode = (int) optab_handler (unoptab, mode);
3020 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
3022 rtx last = get_last_insn ();
3028 temp = gen_reg_rtx (mode);
3030 if (GET_MODE (xop0) != VOIDmode
3031 && GET_MODE (xop0) != mode0)
3032 xop0 = convert_to_mode (mode0, xop0, unsignedp);
3034 /* Now, if insn doesn't accept our operand, put it into a pseudo. */
3036 if (!insn_data[icode].operand[1].predicate (xop0, mode0))
3037 xop0 = copy_to_mode_reg (mode0, xop0);
3039 if (!insn_data[icode].operand[0].predicate (temp, mode))
3040 temp = gen_reg_rtx (mode);
3042 pat = GEN_FCN (icode) (temp, xop0);
3045 if (INSN_P (pat) && NEXT_INSN (pat) != NULL_RTX
3046 && ! add_equal_note (pat, temp, unoptab->code, xop0, NULL_RTX))
3048 delete_insns_since (last);
3049 return expand_unop (mode, unoptab, op0, NULL_RTX, unsignedp);
3057 delete_insns_since (last);
3062 /* Generate code to perform an operation specified by UNOPTAB
3063 on operand OP0, with result having machine-mode MODE.
3065 UNSIGNEDP is for the case where we have to widen the operands
3066 to perform the operation. It says to use zero-extension.
3068 If TARGET is nonzero, the value
3069 is generated there, if it is convenient to do so.
3070 In all cases an rtx is returned for the locus of the value;
3071 this may or may not be TARGET. */
3074 expand_unop (enum machine_mode mode, optab unoptab, rtx op0, rtx target,
3077 enum mode_class mclass = GET_MODE_CLASS (mode);
3078 enum machine_mode wider_mode;
3082 temp = expand_unop_direct (mode, unoptab, op0, target, unsignedp);
3086 /* It can't be done in this mode. Can we open-code it in a wider mode? */
3088 /* Widening (or narrowing) clz needs special treatment. */
3089 if (unoptab == clz_optab)
3091 temp = widen_clz (mode, op0, target);
3095 if (GET_MODE_SIZE (mode) == 2 * UNITS_PER_WORD
3096 && optab_handler (unoptab, word_mode) != CODE_FOR_nothing)
3098 temp = expand_doubleword_clz (mode, op0, target);
3106 /* Widening (or narrowing) bswap needs special treatment. */
3107 if (unoptab == bswap_optab)
3109 temp = widen_bswap (mode, op0, target);
3113 if (GET_MODE_SIZE (mode) == 2 * UNITS_PER_WORD
3114 && optab_handler (unoptab, word_mode) != CODE_FOR_nothing)
3116 temp = expand_doubleword_bswap (mode, op0, target);
3124 if (CLASS_HAS_WIDER_MODES_P (mclass))
3125 for (wider_mode = GET_MODE_WIDER_MODE (mode);
3126 wider_mode != VOIDmode;
3127 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
3129 if (optab_handler (unoptab, wider_mode) != CODE_FOR_nothing)
3132 rtx last = get_last_insn ();
3134 /* For certain operations, we need not actually extend
3135 the narrow operand, as long as we will truncate the
3136 results to the same narrowness. */
3138 xop0 = widen_operand (xop0, wider_mode, mode, unsignedp,
3139 (unoptab == neg_optab
3140 || unoptab == one_cmpl_optab)
3141 && mclass == MODE_INT);
3143 temp = expand_unop (wider_mode, unoptab, xop0, NULL_RTX,
3148 if (mclass != MODE_INT
3149 || !TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode),
3150 GET_MODE_BITSIZE (wider_mode)))
3153 target = gen_reg_rtx (mode);
3154 convert_move (target, temp, 0);
3158 return gen_lowpart (mode, temp);
3161 delete_insns_since (last);
3165 /* These can be done a word at a time. */
3166 if (unoptab == one_cmpl_optab
3167 && mclass == MODE_INT
3168 && GET_MODE_SIZE (mode) > UNITS_PER_WORD
3169 && optab_handler (unoptab, word_mode) != CODE_FOR_nothing)
3174 if (target == 0 || target == op0)
3175 target = gen_reg_rtx (mode);
3179 /* Do the actual arithmetic. */
3180 for (i = 0; i < GET_MODE_BITSIZE (mode) / BITS_PER_WORD; i++)
3182 rtx target_piece = operand_subword (target, i, 1, mode);
3183 rtx x = expand_unop (word_mode, unoptab,
3184 operand_subword_force (op0, i, mode),
3185 target_piece, unsignedp);
3187 if (target_piece != x)
3188 emit_move_insn (target_piece, x);
3191 insns = get_insns ();
3198 if (unoptab->code == NEG)
3200 /* Try negating floating point values by flipping the sign bit. */
3201 if (SCALAR_FLOAT_MODE_P (mode))
3203 temp = expand_absneg_bit (NEG, mode, op0, target);
3208 /* If there is no negation pattern, and we have no negative zero,
3209 try subtracting from zero. */
3210 if (!HONOR_SIGNED_ZEROS (mode))
3212 temp = expand_binop (mode, (unoptab == negv_optab
3213 ? subv_optab : sub_optab),
3214 CONST0_RTX (mode), op0, target,
3215 unsignedp, OPTAB_DIRECT);
3221 /* Try calculating parity (x) as popcount (x) % 2. */
3222 if (unoptab == parity_optab)
3224 temp = expand_parity (mode, op0, target);
3229 /* Try implementing ffs (x) in terms of clz (x). */
3230 if (unoptab == ffs_optab)
3232 temp = expand_ffs (mode, op0, target);
3237 /* Try implementing ctz (x) in terms of clz (x). */
3238 if (unoptab == ctz_optab)
3240 temp = expand_ctz (mode, op0, target);
3246 /* Now try a library call in this mode. */
3247 libfunc = optab_libfunc (unoptab, mode);
3253 enum machine_mode outmode = mode;
3255 /* All of these functions return small values. Thus we choose to
3256 have them return something that isn't a double-word. */
3257 if (unoptab == ffs_optab || unoptab == clz_optab || unoptab == ctz_optab
3258 || unoptab == popcount_optab || unoptab == parity_optab)
3260 = GET_MODE (hard_libcall_value (TYPE_MODE (integer_type_node),
3261 optab_libfunc (unoptab, mode)));
3265 /* Pass 1 for NO_QUEUE so we don't lose any increments
3266 if the libcall is cse'd or moved. */
3267 value = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST, outmode,
3269 insns = get_insns ();
3272 target = gen_reg_rtx (outmode);
3273 eq_value = gen_rtx_fmt_e (unoptab->code, mode, op0);
3274 if (GET_MODE_SIZE (outmode) < GET_MODE_SIZE (mode))
3275 eq_value = simplify_gen_unary (TRUNCATE, outmode, eq_value, mode);
3276 else if (GET_MODE_SIZE (outmode) > GET_MODE_SIZE (mode))
3277 eq_value = simplify_gen_unary (ZERO_EXTEND, outmode, eq_value, mode);
3278 emit_libcall_block (insns, target, value, eq_value);
3283 /* It can't be done in this mode. Can we do it in a wider mode? */
3285 if (CLASS_HAS_WIDER_MODES_P (mclass))
3287 for (wider_mode = GET_MODE_WIDER_MODE (mode);
3288 wider_mode != VOIDmode;
3289 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
3291 if (optab_handler (unoptab, wider_mode) != CODE_FOR_nothing
3292 || optab_libfunc (unoptab, wider_mode))
3295 rtx last = get_last_insn ();
3297 /* For certain operations, we need not actually extend
3298 the narrow operand, as long as we will truncate the
3299 results to the same narrowness. */
3301 xop0 = widen_operand (xop0, wider_mode, mode, unsignedp,
3302 (unoptab == neg_optab
3303 || unoptab == one_cmpl_optab)
3304 && mclass == MODE_INT);
3306 temp = expand_unop (wider_mode, unoptab, xop0, NULL_RTX,
3309 /* If we are generating clz using wider mode, adjust the
3311 if (unoptab == clz_optab && temp != 0)
3312 temp = expand_binop (wider_mode, sub_optab, temp,
3313 GEN_INT (GET_MODE_BITSIZE (wider_mode)
3314 - GET_MODE_BITSIZE (mode)),
3315 target, true, OPTAB_DIRECT);
3319 if (mclass != MODE_INT)
3322 target = gen_reg_rtx (mode);
3323 convert_move (target, temp, 0);
3327 return gen_lowpart (mode, temp);
3330 delete_insns_since (last);
3335 /* One final attempt at implementing negation via subtraction,
3336 this time allowing widening of the operand. */
3337 if (unoptab->code == NEG && !HONOR_SIGNED_ZEROS (mode))
3340 temp = expand_binop (mode,
3341 unoptab == negv_optab ? subv_optab : sub_optab,
3342 CONST0_RTX (mode), op0,
3343 target, unsignedp, OPTAB_LIB_WIDEN);
3351 /* Emit code to compute the absolute value of OP0, with result to
3352 TARGET if convenient. (TARGET may be 0.) The return value says
3353 where the result actually is to be found.
3355 MODE is the mode of the operand; the mode of the result is
3356 different but can be deduced from MODE.
3361 expand_abs_nojump (enum machine_mode mode, rtx op0, rtx target,
3362 int result_unsignedp)
3367 result_unsignedp = 1;
3369 /* First try to do it with a special abs instruction. */
3370 temp = expand_unop (mode, result_unsignedp ? abs_optab : absv_optab,
3375 /* For floating point modes, try clearing the sign bit. */
3376 if (SCALAR_FLOAT_MODE_P (mode))
3378 temp = expand_absneg_bit (ABS, mode, op0, target);
3383 /* If we have a MAX insn, we can do this as MAX (x, -x). */
3384 if (optab_handler (smax_optab, mode) != CODE_FOR_nothing
3385 && !HONOR_SIGNED_ZEROS (mode))
3387 rtx last = get_last_insn ();
3389 temp = expand_unop (mode, neg_optab, op0, NULL_RTX, 0);
3391 temp = expand_binop (mode, smax_optab, op0, temp, target, 0,
3397 delete_insns_since (last);
3400 /* If this machine has expensive jumps, we can do integer absolute
3401 value of X as (((signed) x >> (W-1)) ^ x) - ((signed) x >> (W-1)),
3402 where W is the width of MODE. */
3404 if (GET_MODE_CLASS (mode) == MODE_INT
3405 && BRANCH_COST (optimize_insn_for_speed_p (),
3408 rtx extended = expand_shift (RSHIFT_EXPR, mode, op0,
3409 size_int (GET_MODE_BITSIZE (mode) - 1),
3412 temp = expand_binop (mode, xor_optab, extended, op0, target, 0,
3415 temp = expand_binop (mode, result_unsignedp ? sub_optab : subv_optab,
3416 temp, extended, target, 0, OPTAB_LIB_WIDEN);
3426 expand_abs (enum machine_mode mode, rtx op0, rtx target,
3427 int result_unsignedp, int safe)
3432 result_unsignedp = 1;
3434 temp = expand_abs_nojump (mode, op0, target, result_unsignedp);
3438 /* If that does not win, use conditional jump and negate. */
3440 /* It is safe to use the target if it is the same
3441 as the source if this is also a pseudo register */
3442 if (op0 == target && REG_P (op0)
3443 && REGNO (op0) >= FIRST_PSEUDO_REGISTER)
3446 op1 = gen_label_rtx ();
3447 if (target == 0 || ! safe
3448 || GET_MODE (target) != mode
3449 || (MEM_P (target) && MEM_VOLATILE_P (target))
3451 && REGNO (target) < FIRST_PSEUDO_REGISTER))
3452 target = gen_reg_rtx (mode);
3454 emit_move_insn (target, op0);
3457 do_compare_rtx_and_jump (target, CONST0_RTX (mode), GE, 0, mode,
3458 NULL_RTX, NULL_RTX, op1, -1);
3460 op0 = expand_unop (mode, result_unsignedp ? neg_optab : negv_optab,
3463 emit_move_insn (target, op0);
3469 /* Emit code to compute the one's complement absolute value of OP0
3470 (if (OP0 < 0) OP0 = ~OP0), with result to TARGET if convenient.
3471 (TARGET may be NULL_RTX.) The return value says where the result
3472 actually is to be found.
3474 MODE is the mode of the operand; the mode of the result is
3475 different but can be deduced from MODE. */
3478 expand_one_cmpl_abs_nojump (enum machine_mode mode, rtx op0, rtx target)
3482 /* Not applicable for floating point modes. */
3483 if (FLOAT_MODE_P (mode))
3486 /* If we have a MAX insn, we can do this as MAX (x, ~x). */
3487 if (optab_handler (smax_optab, mode) != CODE_FOR_nothing)
3489 rtx last = get_last_insn ();
3491 temp = expand_unop (mode, one_cmpl_optab, op0, NULL_RTX, 0);
3493 temp = expand_binop (mode, smax_optab, op0, temp, target, 0,
3499 delete_insns_since (last);
3502 /* If this machine has expensive jumps, we can do one's complement
3503 absolute value of X as (((signed) x >> (W-1)) ^ x). */
3505 if (GET_MODE_CLASS (mode) == MODE_INT
3506 && BRANCH_COST (optimize_insn_for_speed_p (),
3509 rtx extended = expand_shift (RSHIFT_EXPR, mode, op0,
3510 size_int (GET_MODE_BITSIZE (mode) - 1),
3513 temp = expand_binop (mode, xor_optab, extended, op0, target, 0,
3523 /* A subroutine of expand_copysign, perform the copysign operation using the
3524 abs and neg primitives advertised to exist on the target. The assumption
3525 is that we have a split register file, and leaving op0 in fp registers,
3526 and not playing with subregs so much, will help the register allocator. */
3529 expand_copysign_absneg (enum machine_mode mode, rtx op0, rtx op1, rtx target,
3530 int bitpos, bool op0_is_abs)
3532 enum machine_mode imode;
3539 /* Check if the back end provides an insn that handles signbit for the
3541 icode = (int) optab_handler (signbit_optab, mode);
3542 if (icode != CODE_FOR_nothing)
3544 imode = insn_data[icode].operand[0].mode;
3545 sign = gen_reg_rtx (imode);
3546 emit_unop_insn (icode, sign, op1, UNKNOWN);
3552 if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
3554 imode = int_mode_for_mode (mode);
3555 if (imode == BLKmode)
3557 op1 = gen_lowpart (imode, op1);
3564 if (FLOAT_WORDS_BIG_ENDIAN)
3565 word = (GET_MODE_BITSIZE (mode) - bitpos) / BITS_PER_WORD;
3567 word = bitpos / BITS_PER_WORD;
3568 bitpos = bitpos % BITS_PER_WORD;
3569 op1 = operand_subword_force (op1, word, mode);
3572 mask = double_int_setbit (double_int_zero, bitpos);
3574 sign = expand_binop (imode, and_optab, op1,
3575 immed_double_int_const (mask, imode),
3576 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3581 op0 = expand_unop (mode, abs_optab, op0, target, 0);
3588 if (target == NULL_RTX)
3589 target = copy_to_reg (op0);
3591 emit_move_insn (target, op0);
3594 label = gen_label_rtx ();
3595 emit_cmp_and_jump_insns (sign, const0_rtx, EQ, NULL_RTX, imode, 1, label);
3597 if (GET_CODE (op0) == CONST_DOUBLE)
3598 op0 = simplify_unary_operation (NEG, mode, op0, mode);
3600 op0 = expand_unop (mode, neg_optab, op0, target, 0);
3602 emit_move_insn (target, op0);
3610 /* A subroutine of expand_copysign, perform the entire copysign operation
3611 with integer bitmasks. BITPOS is the position of the sign bit; OP0_IS_ABS
3612 is true if op0 is known to have its sign bit clear. */
3615 expand_copysign_bit (enum machine_mode mode, rtx op0, rtx op1, rtx target,
3616 int bitpos, bool op0_is_abs)
3618 enum machine_mode imode;
3620 int word, nwords, i;
3623 if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
3625 imode = int_mode_for_mode (mode);
3626 if (imode == BLKmode)
3635 if (FLOAT_WORDS_BIG_ENDIAN)
3636 word = (GET_MODE_BITSIZE (mode) - bitpos) / BITS_PER_WORD;
3638 word = bitpos / BITS_PER_WORD;
3639 bitpos = bitpos % BITS_PER_WORD;
3640 nwords = (GET_MODE_BITSIZE (mode) + BITS_PER_WORD - 1) / BITS_PER_WORD;
3643 mask = double_int_setbit (double_int_zero, bitpos);
3645 if (target == 0 || target == op0 || target == op1)
3646 target = gen_reg_rtx (mode);
3652 for (i = 0; i < nwords; ++i)
3654 rtx targ_piece = operand_subword (target, i, 1, mode);
3655 rtx op0_piece = operand_subword_force (op0, i, mode);
3661 = expand_binop (imode, and_optab, op0_piece,
3662 immed_double_int_const (double_int_not (mask),
3664 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3666 op1 = expand_binop (imode, and_optab,
3667 operand_subword_force (op1, i, mode),
3668 immed_double_int_const (mask, imode),
3669 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3671 temp = expand_binop (imode, ior_optab, op0_piece, op1,
3672 targ_piece, 1, OPTAB_LIB_WIDEN);
3673 if (temp != targ_piece)
3674 emit_move_insn (targ_piece, temp);
3677 emit_move_insn (targ_piece, op0_piece);
3680 insns = get_insns ();
3687 op1 = expand_binop (imode, and_optab, gen_lowpart (imode, op1),
3688 immed_double_int_const (mask, imode),
3689 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3691 op0 = gen_lowpart (imode, op0);
3693 op0 = expand_binop (imode, and_optab, op0,
3694 immed_double_int_const (double_int_not (mask),
3696 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3698 temp = expand_binop (imode, ior_optab, op0, op1,
3699 gen_lowpart (imode, target), 1, OPTAB_LIB_WIDEN);
3700 target = lowpart_subreg_maybe_copy (mode, temp, imode);
3706 /* Expand the C99 copysign operation. OP0 and OP1 must be the same
3707 scalar floating point mode. Return NULL if we do not know how to
3708 expand the operation inline. */
3711 expand_copysign (rtx op0, rtx op1, rtx target)
3713 enum machine_mode mode = GET_MODE (op0);
3714 const struct real_format *fmt;
3718 gcc_assert (SCALAR_FLOAT_MODE_P (mode));
3719 gcc_assert (GET_MODE (op1) == mode);
3721 /* First try to do it with a special instruction. */
3722 temp = expand_binop (mode, copysign_optab, op0, op1,
3723 target, 0, OPTAB_DIRECT);
3727 fmt = REAL_MODE_FORMAT (mode);
3728 if (fmt == NULL || !fmt->has_signed_zero)
3732 if (GET_CODE (op0) == CONST_DOUBLE)
3734 if (real_isneg (CONST_DOUBLE_REAL_VALUE (op0)))
3735 op0 = simplify_unary_operation (ABS, mode, op0, mode);
3739 if (fmt->signbit_ro >= 0
3740 && (GET_CODE (op0) == CONST_DOUBLE
3741 || (optab_handler (neg_optab, mode) != CODE_FOR_nothing
3742 && optab_handler (abs_optab, mode) != CODE_FOR_nothing)))
3744 temp = expand_copysign_absneg (mode, op0, op1, target,
3745 fmt->signbit_ro, op0_is_abs);
3750 if (fmt->signbit_rw < 0)
3752 return expand_copysign_bit (mode, op0, op1, target,
3753 fmt->signbit_rw, op0_is_abs);
3756 /* Generate an instruction whose insn-code is INSN_CODE,
3757 with two operands: an output TARGET and an input OP0.
3758 TARGET *must* be nonzero, and the output is always stored there.
3759 CODE is an rtx code such that (CODE OP0) is an rtx that describes
3760 the value that is stored into TARGET.
3762 Return false if expansion failed. */
3765 maybe_emit_unop_insn (int icode, rtx target, rtx op0, enum rtx_code code)
3768 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
3770 rtx last = get_last_insn ();
3774 /* Now, if insn does not accept our operands, put them into pseudos. */
3776 if (!insn_data[icode].operand[1].predicate (op0, mode0))
3777 op0 = copy_to_mode_reg (mode0, op0);
3779 if (!insn_data[icode].operand[0].predicate (temp, GET_MODE (temp)))
3780 temp = gen_reg_rtx (GET_MODE (temp));
3782 pat = GEN_FCN (icode) (temp, op0);
3785 delete_insns_since (last);
3789 if (INSN_P (pat) && NEXT_INSN (pat) != NULL_RTX && code != UNKNOWN)
3790 add_equal_note (pat, temp, code, op0, NULL_RTX);
3795 emit_move_insn (target, temp);
3798 /* Generate an instruction whose insn-code is INSN_CODE,
3799 with two operands: an output TARGET and an input OP0.
3800 TARGET *must* be nonzero, and the output is always stored there.
3801 CODE is an rtx code such that (CODE OP0) is an rtx that describes
3802 the value that is stored into TARGET. */
3805 emit_unop_insn (int icode, rtx target, rtx op0, enum rtx_code code)
3807 bool ok = maybe_emit_unop_insn (icode, target, op0, code);
3811 struct no_conflict_data
3813 rtx target, first, insn;
3817 /* Called via note_stores by emit_libcall_block. Set P->must_stay if
3818 the currently examined clobber / store has to stay in the list of
3819 insns that constitute the actual libcall block. */
3821 no_conflict_move_test (rtx dest, const_rtx set, void *p0)
3823 struct no_conflict_data *p= (struct no_conflict_data *) p0;
3825 /* If this inns directly contributes to setting the target, it must stay. */
3826 if (reg_overlap_mentioned_p (p->target, dest))
3827 p->must_stay = true;
3828 /* If we haven't committed to keeping any other insns in the list yet,
3829 there is nothing more to check. */
3830 else if (p->insn == p->first)
3832 /* If this insn sets / clobbers a register that feeds one of the insns
3833 already in the list, this insn has to stay too. */
3834 else if (reg_overlap_mentioned_p (dest, PATTERN (p->first))
3835 || (CALL_P (p->first) && (find_reg_fusage (p->first, USE, dest)))
3836 || reg_used_between_p (dest, p->first, p->insn)
3837 /* Likewise if this insn depends on a register set by a previous
3838 insn in the list, or if it sets a result (presumably a hard
3839 register) that is set or clobbered by a previous insn.
3840 N.B. the modified_*_p (SET_DEST...) tests applied to a MEM
3841 SET_DEST perform the former check on the address, and the latter
3842 check on the MEM. */
3843 || (GET_CODE (set) == SET
3844 && (modified_in_p (SET_SRC (set), p->first)
3845 || modified_in_p (SET_DEST (set), p->first)
3846 || modified_between_p (SET_SRC (set), p->first, p->insn)
3847 || modified_between_p (SET_DEST (set), p->first, p->insn))))
3848 p->must_stay = true;
3852 /* Emit code to make a call to a constant function or a library call.
3854 INSNS is a list containing all insns emitted in the call.
3855 These insns leave the result in RESULT. Our block is to copy RESULT
3856 to TARGET, which is logically equivalent to EQUIV.
3858 We first emit any insns that set a pseudo on the assumption that these are
3859 loading constants into registers; doing so allows them to be safely cse'ed
3860 between blocks. Then we emit all the other insns in the block, followed by
3861 an insn to move RESULT to TARGET. This last insn will have a REQ_EQUAL
3862 note with an operand of EQUIV. */
3865 emit_libcall_block (rtx insns, rtx target, rtx result, rtx equiv)
3867 rtx final_dest = target;
3868 rtx next, last, insn;
3870 /* If this is a reg with REG_USERVAR_P set, then it could possibly turn
3871 into a MEM later. Protect the libcall block from this change. */
3872 if (! REG_P (target) || REG_USERVAR_P (target))
3873 target = gen_reg_rtx (GET_MODE (target));
3875 /* If we're using non-call exceptions, a libcall corresponding to an
3876 operation that may trap may also trap. */
3877 /* ??? See the comment in front of make_reg_eh_region_note. */
3878 if (cfun->can_throw_non_call_exceptions && may_trap_p (equiv))
3880 for (insn = insns; insn; insn = NEXT_INSN (insn))
3883 rtx note = find_reg_note (insn, REG_EH_REGION, NULL_RTX);
3886 int lp_nr = INTVAL (XEXP (note, 0));
3887 if (lp_nr == 0 || lp_nr == INT_MIN)
3888 remove_note (insn, note);
3894 /* Look for any CALL_INSNs in this sequence, and attach a REG_EH_REGION
3895 reg note to indicate that this call cannot throw or execute a nonlocal
3896 goto (unless there is already a REG_EH_REGION note, in which case
3898 for (insn = insns; insn; insn = NEXT_INSN (insn))
3900 make_reg_eh_region_note_nothrow_nononlocal (insn);
3903 /* First emit all insns that set pseudos. Remove them from the list as
3904 we go. Avoid insns that set pseudos which were referenced in previous
3905 insns. These can be generated by move_by_pieces, for example,
3906 to update an address. Similarly, avoid insns that reference things
3907 set in previous insns. */
3909 for (insn = insns; insn; insn = next)
3911 rtx set = single_set (insn);
3913 next = NEXT_INSN (insn);
3915 if (set != 0 && REG_P (SET_DEST (set))
3916 && REGNO (SET_DEST (set)) >= FIRST_PSEUDO_REGISTER)
3918 struct no_conflict_data data;
3920 data.target = const0_rtx;
3924 note_stores (PATTERN (insn), no_conflict_move_test, &data);
3925 if (! data.must_stay)
3927 if (PREV_INSN (insn))
3928 NEXT_INSN (PREV_INSN (insn)) = next;
3933 PREV_INSN (next) = PREV_INSN (insn);
3939 /* Some ports use a loop to copy large arguments onto the stack.
3940 Don't move anything outside such a loop. */
3945 /* Write the remaining insns followed by the final copy. */
3946 for (insn = insns; insn; insn = next)
3948 next = NEXT_INSN (insn);
3953 last = emit_move_insn (target, result);
3954 if (optab_handler (mov_optab, GET_MODE (target)) != CODE_FOR_nothing)
3955 set_unique_reg_note (last, REG_EQUAL, copy_rtx (equiv));
3957 if (final_dest != target)
3958 emit_move_insn (final_dest, target);
3961 /* Nonzero if we can perform a comparison of mode MODE straightforwardly.
3962 PURPOSE describes how this comparison will be used. CODE is the rtx
3963 comparison code we will be using.
3965 ??? Actually, CODE is slightly weaker than that. A target is still
3966 required to implement all of the normal bcc operations, but not
3967 required to implement all (or any) of the unordered bcc operations. */
3970 can_compare_p (enum rtx_code code, enum machine_mode mode,
3971 enum can_compare_purpose purpose)
3974 test = gen_rtx_fmt_ee (code, mode, const0_rtx, const0_rtx);
3979 if (purpose == ccp_jump
3980 && (icode = optab_handler (cbranch_optab, mode)) != CODE_FOR_nothing
3981 && insn_data[icode].operand[0].predicate (test, mode))
3983 if (purpose == ccp_store_flag
3984 && (icode = optab_handler (cstore_optab, mode)) != CODE_FOR_nothing
3985 && insn_data[icode].operand[1].predicate (test, mode))
3987 if (purpose == ccp_cmov
3988 && optab_handler (cmov_optab, mode) != CODE_FOR_nothing)
3991 mode = GET_MODE_WIDER_MODE (mode);
3992 PUT_MODE (test, mode);
3994 while (mode != VOIDmode);
3999 /* This function is called when we are going to emit a compare instruction that
4000 compares the values found in *PX and *PY, using the rtl operator COMPARISON.
4002 *PMODE is the mode of the inputs (in case they are const_int).
4003 *PUNSIGNEDP nonzero says that the operands are unsigned;
4004 this matters if they need to be widened (as given by METHODS).
4006 If they have mode BLKmode, then SIZE specifies the size of both operands.
4008 This function performs all the setup necessary so that the caller only has
4009 to emit a single comparison insn. This setup can involve doing a BLKmode
4010 comparison or emitting a library call to perform the comparison if no insn
4011 is available to handle it.
4012 The values which are passed in through pointers can be modified; the caller
4013 should perform the comparison on the modified values. Constant
4014 comparisons must have already been folded. */
4017 prepare_cmp_insn (rtx x, rtx y, enum rtx_code comparison, rtx size,
4018 int unsignedp, enum optab_methods methods,
4019 rtx *ptest, enum machine_mode *pmode)
4021 enum machine_mode mode = *pmode;
4023 enum machine_mode cmp_mode;
4024 enum mode_class mclass;
4026 /* The other methods are not needed. */
4027 gcc_assert (methods == OPTAB_DIRECT || methods == OPTAB_WIDEN
4028 || methods == OPTAB_LIB_WIDEN);
4030 /* If we are optimizing, force expensive constants into a register. */
4031 if (CONSTANT_P (x) && optimize
4032 && (rtx_cost (x, COMPARE, optimize_insn_for_speed_p ())
4033 > COSTS_N_INSNS (1)))
4034 x = force_reg (mode, x);
4036 if (CONSTANT_P (y) && optimize
4037 && (rtx_cost (y, COMPARE, optimize_insn_for_speed_p ())
4038 > COSTS_N_INSNS (1)))
4039 y = force_reg (mode, y);
4042 /* Make sure if we have a canonical comparison. The RTL
4043 documentation states that canonical comparisons are required only
4044 for targets which have cc0. */
4045 gcc_assert (!CONSTANT_P (x) || CONSTANT_P (y));
4048 /* Don't let both operands fail to indicate the mode. */
4049 if (GET_MODE (x) == VOIDmode && GET_MODE (y) == VOIDmode)
4050 x = force_reg (mode, x);
4051 if (mode == VOIDmode)
4052 mode = GET_MODE (x) != VOIDmode ? GET_MODE (x) : GET_MODE (y);
4054 /* Handle all BLKmode compares. */
4056 if (mode == BLKmode)
4058 enum machine_mode result_mode;
4059 enum insn_code cmp_code;
4064 = GEN_INT (MIN (MEM_ALIGN (x), MEM_ALIGN (y)) / BITS_PER_UNIT);
4068 /* Try to use a memory block compare insn - either cmpstr
4069 or cmpmem will do. */
4070 for (cmp_mode = GET_CLASS_NARROWEST_MODE (MODE_INT);
4071 cmp_mode != VOIDmode;
4072 cmp_mode = GET_MODE_WIDER_MODE (cmp_mode))
4074 cmp_code = cmpmem_optab[cmp_mode];
4075 if (cmp_code == CODE_FOR_nothing)
4076 cmp_code = cmpstr_optab[cmp_mode];
4077 if (cmp_code == CODE_FOR_nothing)
4078 cmp_code = cmpstrn_optab[cmp_mode];
4079 if (cmp_code == CODE_FOR_nothing)
4082 /* Must make sure the size fits the insn's mode. */
4083 if ((CONST_INT_P (size)
4084 && INTVAL (size) >= (1 << GET_MODE_BITSIZE (cmp_mode)))
4085 || (GET_MODE_BITSIZE (GET_MODE (size))
4086 > GET_MODE_BITSIZE (cmp_mode)))
4089 result_mode = insn_data[cmp_code].operand[0].mode;
4090 result = gen_reg_rtx (result_mode);
4091 size = convert_to_mode (cmp_mode, size, 1);
4092 emit_insn (GEN_FCN (cmp_code) (result, x, y, size, opalign));
4094 *ptest = gen_rtx_fmt_ee (comparison, VOIDmode, result, const0_rtx);
4095 *pmode = result_mode;
4099 if (methods != OPTAB_LIB && methods != OPTAB_LIB_WIDEN)
4102 /* Otherwise call a library function, memcmp. */
4103 libfunc = memcmp_libfunc;
4104 length_type = sizetype;
4105 result_mode = TYPE_MODE (integer_type_node);
4106 cmp_mode = TYPE_MODE (length_type);
4107 size = convert_to_mode (TYPE_MODE (length_type), size,
4108 TYPE_UNSIGNED (length_type));
4110 result = emit_library_call_value (libfunc, 0, LCT_PURE,
4116 *ptest = gen_rtx_fmt_ee (comparison, VOIDmode, result, const0_rtx);
4117 *pmode = result_mode;
4121 /* Don't allow operands to the compare to trap, as that can put the
4122 compare and branch in different basic blocks. */
4123 if (cfun->can_throw_non_call_exceptions)
4126 x = force_reg (mode, x);
4128 y = force_reg (mode, y);
4131 if (GET_MODE_CLASS (mode) == MODE_CC)
4133 gcc_assert (can_compare_p (comparison, CCmode, ccp_jump));
4134 *ptest = gen_rtx_fmt_ee (comparison, VOIDmode, x, y);
4138 mclass = GET_MODE_CLASS (mode);
4139 test = gen_rtx_fmt_ee (comparison, VOIDmode, x, y);
4143 enum insn_code icode;
4144 icode = optab_handler (cbranch_optab, cmp_mode);
4145 if (icode != CODE_FOR_nothing
4146 && insn_data[icode].operand[0].predicate (test, VOIDmode))
4148 rtx last = get_last_insn ();
4149 rtx op0 = prepare_operand (icode, x, 1, mode, cmp_mode, unsignedp);
4150 rtx op1 = prepare_operand (icode, y, 2, mode, cmp_mode, unsignedp);
4152 && insn_data[icode].operand[1].predicate
4153 (op0, insn_data[icode].operand[1].mode)
4154 && insn_data[icode].operand[2].predicate
4155 (op1, insn_data[icode].operand[2].mode))
4157 XEXP (test, 0) = op0;
4158 XEXP (test, 1) = op1;
4163 delete_insns_since (last);
4166 if (methods == OPTAB_DIRECT || !CLASS_HAS_WIDER_MODES_P (mclass))
4168 cmp_mode = GET_MODE_WIDER_MODE (cmp_mode);
4170 while (cmp_mode != VOIDmode);
4172 if (methods != OPTAB_LIB_WIDEN)
4175 if (!SCALAR_FLOAT_MODE_P (mode))
4179 /* Handle a libcall just for the mode we are using. */
4180 libfunc = optab_libfunc (cmp_optab, mode);
4181 gcc_assert (libfunc);
4183 /* If we want unsigned, and this mode has a distinct unsigned
4184 comparison routine, use that. */
4187 rtx ulibfunc = optab_libfunc (ucmp_optab, mode);
4192 result = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
4193 targetm.libgcc_cmp_return_mode (),
4194 2, x, mode, y, mode);
4196 /* There are two kinds of comparison routines. Biased routines
4197 return 0/1/2, and unbiased routines return -1/0/1. Other parts
4198 of gcc expect that the comparison operation is equivalent
4199 to the modified comparison. For signed comparisons compare the
4200 result against 1 in the biased case, and zero in the unbiased
4201 case. For unsigned comparisons always compare against 1 after
4202 biasing the unbiased result by adding 1. This gives us a way to
4207 if (!TARGET_LIB_INT_CMP_BIASED)
4210 x = plus_constant (result, 1);
4216 prepare_cmp_insn (x, y, comparison, NULL_RTX, unsignedp, methods,
4220 prepare_float_lib_cmp (x, y, comparison, ptest, pmode);
4228 /* Before emitting an insn with code ICODE, make sure that X, which is going
4229 to be used for operand OPNUM of the insn, is converted from mode MODE to
4230 WIDER_MODE (UNSIGNEDP determines whether it is an unsigned conversion), and
4231 that it is accepted by the operand predicate. Return the new value. */
4234 prepare_operand (int icode, rtx x, int opnum, enum machine_mode mode,
4235 enum machine_mode wider_mode, int unsignedp)
4237 if (mode != wider_mode)
4238 x = convert_modes (wider_mode, mode, x, unsignedp);
4240 if (!insn_data[icode].operand[opnum].predicate
4241 (x, insn_data[icode].operand[opnum].mode))
4243 if (reload_completed)
4245 x = copy_to_mode_reg (insn_data[icode].operand[opnum].mode, x);
4251 /* Subroutine of emit_cmp_and_jump_insns; this function is called when we know
4252 we can do the branch. */
4255 emit_cmp_and_jump_insn_1 (rtx test, enum machine_mode mode, rtx label)
4257 enum machine_mode optab_mode;
4258 enum mode_class mclass;
4259 enum insn_code icode;
4261 mclass = GET_MODE_CLASS (mode);
4262 optab_mode = (mclass == MODE_CC) ? CCmode : mode;
4263 icode = optab_handler (cbranch_optab, optab_mode);
4265 gcc_assert (icode != CODE_FOR_nothing);
4266 gcc_assert (insn_data[icode].operand[0].predicate (test, VOIDmode));
4267 emit_jump_insn (GEN_FCN (icode) (test, XEXP (test, 0), XEXP (test, 1), label));
4270 /* Generate code to compare X with Y so that the condition codes are
4271 set and to jump to LABEL if the condition is true. If X is a
4272 constant and Y is not a constant, then the comparison is swapped to
4273 ensure that the comparison RTL has the canonical form.
4275 UNSIGNEDP nonzero says that X and Y are unsigned; this matters if they
4276 need to be widened. UNSIGNEDP is also used to select the proper
4277 branch condition code.
4279 If X and Y have mode BLKmode, then SIZE specifies the size of both X and Y.
4281 MODE is the mode of the inputs (in case they are const_int).
4283 COMPARISON is the rtl operator to compare with (EQ, NE, GT, etc.).
4284 It will be potentially converted into an unsigned variant based on
4285 UNSIGNEDP to select a proper jump instruction. */
4288 emit_cmp_and_jump_insns (rtx x, rtx y, enum rtx_code comparison, rtx size,
4289 enum machine_mode mode, int unsignedp, rtx label)
4291 rtx op0 = x, op1 = y;
4294 /* Swap operands and condition to ensure canonical RTL. */
4295 if (swap_commutative_operands_p (x, y)
4296 && can_compare_p (swap_condition (comparison), mode, ccp_jump))
4299 comparison = swap_condition (comparison);
4302 /* If OP0 is still a constant, then both X and Y must be constants
4303 or the opposite comparison is not supported. Force X into a register
4304 to create canonical RTL. */
4305 if (CONSTANT_P (op0))
4306 op0 = force_reg (mode, op0);
4309 comparison = unsigned_condition (comparison);
4311 prepare_cmp_insn (op0, op1, comparison, size, unsignedp, OPTAB_LIB_WIDEN,
4313 emit_cmp_and_jump_insn_1 (test, mode, label);
4317 /* Emit a library call comparison between floating point X and Y.
4318 COMPARISON is the rtl operator to compare with (EQ, NE, GT, etc.). */
4321 prepare_float_lib_cmp (rtx x, rtx y, enum rtx_code comparison,
4322 rtx *ptest, enum machine_mode *pmode)
4324 enum rtx_code swapped = swap_condition (comparison);
4325 enum rtx_code reversed = reverse_condition_maybe_unordered (comparison);
4326 enum machine_mode orig_mode = GET_MODE (x);
4327 enum machine_mode mode, cmp_mode;
4328 rtx true_rtx, false_rtx;
4329 rtx value, target, insns, equiv;
4331 bool reversed_p = false;
4332 cmp_mode = targetm.libgcc_cmp_return_mode ();
4334 for (mode = orig_mode;
4336 mode = GET_MODE_WIDER_MODE (mode))
4338 if (code_to_optab[comparison]
4339 && (libfunc = optab_libfunc (code_to_optab[comparison], mode)))
4342 if (code_to_optab[swapped]
4343 && (libfunc = optab_libfunc (code_to_optab[swapped], mode)))
4346 tmp = x; x = y; y = tmp;
4347 comparison = swapped;
4351 if (code_to_optab[reversed]
4352 && (libfunc = optab_libfunc (code_to_optab[reversed], mode)))
4354 comparison = reversed;
4360 gcc_assert (mode != VOIDmode);
4362 if (mode != orig_mode)
4364 x = convert_to_mode (mode, x, 0);
4365 y = convert_to_mode (mode, y, 0);
4368 /* Attach a REG_EQUAL note describing the semantics of the libcall to
4369 the RTL. The allows the RTL optimizers to delete the libcall if the
4370 condition can be determined at compile-time. */
4371 if (comparison == UNORDERED
4372 || FLOAT_LIB_COMPARE_RETURNS_BOOL (mode, comparison))
4374 true_rtx = const_true_rtx;
4375 false_rtx = const0_rtx;
4382 true_rtx = const0_rtx;
4383 false_rtx = const_true_rtx;
4387 true_rtx = const_true_rtx;
4388 false_rtx = const0_rtx;
4392 true_rtx = const1_rtx;
4393 false_rtx = const0_rtx;
4397 true_rtx = const0_rtx;
4398 false_rtx = constm1_rtx;
4402 true_rtx = constm1_rtx;
4403 false_rtx = const0_rtx;
4407 true_rtx = const0_rtx;
4408 false_rtx = const1_rtx;
4416 if (comparison == UNORDERED)
4418 rtx temp = simplify_gen_relational (NE, cmp_mode, mode, x, x);
4419 equiv = simplify_gen_relational (NE, cmp_mode, mode, y, y);
4420 equiv = simplify_gen_ternary (IF_THEN_ELSE, cmp_mode, cmp_mode,
4421 temp, const_true_rtx, equiv);
4425 equiv = simplify_gen_relational (comparison, cmp_mode, mode, x, y);
4426 if (! FLOAT_LIB_COMPARE_RETURNS_BOOL (mode, comparison))
4427 equiv = simplify_gen_ternary (IF_THEN_ELSE, cmp_mode, cmp_mode,
4428 equiv, true_rtx, false_rtx);
4432 value = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
4433 cmp_mode, 2, x, mode, y, mode);
4434 insns = get_insns ();
4437 target = gen_reg_rtx (cmp_mode);
4438 emit_libcall_block (insns, target, value, equiv);
4440 if (comparison == UNORDERED
4441 || FLOAT_LIB_COMPARE_RETURNS_BOOL (mode, comparison)
4443 *ptest = gen_rtx_fmt_ee (reversed_p ? EQ : NE, VOIDmode, target, false_rtx);
4445 *ptest = gen_rtx_fmt_ee (comparison, VOIDmode, target, const0_rtx);
4450 /* Generate code to indirectly jump to a location given in the rtx LOC. */
4453 emit_indirect_jump (rtx loc)
4455 if (!insn_data[(int) CODE_FOR_indirect_jump].operand[0].predicate
4457 loc = copy_to_mode_reg (Pmode, loc);
4459 emit_jump_insn (gen_indirect_jump (loc));
4463 #ifdef HAVE_conditional_move
4465 /* Emit a conditional move instruction if the machine supports one for that
4466 condition and machine mode.
4468 OP0 and OP1 are the operands that should be compared using CODE. CMODE is
4469 the mode to use should they be constants. If it is VOIDmode, they cannot
4472 OP2 should be stored in TARGET if the comparison is true, otherwise OP3
4473 should be stored there. MODE is the mode to use should they be constants.
4474 If it is VOIDmode, they cannot both be constants.
4476 The result is either TARGET (perhaps modified) or NULL_RTX if the operation
4477 is not supported. */
4480 emit_conditional_move (rtx target, enum rtx_code code, rtx op0, rtx op1,
4481 enum machine_mode cmode, rtx op2, rtx op3,
4482 enum machine_mode mode, int unsignedp)
4484 rtx tem, subtarget, comparison, insn;
4485 enum insn_code icode;
4486 enum rtx_code reversed;
4488 /* If one operand is constant, make it the second one. Only do this
4489 if the other operand is not constant as well. */
4491 if (swap_commutative_operands_p (op0, op1))
4496 code = swap_condition (code);
4499 /* get_condition will prefer to generate LT and GT even if the old
4500 comparison was against zero, so undo that canonicalization here since
4501 comparisons against zero are cheaper. */
4502 if (code == LT && op1 == const1_rtx)
4503 code = LE, op1 = const0_rtx;
4504 else if (code == GT && op1 == constm1_rtx)
4505 code = GE, op1 = const0_rtx;
4507 if (cmode == VOIDmode)
4508 cmode = GET_MODE (op0);
4510 if (swap_commutative_operands_p (op2, op3)
4511 && ((reversed = reversed_comparison_code_parts (code, op0, op1, NULL))
4520 if (mode == VOIDmode)
4521 mode = GET_MODE (op2);
4523 icode = movcc_gen_code[mode];
4525 if (icode == CODE_FOR_nothing)
4529 target = gen_reg_rtx (mode);
4533 /* If the insn doesn't accept these operands, put them in pseudos. */
4535 if (!insn_data[icode].operand[0].predicate
4536 (subtarget, insn_data[icode].operand[0].mode))
4537 subtarget = gen_reg_rtx (insn_data[icode].operand[0].mode);
4539 if (!insn_data[icode].operand[2].predicate
4540 (op2, insn_data[icode].operand[2].mode))
4541 op2 = copy_to_mode_reg (insn_data[icode].operand[2].mode, op2);
4543 if (!insn_data[icode].operand[3].predicate
4544 (op3, insn_data[icode].operand[3].mode))
4545 op3 = copy_to_mode_reg (insn_data[icode].operand[3].mode, op3);
4547 /* Everything should now be in the suitable form. */
4549 code = unsignedp ? unsigned_condition (code) : code;
4550 comparison = simplify_gen_relational (code, VOIDmode, cmode, op0, op1);
4552 /* We can get const0_rtx or const_true_rtx in some circumstances. Just
4553 return NULL and let the caller figure out how best to deal with this
4555 if (!COMPARISON_P (comparison))
4558 do_pending_stack_adjust ();
4560 prepare_cmp_insn (XEXP (comparison, 0), XEXP (comparison, 1),
4561 GET_CODE (comparison), NULL_RTX, unsignedp, OPTAB_WIDEN,
4562 &comparison, &cmode);
4566 insn = GEN_FCN (icode) (subtarget, comparison, op2, op3);
4568 /* If that failed, then give up. */
4576 insn = get_insns ();
4579 if (subtarget != target)
4580 convert_move (target, subtarget, 0);
4585 /* Return nonzero if a conditional move of mode MODE is supported.
4587 This function is for combine so it can tell whether an insn that looks
4588 like a conditional move is actually supported by the hardware. If we
4589 guess wrong we lose a bit on optimization, but that's it. */
4590 /* ??? sparc64 supports conditionally moving integers values based on fp
4591 comparisons, and vice versa. How do we handle them? */
4594 can_conditionally_move_p (enum machine_mode mode)
4596 if (movcc_gen_code[mode] != CODE_FOR_nothing)
4602 #endif /* HAVE_conditional_move */
4604 /* Emit a conditional addition instruction if the machine supports one for that
4605 condition and machine mode.
4607 OP0 and OP1 are the operands that should be compared using CODE. CMODE is
4608 the mode to use should they be constants. If it is VOIDmode, they cannot
4611 OP2 should be stored in TARGET if the comparison is true, otherwise OP2+OP3
4612 should be stored there. MODE is the mode to use should they be constants.
4613 If it is VOIDmode, they cannot both be constants.
4615 The result is either TARGET (perhaps modified) or NULL_RTX if the operation
4616 is not supported. */
4619 emit_conditional_add (rtx target, enum rtx_code code, rtx op0, rtx op1,
4620 enum machine_mode cmode, rtx op2, rtx op3,
4621 enum machine_mode mode, int unsignedp)
4623 rtx tem, subtarget, comparison, insn;
4624 enum insn_code icode;
4625 enum rtx_code reversed;
4627 /* If one operand is constant, make it the second one. Only do this
4628 if the other operand is not constant as well. */
4630 if (swap_commutative_operands_p (op0, op1))
4635 code = swap_condition (code);
4638 /* get_condition will prefer to generate LT and GT even if the old
4639 comparison was against zero, so undo that canonicalization here since
4640 comparisons against zero are cheaper. */
4641 if (code == LT && op1 == const1_rtx)
4642 code = LE, op1 = const0_rtx;
4643 else if (code == GT && op1 == constm1_rtx)
4644 code = GE, op1 = const0_rtx;
4646 if (cmode == VOIDmode)
4647 cmode = GET_MODE (op0);
4649 if (swap_commutative_operands_p (op2, op3)
4650 && ((reversed = reversed_comparison_code_parts (code, op0, op1, NULL))
4659 if (mode == VOIDmode)
4660 mode = GET_MODE (op2);
4662 icode = optab_handler (addcc_optab, mode);
4664 if (icode == CODE_FOR_nothing)
4668 target = gen_reg_rtx (mode);
4670 /* If the insn doesn't accept these operands, put them in pseudos. */
4672 if (!insn_data[icode].operand[0].predicate
4673 (target, insn_data[icode].operand[0].mode))
4674 subtarget = gen_reg_rtx (insn_data[icode].operand[0].mode);
4678 if (!insn_data[icode].operand[2].predicate
4679 (op2, insn_data[icode].operand[2].mode))
4680 op2 = copy_to_mode_reg (insn_data[icode].operand[2].mode, op2);
4682 if (!insn_data[icode].operand[3].predicate
4683 (op3, insn_data[icode].operand[3].mode))
4684 op3 = copy_to_mode_reg (insn_data[icode].operand[3].mode, op3);
4686 /* Everything should now be in the suitable form. */
4688 code = unsignedp ? unsigned_condition (code) : code;
4689 comparison = simplify_gen_relational (code, VOIDmode, cmode, op0, op1);
4691 /* We can get const0_rtx or const_true_rtx in some circumstances. Just
4692 return NULL and let the caller figure out how best to deal with this
4694 if (!COMPARISON_P (comparison))
4697 do_pending_stack_adjust ();
4699 prepare_cmp_insn (XEXP (comparison, 0), XEXP (comparison, 1),
4700 GET_CODE (comparison), NULL_RTX, unsignedp, OPTAB_WIDEN,
4701 &comparison, &cmode);
4705 insn = GEN_FCN (icode) (subtarget, comparison, op2, op3);
4707 /* If that failed, then give up. */
4715 insn = get_insns ();
4718 if (subtarget != target)
4719 convert_move (target, subtarget, 0);
4724 /* These functions attempt to generate an insn body, rather than
4725 emitting the insn, but if the gen function already emits them, we
4726 make no attempt to turn them back into naked patterns. */
4728 /* Generate and return an insn body to add Y to X. */
4731 gen_add2_insn (rtx x, rtx y)
4733 int icode = (int) optab_handler (add_optab, GET_MODE (x));
4735 gcc_assert (insn_data[icode].operand[0].predicate
4736 (x, insn_data[icode].operand[0].mode));
4737 gcc_assert (insn_data[icode].operand[1].predicate
4738 (x, insn_data[icode].operand[1].mode));
4739 gcc_assert (insn_data[icode].operand[2].predicate
4740 (y, insn_data[icode].operand[2].mode));
4742 return GEN_FCN (icode) (x, x, y);
4745 /* Generate and return an insn body to add r1 and c,
4746 storing the result in r0. */
4749 gen_add3_insn (rtx r0, rtx r1, rtx c)
4751 int icode = (int) optab_handler (add_optab, GET_MODE (r0));
4753 if (icode == CODE_FOR_nothing
4754 || !(insn_data[icode].operand[0].predicate
4755 (r0, insn_data[icode].operand[0].mode))
4756 || !(insn_data[icode].operand[1].predicate
4757 (r1, insn_data[icode].operand[1].mode))
4758 || !(insn_data[icode].operand[2].predicate
4759 (c, insn_data[icode].operand[2].mode)))
4762 return GEN_FCN (icode) (r0, r1, c);
4766 have_add2_insn (rtx x, rtx y)
4770 gcc_assert (GET_MODE (x) != VOIDmode);
4772 icode = (int) optab_handler (add_optab, GET_MODE (x));
4774 if (icode == CODE_FOR_nothing)
4777 if (!(insn_data[icode].operand[0].predicate
4778 (x, insn_data[icode].operand[0].mode))
4779 || !(insn_data[icode].operand[1].predicate
4780 (x, insn_data[icode].operand[1].mode))
4781 || !(insn_data[icode].operand[2].predicate
4782 (y, insn_data[icode].operand[2].mode)))
4788 /* Generate and return an insn body to subtract Y from X. */
4791 gen_sub2_insn (rtx x, rtx y)
4793 int icode = (int) optab_handler (sub_optab, GET_MODE (x));
4795 gcc_assert (insn_data[icode].operand[0].predicate
4796 (x, insn_data[icode].operand[0].mode));
4797 gcc_assert (insn_data[icode].operand[1].predicate
4798 (x, insn_data[icode].operand[1].mode));
4799 gcc_assert (insn_data[icode].operand[2].predicate
4800 (y, insn_data[icode].operand[2].mode));
4802 return GEN_FCN (icode) (x, x, y);
4805 /* Generate and return an insn body to subtract r1 and c,
4806 storing the result in r0. */
4809 gen_sub3_insn (rtx r0, rtx r1, rtx c)
4811 int icode = (int) optab_handler (sub_optab, GET_MODE (r0));
4813 if (icode == CODE_FOR_nothing
4814 || !(insn_data[icode].operand[0].predicate
4815 (r0, insn_data[icode].operand[0].mode))
4816 || !(insn_data[icode].operand[1].predicate
4817 (r1, insn_data[icode].operand[1].mode))
4818 || !(insn_data[icode].operand[2].predicate
4819 (c, insn_data[icode].operand[2].mode)))
4822 return GEN_FCN (icode) (r0, r1, c);
4826 have_sub2_insn (rtx x, rtx y)
4830 gcc_assert (GET_MODE (x) != VOIDmode);
4832 icode = (int) optab_handler (sub_optab, GET_MODE (x));
4834 if (icode == CODE_FOR_nothing)
4837 if (!(insn_data[icode].operand[0].predicate
4838 (x, insn_data[icode].operand[0].mode))
4839 || !(insn_data[icode].operand[1].predicate
4840 (x, insn_data[icode].operand[1].mode))
4841 || !(insn_data[icode].operand[2].predicate
4842 (y, insn_data[icode].operand[2].mode)))
4848 /* Generate the body of an instruction to copy Y into X.
4849 It may be a list of insns, if one insn isn't enough. */
4852 gen_move_insn (rtx x, rtx y)
4857 emit_move_insn_1 (x, y);
4863 /* Return the insn code used to extend FROM_MODE to TO_MODE.
4864 UNSIGNEDP specifies zero-extension instead of sign-extension. If
4865 no such operation exists, CODE_FOR_nothing will be returned. */
4868 can_extend_p (enum machine_mode to_mode, enum machine_mode from_mode,
4872 #ifdef HAVE_ptr_extend
4874 return CODE_FOR_ptr_extend;
4877 tab = unsignedp ? zext_optab : sext_optab;
4878 return convert_optab_handler (tab, to_mode, from_mode);
4881 /* Generate the body of an insn to extend Y (with mode MFROM)
4882 into X (with mode MTO). Do zero-extension if UNSIGNEDP is nonzero. */
4885 gen_extend_insn (rtx x, rtx y, enum machine_mode mto,
4886 enum machine_mode mfrom, int unsignedp)
4888 enum insn_code icode = can_extend_p (mto, mfrom, unsignedp);
4889 return GEN_FCN (icode) (x, y);
4892 /* can_fix_p and can_float_p say whether the target machine
4893 can directly convert a given fixed point type to
4894 a given floating point type, or vice versa.
4895 The returned value is the CODE_FOR_... value to use,
4896 or CODE_FOR_nothing if these modes cannot be directly converted.
4898 *TRUNCP_PTR is set to 1 if it is necessary to output
4899 an explicit FTRUNC insn before the fix insn; otherwise 0. */
4901 static enum insn_code
4902 can_fix_p (enum machine_mode fixmode, enum machine_mode fltmode,
4903 int unsignedp, int *truncp_ptr)
4906 enum insn_code icode;
4908 tab = unsignedp ? ufixtrunc_optab : sfixtrunc_optab;
4909 icode = convert_optab_handler (tab, fixmode, fltmode);
4910 if (icode != CODE_FOR_nothing)
4916 /* FIXME: This requires a port to define both FIX and FTRUNC pattern
4917 for this to work. We need to rework the fix* and ftrunc* patterns
4918 and documentation. */
4919 tab = unsignedp ? ufix_optab : sfix_optab;
4920 icode = convert_optab_handler (tab, fixmode, fltmode);
4921 if (icode != CODE_FOR_nothing
4922 && optab_handler (ftrunc_optab, fltmode) != CODE_FOR_nothing)
4929 return CODE_FOR_nothing;
4932 static enum insn_code
4933 can_float_p (enum machine_mode fltmode, enum machine_mode fixmode,
4938 tab = unsignedp ? ufloat_optab : sfloat_optab;
4939 return convert_optab_handler (tab, fltmode, fixmode);
4942 /* Generate code to convert FROM to floating point
4943 and store in TO. FROM must be fixed point and not VOIDmode.
4944 UNSIGNEDP nonzero means regard FROM as unsigned.
4945 Normally this is done by correcting the final value
4946 if it is negative. */
4949 expand_float (rtx to, rtx from, int unsignedp)
4951 enum insn_code icode;
4953 enum machine_mode fmode, imode;
4954 bool can_do_signed = false;
4956 /* Crash now, because we won't be able to decide which mode to use. */
4957 gcc_assert (GET_MODE (from) != VOIDmode);
4959 /* Look for an insn to do the conversion. Do it in the specified
4960 modes if possible; otherwise convert either input, output or both to
4961 wider mode. If the integer mode is wider than the mode of FROM,
4962 we can do the conversion signed even if the input is unsigned. */
4964 for (fmode = GET_MODE (to); fmode != VOIDmode;
4965 fmode = GET_MODE_WIDER_MODE (fmode))
4966 for (imode = GET_MODE (from); imode != VOIDmode;
4967 imode = GET_MODE_WIDER_MODE (imode))
4969 int doing_unsigned = unsignedp;
4971 if (fmode != GET_MODE (to)
4972 && significand_size (fmode) < GET_MODE_BITSIZE (GET_MODE (from)))
4975 icode = can_float_p (fmode, imode, unsignedp);
4976 if (icode == CODE_FOR_nothing && unsignedp)
4978 enum insn_code scode = can_float_p (fmode, imode, 0);
4979 if (scode != CODE_FOR_nothing)
4980 can_do_signed = true;
4981 if (imode != GET_MODE (from))
4982 icode = scode, doing_unsigned = 0;
4985 if (icode != CODE_FOR_nothing)
4987 if (imode != GET_MODE (from))
4988 from = convert_to_mode (imode, from, unsignedp);
4990 if (fmode != GET_MODE (to))
4991 target = gen_reg_rtx (fmode);
4993 emit_unop_insn (icode, target, from,
4994 doing_unsigned ? UNSIGNED_FLOAT : FLOAT);
4997 convert_move (to, target, 0);
5002 /* Unsigned integer, and no way to convert directly. Convert as signed,
5003 then unconditionally adjust the result. */
5004 if (unsignedp && can_do_signed)
5006 rtx label = gen_label_rtx ();
5008 REAL_VALUE_TYPE offset;
5010 /* Look for a usable floating mode FMODE wider than the source and at
5011 least as wide as the target. Using FMODE will avoid rounding woes
5012 with unsigned values greater than the signed maximum value. */
5014 for (fmode = GET_MODE (to); fmode != VOIDmode;
5015 fmode = GET_MODE_WIDER_MODE (fmode))
5016 if (GET_MODE_BITSIZE (GET_MODE (from)) < GET_MODE_BITSIZE (fmode)
5017 && can_float_p (fmode, GET_MODE (from), 0) != CODE_FOR_nothing)
5020 if (fmode == VOIDmode)
5022 /* There is no such mode. Pretend the target is wide enough. */
5023 fmode = GET_MODE (to);
5025 /* Avoid double-rounding when TO is narrower than FROM. */
5026 if ((significand_size (fmode) + 1)
5027 < GET_MODE_BITSIZE (GET_MODE (from)))
5030 rtx neglabel = gen_label_rtx ();
5032 /* Don't use TARGET if it isn't a register, is a hard register,
5033 or is the wrong mode. */
5035 || REGNO (target) < FIRST_PSEUDO_REGISTER
5036 || GET_MODE (target) != fmode)
5037 target = gen_reg_rtx (fmode);
5039 imode = GET_MODE (from);
5040 do_pending_stack_adjust ();
5042 /* Test whether the sign bit is set. */
5043 emit_cmp_and_jump_insns (from, const0_rtx, LT, NULL_RTX, imode,
5046 /* The sign bit is not set. Convert as signed. */
5047 expand_float (target, from, 0);
5048 emit_jump_insn (gen_jump (label));
5051 /* The sign bit is set.
5052 Convert to a usable (positive signed) value by shifting right
5053 one bit, while remembering if a nonzero bit was shifted
5054 out; i.e., compute (from & 1) | (from >> 1). */
5056 emit_label (neglabel);
5057 temp = expand_binop (imode, and_optab, from, const1_rtx,
5058 NULL_RTX, 1, OPTAB_LIB_WIDEN);
5059 temp1 = expand_shift (RSHIFT_EXPR, imode, from, integer_one_node,
5061 temp = expand_binop (imode, ior_optab, temp, temp1, temp, 1,
5063 expand_float (target, temp, 0);
5065 /* Multiply by 2 to undo the shift above. */
5066 temp = expand_binop (fmode, add_optab, target, target,
5067 target, 0, OPTAB_LIB_WIDEN);
5069 emit_move_insn (target, temp);
5071 do_pending_stack_adjust ();
5077 /* If we are about to do some arithmetic to correct for an
5078 unsigned operand, do it in a pseudo-register. */
5080 if (GET_MODE (to) != fmode
5081 || !REG_P (to) || REGNO (to) < FIRST_PSEUDO_REGISTER)
5082 target = gen_reg_rtx (fmode);
5084 /* Convert as signed integer to floating. */
5085 expand_float (target, from, 0);
5087 /* If FROM is negative (and therefore TO is negative),
5088 correct its value by 2**bitwidth. */
5090 do_pending_stack_adjust ();
5091 emit_cmp_and_jump_insns (from, const0_rtx, GE, NULL_RTX, GET_MODE (from),
5095 real_2expN (&offset, GET_MODE_BITSIZE (GET_MODE (from)), fmode);
5096 temp = expand_binop (fmode, add_optab, target,
5097 CONST_DOUBLE_FROM_REAL_VALUE (offset, fmode),
5098 target, 0, OPTAB_LIB_WIDEN);
5100 emit_move_insn (target, temp);
5102 do_pending_stack_adjust ();
5107 /* No hardware instruction available; call a library routine. */
5112 convert_optab tab = unsignedp ? ufloat_optab : sfloat_optab;
5114 if (GET_MODE_SIZE (GET_MODE (from)) < GET_MODE_SIZE (SImode))
5115 from = convert_to_mode (SImode, from, unsignedp);
5117 libfunc = convert_optab_libfunc (tab, GET_MODE (to), GET_MODE (from));
5118 gcc_assert (libfunc);
5122 value = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
5123 GET_MODE (to), 1, from,
5125 insns = get_insns ();
5128 emit_libcall_block (insns, target, value,
5129 gen_rtx_fmt_e (unsignedp ? UNSIGNED_FLOAT : FLOAT,
5130 GET_MODE (to), from));
5135 /* Copy result to requested destination
5136 if we have been computing in a temp location. */
5140 if (GET_MODE (target) == GET_MODE (to))
5141 emit_move_insn (to, target);
5143 convert_move (to, target, 0);
5147 /* Generate code to convert FROM to fixed point and store in TO. FROM
5148 must be floating point. */
5151 expand_fix (rtx to, rtx from, int unsignedp)
5153 enum insn_code icode;
5155 enum machine_mode fmode, imode;
5158 /* We first try to find a pair of modes, one real and one integer, at
5159 least as wide as FROM and TO, respectively, in which we can open-code
5160 this conversion. If the integer mode is wider than the mode of TO,
5161 we can do the conversion either signed or unsigned. */
5163 for (fmode = GET_MODE (from); fmode != VOIDmode;
5164 fmode = GET_MODE_WIDER_MODE (fmode))
5165 for (imode = GET_MODE (to); imode != VOIDmode;
5166 imode = GET_MODE_WIDER_MODE (imode))
5168 int doing_unsigned = unsignedp;
5170 icode = can_fix_p (imode, fmode, unsignedp, &must_trunc);
5171 if (icode == CODE_FOR_nothing && imode != GET_MODE (to) && unsignedp)
5172 icode = can_fix_p (imode, fmode, 0, &must_trunc), doing_unsigned = 0;
5174 if (icode != CODE_FOR_nothing)
5176 rtx last = get_last_insn ();
5177 if (fmode != GET_MODE (from))
5178 from = convert_to_mode (fmode, from, 0);
5182 rtx temp = gen_reg_rtx (GET_MODE (from));
5183 from = expand_unop (GET_MODE (from), ftrunc_optab, from,
5187 if (imode != GET_MODE (to))
5188 target = gen_reg_rtx (imode);
5190 if (maybe_emit_unop_insn (icode, target, from,
5191 doing_unsigned ? UNSIGNED_FIX : FIX))
5194 convert_move (to, target, unsignedp);
5197 delete_insns_since (last);
5201 /* For an unsigned conversion, there is one more way to do it.
5202 If we have a signed conversion, we generate code that compares
5203 the real value to the largest representable positive number. If if
5204 is smaller, the conversion is done normally. Otherwise, subtract
5205 one plus the highest signed number, convert, and add it back.
5207 We only need to check all real modes, since we know we didn't find
5208 anything with a wider integer mode.
5210 This code used to extend FP value into mode wider than the destination.
5211 This is needed for decimal float modes which cannot accurately
5212 represent one plus the highest signed number of the same size, but
5213 not for binary modes. Consider, for instance conversion from SFmode
5216 The hot path through the code is dealing with inputs smaller than 2^63
5217 and doing just the conversion, so there is no bits to lose.
5219 In the other path we know the value is positive in the range 2^63..2^64-1
5220 inclusive. (as for other input overflow happens and result is undefined)
5221 So we know that the most important bit set in mantissa corresponds to
5222 2^63. The subtraction of 2^63 should not generate any rounding as it
5223 simply clears out that bit. The rest is trivial. */
5225 if (unsignedp && GET_MODE_BITSIZE (GET_MODE (to)) <= HOST_BITS_PER_WIDE_INT)
5226 for (fmode = GET_MODE (from); fmode != VOIDmode;
5227 fmode = GET_MODE_WIDER_MODE (fmode))
5228 if (CODE_FOR_nothing != can_fix_p (GET_MODE (to), fmode, 0, &must_trunc)
5229 && (!DECIMAL_FLOAT_MODE_P (fmode)
5230 || GET_MODE_BITSIZE (fmode) > GET_MODE_BITSIZE (GET_MODE (to))))
5233 REAL_VALUE_TYPE offset;
5234 rtx limit, lab1, lab2, insn;
5236 bitsize = GET_MODE_BITSIZE (GET_MODE (to));
5237 real_2expN (&offset, bitsize - 1, fmode);
5238 limit = CONST_DOUBLE_FROM_REAL_VALUE (offset, fmode);
5239 lab1 = gen_label_rtx ();
5240 lab2 = gen_label_rtx ();
5242 if (fmode != GET_MODE (from))
5243 from = convert_to_mode (fmode, from, 0);
5245 /* See if we need to do the subtraction. */
5246 do_pending_stack_adjust ();
5247 emit_cmp_and_jump_insns (from, limit, GE, NULL_RTX, GET_MODE (from),
5250 /* If not, do the signed "fix" and branch around fixup code. */
5251 expand_fix (to, from, 0);
5252 emit_jump_insn (gen_jump (lab2));
5255 /* Otherwise, subtract 2**(N-1), convert to signed number,
5256 then add 2**(N-1). Do the addition using XOR since this
5257 will often generate better code. */
5259 target = expand_binop (GET_MODE (from), sub_optab, from, limit,
5260 NULL_RTX, 0, OPTAB_LIB_WIDEN);
5261 expand_fix (to, target, 0);
5262 target = expand_binop (GET_MODE (to), xor_optab, to,
5264 ((HOST_WIDE_INT) 1 << (bitsize - 1),
5266 to, 1, OPTAB_LIB_WIDEN);
5269 emit_move_insn (to, target);
5273 if (optab_handler (mov_optab, GET_MODE (to)) != CODE_FOR_nothing)
5275 /* Make a place for a REG_NOTE and add it. */
5276 insn = emit_move_insn (to, to);
5277 set_unique_reg_note (insn,
5279 gen_rtx_fmt_e (UNSIGNED_FIX,
5287 /* We can't do it with an insn, so use a library call. But first ensure
5288 that the mode of TO is at least as wide as SImode, since those are the
5289 only library calls we know about. */
5291 if (GET_MODE_SIZE (GET_MODE (to)) < GET_MODE_SIZE (SImode))
5293 target = gen_reg_rtx (SImode);
5295 expand_fix (target, from, unsignedp);
5303 convert_optab tab = unsignedp ? ufix_optab : sfix_optab;
5304 libfunc = convert_optab_libfunc (tab, GET_MODE (to), GET_MODE (from));
5305 gcc_assert (libfunc);
5309 value = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
5310 GET_MODE (to), 1, from,
5312 insns = get_insns ();
5315 emit_libcall_block (insns, target, value,
5316 gen_rtx_fmt_e (unsignedp ? UNSIGNED_FIX : FIX,
5317 GET_MODE (to), from));
5322 if (GET_MODE (to) == GET_MODE (target))
5323 emit_move_insn (to, target);
5325 convert_move (to, target, 0);
5329 /* Generate code to convert FROM or TO a fixed-point.
5330 If UINTP is true, either TO or FROM is an unsigned integer.
5331 If SATP is true, we need to saturate the result. */
5334 expand_fixed_convert (rtx to, rtx from, int uintp, int satp)
5336 enum machine_mode to_mode = GET_MODE (to);
5337 enum machine_mode from_mode = GET_MODE (from);
5339 enum rtx_code this_code;
5340 enum insn_code code;
5344 if (to_mode == from_mode)
5346 emit_move_insn (to, from);
5352 tab = satp ? satfractuns_optab : fractuns_optab;
5353 this_code = satp ? UNSIGNED_SAT_FRACT : UNSIGNED_FRACT_CONVERT;
5357 tab = satp ? satfract_optab : fract_optab;
5358 this_code = satp ? SAT_FRACT : FRACT_CONVERT;
5360 code = convert_optab_handler (tab, to_mode, from_mode);
5361 if (code != CODE_FOR_nothing)
5363 emit_unop_insn (code, to, from, this_code);
5367 libfunc = convert_optab_libfunc (tab, to_mode, from_mode);
5368 gcc_assert (libfunc);
5371 value = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST, to_mode,
5372 1, from, from_mode);
5373 insns = get_insns ();
5376 emit_libcall_block (insns, to, value,
5377 gen_rtx_fmt_e (tab->code, to_mode, from));
5380 /* Generate code to convert FROM to fixed point and store in TO. FROM
5381 must be floating point, TO must be signed. Use the conversion optab
5382 TAB to do the conversion. */
5385 expand_sfix_optab (rtx to, rtx from, convert_optab tab)
5387 enum insn_code icode;
5389 enum machine_mode fmode, imode;
5391 /* We first try to find a pair of modes, one real and one integer, at
5392 least as wide as FROM and TO, respectively, in which we can open-code
5393 this conversion. If the integer mode is wider than the mode of TO,
5394 we can do the conversion either signed or unsigned. */
5396 for (fmode = GET_MODE (from); fmode != VOIDmode;
5397 fmode = GET_MODE_WIDER_MODE (fmode))
5398 for (imode = GET_MODE (to); imode != VOIDmode;
5399 imode = GET_MODE_WIDER_MODE (imode))
5401 icode = convert_optab_handler (tab, imode, fmode);
5402 if (icode != CODE_FOR_nothing)
5404 rtx last = get_last_insn ();
5405 if (fmode != GET_MODE (from))
5406 from = convert_to_mode (fmode, from, 0);
5408 if (imode != GET_MODE (to))
5409 target = gen_reg_rtx (imode);
5411 if (!maybe_emit_unop_insn (icode, target, from, UNKNOWN))
5413 delete_insns_since (last);
5417 convert_move (to, target, 0);
5425 /* Report whether we have an instruction to perform the operation
5426 specified by CODE on operands of mode MODE. */
5428 have_insn_for (enum rtx_code code, enum machine_mode mode)
5430 return (code_to_optab[(int) code] != 0
5431 && (optab_handler (code_to_optab[(int) code], mode)
5432 != CODE_FOR_nothing));
5435 /* Set all insn_code fields to CODE_FOR_nothing. */
5438 init_insn_codes (void)
5440 memset (optab_table, 0, sizeof (optab_table));
5441 memset (convert_optab_table, 0, sizeof (convert_optab_table));
5444 /* Initialize OP's code to CODE, and write it into the code_to_optab table. */
5446 init_optab (optab op, enum rtx_code code)
5449 code_to_optab[(int) code] = op;
5452 /* Same, but fill in its code as CODE, and do _not_ write it into
5453 the code_to_optab table. */
5455 init_optabv (optab op, enum rtx_code code)
5460 /* Conversion optabs never go in the code_to_optab table. */
5462 init_convert_optab (convert_optab op, enum rtx_code code)
5467 /* Initialize the libfunc fields of an entire group of entries in some
5468 optab. Each entry is set equal to a string consisting of a leading
5469 pair of underscores followed by a generic operation name followed by
5470 a mode name (downshifted to lowercase) followed by a single character
5471 representing the number of operands for the given operation (which is
5472 usually one of the characters '2', '3', or '4').
5474 OPTABLE is the table in which libfunc fields are to be initialized.
5475 OPNAME is the generic (string) name of the operation.
5476 SUFFIX is the character which specifies the number of operands for
5477 the given generic operation.
5478 MODE is the mode to generate for.
5482 gen_libfunc (optab optable, const char *opname, int suffix, enum machine_mode mode)
5484 unsigned opname_len = strlen (opname);
5485 const char *mname = GET_MODE_NAME (mode);
5486 unsigned mname_len = strlen (mname);
5487 char *libfunc_name = XALLOCAVEC (char, 2 + opname_len + mname_len + 1 + 1);
5494 for (q = opname; *q; )
5496 for (q = mname; *q; q++)
5497 *p++ = TOLOWER (*q);
5501 set_optab_libfunc (optable, mode,
5502 ggc_alloc_string (libfunc_name, p - libfunc_name));
5505 /* Like gen_libfunc, but verify that integer operation is involved. */
5508 gen_int_libfunc (optab optable, const char *opname, char suffix,
5509 enum machine_mode mode)
5511 int maxsize = 2 * BITS_PER_WORD;
5513 if (GET_MODE_CLASS (mode) != MODE_INT)
5515 if (maxsize < LONG_LONG_TYPE_SIZE)
5516 maxsize = LONG_LONG_TYPE_SIZE;
5517 if (GET_MODE_CLASS (mode) != MODE_INT
5518 || mode < word_mode || GET_MODE_BITSIZE (mode) > maxsize)
5520 gen_libfunc (optable, opname, suffix, mode);
5523 /* Like gen_libfunc, but verify that FP and set decimal prefix if needed. */
5526 gen_fp_libfunc (optab optable, const char *opname, char suffix,
5527 enum machine_mode mode)
5531 if (GET_MODE_CLASS (mode) == MODE_FLOAT)
5532 gen_libfunc (optable, opname, suffix, mode);
5533 if (DECIMAL_FLOAT_MODE_P (mode))
5535 dec_opname = XALLOCAVEC (char, sizeof (DECIMAL_PREFIX) + strlen (opname));
5536 /* For BID support, change the name to have either a bid_ or dpd_ prefix
5537 depending on the low level floating format used. */
5538 memcpy (dec_opname, DECIMAL_PREFIX, sizeof (DECIMAL_PREFIX) - 1);
5539 strcpy (dec_opname + sizeof (DECIMAL_PREFIX) - 1, opname);
5540 gen_libfunc (optable, dec_opname, suffix, mode);
5544 /* Like gen_libfunc, but verify that fixed-point operation is involved. */
5547 gen_fixed_libfunc (optab optable, const char *opname, char suffix,
5548 enum machine_mode mode)
5550 if (!ALL_FIXED_POINT_MODE_P (mode))
5552 gen_libfunc (optable, opname, suffix, mode);
5555 /* Like gen_libfunc, but verify that signed fixed-point operation is
5559 gen_signed_fixed_libfunc (optab optable, const char *opname, char suffix,
5560 enum machine_mode mode)
5562 if (!SIGNED_FIXED_POINT_MODE_P (mode))
5564 gen_libfunc (optable, opname, suffix, mode);
5567 /* Like gen_libfunc, but verify that unsigned fixed-point operation is
5571 gen_unsigned_fixed_libfunc (optab optable, const char *opname, char suffix,
5572 enum machine_mode mode)
5574 if (!UNSIGNED_FIXED_POINT_MODE_P (mode))
5576 gen_libfunc (optable, opname, suffix, mode);
5579 /* Like gen_libfunc, but verify that FP or INT operation is involved. */
5582 gen_int_fp_libfunc (optab optable, const char *name, char suffix,
5583 enum machine_mode mode)
5585 if (DECIMAL_FLOAT_MODE_P (mode) || GET_MODE_CLASS (mode) == MODE_FLOAT)
5586 gen_fp_libfunc (optable, name, suffix, mode);
5587 if (INTEGRAL_MODE_P (mode))
5588 gen_int_libfunc (optable, name, suffix, mode);
5591 /* Like gen_libfunc, but verify that FP or INT operation is involved
5592 and add 'v' suffix for integer operation. */
5595 gen_intv_fp_libfunc (optab optable, const char *name, char suffix,
5596 enum machine_mode mode)
5598 if (DECIMAL_FLOAT_MODE_P (mode) || GET_MODE_CLASS (mode) == MODE_FLOAT)
5599 gen_fp_libfunc (optable, name, suffix, mode);
5600 if (GET_MODE_CLASS (mode) == MODE_INT)
5602 int len = strlen (name);
5603 char *v_name = XALLOCAVEC (char, len + 2);
5604 strcpy (v_name, name);
5606 v_name[len + 1] = 0;
5607 gen_int_libfunc (optable, v_name, suffix, mode);
5611 /* Like gen_libfunc, but verify that FP or INT or FIXED operation is
5615 gen_int_fp_fixed_libfunc (optab optable, const char *name, char suffix,
5616 enum machine_mode mode)
5618 if (DECIMAL_FLOAT_MODE_P (mode) || GET_MODE_CLASS (mode) == MODE_FLOAT)
5619 gen_fp_libfunc (optable, name, suffix, mode);
5620 if (INTEGRAL_MODE_P (mode))
5621 gen_int_libfunc (optable, name, suffix, mode);
5622 if (ALL_FIXED_POINT_MODE_P (mode))
5623 gen_fixed_libfunc (optable, name, suffix, mode);
5626 /* Like gen_libfunc, but verify that FP or INT or signed FIXED operation is
5630 gen_int_fp_signed_fixed_libfunc (optab optable, const char *name, char suffix,
5631 enum machine_mode mode)
5633 if (DECIMAL_FLOAT_MODE_P (mode) || GET_MODE_CLASS (mode) == MODE_FLOAT)
5634 gen_fp_libfunc (optable, name, suffix, mode);
5635 if (INTEGRAL_MODE_P (mode))
5636 gen_int_libfunc (optable, name, suffix, mode);
5637 if (SIGNED_FIXED_POINT_MODE_P (mode))
5638 gen_signed_fixed_libfunc (optable, name, suffix, mode);
5641 /* Like gen_libfunc, but verify that INT or FIXED operation is
5645 gen_int_fixed_libfunc (optab optable, const char *name, char suffix,
5646 enum machine_mode mode)
5648 if (INTEGRAL_MODE_P (mode))
5649 gen_int_libfunc (optable, name, suffix, mode);
5650 if (ALL_FIXED_POINT_MODE_P (mode))
5651 gen_fixed_libfunc (optable, name, suffix, mode);
5654 /* Like gen_libfunc, but verify that INT or signed FIXED operation is
5658 gen_int_signed_fixed_libfunc (optab optable, const char *name, char suffix,
5659 enum machine_mode mode)
5661 if (INTEGRAL_MODE_P (mode))
5662 gen_int_libfunc (optable, name, suffix, mode);
5663 if (SIGNED_FIXED_POINT_MODE_P (mode))
5664 gen_signed_fixed_libfunc (optable, name, suffix, mode);
5667 /* Like gen_libfunc, but verify that INT or unsigned FIXED operation is
5671 gen_int_unsigned_fixed_libfunc (optab optable, const char *name, char suffix,
5672 enum machine_mode mode)
5674 if (INTEGRAL_MODE_P (mode))
5675 gen_int_libfunc (optable, name, suffix, mode);
5676 if (UNSIGNED_FIXED_POINT_MODE_P (mode))
5677 gen_unsigned_fixed_libfunc (optable, name, suffix, mode);
5680 /* Initialize the libfunc fields of an entire group of entries of an
5681 inter-mode-class conversion optab. The string formation rules are
5682 similar to the ones for init_libfuncs, above, but instead of having
5683 a mode name and an operand count these functions have two mode names
5684 and no operand count. */
5687 gen_interclass_conv_libfunc (convert_optab tab,
5689 enum machine_mode tmode,
5690 enum machine_mode fmode)
5692 size_t opname_len = strlen (opname);
5693 size_t mname_len = 0;
5695 const char *fname, *tname;
5697 char *libfunc_name, *suffix;
5698 char *nondec_name, *dec_name, *nondec_suffix, *dec_suffix;
5701 /* If this is a decimal conversion, add the current BID vs. DPD prefix that
5702 depends on which underlying decimal floating point format is used. */
5703 const size_t dec_len = sizeof (DECIMAL_PREFIX) - 1;
5705 mname_len = strlen (GET_MODE_NAME (tmode)) + strlen (GET_MODE_NAME (fmode));
5707 nondec_name = XALLOCAVEC (char, 2 + opname_len + mname_len + 1 + 1);
5708 nondec_name[0] = '_';
5709 nondec_name[1] = '_';
5710 memcpy (&nondec_name[2], opname, opname_len);
5711 nondec_suffix = nondec_name + opname_len + 2;
5713 dec_name = XALLOCAVEC (char, 2 + dec_len + opname_len + mname_len + 1 + 1);
5716 memcpy (&dec_name[2], DECIMAL_PREFIX, dec_len);
5717 memcpy (&dec_name[2+dec_len], opname, opname_len);
5718 dec_suffix = dec_name + dec_len + opname_len + 2;
5720 fname = GET_MODE_NAME (fmode);
5721 tname = GET_MODE_NAME (tmode);
5723 if (DECIMAL_FLOAT_MODE_P(fmode) || DECIMAL_FLOAT_MODE_P(tmode))
5725 libfunc_name = dec_name;
5726 suffix = dec_suffix;
5730 libfunc_name = nondec_name;
5731 suffix = nondec_suffix;
5735 for (q = fname; *q; p++, q++)
5737 for (q = tname; *q; p++, q++)
5742 set_conv_libfunc (tab, tmode, fmode,
5743 ggc_alloc_string (libfunc_name, p - libfunc_name));
5746 /* Same as gen_interclass_conv_libfunc but verify that we are producing
5747 int->fp conversion. */
5750 gen_int_to_fp_conv_libfunc (convert_optab tab,
5752 enum machine_mode tmode,
5753 enum machine_mode fmode)
5755 if (GET_MODE_CLASS (fmode) != MODE_INT)
5757 if (GET_MODE_CLASS (tmode) != MODE_FLOAT && !DECIMAL_FLOAT_MODE_P (tmode))
5759 gen_interclass_conv_libfunc (tab, opname, tmode, fmode);
5762 /* ufloat_optab is special by using floatun for FP and floatuns decimal fp
5766 gen_ufloat_conv_libfunc (convert_optab tab,
5767 const char *opname ATTRIBUTE_UNUSED,
5768 enum machine_mode tmode,
5769 enum machine_mode fmode)
5771 if (DECIMAL_FLOAT_MODE_P (tmode))
5772 gen_int_to_fp_conv_libfunc (tab, "floatuns", tmode, fmode);
5774 gen_int_to_fp_conv_libfunc (tab, "floatun", tmode, fmode);
5777 /* Same as gen_interclass_conv_libfunc but verify that we are producing
5778 fp->int conversion. */
5781 gen_int_to_fp_nondecimal_conv_libfunc (convert_optab tab,
5783 enum machine_mode tmode,
5784 enum machine_mode fmode)
5786 if (GET_MODE_CLASS (fmode) != MODE_INT)
5788 if (GET_MODE_CLASS (tmode) != MODE_FLOAT)
5790 gen_interclass_conv_libfunc (tab, opname, tmode, fmode);
5793 /* Same as gen_interclass_conv_libfunc but verify that we are producing
5794 fp->int conversion with no decimal floating point involved. */
5797 gen_fp_to_int_conv_libfunc (convert_optab tab,
5799 enum machine_mode tmode,
5800 enum machine_mode fmode)
5802 if (GET_MODE_CLASS (fmode) != MODE_FLOAT && !DECIMAL_FLOAT_MODE_P (fmode))
5804 if (GET_MODE_CLASS (tmode) != MODE_INT)
5806 gen_interclass_conv_libfunc (tab, opname, tmode, fmode);
5809 /* Initialize the libfunc fields of an of an intra-mode-class conversion optab.
5810 The string formation rules are
5811 similar to the ones for init_libfunc, above. */
5814 gen_intraclass_conv_libfunc (convert_optab tab, const char *opname,
5815 enum machine_mode tmode, enum machine_mode fmode)
5817 size_t opname_len = strlen (opname);
5818 size_t mname_len = 0;
5820 const char *fname, *tname;
5822 char *nondec_name, *dec_name, *nondec_suffix, *dec_suffix;
5823 char *libfunc_name, *suffix;
5826 /* If this is a decimal conversion, add the current BID vs. DPD prefix that
5827 depends on which underlying decimal floating point format is used. */
5828 const size_t dec_len = sizeof (DECIMAL_PREFIX) - 1;
5830 mname_len = strlen (GET_MODE_NAME (tmode)) + strlen (GET_MODE_NAME (fmode));
5832 nondec_name = XALLOCAVEC (char, 2 + opname_len + mname_len + 1 + 1);
5833 nondec_name[0] = '_';
5834 nondec_name[1] = '_';
5835 memcpy (&nondec_name[2], opname, opname_len);
5836 nondec_suffix = nondec_name + opname_len + 2;
5838 dec_name = XALLOCAVEC (char, 2 + dec_len + opname_len + mname_len + 1 + 1);
5841 memcpy (&dec_name[2], DECIMAL_PREFIX, dec_len);
5842 memcpy (&dec_name[2 + dec_len], opname, opname_len);
5843 dec_suffix = dec_name + dec_len + opname_len + 2;
5845 fname = GET_MODE_NAME (fmode);
5846 tname = GET_MODE_NAME (tmode);
5848 if (DECIMAL_FLOAT_MODE_P(fmode) || DECIMAL_FLOAT_MODE_P(tmode))
5850 libfunc_name = dec_name;
5851 suffix = dec_suffix;
5855 libfunc_name = nondec_name;
5856 suffix = nondec_suffix;
5860 for (q = fname; *q; p++, q++)
5862 for (q = tname; *q; p++, q++)
5868 set_conv_libfunc (tab, tmode, fmode,
5869 ggc_alloc_string (libfunc_name, p - libfunc_name));
5872 /* Pick proper libcall for trunc_optab. We need to chose if we do
5873 truncation or extension and interclass or intraclass. */
5876 gen_trunc_conv_libfunc (convert_optab tab,
5878 enum machine_mode tmode,
5879 enum machine_mode fmode)
5881 if (GET_MODE_CLASS (tmode) != MODE_FLOAT && !DECIMAL_FLOAT_MODE_P (tmode))
5883 if (GET_MODE_CLASS (fmode) != MODE_FLOAT && !DECIMAL_FLOAT_MODE_P (fmode))
5888 if ((GET_MODE_CLASS (tmode) == MODE_FLOAT && DECIMAL_FLOAT_MODE_P (fmode))
5889 || (GET_MODE_CLASS (fmode) == MODE_FLOAT && DECIMAL_FLOAT_MODE_P (tmode)))
5890 gen_interclass_conv_libfunc (tab, opname, tmode, fmode);
5892 if (GET_MODE_PRECISION (fmode) <= GET_MODE_PRECISION (tmode))
5895 if ((GET_MODE_CLASS (tmode) == MODE_FLOAT
5896 && GET_MODE_CLASS (fmode) == MODE_FLOAT)
5897 || (DECIMAL_FLOAT_MODE_P (fmode) && DECIMAL_FLOAT_MODE_P (tmode)))
5898 gen_intraclass_conv_libfunc (tab, opname, tmode, fmode);
5901 /* Pick proper libcall for extend_optab. We need to chose if we do
5902 truncation or extension and interclass or intraclass. */
5905 gen_extend_conv_libfunc (convert_optab tab,
5906 const char *opname ATTRIBUTE_UNUSED,
5907 enum machine_mode tmode,
5908 enum machine_mode fmode)
5910 if (GET_MODE_CLASS (tmode) != MODE_FLOAT && !DECIMAL_FLOAT_MODE_P (tmode))
5912 if (GET_MODE_CLASS (fmode) != MODE_FLOAT && !DECIMAL_FLOAT_MODE_P (fmode))
5917 if ((GET_MODE_CLASS (tmode) == MODE_FLOAT && DECIMAL_FLOAT_MODE_P (fmode))
5918 || (GET_MODE_CLASS (fmode) == MODE_FLOAT && DECIMAL_FLOAT_MODE_P (tmode)))
5919 gen_interclass_conv_libfunc (tab, opname, tmode, fmode);
5921 if (GET_MODE_PRECISION (fmode) > GET_MODE_PRECISION (tmode))
5924 if ((GET_MODE_CLASS (tmode) == MODE_FLOAT
5925 && GET_MODE_CLASS (fmode) == MODE_FLOAT)
5926 || (DECIMAL_FLOAT_MODE_P (fmode) && DECIMAL_FLOAT_MODE_P (tmode)))
5927 gen_intraclass_conv_libfunc (tab, opname, tmode, fmode);
5930 /* Pick proper libcall for fract_optab. We need to chose if we do
5931 interclass or intraclass. */
5934 gen_fract_conv_libfunc (convert_optab tab,
5936 enum machine_mode tmode,
5937 enum machine_mode fmode)
5941 if (!(ALL_FIXED_POINT_MODE_P (tmode) || ALL_FIXED_POINT_MODE_P (fmode)))
5944 if (GET_MODE_CLASS (tmode) == GET_MODE_CLASS (fmode))
5945 gen_intraclass_conv_libfunc (tab, opname, tmode, fmode);
5947 gen_interclass_conv_libfunc (tab, opname, tmode, fmode);
5950 /* Pick proper libcall for fractuns_optab. */
5953 gen_fractuns_conv_libfunc (convert_optab tab,
5955 enum machine_mode tmode,
5956 enum machine_mode fmode)
5960 /* One mode must be a fixed-point mode, and the other must be an integer
5962 if (!((ALL_FIXED_POINT_MODE_P (tmode) && GET_MODE_CLASS (fmode) == MODE_INT)
5963 || (ALL_FIXED_POINT_MODE_P (fmode)
5964 && GET_MODE_CLASS (tmode) == MODE_INT)))
5967 gen_interclass_conv_libfunc (tab, opname, tmode, fmode);
5970 /* Pick proper libcall for satfract_optab. We need to chose if we do
5971 interclass or intraclass. */
5974 gen_satfract_conv_libfunc (convert_optab tab,
5976 enum machine_mode tmode,
5977 enum machine_mode fmode)
5981 /* TMODE must be a fixed-point mode. */
5982 if (!ALL_FIXED_POINT_MODE_P (tmode))
5985 if (GET_MODE_CLASS (tmode) == GET_MODE_CLASS (fmode))
5986 gen_intraclass_conv_libfunc (tab, opname, tmode, fmode);
5988 gen_interclass_conv_libfunc (tab, opname, tmode, fmode);
5991 /* Pick proper libcall for satfractuns_optab. */
5994 gen_satfractuns_conv_libfunc (convert_optab tab,
5996 enum machine_mode tmode,
5997 enum machine_mode fmode)
6001 /* TMODE must be a fixed-point mode, and FMODE must be an integer mode. */
6002 if (!(ALL_FIXED_POINT_MODE_P (tmode) && GET_MODE_CLASS (fmode) == MODE_INT))
6005 gen_interclass_conv_libfunc (tab, opname, tmode, fmode);
6008 /* A table of previously-created libfuncs, hashed by name. */
6009 static GTY ((param_is (union tree_node))) htab_t libfunc_decls;
6011 /* Hashtable callbacks for libfunc_decls. */
6014 libfunc_decl_hash (const void *entry)
6016 return IDENTIFIER_HASH_VALUE (DECL_NAME ((const_tree) entry));
6020 libfunc_decl_eq (const void *entry1, const void *entry2)
6022 return DECL_NAME ((const_tree) entry1) == (const_tree) entry2;
6025 /* Build a decl for a libfunc named NAME. */
6028 build_libfunc_function (const char *name)
6030 tree decl = build_decl (UNKNOWN_LOCATION, FUNCTION_DECL,
6031 get_identifier (name),
6032 build_function_type (integer_type_node, NULL_TREE));
6033 /* ??? We don't have any type information except for this is
6034 a function. Pretend this is "int foo()". */
6035 DECL_ARTIFICIAL (decl) = 1;
6036 DECL_EXTERNAL (decl) = 1;
6037 TREE_PUBLIC (decl) = 1;
6038 gcc_assert (DECL_ASSEMBLER_NAME (decl));
6040 /* Zap the nonsensical SYMBOL_REF_DECL for this. What we're left with
6041 are the flags assigned by targetm.encode_section_info. */
6042 SET_SYMBOL_REF_DECL (XEXP (DECL_RTL (decl), 0), NULL);
6048 init_one_libfunc (const char *name)
6054 if (libfunc_decls == NULL)
6055 libfunc_decls = htab_create_ggc (37, libfunc_decl_hash,
6056 libfunc_decl_eq, NULL);
6058 /* See if we have already created a libfunc decl for this function. */
6059 id = get_identifier (name);
6060 hash = IDENTIFIER_HASH_VALUE (id);
6061 slot = htab_find_slot_with_hash (libfunc_decls, id, hash, INSERT);
6062 decl = (tree) *slot;
6065 /* Create a new decl, so that it can be passed to
6066 targetm.encode_section_info. */
6067 decl = build_libfunc_function (name);
6070 return XEXP (DECL_RTL (decl), 0);
6073 /* Adjust the assembler name of libfunc NAME to ASMSPEC. */
6076 set_user_assembler_libfunc (const char *name, const char *asmspec)
6082 id = get_identifier (name);
6083 hash = IDENTIFIER_HASH_VALUE (id);
6084 slot = htab_find_slot_with_hash (libfunc_decls, id, hash, NO_INSERT);
6086 decl = (tree) *slot;
6087 set_user_assembler_name (decl, asmspec);
6088 return XEXP (DECL_RTL (decl), 0);
6091 /* Call this to reset the function entry for one optab (OPTABLE) in mode
6092 MODE to NAME, which should be either 0 or a string constant. */
6094 set_optab_libfunc (optab optable, enum machine_mode mode, const char *name)
6097 struct libfunc_entry e;
6098 struct libfunc_entry **slot;
6099 e.optab = (size_t) (optable - &optab_table[0]);
6104 val = init_one_libfunc (name);
6107 slot = (struct libfunc_entry **) htab_find_slot (libfunc_hash, &e, INSERT);
6109 *slot = ggc_alloc_libfunc_entry ();
6110 (*slot)->optab = (size_t) (optable - &optab_table[0]);
6111 (*slot)->mode1 = mode;
6112 (*slot)->mode2 = VOIDmode;
6113 (*slot)->libfunc = val;
6116 /* Call this to reset the function entry for one conversion optab
6117 (OPTABLE) from mode FMODE to mode TMODE to NAME, which should be
6118 either 0 or a string constant. */
6120 set_conv_libfunc (convert_optab optable, enum machine_mode tmode,
6121 enum machine_mode fmode, const char *name)
6124 struct libfunc_entry e;
6125 struct libfunc_entry **slot;
6126 e.optab = (size_t) (optable - &convert_optab_table[0]);
6131 val = init_one_libfunc (name);
6134 slot = (struct libfunc_entry **) htab_find_slot (libfunc_hash, &e, INSERT);
6136 *slot = ggc_alloc_libfunc_entry ();
6137 (*slot)->optab = (size_t) (optable - &convert_optab_table[0]);
6138 (*slot)->mode1 = tmode;
6139 (*slot)->mode2 = fmode;
6140 (*slot)->libfunc = val;
6143 /* Call this to initialize the contents of the optabs
6144 appropriately for the current target machine. */
6152 libfunc_hash = htab_create_ggc (10, hash_libfunc, eq_libfunc, NULL);
6153 /* Start by initializing all tables to contain CODE_FOR_nothing. */
6155 #ifdef HAVE_conditional_move
6156 for (i = 0; i < NUM_MACHINE_MODES; i++)
6157 movcc_gen_code[i] = CODE_FOR_nothing;
6160 for (i = 0; i < NUM_MACHINE_MODES; i++)
6162 vcond_gen_code[i] = CODE_FOR_nothing;
6163 vcondu_gen_code[i] = CODE_FOR_nothing;
6166 /* We statically initialize the insn_codes with the equivalent of
6167 CODE_FOR_nothing. */
6171 init_optab (add_optab, PLUS);
6172 init_optabv (addv_optab, PLUS);
6173 init_optab (sub_optab, MINUS);
6174 init_optabv (subv_optab, MINUS);
6175 init_optab (ssadd_optab, SS_PLUS);
6176 init_optab (usadd_optab, US_PLUS);
6177 init_optab (sssub_optab, SS_MINUS);
6178 init_optab (ussub_optab, US_MINUS);
6179 init_optab (smul_optab, MULT);
6180 init_optab (ssmul_optab, SS_MULT);
6181 init_optab (usmul_optab, US_MULT);
6182 init_optabv (smulv_optab, MULT);
6183 init_optab (smul_highpart_optab, UNKNOWN);
6184 init_optab (umul_highpart_optab, UNKNOWN);
6185 init_optab (smul_widen_optab, UNKNOWN);
6186 init_optab (umul_widen_optab, UNKNOWN);
6187 init_optab (usmul_widen_optab, UNKNOWN);
6188 init_optab (smadd_widen_optab, UNKNOWN);
6189 init_optab (umadd_widen_optab, UNKNOWN);
6190 init_optab (ssmadd_widen_optab, UNKNOWN);
6191 init_optab (usmadd_widen_optab, UNKNOWN);
6192 init_optab (smsub_widen_optab, UNKNOWN);
6193 init_optab (umsub_widen_optab, UNKNOWN);
6194 init_optab (ssmsub_widen_optab, UNKNOWN);
6195 init_optab (usmsub_widen_optab, UNKNOWN);
6196 init_optab (sdiv_optab, DIV);
6197 init_optab (ssdiv_optab, SS_DIV);
6198 init_optab (usdiv_optab, US_DIV);
6199 init_optabv (sdivv_optab, DIV);
6200 init_optab (sdivmod_optab, UNKNOWN);
6201 init_optab (udiv_optab, UDIV);
6202 init_optab (udivmod_optab, UNKNOWN);
6203 init_optab (smod_optab, MOD);
6204 init_optab (umod_optab, UMOD);
6205 init_optab (fmod_optab, UNKNOWN);
6206 init_optab (remainder_optab, UNKNOWN);
6207 init_optab (ftrunc_optab, UNKNOWN);
6208 init_optab (and_optab, AND);
6209 init_optab (ior_optab, IOR);
6210 init_optab (xor_optab, XOR);
6211 init_optab (ashl_optab, ASHIFT);
6212 init_optab (ssashl_optab, SS_ASHIFT);
6213 init_optab (usashl_optab, US_ASHIFT);
6214 init_optab (ashr_optab, ASHIFTRT);
6215 init_optab (lshr_optab, LSHIFTRT);
6216 init_optab (rotl_optab, ROTATE);
6217 init_optab (rotr_optab, ROTATERT);
6218 init_optab (smin_optab, SMIN);
6219 init_optab (smax_optab, SMAX);
6220 init_optab (umin_optab, UMIN);
6221 init_optab (umax_optab, UMAX);
6222 init_optab (pow_optab, UNKNOWN);
6223 init_optab (atan2_optab, UNKNOWN);
6225 /* These three have codes assigned exclusively for the sake of
6227 init_optab (mov_optab, SET);
6228 init_optab (movstrict_optab, STRICT_LOW_PART);
6229 init_optab (cbranch_optab, COMPARE);
6231 init_optab (cmov_optab, UNKNOWN);
6232 init_optab (cstore_optab, UNKNOWN);
6233 init_optab (ctrap_optab, UNKNOWN);
6235 init_optab (storent_optab, UNKNOWN);
6237 init_optab (cmp_optab, UNKNOWN);
6238 init_optab (ucmp_optab, UNKNOWN);
6240 init_optab (eq_optab, EQ);
6241 init_optab (ne_optab, NE);
6242 init_optab (gt_optab, GT);
6243 init_optab (ge_optab, GE);
6244 init_optab (lt_optab, LT);
6245 init_optab (le_optab, LE);
6246 init_optab (unord_optab, UNORDERED);
6248 init_optab (neg_optab, NEG);
6249 init_optab (ssneg_optab, SS_NEG);
6250 init_optab (usneg_optab, US_NEG);
6251 init_optabv (negv_optab, NEG);
6252 init_optab (abs_optab, ABS);
6253 init_optabv (absv_optab, ABS);
6254 init_optab (addcc_optab, UNKNOWN);
6255 init_optab (one_cmpl_optab, NOT);
6256 init_optab (bswap_optab, BSWAP);
6257 init_optab (ffs_optab, FFS);
6258 init_optab (clz_optab, CLZ);
6259 init_optab (ctz_optab, CTZ);
6260 init_optab (popcount_optab, POPCOUNT);
6261 init_optab (parity_optab, PARITY);
6262 init_optab (sqrt_optab, SQRT);
6263 init_optab (floor_optab, UNKNOWN);
6264 init_optab (ceil_optab, UNKNOWN);
6265 init_optab (round_optab, UNKNOWN);
6266 init_optab (btrunc_optab, UNKNOWN);
6267 init_optab (nearbyint_optab, UNKNOWN);
6268 init_optab (rint_optab, UNKNOWN);
6269 init_optab (sincos_optab, UNKNOWN);
6270 init_optab (sin_optab, UNKNOWN);
6271 init_optab (asin_optab, UNKNOWN);
6272 init_optab (cos_optab, UNKNOWN);
6273 init_optab (acos_optab, UNKNOWN);
6274 init_optab (exp_optab, UNKNOWN);
6275 init_optab (exp10_optab, UNKNOWN);
6276 init_optab (exp2_optab, UNKNOWN);
6277 init_optab (expm1_optab, UNKNOWN);
6278 init_optab (ldexp_optab, UNKNOWN);
6279 init_optab (scalb_optab, UNKNOWN);
6280 init_optab (significand_optab, UNKNOWN);
6281 init_optab (logb_optab, UNKNOWN);
6282 init_optab (ilogb_optab, UNKNOWN);
6283 init_optab (log_optab, UNKNOWN);
6284 init_optab (log10_optab, UNKNOWN);
6285 init_optab (log2_optab, UNKNOWN);
6286 init_optab (log1p_optab, UNKNOWN);
6287 init_optab (tan_optab, UNKNOWN);
6288 init_optab (atan_optab, UNKNOWN);
6289 init_optab (copysign_optab, UNKNOWN);
6290 init_optab (signbit_optab, UNKNOWN);
6292 init_optab (isinf_optab, UNKNOWN);
6294 init_optab (strlen_optab, UNKNOWN);
6295 init_optab (push_optab, UNKNOWN);
6297 init_optab (reduc_smax_optab, UNKNOWN);
6298 init_optab (reduc_umax_optab, UNKNOWN);
6299 init_optab (reduc_smin_optab, UNKNOWN);
6300 init_optab (reduc_umin_optab, UNKNOWN);
6301 init_optab (reduc_splus_optab, UNKNOWN);
6302 init_optab (reduc_uplus_optab, UNKNOWN);
6304 init_optab (ssum_widen_optab, UNKNOWN);
6305 init_optab (usum_widen_optab, UNKNOWN);
6306 init_optab (sdot_prod_optab, UNKNOWN);
6307 init_optab (udot_prod_optab, UNKNOWN);
6309 init_optab (vec_extract_optab, UNKNOWN);
6310 init_optab (vec_extract_even_optab, UNKNOWN);
6311 init_optab (vec_extract_odd_optab, UNKNOWN);
6312 init_optab (vec_interleave_high_optab, UNKNOWN);
6313 init_optab (vec_interleave_low_optab, UNKNOWN);
6314 init_optab (vec_set_optab, UNKNOWN);
6315 init_optab (vec_init_optab, UNKNOWN);
6316 init_optab (vec_shl_optab, UNKNOWN);
6317 init_optab (vec_shr_optab, UNKNOWN);
6318 init_optab (vec_realign_load_optab, UNKNOWN);
6319 init_optab (movmisalign_optab, UNKNOWN);
6320 init_optab (vec_widen_umult_hi_optab, UNKNOWN);
6321 init_optab (vec_widen_umult_lo_optab, UNKNOWN);
6322 init_optab (vec_widen_smult_hi_optab, UNKNOWN);
6323 init_optab (vec_widen_smult_lo_optab, UNKNOWN);
6324 init_optab (vec_unpacks_hi_optab, UNKNOWN);
6325 init_optab (vec_unpacks_lo_optab, UNKNOWN);
6326 init_optab (vec_unpacku_hi_optab, UNKNOWN);
6327 init_optab (vec_unpacku_lo_optab, UNKNOWN);
6328 init_optab (vec_unpacks_float_hi_optab, UNKNOWN);
6329 init_optab (vec_unpacks_float_lo_optab, UNKNOWN);
6330 init_optab (vec_unpacku_float_hi_optab, UNKNOWN);
6331 init_optab (vec_unpacku_float_lo_optab, UNKNOWN);
6332 init_optab (vec_pack_trunc_optab, UNKNOWN);
6333 init_optab (vec_pack_usat_optab, UNKNOWN);
6334 init_optab (vec_pack_ssat_optab, UNKNOWN);
6335 init_optab (vec_pack_ufix_trunc_optab, UNKNOWN);
6336 init_optab (vec_pack_sfix_trunc_optab, UNKNOWN);
6338 init_optab (powi_optab, UNKNOWN);
6341 init_convert_optab (sext_optab, SIGN_EXTEND);
6342 init_convert_optab (zext_optab, ZERO_EXTEND);
6343 init_convert_optab (trunc_optab, TRUNCATE);
6344 init_convert_optab (sfix_optab, FIX);
6345 init_convert_optab (ufix_optab, UNSIGNED_FIX);
6346 init_convert_optab (sfixtrunc_optab, UNKNOWN);
6347 init_convert_optab (ufixtrunc_optab, UNKNOWN);
6348 init_convert_optab (sfloat_optab, FLOAT);
6349 init_convert_optab (ufloat_optab, UNSIGNED_FLOAT);
6350 init_convert_optab (lrint_optab, UNKNOWN);
6351 init_convert_optab (lround_optab, UNKNOWN);
6352 init_convert_optab (lfloor_optab, UNKNOWN);
6353 init_convert_optab (lceil_optab, UNKNOWN);
6355 init_convert_optab (fract_optab, FRACT_CONVERT);
6356 init_convert_optab (fractuns_optab, UNSIGNED_FRACT_CONVERT);
6357 init_convert_optab (satfract_optab, SAT_FRACT);
6358 init_convert_optab (satfractuns_optab, UNSIGNED_SAT_FRACT);
6360 for (i = 0; i < NUM_MACHINE_MODES; i++)
6362 movmem_optab[i] = CODE_FOR_nothing;
6363 cmpstr_optab[i] = CODE_FOR_nothing;
6364 cmpstrn_optab[i] = CODE_FOR_nothing;
6365 cmpmem_optab[i] = CODE_FOR_nothing;
6366 setmem_optab[i] = CODE_FOR_nothing;
6368 sync_add_optab[i] = CODE_FOR_nothing;
6369 sync_sub_optab[i] = CODE_FOR_nothing;
6370 sync_ior_optab[i] = CODE_FOR_nothing;
6371 sync_and_optab[i] = CODE_FOR_nothing;
6372 sync_xor_optab[i] = CODE_FOR_nothing;
6373 sync_nand_optab[i] = CODE_FOR_nothing;
6374 sync_old_add_optab[i] = CODE_FOR_nothing;
6375 sync_old_sub_optab[i] = CODE_FOR_nothing;
6376 sync_old_ior_optab[i] = CODE_FOR_nothing;
6377 sync_old_and_optab[i] = CODE_FOR_nothing;
6378 sync_old_xor_optab[i] = CODE_FOR_nothing;
6379 sync_old_nand_optab[i] = CODE_FOR_nothing;
6380 sync_new_add_optab[i] = CODE_FOR_nothing;
6381 sync_new_sub_optab[i] = CODE_FOR_nothing;
6382 sync_new_ior_optab[i] = CODE_FOR_nothing;
6383 sync_new_and_optab[i] = CODE_FOR_nothing;
6384 sync_new_xor_optab[i] = CODE_FOR_nothing;
6385 sync_new_nand_optab[i] = CODE_FOR_nothing;
6386 sync_compare_and_swap[i] = CODE_FOR_nothing;
6387 sync_lock_test_and_set[i] = CODE_FOR_nothing;
6388 sync_lock_release[i] = CODE_FOR_nothing;
6390 reload_in_optab[i] = reload_out_optab[i] = CODE_FOR_nothing;
6393 /* Fill in the optabs with the insns we support. */
6396 /* Initialize the optabs with the names of the library functions. */
6397 add_optab->libcall_basename = "add";
6398 add_optab->libcall_suffix = '3';
6399 add_optab->libcall_gen = gen_int_fp_fixed_libfunc;
6400 addv_optab->libcall_basename = "add";
6401 addv_optab->libcall_suffix = '3';
6402 addv_optab->libcall_gen = gen_intv_fp_libfunc;
6403 ssadd_optab->libcall_basename = "ssadd";
6404 ssadd_optab->libcall_suffix = '3';
6405 ssadd_optab->libcall_gen = gen_signed_fixed_libfunc;
6406 usadd_optab->libcall_basename = "usadd";
6407 usadd_optab->libcall_suffix = '3';
6408 usadd_optab->libcall_gen = gen_unsigned_fixed_libfunc;
6409 sub_optab->libcall_basename = "sub";
6410 sub_optab->libcall_suffix = '3';
6411 sub_optab->libcall_gen = gen_int_fp_fixed_libfunc;
6412 subv_optab->libcall_basename = "sub";
6413 subv_optab->libcall_suffix = '3';
6414 subv_optab->libcall_gen = gen_intv_fp_libfunc;
6415 sssub_optab->libcall_basename = "sssub";
6416 sssub_optab->libcall_suffix = '3';
6417 sssub_optab->libcall_gen = gen_signed_fixed_libfunc;
6418 ussub_optab->libcall_basename = "ussub";
6419 ussub_optab->libcall_suffix = '3';
6420 ussub_optab->libcall_gen = gen_unsigned_fixed_libfunc;
6421 smul_optab->libcall_basename = "mul";
6422 smul_optab->libcall_suffix = '3';
6423 smul_optab->libcall_gen = gen_int_fp_fixed_libfunc;
6424 smulv_optab->libcall_basename = "mul";
6425 smulv_optab->libcall_suffix = '3';
6426 smulv_optab->libcall_gen = gen_intv_fp_libfunc;
6427 ssmul_optab->libcall_basename = "ssmul";
6428 ssmul_optab->libcall_suffix = '3';
6429 ssmul_optab->libcall_gen = gen_signed_fixed_libfunc;
6430 usmul_optab->libcall_basename = "usmul";
6431 usmul_optab->libcall_suffix = '3';
6432 usmul_optab->libcall_gen = gen_unsigned_fixed_libfunc;
6433 sdiv_optab->libcall_basename = "div";
6434 sdiv_optab->libcall_suffix = '3';
6435 sdiv_optab->libcall_gen = gen_int_fp_signed_fixed_libfunc;
6436 sdivv_optab->libcall_basename = "divv";
6437 sdivv_optab->libcall_suffix = '3';
6438 sdivv_optab->libcall_gen = gen_int_libfunc;
6439 ssdiv_optab->libcall_basename = "ssdiv";
6440 ssdiv_optab->libcall_suffix = '3';
6441 ssdiv_optab->libcall_gen = gen_signed_fixed_libfunc;
6442 udiv_optab->libcall_basename = "udiv";
6443 udiv_optab->libcall_suffix = '3';
6444 udiv_optab->libcall_gen = gen_int_unsigned_fixed_libfunc;
6445 usdiv_optab->libcall_basename = "usdiv";
6446 usdiv_optab->libcall_suffix = '3';
6447 usdiv_optab->libcall_gen = gen_unsigned_fixed_libfunc;
6448 sdivmod_optab->libcall_basename = "divmod";
6449 sdivmod_optab->libcall_suffix = '4';
6450 sdivmod_optab->libcall_gen = gen_int_libfunc;
6451 udivmod_optab->libcall_basename = "udivmod";
6452 udivmod_optab->libcall_suffix = '4';
6453 udivmod_optab->libcall_gen = gen_int_libfunc;
6454 smod_optab->libcall_basename = "mod";
6455 smod_optab->libcall_suffix = '3';
6456 smod_optab->libcall_gen = gen_int_libfunc;
6457 umod_optab->libcall_basename = "umod";
6458 umod_optab->libcall_suffix = '3';
6459 umod_optab->libcall_gen = gen_int_libfunc;
6460 ftrunc_optab->libcall_basename = "ftrunc";
6461 ftrunc_optab->libcall_suffix = '2';
6462 ftrunc_optab->libcall_gen = gen_fp_libfunc;
6463 and_optab->libcall_basename = "and";
6464 and_optab->libcall_suffix = '3';
6465 and_optab->libcall_gen = gen_int_libfunc;
6466 ior_optab->libcall_basename = "ior";
6467 ior_optab->libcall_suffix = '3';
6468 ior_optab->libcall_gen = gen_int_libfunc;
6469 xor_optab->libcall_basename = "xor";
6470 xor_optab->libcall_suffix = '3';
6471 xor_optab->libcall_gen = gen_int_libfunc;
6472 ashl_optab->libcall_basename = "ashl";
6473 ashl_optab->libcall_suffix = '3';
6474 ashl_optab->libcall_gen = gen_int_fixed_libfunc;
6475 ssashl_optab->libcall_basename = "ssashl";
6476 ssashl_optab->libcall_suffix = '3';
6477 ssashl_optab->libcall_gen = gen_signed_fixed_libfunc;
6478 usashl_optab->libcall_basename = "usashl";
6479 usashl_optab->libcall_suffix = '3';
6480 usashl_optab->libcall_gen = gen_unsigned_fixed_libfunc;
6481 ashr_optab->libcall_basename = "ashr";
6482 ashr_optab->libcall_suffix = '3';
6483 ashr_optab->libcall_gen = gen_int_signed_fixed_libfunc;
6484 lshr_optab->libcall_basename = "lshr";
6485 lshr_optab->libcall_suffix = '3';
6486 lshr_optab->libcall_gen = gen_int_unsigned_fixed_libfunc;
6487 smin_optab->libcall_basename = "min";
6488 smin_optab->libcall_suffix = '3';
6489 smin_optab->libcall_gen = gen_int_fp_libfunc;
6490 smax_optab->libcall_basename = "max";
6491 smax_optab->libcall_suffix = '3';
6492 smax_optab->libcall_gen = gen_int_fp_libfunc;
6493 umin_optab->libcall_basename = "umin";
6494 umin_optab->libcall_suffix = '3';
6495 umin_optab->libcall_gen = gen_int_libfunc;
6496 umax_optab->libcall_basename = "umax";
6497 umax_optab->libcall_suffix = '3';
6498 umax_optab->libcall_gen = gen_int_libfunc;
6499 neg_optab->libcall_basename = "neg";
6500 neg_optab->libcall_suffix = '2';
6501 neg_optab->libcall_gen = gen_int_fp_fixed_libfunc;
6502 ssneg_optab->libcall_basename = "ssneg";
6503 ssneg_optab->libcall_suffix = '2';
6504 ssneg_optab->libcall_gen = gen_signed_fixed_libfunc;
6505 usneg_optab->libcall_basename = "usneg";
6506 usneg_optab->libcall_suffix = '2';
6507 usneg_optab->libcall_gen = gen_unsigned_fixed_libfunc;
6508 negv_optab->libcall_basename = "neg";
6509 negv_optab->libcall_suffix = '2';
6510 negv_optab->libcall_gen = gen_intv_fp_libfunc;
6511 one_cmpl_optab->libcall_basename = "one_cmpl";
6512 one_cmpl_optab->libcall_suffix = '2';
6513 one_cmpl_optab->libcall_gen = gen_int_libfunc;
6514 ffs_optab->libcall_basename = "ffs";
6515 ffs_optab->libcall_suffix = '2';
6516 ffs_optab->libcall_gen = gen_int_libfunc;
6517 clz_optab->libcall_basename = "clz";
6518 clz_optab->libcall_suffix = '2';
6519 clz_optab->libcall_gen = gen_int_libfunc;
6520 ctz_optab->libcall_basename = "ctz";
6521 ctz_optab->libcall_suffix = '2';
6522 ctz_optab->libcall_gen = gen_int_libfunc;
6523 popcount_optab->libcall_basename = "popcount";
6524 popcount_optab->libcall_suffix = '2';
6525 popcount_optab->libcall_gen = gen_int_libfunc;
6526 parity_optab->libcall_basename = "parity";
6527 parity_optab->libcall_suffix = '2';
6528 parity_optab->libcall_gen = gen_int_libfunc;
6530 /* Comparison libcalls for integers MUST come in pairs,
6532 cmp_optab->libcall_basename = "cmp";
6533 cmp_optab->libcall_suffix = '2';
6534 cmp_optab->libcall_gen = gen_int_fp_fixed_libfunc;
6535 ucmp_optab->libcall_basename = "ucmp";
6536 ucmp_optab->libcall_suffix = '2';
6537 ucmp_optab->libcall_gen = gen_int_libfunc;
6539 /* EQ etc are floating point only. */
6540 eq_optab->libcall_basename = "eq";
6541 eq_optab->libcall_suffix = '2';
6542 eq_optab->libcall_gen = gen_fp_libfunc;
6543 ne_optab->libcall_basename = "ne";
6544 ne_optab->libcall_suffix = '2';
6545 ne_optab->libcall_gen = gen_fp_libfunc;
6546 gt_optab->libcall_basename = "gt";
6547 gt_optab->libcall_suffix = '2';
6548 gt_optab->libcall_gen = gen_fp_libfunc;
6549 ge_optab->libcall_basename = "ge";
6550 ge_optab->libcall_suffix = '2';
6551 ge_optab->libcall_gen = gen_fp_libfunc;
6552 lt_optab->libcall_basename = "lt";
6553 lt_optab->libcall_suffix = '2';
6554 lt_optab->libcall_gen = gen_fp_libfunc;
6555 le_optab->libcall_basename = "le";
6556 le_optab->libcall_suffix = '2';
6557 le_optab->libcall_gen = gen_fp_libfunc;
6558 unord_optab->libcall_basename = "unord";
6559 unord_optab->libcall_suffix = '2';
6560 unord_optab->libcall_gen = gen_fp_libfunc;
6562 powi_optab->libcall_basename = "powi";
6563 powi_optab->libcall_suffix = '2';
6564 powi_optab->libcall_gen = gen_fp_libfunc;
6567 sfloat_optab->libcall_basename = "float";
6568 sfloat_optab->libcall_gen = gen_int_to_fp_conv_libfunc;
6569 ufloat_optab->libcall_gen = gen_ufloat_conv_libfunc;
6570 sfix_optab->libcall_basename = "fix";
6571 sfix_optab->libcall_gen = gen_fp_to_int_conv_libfunc;
6572 ufix_optab->libcall_basename = "fixuns";
6573 ufix_optab->libcall_gen = gen_fp_to_int_conv_libfunc;
6574 lrint_optab->libcall_basename = "lrint";
6575 lrint_optab->libcall_gen = gen_int_to_fp_nondecimal_conv_libfunc;
6576 lround_optab->libcall_basename = "lround";
6577 lround_optab->libcall_gen = gen_int_to_fp_nondecimal_conv_libfunc;
6578 lfloor_optab->libcall_basename = "lfloor";
6579 lfloor_optab->libcall_gen = gen_int_to_fp_nondecimal_conv_libfunc;
6580 lceil_optab->libcall_basename = "lceil";
6581 lceil_optab->libcall_gen = gen_int_to_fp_nondecimal_conv_libfunc;
6583 /* trunc_optab is also used for FLOAT_EXTEND. */
6584 sext_optab->libcall_basename = "extend";
6585 sext_optab->libcall_gen = gen_extend_conv_libfunc;
6586 trunc_optab->libcall_basename = "trunc";
6587 trunc_optab->libcall_gen = gen_trunc_conv_libfunc;
6589 /* Conversions for fixed-point modes and other modes. */
6590 fract_optab->libcall_basename = "fract";
6591 fract_optab->libcall_gen = gen_fract_conv_libfunc;
6592 satfract_optab->libcall_basename = "satfract";
6593 satfract_optab->libcall_gen = gen_satfract_conv_libfunc;
6594 fractuns_optab->libcall_basename = "fractuns";
6595 fractuns_optab->libcall_gen = gen_fractuns_conv_libfunc;
6596 satfractuns_optab->libcall_basename = "satfractuns";
6597 satfractuns_optab->libcall_gen = gen_satfractuns_conv_libfunc;
6599 /* The ffs function operates on `int'. Fall back on it if we do not
6600 have a libgcc2 function for that width. */
6601 if (INT_TYPE_SIZE < BITS_PER_WORD)
6602 set_optab_libfunc (ffs_optab, mode_for_size (INT_TYPE_SIZE, MODE_INT, 0),
6605 /* Explicitly initialize the bswap libfuncs since we need them to be
6606 valid for things other than word_mode. */
6607 set_optab_libfunc (bswap_optab, SImode, "__bswapsi2");
6608 set_optab_libfunc (bswap_optab, DImode, "__bswapdi2");
6610 /* Use cabs for double complex abs, since systems generally have cabs.
6611 Don't define any libcall for float complex, so that cabs will be used. */
6612 if (complex_double_type_node)
6613 set_optab_libfunc (abs_optab, TYPE_MODE (complex_double_type_node), "cabs");
6615 abort_libfunc = init_one_libfunc ("abort");
6616 memcpy_libfunc = init_one_libfunc ("memcpy");
6617 memmove_libfunc = init_one_libfunc ("memmove");
6618 memcmp_libfunc = init_one_libfunc ("memcmp");
6619 memset_libfunc = init_one_libfunc ("memset");
6620 setbits_libfunc = init_one_libfunc ("__setbits");
6622 #ifndef DONT_USE_BUILTIN_SETJMP
6623 setjmp_libfunc = init_one_libfunc ("__builtin_setjmp");
6624 longjmp_libfunc = init_one_libfunc ("__builtin_longjmp");
6626 setjmp_libfunc = init_one_libfunc ("setjmp");
6627 longjmp_libfunc = init_one_libfunc ("longjmp");
6629 unwind_sjlj_register_libfunc = init_one_libfunc ("_Unwind_SjLj_Register");
6630 unwind_sjlj_unregister_libfunc
6631 = init_one_libfunc ("_Unwind_SjLj_Unregister");
6633 /* For function entry/exit instrumentation. */
6634 profile_function_entry_libfunc
6635 = init_one_libfunc ("__cyg_profile_func_enter");
6636 profile_function_exit_libfunc
6637 = init_one_libfunc ("__cyg_profile_func_exit");
6639 gcov_flush_libfunc = init_one_libfunc ("__gcov_flush");
6641 /* Allow the target to add more libcalls or rename some, etc. */
6642 targetm.init_libfuncs ();
6647 /* Print information about the current contents of the optabs on
6651 debug_optab_libfuncs (void)
6657 /* Dump the arithmetic optabs. */
6658 for (i = 0; i != (int) OTI_MAX; i++)
6659 for (j = 0; j < NUM_MACHINE_MODES; ++j)
6664 o = &optab_table[i];
6665 l = optab_libfunc (o, (enum machine_mode) j);
6668 gcc_assert (GET_CODE (l) == SYMBOL_REF);
6669 fprintf (stderr, "%s\t%s:\t%s\n",
6670 GET_RTX_NAME (o->code),
6676 /* Dump the conversion optabs. */
6677 for (i = 0; i < (int) COI_MAX; ++i)
6678 for (j = 0; j < NUM_MACHINE_MODES; ++j)
6679 for (k = 0; k < NUM_MACHINE_MODES; ++k)
6684 o = &convert_optab_table[i];
6685 l = convert_optab_libfunc (o, (enum machine_mode) j,
6686 (enum machine_mode) k);
6689 gcc_assert (GET_CODE (l) == SYMBOL_REF);
6690 fprintf (stderr, "%s\t%s\t%s:\t%s\n",
6691 GET_RTX_NAME (o->code),
6700 /* Generate insns to trap with code TCODE if OP1 and OP2 satisfy condition
6701 CODE. Return 0 on failure. */
6704 gen_cond_trap (enum rtx_code code, rtx op1, rtx op2, rtx tcode)
6706 enum machine_mode mode = GET_MODE (op1);
6707 enum insn_code icode;
6711 if (mode == VOIDmode)
6714 icode = optab_handler (ctrap_optab, mode);
6715 if (icode == CODE_FOR_nothing)
6718 /* Some targets only accept a zero trap code. */
6719 if (insn_data[icode].operand[3].predicate
6720 && !insn_data[icode].operand[3].predicate (tcode, VOIDmode))
6723 do_pending_stack_adjust ();
6725 prepare_cmp_insn (op1, op2, code, NULL_RTX, false, OPTAB_DIRECT,
6730 insn = GEN_FCN (icode) (trap_rtx, XEXP (trap_rtx, 0), XEXP (trap_rtx, 1),
6733 /* If that failed, then give up. */
6741 insn = get_insns ();
6746 /* Return rtx code for TCODE. Use UNSIGNEDP to select signed
6747 or unsigned operation code. */
6749 static enum rtx_code
6750 get_rtx_code (enum tree_code tcode, bool unsignedp)
6762 code = unsignedp ? LTU : LT;
6765 code = unsignedp ? LEU : LE;
6768 code = unsignedp ? GTU : GT;
6771 code = unsignedp ? GEU : GE;
6774 case UNORDERED_EXPR:
6805 /* Return comparison rtx for COND. Use UNSIGNEDP to select signed or
6806 unsigned operators. Do not generate compare instruction. */
6809 vector_compare_rtx (tree cond, bool unsignedp, enum insn_code icode)
6811 enum rtx_code rcode;
6813 rtx rtx_op0, rtx_op1;
6815 /* This is unlikely. While generating VEC_COND_EXPR, auto vectorizer
6816 ensures that condition is a relational operation. */
6817 gcc_assert (COMPARISON_CLASS_P (cond));
6819 rcode = get_rtx_code (TREE_CODE (cond), unsignedp);
6820 t_op0 = TREE_OPERAND (cond, 0);
6821 t_op1 = TREE_OPERAND (cond, 1);
6823 /* Expand operands. */
6824 rtx_op0 = expand_expr (t_op0, NULL_RTX, TYPE_MODE (TREE_TYPE (t_op0)),
6826 rtx_op1 = expand_expr (t_op1, NULL_RTX, TYPE_MODE (TREE_TYPE (t_op1)),
6829 if (!insn_data[icode].operand[4].predicate (rtx_op0, GET_MODE (rtx_op0))
6830 && GET_MODE (rtx_op0) != VOIDmode)
6831 rtx_op0 = force_reg (GET_MODE (rtx_op0), rtx_op0);
6833 if (!insn_data[icode].operand[5].predicate (rtx_op1, GET_MODE (rtx_op1))
6834 && GET_MODE (rtx_op1) != VOIDmode)
6835 rtx_op1 = force_reg (GET_MODE (rtx_op1), rtx_op1);
6837 return gen_rtx_fmt_ee (rcode, VOIDmode, rtx_op0, rtx_op1);
6840 /* Return insn code for TYPE, the type of a VEC_COND_EXPR. */
6842 static inline enum insn_code
6843 get_vcond_icode (tree type, enum machine_mode mode)
6845 enum insn_code icode = CODE_FOR_nothing;
6847 if (TYPE_UNSIGNED (type))
6848 icode = vcondu_gen_code[mode];
6850 icode = vcond_gen_code[mode];
6854 /* Return TRUE iff, appropriate vector insns are available
6855 for vector cond expr with type TYPE in VMODE mode. */
6858 expand_vec_cond_expr_p (tree type, enum machine_mode vmode)
6860 if (get_vcond_icode (type, vmode) == CODE_FOR_nothing)
6865 /* Generate insns for a VEC_COND_EXPR, given its TYPE and its
6869 expand_vec_cond_expr (tree vec_cond_type, tree op0, tree op1, tree op2,
6872 enum insn_code icode;
6873 rtx comparison, rtx_op1, rtx_op2, cc_op0, cc_op1;
6874 enum machine_mode mode = TYPE_MODE (vec_cond_type);
6875 bool unsignedp = TYPE_UNSIGNED (vec_cond_type);
6877 icode = get_vcond_icode (vec_cond_type, mode);
6878 if (icode == CODE_FOR_nothing)
6881 if (!target || !insn_data[icode].operand[0].predicate (target, mode))
6882 target = gen_reg_rtx (mode);
6884 /* Get comparison rtx. First expand both cond expr operands. */
6885 comparison = vector_compare_rtx (op0,
6887 cc_op0 = XEXP (comparison, 0);
6888 cc_op1 = XEXP (comparison, 1);
6889 /* Expand both operands and force them in reg, if required. */
6890 rtx_op1 = expand_normal (op1);
6891 if (!insn_data[icode].operand[1].predicate (rtx_op1, mode)
6892 && mode != VOIDmode)
6893 rtx_op1 = force_reg (mode, rtx_op1);
6895 rtx_op2 = expand_normal (op2);
6896 if (!insn_data[icode].operand[2].predicate (rtx_op2, mode)
6897 && mode != VOIDmode)
6898 rtx_op2 = force_reg (mode, rtx_op2);
6900 /* Emit instruction! */
6901 emit_insn (GEN_FCN (icode) (target, rtx_op1, rtx_op2,
6902 comparison, cc_op0, cc_op1));
6908 /* This is an internal subroutine of the other compare_and_swap expanders.
6909 MEM, OLD_VAL and NEW_VAL are as you'd expect for a compare-and-swap
6910 operation. TARGET is an optional place to store the value result of
6911 the operation. ICODE is the particular instruction to expand. Return
6912 the result of the operation. */
6915 expand_val_compare_and_swap_1 (rtx mem, rtx old_val, rtx new_val,
6916 rtx target, enum insn_code icode)
6918 enum machine_mode mode = GET_MODE (mem);
6921 if (!target || !insn_data[icode].operand[0].predicate (target, mode))
6922 target = gen_reg_rtx (mode);
6924 if (GET_MODE (old_val) != VOIDmode && GET_MODE (old_val) != mode)
6925 old_val = convert_modes (mode, GET_MODE (old_val), old_val, 1);
6926 if (!insn_data[icode].operand[2].predicate (old_val, mode))
6927 old_val = force_reg (mode, old_val);
6929 if (GET_MODE (new_val) != VOIDmode && GET_MODE (new_val) != mode)
6930 new_val = convert_modes (mode, GET_MODE (new_val), new_val, 1);
6931 if (!insn_data[icode].operand[3].predicate (new_val, mode))
6932 new_val = force_reg (mode, new_val);
6934 insn = GEN_FCN (icode) (target, mem, old_val, new_val);
6935 if (insn == NULL_RTX)
6942 /* Expand a compare-and-swap operation and return its value. */
6945 expand_val_compare_and_swap (rtx mem, rtx old_val, rtx new_val, rtx target)
6947 enum machine_mode mode = GET_MODE (mem);
6948 enum insn_code icode = sync_compare_and_swap[mode];
6950 if (icode == CODE_FOR_nothing)
6953 return expand_val_compare_and_swap_1 (mem, old_val, new_val, target, icode);
6956 /* Helper function to find the MODE_CC set in a sync_compare_and_swap
6960 find_cc_set (rtx x, const_rtx pat, void *data)
6962 if (REG_P (x) && GET_MODE_CLASS (GET_MODE (x)) == MODE_CC
6963 && GET_CODE (pat) == SET)
6965 rtx *p_cc_reg = (rtx *) data;
6966 gcc_assert (!*p_cc_reg);
6971 /* Expand a compare-and-swap operation and store true into the result if
6972 the operation was successful and false otherwise. Return the result.
6973 Unlike other routines, TARGET is not optional. */
6976 expand_bool_compare_and_swap (rtx mem, rtx old_val, rtx new_val, rtx target)
6978 enum machine_mode mode = GET_MODE (mem);
6979 enum insn_code icode;
6980 rtx subtarget, seq, cc_reg;
6982 /* If the target supports a compare-and-swap pattern that simultaneously
6983 sets some flag for success, then use it. Otherwise use the regular
6984 compare-and-swap and follow that immediately with a compare insn. */
6985 icode = sync_compare_and_swap[mode];
6986 if (icode == CODE_FOR_nothing)
6992 subtarget = expand_val_compare_and_swap_1 (mem, old_val, new_val,
6995 if (subtarget == NULL_RTX)
7001 if (have_insn_for (COMPARE, CCmode))
7002 note_stores (PATTERN (get_last_insn ()), find_cc_set, &cc_reg);
7006 /* We might be comparing against an old value. Try again. :-( */
7007 if (!cc_reg && MEM_P (old_val))
7010 old_val = force_reg (mode, old_val);
7017 return emit_store_flag_force (target, EQ, cc_reg, const0_rtx, VOIDmode, 0, 1);
7019 return emit_store_flag_force (target, EQ, subtarget, old_val, VOIDmode, 1, 1);
7022 /* This is a helper function for the other atomic operations. This function
7023 emits a loop that contains SEQ that iterates until a compare-and-swap
7024 operation at the end succeeds. MEM is the memory to be modified. SEQ is
7025 a set of instructions that takes a value from OLD_REG as an input and
7026 produces a value in NEW_REG as an output. Before SEQ, OLD_REG will be
7027 set to the current contents of MEM. After SEQ, a compare-and-swap will
7028 attempt to update MEM with NEW_REG. The function returns true when the
7029 loop was generated successfully. */
7032 expand_compare_and_swap_loop (rtx mem, rtx old_reg, rtx new_reg, rtx seq)
7034 enum machine_mode mode = GET_MODE (mem);
7035 enum insn_code icode;
7036 rtx label, cmp_reg, subtarget, cc_reg;
7038 /* The loop we want to generate looks like
7044 cmp_reg = compare-and-swap(mem, old_reg, new_reg)
7045 if (cmp_reg != old_reg)
7048 Note that we only do the plain load from memory once. Subsequent
7049 iterations use the value loaded by the compare-and-swap pattern. */
7051 label = gen_label_rtx ();
7052 cmp_reg = gen_reg_rtx (mode);
7054 emit_move_insn (cmp_reg, mem);
7056 emit_move_insn (old_reg, cmp_reg);
7060 /* If the target supports a compare-and-swap pattern that simultaneously
7061 sets some flag for success, then use it. Otherwise use the regular
7062 compare-and-swap and follow that immediately with a compare insn. */
7063 icode = sync_compare_and_swap[mode];
7064 if (icode == CODE_FOR_nothing)
7067 subtarget = expand_val_compare_and_swap_1 (mem, old_reg, new_reg,
7069 if (subtarget == NULL_RTX)
7073 if (have_insn_for (COMPARE, CCmode))
7074 note_stores (PATTERN (get_last_insn ()), find_cc_set, &cc_reg);
7078 old_reg = const0_rtx;
7082 if (subtarget != cmp_reg)
7083 emit_move_insn (cmp_reg, subtarget);
7086 /* ??? Mark this jump predicted not taken? */
7087 emit_cmp_and_jump_insns (cmp_reg, old_reg, NE, const0_rtx, GET_MODE (cmp_reg), 1,
7092 /* This function generates the atomic operation MEM CODE= VAL. In this
7093 case, we do not care about any resulting value. Returns NULL if we
7094 cannot generate the operation. */
7097 expand_sync_operation (rtx mem, rtx val, enum rtx_code code)
7099 enum machine_mode mode = GET_MODE (mem);
7100 enum insn_code icode;
7103 /* Look to see if the target supports the operation directly. */
7107 icode = sync_add_optab[mode];
7110 icode = sync_ior_optab[mode];
7113 icode = sync_xor_optab[mode];
7116 icode = sync_and_optab[mode];
7119 icode = sync_nand_optab[mode];
7123 icode = sync_sub_optab[mode];
7124 if (icode == CODE_FOR_nothing || CONST_INT_P (val))
7126 icode = sync_add_optab[mode];
7127 if (icode != CODE_FOR_nothing)
7129 val = expand_simple_unop (mode, NEG, val, NULL_RTX, 1);
7139 /* Generate the direct operation, if present. */
7140 if (icode != CODE_FOR_nothing)
7142 if (GET_MODE (val) != VOIDmode && GET_MODE (val) != mode)
7143 val = convert_modes (mode, GET_MODE (val), val, 1);
7144 if (!insn_data[icode].operand[1].predicate (val, mode))
7145 val = force_reg (mode, val);
7147 insn = GEN_FCN (icode) (mem, val);
7155 /* Failing that, generate a compare-and-swap loop in which we perform the
7156 operation with normal arithmetic instructions. */
7157 if (sync_compare_and_swap[mode] != CODE_FOR_nothing)
7159 rtx t0 = gen_reg_rtx (mode), t1;
7166 t1 = expand_simple_binop (mode, AND, t1, val, NULL_RTX,
7167 true, OPTAB_LIB_WIDEN);
7168 t1 = expand_simple_unop (mode, code, t1, NULL_RTX, true);
7171 t1 = expand_simple_binop (mode, code, t1, val, NULL_RTX,
7172 true, OPTAB_LIB_WIDEN);
7173 insn = get_insns ();
7176 if (t1 != NULL && expand_compare_and_swap_loop (mem, t0, t1, insn))
7183 /* This function generates the atomic operation MEM CODE= VAL. In this
7184 case, we do care about the resulting value: if AFTER is true then
7185 return the value MEM holds after the operation, if AFTER is false
7186 then return the value MEM holds before the operation. TARGET is an
7187 optional place for the result value to be stored. */
7190 expand_sync_fetch_operation (rtx mem, rtx val, enum rtx_code code,
7191 bool after, rtx target)
7193 enum machine_mode mode = GET_MODE (mem);
7194 enum insn_code old_code, new_code, icode;
7198 /* Look to see if the target supports the operation directly. */
7202 old_code = sync_old_add_optab[mode];
7203 new_code = sync_new_add_optab[mode];
7206 old_code = sync_old_ior_optab[mode];
7207 new_code = sync_new_ior_optab[mode];
7210 old_code = sync_old_xor_optab[mode];
7211 new_code = sync_new_xor_optab[mode];
7214 old_code = sync_old_and_optab[mode];
7215 new_code = sync_new_and_optab[mode];
7218 old_code = sync_old_nand_optab[mode];
7219 new_code = sync_new_nand_optab[mode];
7223 old_code = sync_old_sub_optab[mode];
7224 new_code = sync_new_sub_optab[mode];
7225 if ((old_code == CODE_FOR_nothing && new_code == CODE_FOR_nothing)
7226 || CONST_INT_P (val))
7228 old_code = sync_old_add_optab[mode];
7229 new_code = sync_new_add_optab[mode];
7230 if (old_code != CODE_FOR_nothing || new_code != CODE_FOR_nothing)
7232 val = expand_simple_unop (mode, NEG, val, NULL_RTX, 1);
7242 /* If the target does supports the proper new/old operation, great. But
7243 if we only support the opposite old/new operation, check to see if we
7244 can compensate. In the case in which the old value is supported, then
7245 we can always perform the operation again with normal arithmetic. In
7246 the case in which the new value is supported, then we can only handle
7247 this in the case the operation is reversible. */
7252 if (icode == CODE_FOR_nothing)
7255 if (icode != CODE_FOR_nothing)
7262 if (icode == CODE_FOR_nothing
7263 && (code == PLUS || code == MINUS || code == XOR))
7266 if (icode != CODE_FOR_nothing)
7271 /* If we found something supported, great. */
7272 if (icode != CODE_FOR_nothing)
7274 if (!target || !insn_data[icode].operand[0].predicate (target, mode))
7275 target = gen_reg_rtx (mode);
7277 if (GET_MODE (val) != VOIDmode && GET_MODE (val) != mode)
7278 val = convert_modes (mode, GET_MODE (val), val, 1);
7279 if (!insn_data[icode].operand[2].predicate (val, mode))
7280 val = force_reg (mode, val);
7282 insn = GEN_FCN (icode) (target, mem, val);
7287 /* If we need to compensate for using an operation with the
7288 wrong return value, do so now. */
7295 else if (code == MINUS)
7301 target = expand_simple_binop (mode, AND, target, val,
7304 target = expand_simple_unop (mode, code, target,
7308 target = expand_simple_binop (mode, code, target, val,
7317 /* Failing that, generate a compare-and-swap loop in which we perform the
7318 operation with normal arithmetic instructions. */
7319 if (sync_compare_and_swap[mode] != CODE_FOR_nothing)
7321 rtx t0 = gen_reg_rtx (mode), t1;
7323 if (!target || !register_operand (target, mode))
7324 target = gen_reg_rtx (mode);
7329 emit_move_insn (target, t0);
7333 t1 = expand_simple_binop (mode, AND, t1, val, NULL_RTX,
7334 true, OPTAB_LIB_WIDEN);
7335 t1 = expand_simple_unop (mode, code, t1, NULL_RTX, true);
7338 t1 = expand_simple_binop (mode, code, t1, val, NULL_RTX,
7339 true, OPTAB_LIB_WIDEN);
7341 emit_move_insn (target, t1);
7343 insn = get_insns ();
7346 if (t1 != NULL && expand_compare_and_swap_loop (mem, t0, t1, insn))
7353 /* This function expands a test-and-set operation. Ideally we atomically
7354 store VAL in MEM and return the previous value in MEM. Some targets
7355 may not support this operation and only support VAL with the constant 1;
7356 in this case while the return value will be 0/1, but the exact value
7357 stored in MEM is target defined. TARGET is an option place to stick
7358 the return value. */
7361 expand_sync_lock_test_and_set (rtx mem, rtx val, rtx target)
7363 enum machine_mode mode = GET_MODE (mem);
7364 enum insn_code icode;
7367 /* If the target supports the test-and-set directly, great. */
7368 icode = sync_lock_test_and_set[mode];
7369 if (icode != CODE_FOR_nothing)
7371 if (!target || !insn_data[icode].operand[0].predicate (target, mode))
7372 target = gen_reg_rtx (mode);
7374 if (GET_MODE (val) != VOIDmode && GET_MODE (val) != mode)
7375 val = convert_modes (mode, GET_MODE (val), val, 1);
7376 if (!insn_data[icode].operand[2].predicate (val, mode))
7377 val = force_reg (mode, val);
7379 insn = GEN_FCN (icode) (target, mem, val);
7387 /* Otherwise, use a compare-and-swap loop for the exchange. */
7388 if (sync_compare_and_swap[mode] != CODE_FOR_nothing)
7390 if (!target || !register_operand (target, mode))
7391 target = gen_reg_rtx (mode);
7392 if (GET_MODE (val) != VOIDmode && GET_MODE (val) != mode)
7393 val = convert_modes (mode, GET_MODE (val), val, 1);
7394 if (expand_compare_and_swap_loop (mem, target, val, NULL_RTX))
7401 #include "gt-optabs.h"