1 /* Expand the basic unary and binary arithmetic operations, for GNU compiler.
2 Copyright (C) 1987, 1988, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
3 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008
4 Free Software Foundation, Inc.
6 This file is part of GCC.
8 GCC is free software; you can redistribute it and/or modify it under
9 the terms of the GNU General Public License as published by the Free
10 Software Foundation; either version 3, or (at your option) any later version.
13 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
14 WARRANTY; without even the implied warranty of MERCHANTABILITY or
15 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING3. If not see
20 <http://www.gnu.org/licenses/>. */
25 #include "coretypes.h"
29 /* Include insn-config.h before expr.h so that HAVE_conditional_move
30 is properly defined. */
31 #include "insn-config.h"
45 #include "basic-block.h"
48 /* Each optab contains info on how this target machine
49 can perform a particular operation
50 for all sizes and kinds of operands.
52 The operation to be performed is often specified
53 by passing one of these optabs as an argument.
55 See expr.h for documentation of these optabs. */
/* NOTE(review): the embedded numbering jumps (57, 58, 59, 62, ...) show that
   several lines are elided from this view (#else/#endif, initializer
   terminators, struct members).  All visible tokens kept byte-identical.  */
/* Main tables: for GCC >= 4.0 the optab/convert_optab tables are
   statically initialized with a GNU range-designated initializer so that
   every insn_code starts as CODE_FOR_nothing; otherwise runtime init
   (init_insn_codes / init_convert_optab) fills them in.  */
57 #if GCC_VERSION >= 4000
58 __extension__ struct optab optab_table[OTI_MAX]
59 = { [0 ... OTI_MAX - 1].handlers[0 ... NUM_MACHINE_MODES - 1].insn_code
62 /* init_insn_codes will do runtime initialization otherwise. */
63 struct optab optab_table[OTI_MAX];
66 rtx libfunc_table[LTI_MAX];
68 /* Tables of patterns for converting one mode to another. */
69 #if GCC_VERSION >= 4000
70 __extension__ struct convert_optab convert_optab_table[COI_MAX]
71 = { [0 ... COI_MAX - 1].handlers[0 ... NUM_MACHINE_MODES - 1]
72 [0 ... NUM_MACHINE_MODES - 1].insn_code
75 /* init_convert_optab will do runtime initialization otherwise. */
76 struct convert_optab convert_optab_table[COI_MAX];
79 /* Contains the optab used for each rtx code. */
80 optab code_to_optab[NUM_RTX_CODE + 1];
82 /* Indexed by the rtx-code for a conditional (eg. EQ, LT,...)
83 gives the gen_function to make a branch to test that condition. */
85 rtxfun bcc_gen_fctn[NUM_RTX_CODE];
87 /* Indexed by the rtx-code for a conditional (eg. EQ, LT,...)
88 gives the insn code to make a store-condition insn
89 to test that condition. */
91 enum insn_code setcc_gen_code[NUM_RTX_CODE];
93 #ifdef HAVE_conditional_move
94 /* Indexed by the machine mode, gives the insn code to make a conditional
95 move insn. This is not indexed by the rtx-code like bcc_gen_fctn and
96 setcc_gen_code to cut down on the number of named patterns. Consider a day
97 when a lot more rtx codes are conditional (eg: for the ARM). */
99 enum insn_code movcc_gen_code[NUM_MACHINE_MODES];
102 /* Indexed by the machine mode, gives the insn code for vector conditional
105 enum insn_code vcond_gen_code[NUM_MACHINE_MODES];
106 enum insn_code vcondu_gen_code[NUM_MACHINE_MODES];
108 /* The insn generating function can not take an rtx_code argument.
109 TRAP_RTX is used as an rtx argument. Its code is replaced with
110 the code to be used in the trap insn and all other fields are ignored. */
111 static GTY(()) rtx trap_rtx;
113 static void prepare_float_lib_cmp (rtx *, rtx *, enum rtx_code *,
114 enum machine_mode *, int *);
115 static rtx expand_unop_direct (enum machine_mode, optab, rtx, rtx, int);
117 /* Debug facility for use in GDB. */
118 void debug_optab_libfuncs (void);
120 #ifndef HAVE_conditional_trap
121 #define HAVE_conditional_trap 0
122 #define gen_conditional_trap(a,b) (gcc_unreachable (), NULL_RTX)
125 /* Prefixes for the current version of decimal floating point (BID vs. DPD) */
126 #if ENABLE_DECIMAL_BID_FORMAT
127 #define DECIMAL_PREFIX "bid_"
129 #define DECIMAL_PREFIX "dpd_"
133 /* Info about libfunc. We use same hashtable for normal optabs and conversion
134 optab. In the first case mode2 is unused. */
135 struct libfunc_entry GTY(())
138 enum machine_mode mode1, mode2;
142 /* Hash table used to convert declarations into nodes. */
143 static GTY((param_is (struct libfunc_entry))) htab_t libfunc_hash;
/* Hash callback for libfunc_hash: combines the entry's two machine modes
   into a bucket index.  NOTE(review): lines are elided here (numbering jumps
   145 -> 148 -> 150 -> 152); the return type, braces, and presumably the
   optab term of the hash expression are missing from this view -- confirm
   against the full source before editing.  */
145 /* Used for attribute_hash. */
148 hash_libfunc (const void *p)
150 const struct libfunc_entry *const e = (const struct libfunc_entry *) p;
152 return (((int) e->mode1 + (int) e->mode2 * NUM_MACHINE_MODES)
/* Equality callback for libfunc_hash: two entries match only when the
   optab index and both machine modes agree.  NOTE(review): the return-type
   line and braces are elided from this view; code kept byte-identical.  */
156 /* Used for optab_hash. */
159 eq_libfunc (const void *p, const void *q)
161 const struct libfunc_entry *const e1 = (const struct libfunc_entry *) p;
162 const struct libfunc_entry *const e2 = (const struct libfunc_entry *) q;
164 return (e1->optab == e2->optab
165 && e1->mode1 == e2->mode1
166 && e1->mode2 == e2->mode2);
/* NOTE(review): several lines are elided (numbering jumps 179 -> 182,
   182 -> 185, 190 -> 196); the mode1/mode2 assignments to E and the slot
   NULL-checks that must guard the two dereferences below are not visible.
   Code kept byte-identical.  */
169 /* Return libfunc corresponding operation defined by OPTAB converting
170 from MODE2 to MODE1. Trigger lazy initialization if needed, return NULL
171 if no libfunc is available. */
173 convert_optab_libfunc (convert_optab optab, enum machine_mode mode1,
174 enum machine_mode mode2)
176 struct libfunc_entry e;
177 struct libfunc_entry **slot;
/* Key the lookup on this optab's index within convert_optab_table.  */
179 e.optab = (size_t) (optab - &convert_optab_table[0]);
182 slot = (struct libfunc_entry **) htab_find_slot (libfunc_hash, &e, NO_INSERT);
/* Lazy path: let the optab generate its libfuncs, then re-query.  */
185 if (optab->libcall_gen)
187 optab->libcall_gen (optab, optab->libcall_basename, mode1, mode2);
188 slot = (struct libfunc_entry **) htab_find_slot (libfunc_hash, &e, NO_INSERT);
190 return (*slot)->libfunc;
196 return (*slot)->libfunc;
/* Single-mode counterpart of convert_optab_libfunc above, keyed on
   optab_table instead of convert_optab_table.  NOTE(review): elided lines
   (mode assignments, slot NULL-checks, the htab_find_slot continuation at
   internal line 218) are missing from this view; code kept byte-identical.  */
199 /* Return libfunc corresponding operation defined by OPTAB in MODE.
200 Trigger lazy initialization if needed, return NULL if no libfunc is
203 optab_libfunc (optab optab, enum machine_mode mode)
205 struct libfunc_entry e;
206 struct libfunc_entry **slot;
208 e.optab = (size_t) (optab - &optab_table[0]);
211 slot = (struct libfunc_entry **) htab_find_slot (libfunc_hash, &e, NO_INSERT);
/* Lazy path: generate this optab's libfuncs on first use, then re-query.  */
214 if (optab->libcall_gen)
216 optab->libcall_gen (optab, optab->libcall_basename,
217 optab->libcall_suffix, mode);
218 slot = (struct libfunc_entry **) htab_find_slot (libfunc_hash,
221 return (*slot)->libfunc;
227 return (*slot)->libfunc;
/* NOTE(review): interior lines are elided (return statements after the
   early-exit conditions, braces, the declaration of NOTE).  Code kept
   byte-identical.  */
231 /* Add a REG_EQUAL note to the last insn in INSNS. TARGET is being set to
232 the result of operation CODE applied to OP0 (and OP1 if it is a binary
235 If the last insn does not set TARGET, don't do anything, but return 1.
237 If a previous insn sets TARGET and TARGET is one of OP0 or OP1,
238 don't add the REG_EQUAL note but return 0. Our caller can then try
239 again, ensuring that TARGET is not one of the operands. */
242 add_equal_note (rtx insns, rtx target, enum rtx_code code, rtx op0, rtx op1)
244 rtx last_insn, insn, set;
247 gcc_assert (insns && INSN_P (insns) && NEXT_INSN (insns));
/* Only arithmetic/compare/unary rtx classes can carry a meaningful
   REG_EQUAL of the form (CODE op0 [op1]).  */
249 if (GET_RTX_CLASS (code) != RTX_COMM_ARITH
250 && GET_RTX_CLASS (code) != RTX_BIN_ARITH
251 && GET_RTX_CLASS (code) != RTX_COMM_COMPARE
252 && GET_RTX_CLASS (code) != RTX_COMPARE
253 && GET_RTX_CLASS (code) != RTX_UNARY)
256 if (GET_CODE (target) == ZERO_EXTRACT)
/* Walk to the final insn of the sequence.  */
259 for (last_insn = insns;
260 NEXT_INSN (last_insn) != NULL_RTX;
261 last_insn = NEXT_INSN (last_insn))
264 set = single_set (last_insn);
268 if (! rtx_equal_p (SET_DEST (set), target)
269 /* For a STRICT_LOW_PART, the REG_NOTE applies to what is inside it. */
270 && (GET_CODE (SET_DEST (set)) != STRICT_LOW_PART
271 || ! rtx_equal_p (XEXP (SET_DEST (set), 0), target)))
274 /* If TARGET is in OP0 or OP1, check if anything in SEQ sets TARGET
275 besides the last insn. */
276 if (reg_overlap_mentioned_p (target, op0)
277 || (op1 && reg_overlap_mentioned_p (target, op1)))
279 insn = PREV_INSN (last_insn);
280 while (insn != NULL_RTX)
282 if (reg_set_p (target, insn))
285 insn = PREV_INSN (insn);
/* Build the note rtx: unary codes take one operand, all others two.  */
289 if (GET_RTX_CLASS (code) == RTX_UNARY)
290 note = gen_rtx_fmt_e (code, GET_MODE (target), copy_rtx (op0));
292 note = gen_rtx_fmt_ee (code, GET_MODE (target), copy_rtx (op0), copy_rtx (op1));
294 set_unique_reg_note (last_insn, REG_EQUAL, note);
/* NOTE(review): elided lines include the opening brace, the RESULT
   declaration, the first half of the extend condition (internal line 318),
   and the final return.  Code kept byte-identical.  */
299 /* Widen OP to MODE and return the rtx for the widened operand. UNSIGNEDP
300 says whether OP is signed or unsigned. NO_EXTEND is nonzero if we need
301 not actually do a sign-extend or zero-extend, but can leave the
302 higher-order bits of the result rtx undefined, for example, in the case
303 of logical operations, but not right shifts. */
306 widen_operand (rtx op, enum machine_mode mode, enum machine_mode oldmode,
307 int unsignedp, int no_extend)
311 /* If we don't have to extend and this is a constant, return it. */
312 if (no_extend && GET_MODE (op) == VOIDmode)
315 /* If we must extend do so. If OP is a SUBREG for a promoted object, also
316 extend since it will be more efficient to do so unless the signedness of
317 a promoted object differs from our extension. */
319 || (GET_CODE (op) == SUBREG && SUBREG_PROMOTED_VAR_P (op)
320 && SUBREG_PROMOTED_UNSIGNED_P (op) == unsignedp))
321 return convert_modes (mode, oldmode, op, unsignedp);
323 /* If MODE is no wider than a single word, we return a paradoxical
325 if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
326 return gen_rtx_SUBREG (mode, force_reg (GET_MODE (op), op), 0);
328 /* Otherwise, get an object of MODE, clobber it, and set the low-order
331 result = gen_reg_rtx (mode);
/* The CLOBBER tells the optimizers the high part is intentionally
   undefined before the low-part move below.  */
332 emit_insn (gen_rtx_CLOBBER (VOIDmode, result));
333 emit_move_insn (gen_lowpart (GET_MODE (op), result), op);
/* NOTE(review): most `case` labels of this switch are elided from this view
   (only the `return` lines survive for many arms), as are the function's
   braces and default arm.  Code kept byte-identical.  */
337 /* Return the optab used for computing the operation given by
338 the tree code, CODE. This function is not always usable (for
339 example, it cannot give complete results for multiplication
340 or division) but probably ought to be relied on more widely
341 throughout the expander. */
343 optab_for_tree_code (enum tree_code code, const_tree type)
355 return one_cmpl_optab;
364 return TYPE_UNSIGNED (type) ? umod_optab : smod_optab;
/* Division: saturating variants take precedence when the type asks for
   saturation.  */
372 if (TYPE_SATURATING(type))
373 return TYPE_UNSIGNED(type) ? usdiv_optab : ssdiv_optab;
374 return TYPE_UNSIGNED (type) ? udiv_optab : sdiv_optab;
377 if (TYPE_SATURATING(type))
378 return TYPE_UNSIGNED(type) ? usashl_optab : ssashl_optab;
382 return TYPE_UNSIGNED (type) ? lshr_optab : ashr_optab;
391 return TYPE_UNSIGNED (type) ? umax_optab : smax_optab;
394 return TYPE_UNSIGNED (type) ? umin_optab : smin_optab;
396 case REALIGN_LOAD_EXPR:
397 return vec_realign_load_optab;
400 return TYPE_UNSIGNED (type) ? usum_widen_optab : ssum_widen_optab;
403 return TYPE_UNSIGNED (type) ? udot_prod_optab : sdot_prod_optab;
406 return TYPE_UNSIGNED (type) ? reduc_umax_optab : reduc_smax_optab;
409 return TYPE_UNSIGNED (type) ? reduc_umin_optab : reduc_smin_optab;
411 case REDUC_PLUS_EXPR:
412 return TYPE_UNSIGNED (type) ? reduc_uplus_optab : reduc_splus_optab;
414 case VEC_LSHIFT_EXPR:
415 return vec_shl_optab;
417 case VEC_RSHIFT_EXPR:
418 return vec_shr_optab;
420 case VEC_WIDEN_MULT_HI_EXPR:
421 return TYPE_UNSIGNED (type) ?
422 vec_widen_umult_hi_optab : vec_widen_smult_hi_optab;
424 case VEC_WIDEN_MULT_LO_EXPR:
425 return TYPE_UNSIGNED (type) ?
426 vec_widen_umult_lo_optab : vec_widen_smult_lo_optab;
428 case VEC_UNPACK_HI_EXPR:
429 return TYPE_UNSIGNED (type) ?
430 vec_unpacku_hi_optab : vec_unpacks_hi_optab;
432 case VEC_UNPACK_LO_EXPR:
433 return TYPE_UNSIGNED (type) ?
434 vec_unpacku_lo_optab : vec_unpacks_lo_optab;
436 case VEC_UNPACK_FLOAT_HI_EXPR:
437 /* The signedness is determined from input operand. */
438 return TYPE_UNSIGNED (type) ?
439 vec_unpacku_float_hi_optab : vec_unpacks_float_hi_optab;
441 case VEC_UNPACK_FLOAT_LO_EXPR:
442 /* The signedness is determined from input operand. */
443 return TYPE_UNSIGNED (type) ?
444 vec_unpacku_float_lo_optab : vec_unpacks_float_lo_optab;
446 case VEC_PACK_TRUNC_EXPR:
447 return vec_pack_trunc_optab;
449 case VEC_PACK_SAT_EXPR:
450 return TYPE_UNSIGNED (type) ? vec_pack_usat_optab : vec_pack_ssat_optab;
452 case VEC_PACK_FIX_TRUNC_EXPR:
453 /* The signedness is determined from output operand. */
454 return TYPE_UNSIGNED (type) ?
455 vec_pack_ufix_trunc_optab : vec_pack_sfix_trunc_optab;
/* From here on, trapping-on-overflow variants matter for integral types.  */
461 trapv = INTEGRAL_TYPE_P (type) && TYPE_OVERFLOW_TRAPS (type);
464 case POINTER_PLUS_EXPR:
466 if (TYPE_SATURATING(type))
467 return TYPE_UNSIGNED(type) ? usadd_optab : ssadd_optab;
468 return trapv ? addv_optab : add_optab;
471 if (TYPE_SATURATING(type))
472 return TYPE_UNSIGNED(type) ? ussub_optab : sssub_optab;
473 return trapv ? subv_optab : sub_optab;
476 if (TYPE_SATURATING(type))
477 return TYPE_UNSIGNED(type) ? usmul_optab : ssmul_optab;
478 return trapv ? smulv_optab : smul_optab;
481 if (TYPE_SATURATING(type))
482 return TYPE_UNSIGNED(type) ? usneg_optab : ssneg_optab;
483 return trapv ? negv_optab : neg_optab;
486 return trapv ? absv_optab : abs_optab;
488 case VEC_EXTRACT_EVEN_EXPR:
489 return vec_extract_even_optab;
491 case VEC_EXTRACT_ODD_EXPR:
492 return vec_extract_odd_optab;
494 case VEC_INTERLEAVE_HIGH_EXPR:
495 return vec_interleave_high_optab;
497 case VEC_INTERLEAVE_LOW_EXPR:
498 return vec_interleave_low_optab;
/* NOTE(review): many interior lines are elided here (the nops-based
   branching, braces, TEMP/ICODE declarations, xop initializations, and the
   final emit/return).  Code kept byte-identical.  */
506 /* Expand vector widening operations.
508 There are two different classes of operations handled here:
509 1) Operations whose result is wider than all the arguments to the operation.
510 Examples: VEC_UNPACK_HI/LO_EXPR, VEC_WIDEN_MULT_HI/LO_EXPR
511 In this case OP0 and optionally OP1 would be initialized,
512 but WIDE_OP wouldn't (not relevant for this case).
513 2) Operations whose result is of the same size as the last argument to the
514 operation, but wider than all the other arguments to the operation.
515 Examples: WIDEN_SUM_EXPR, VEC_DOT_PROD_EXPR.
516 In the case WIDE_OP, OP0 and optionally OP1 would be initialized.
518 E.g, when called to expand the following operations, this is how
519 the arguments will be initialized:
521 widening-sum 2 oprnd0 - oprnd1
522 widening-dot-product 3 oprnd0 oprnd1 oprnd2
523 widening-mult 2 oprnd0 oprnd1 -
524 type-promotion (vec-unpack) 1 oprnd0 - - */
527 expand_widen_pattern_expr (tree exp, rtx op0, rtx op1, rtx wide_op, rtx target,
530 tree oprnd0, oprnd1, oprnd2;
531 enum machine_mode wmode = 0, tmode0, tmode1 = 0;
532 optab widen_pattern_optab;
534 enum machine_mode xmode0, xmode1 = 0, wxmode = 0;
537 rtx xop0, xop1, wxop;
538 int nops = TREE_OPERAND_LENGTH (exp);
540 oprnd0 = TREE_OPERAND (exp, 0);
541 tmode0 = TYPE_MODE (TREE_TYPE (oprnd0));
/* The optab is selected from the tree code and first operand's type.  */
542 widen_pattern_optab =
543 optab_for_tree_code (TREE_CODE (exp), TREE_TYPE (oprnd0));
544 icode = (int) optab_handler (widen_pattern_optab, tmode0)->insn_code;
545 gcc_assert (icode != CODE_FOR_nothing);
546 xmode0 = insn_data[icode].operand[1].mode;
550 oprnd1 = TREE_OPERAND (exp, 1);
551 tmode1 = TYPE_MODE (TREE_TYPE (oprnd1));
552 xmode1 = insn_data[icode].operand[2].mode;
555 /* The last operand is of a wider mode than the rest of the operands. */
563 gcc_assert (tmode1 == tmode0);
565 oprnd2 = TREE_OPERAND (exp, 2);
566 wmode = TYPE_MODE (TREE_TYPE (oprnd2));
567 wxmode = insn_data[icode].operand[3].mode;
571 wmode = wxmode = insn_data[icode].operand[0].mode;
574 || ! (*insn_data[icode].operand[0].predicate) (target, wmode))
575 temp = gen_reg_rtx (wmode);
583 /* In case the insn wants input operands in modes different from
584 those of the actual operands, convert the operands. It would
585 seem that we don't need to convert CONST_INTs, but we do, so
586 that they're properly zero-extended, sign-extended or truncated
589 if (GET_MODE (op0) != xmode0 && xmode0 != VOIDmode)
590 xop0 = convert_modes (xmode0,
591 GET_MODE (op0) != VOIDmode
597 if (GET_MODE (op1) != xmode1 && xmode1 != VOIDmode)
598 xop1 = convert_modes (xmode1,
599 GET_MODE (op1) != VOIDmode
605 if (GET_MODE (wide_op) != wxmode && wxmode != VOIDmode)
606 wxop = convert_modes (wxmode,
607 GET_MODE (wide_op) != VOIDmode
612 /* Now, if insn's predicates don't allow our operands, put them into
615 if (! (*insn_data[icode].operand[1].predicate) (xop0, xmode0)
616 && xmode0 != VOIDmode)
617 xop0 = copy_to_mode_reg (xmode0, xop0);
621 if (! (*insn_data[icode].operand[2].predicate) (xop1, xmode1)
622 && xmode1 != VOIDmode)
623 xop1 = copy_to_mode_reg (xmode1, xop1);
627 if (! (*insn_data[icode].operand[3].predicate) (wxop, wxmode)
628 && wxmode != VOIDmode)
629 wxop = copy_to_mode_reg (wxmode, wxop);
/* Ternary form: temp = op0 OP op1 accumulated with wide_op.  */
631 pat = GEN_FCN (icode) (temp, xop0, xop1, wxop);
634 pat = GEN_FCN (icode) (temp, xop0, xop1);
/* When WIDE_OP is the insn's second input operand (unary + wide case),
   it is validated against operand[2] instead of operand[3].  */
640 if (! (*insn_data[icode].operand[2].predicate) (wxop, wxmode)
641 && wxmode != VOIDmode)
642 wxop = copy_to_mode_reg (wxmode, wxop);
644 pat = GEN_FCN (icode) (temp, xop0, wxop);
647 pat = GEN_FCN (icode) (temp, xop0);
/* NOTE(review): elided lines include braces, the TEMP/PAT declarations,
   `else temp = target;`, and the final emit/return.  Code kept
   byte-identical.  */
654 /* Generate code to perform an operation specified by TERNARY_OPTAB
655 on operands OP0, OP1 and OP2, with result having machine-mode MODE.
657 UNSIGNEDP is for the case where we have to widen the operands
658 to perform the operation. It says to use zero-extension.
660 If TARGET is nonzero, the value
661 is generated there, if it is convenient to do so.
662 In all cases an rtx is returned for the locus of the value;
663 this may or may not be TARGET. */
666 expand_ternary_op (enum machine_mode mode, optab ternary_optab, rtx op0,
667 rtx op1, rtx op2, rtx target, int unsignedp)
669 int icode = (int) optab_handler (ternary_optab, mode)->insn_code;
670 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
671 enum machine_mode mode1 = insn_data[icode].operand[2].mode;
672 enum machine_mode mode2 = insn_data[icode].operand[3].mode;
675 rtx xop0 = op0, xop1 = op1, xop2 = op2;
/* Caller must only request modes the target actually supports.  */
677 gcc_assert (optab_handler (ternary_optab, mode)->insn_code
678 != CODE_FOR_nothing);
680 if (!target || !insn_data[icode].operand[0].predicate (target, mode))
681 temp = gen_reg_rtx (mode);
685 /* In case the insn wants input operands in modes different from
686 those of the actual operands, convert the operands. It would
687 seem that we don't need to convert CONST_INTs, but we do, so
688 that they're properly zero-extended, sign-extended or truncated
691 if (GET_MODE (op0) != mode0 && mode0 != VOIDmode)
692 xop0 = convert_modes (mode0,
693 GET_MODE (op0) != VOIDmode
698 if (GET_MODE (op1) != mode1 && mode1 != VOIDmode)
699 xop1 = convert_modes (mode1,
700 GET_MODE (op1) != VOIDmode
705 if (GET_MODE (op2) != mode2 && mode2 != VOIDmode)
706 xop2 = convert_modes (mode2,
707 GET_MODE (op2) != VOIDmode
712 /* Now, if insn's predicates don't allow our operands, put them into
715 if (!insn_data[icode].operand[1].predicate (xop0, mode0)
716 && mode0 != VOIDmode)
717 xop0 = copy_to_mode_reg (mode0, xop0);
719 if (!insn_data[icode].operand[2].predicate (xop1, mode1)
720 && mode1 != VOIDmode)
721 xop1 = copy_to_mode_reg (mode1, xop1);
723 if (!insn_data[icode].operand[3].predicate (xop2, mode2)
724 && mode2 != VOIDmode)
725 xop2 = copy_to_mode_reg (mode2, xop2);
727 pat = GEN_FCN (icode) (temp, xop0, xop1, xop2);
/* NOTE(review): braces and the `if (x) return x;` line after the
   simplification attempt appear elided.  Code kept byte-identical.  */
734 /* Like expand_binop, but return a constant rtx if the result can be
735 calculated at compile time. The arguments and return value are
736 otherwise the same as for expand_binop. */
739 simplify_expand_binop (enum machine_mode mode, optab binoptab,
740 rtx op0, rtx op1, rtx target, int unsignedp,
741 enum optab_methods methods)
/* Fold at compile time when both operands are constants.  */
743 if (CONSTANT_P (op0) && CONSTANT_P (op1))
745 rtx x = simplify_binary_operation (binoptab->code, mode, op0, op1);
751 return expand_binop (mode, binoptab, op0, op1, target, unsignedp, methods);
/* NOTE(review): the failure check (`if (x == 0) return false;`), the guard
   `if (x != target)`, and the `return true;` are elided.  Code kept
   byte-identical.  */
754 /* Like simplify_expand_binop, but always put the result in TARGET.
755 Return true if the expansion succeeded. */
758 force_expand_binop (enum machine_mode mode, optab binoptab,
759 rtx op0, rtx op1, rtx target, int unsignedp,
760 enum optab_methods methods)
762 rtx x = simplify_expand_binop (mode, binoptab, op0, op1,
763 target, unsignedp, methods);
767 emit_move_insn (target, x);
/* NOTE(review): elided lines include the SHIFT_OPTAB/PAT declarations,
   `break;` statements in the switch, the default arm, and the final
   emit_insn/return.  Code kept byte-identical.  */
771 /* Generate insns for VEC_LSHIFT_EXPR, VEC_RSHIFT_EXPR. */
774 expand_vec_shift_expr (tree vec_shift_expr, rtx target)
776 enum insn_code icode;
777 rtx rtx_op1, rtx_op2;
778 enum machine_mode mode1;
779 enum machine_mode mode2;
780 enum machine_mode mode = TYPE_MODE (TREE_TYPE (vec_shift_expr));
781 tree vec_oprnd = TREE_OPERAND (vec_shift_expr, 0);
782 tree shift_oprnd = TREE_OPERAND (vec_shift_expr, 1);
/* Map the tree code to the matching whole-vector shift optab.  */
786 switch (TREE_CODE (vec_shift_expr))
788 case VEC_RSHIFT_EXPR:
789 shift_optab = vec_shr_optab;
791 case VEC_LSHIFT_EXPR:
792 shift_optab = vec_shl_optab;
798 icode = (int) optab_handler (shift_optab, mode)->insn_code;
799 gcc_assert (icode != CODE_FOR_nothing);
801 mode1 = insn_data[icode].operand[1].mode;
802 mode2 = insn_data[icode].operand[2].mode;
804 rtx_op1 = expand_normal (vec_oprnd);
805 if (!(*insn_data[icode].operand[1].predicate) (rtx_op1, mode1)
806 && mode1 != VOIDmode)
807 rtx_op1 = force_reg (mode1, rtx_op1);
809 rtx_op2 = expand_normal (shift_oprnd);
810 if (!(*insn_data[icode].operand[2].predicate) (rtx_op2, mode2)
811 && mode2 != VOIDmode)
812 rtx_op2 = force_reg (mode2, rtx_op2);
815 || ! (*insn_data[icode].operand[0].predicate) (target, mode))
816 target = gen_reg_rtx (mode);
818 /* Emit instruction */
819 pat = GEN_FCN (icode) (target, rtx_op1, rtx_op2);
/* NOTE(review): the `return false;`/`return true;` lines, the else arm of
   the ashr test, and closing braces are elided.  Code kept byte-identical.  */
826 /* This subroutine of expand_doubleword_shift handles the cases in which
827 the effective shift value is >= BITS_PER_WORD. The arguments and return
828 value are the same as for the parent routine, except that SUPERWORD_OP1
829 is the shift count to use when shifting OUTOF_INPUT into INTO_TARGET.
830 INTO_TARGET may be null if the caller has decided to calculate it. */
833 expand_superword_shift (optab binoptab, rtx outof_input, rtx superword_op1,
834 rtx outof_target, rtx into_target,
835 int unsignedp, enum optab_methods methods)
837 if (into_target != 0)
838 if (!force_expand_binop (word_mode, binoptab, outof_input, superword_op1,
839 into_target, unsignedp, methods))
842 if (outof_target != 0)
844 /* For a signed right shift, we must fill OUTOF_TARGET with copies
845 of the sign bit, otherwise we must fill it with zeros. */
846 if (binoptab != ashr_optab)
847 emit_move_insn (outof_target, CONST0_RTX (word_mode))
/* ashr case: replicate the sign bit by shifting right BITS_PER_WORD-1.  */;
849 if (!force_expand_binop (word_mode, binoptab,
850 outof_input, GEN_INT (BITS_PER_WORD - 1),
851 outof_target, unsignedp, methods))
/* NOTE(review): elided lines include braces, the CARRIES/TMP declarations,
   the else keyword between the constant/masked path and the general path,
   the failure returns, and `return true;`.  Code kept byte-identical.  */
857 /* This subroutine of expand_doubleword_shift handles the cases in which
858 the effective shift value is < BITS_PER_WORD. The arguments and return
859 value are the same as for the parent routine. */
862 expand_subword_shift (enum machine_mode op1_mode, optab binoptab,
863 rtx outof_input, rtx into_input, rtx op1,
864 rtx outof_target, rtx into_target,
865 int unsignedp, enum optab_methods methods,
866 unsigned HOST_WIDE_INT shift_mask)
868 optab reverse_unsigned_shift, unsigned_shift;
/* Carries always move logically, and in the opposite direction of the
   requested shift.  */
871 reverse_unsigned_shift = (binoptab == ashl_optab ? lshr_optab : ashl_optab);
872 unsigned_shift = (binoptab == ashl_optab ? ashl_optab : lshr_optab);
874 /* The low OP1 bits of INTO_TARGET come from the high bits of OUTOF_INPUT.
875 We therefore need to shift OUTOF_INPUT by (BITS_PER_WORD - OP1) bits in
876 the opposite direction to BINOPTAB. */
877 if (CONSTANT_P (op1) || shift_mask >= BITS_PER_WORD)
879 carries = outof_input;
880 tmp = immed_double_const (BITS_PER_WORD, 0, op1_mode);
881 tmp = simplify_expand_binop (op1_mode, sub_optab, tmp, op1,
886 /* We must avoid shifting by BITS_PER_WORD bits since that is either
887 the same as a zero shift (if shift_mask == BITS_PER_WORD - 1) or
888 has unknown behavior. Do a single shift first, then shift by the
889 remainder. It's OK to use ~OP1 as the remainder if shift counts
890 are truncated to the mode size. */
891 carries = expand_binop (word_mode, reverse_unsigned_shift,
892 outof_input, const1_rtx, 0, unsignedp, methods);
893 if (shift_mask == BITS_PER_WORD - 1)
/* Remainder is ~OP1, computed as OP1 XOR -1.  */
895 tmp = immed_double_const (-1, -1, op1_mode);
896 tmp = simplify_expand_binop (op1_mode, xor_optab, op1, tmp,
/* Otherwise remainder is (BITS_PER_WORD - 1) - OP1.  */
901 tmp = immed_double_const (BITS_PER_WORD - 1, 0, op1_mode);
902 tmp = simplify_expand_binop (op1_mode, sub_optab, tmp, op1,
906 if (tmp == 0 || carries == 0)
908 carries = expand_binop (word_mode, reverse_unsigned_shift,
909 carries, tmp, 0, unsignedp, methods);
913 /* Shift INTO_INPUT logically by OP1. This is the last use of INTO_INPUT
914 so the result can go directly into INTO_TARGET if convenient. */
915 tmp = expand_binop (word_mode, unsigned_shift, into_input, op1,
916 into_target, unsignedp, methods);
920 /* Now OR in the bits carried over from OUTOF_INPUT. */
921 if (!force_expand_binop (word_mode, ior_optab, tmp, carries,
922 into_target, unsignedp, methods))
925 /* Use a standard word_mode shift for the out-of half. */
926 if (outof_target != 0)
927 if (!force_expand_binop (word_mode, binoptab, outof_input, op1,
928 outof_target, unsignedp, methods))
/* NOTE(review): elided lines include the failure `return false;` branches,
   the else keyword between the two superword setups, and the closing
   `return true;`/brace/#endif.  Code kept byte-identical.  */
935 #ifdef HAVE_conditional_move
936 /* Try implementing expand_doubleword_shift using conditional moves.
937 The shift is by < BITS_PER_WORD if (CMP_CODE CMP1 CMP2) is true,
938 otherwise it is by >= BITS_PER_WORD. SUBWORD_OP1 and SUPERWORD_OP1
939 are the shift counts to use in the former and latter case. All other
940 arguments are the same as the parent routine. */
943 expand_doubleword_shift_condmove (enum machine_mode op1_mode, optab binoptab,
944 enum rtx_code cmp_code, rtx cmp1, rtx cmp2,
945 rtx outof_input, rtx into_input,
946 rtx subword_op1, rtx superword_op1,
947 rtx outof_target, rtx into_target,
948 int unsignedp, enum optab_methods methods,
949 unsigned HOST_WIDE_INT shift_mask)
951 rtx outof_superword, into_superword;
953 /* Put the superword version of the output into OUTOF_SUPERWORD and
955 outof_superword = outof_target != 0 ? gen_reg_rtx (word_mode) : 0;
956 if (outof_target != 0 && subword_op1 == superword_op1)
958 /* The value INTO_TARGET >> SUBWORD_OP1, which we later store in
959 OUTOF_TARGET, is the same as the value of INTO_SUPERWORD. */
960 into_superword = outof_target;
961 if (!expand_superword_shift (binoptab, outof_input, superword_op1,
962 outof_superword, 0, unsignedp, methods))
967 into_superword = gen_reg_rtx (word_mode);
968 if (!expand_superword_shift (binoptab, outof_input, superword_op1,
969 outof_superword, into_superword,
974 /* Put the subword version directly in OUTOF_TARGET and INTO_TARGET. */
975 if (!expand_subword_shift (op1_mode, binoptab,
976 outof_input, into_input, subword_op1,
977 outof_target, into_target,
978 unsignedp, methods, shift_mask))
981 /* Select between them. Do the INTO half first because INTO_SUPERWORD
982 might be the current value of OUTOF_TARGET. */
983 if (!emit_conditional_move (into_target, cmp_code, cmp1, cmp2, op1_mode,
984 into_target, into_superword, word_mode, false))
987 if (outof_target != 0)
988 if (!emit_conditional_move (outof_target, cmp_code, cmp1, cmp2, op1_mode,
989 outof_target, outof_superword,
/* NOTE(review): elided lines include braces, the cmp_code assignments
   (the visible paths set CMP1/CMP2/SUPERWORD_OP1 but the EQ/NE choice at
   internal lines ~1070/1081 is missing), the recursive call's OUTOF half
   argument, returns, and the #endif for the condmove block.  Code kept
   byte-identical.  */
997 /* Expand a doubleword shift (ashl, ashr or lshr) using word-mode shifts.
998 OUTOF_INPUT and INTO_INPUT are the two word-sized halves of the first
999 input operand; the shift moves bits in the direction OUTOF_INPUT->
1000 INTO_TARGET. OUTOF_TARGET and INTO_TARGET are the equivalent words
1001 of the target. OP1 is the shift count and OP1_MODE is its mode.
1002 If OP1 is constant, it will have been truncated as appropriate
1003 and is known to be nonzero.
1005 If SHIFT_MASK is zero, the result of word shifts is undefined when the
1006 shift count is outside the range [0, BITS_PER_WORD). This routine must
1007 avoid generating such shifts for OP1s in the range [0, BITS_PER_WORD * 2).
1009 If SHIFT_MASK is nonzero, all word-mode shift counts are effectively
1010 masked by it and shifts in the range [BITS_PER_WORD, SHIFT_MASK) will
1011 fill with zeros or sign bits as appropriate.
1013 If SHIFT_MASK is BITS_PER_WORD - 1, this routine will synthesize
1014 a doubleword shift whose equivalent mask is BITS_PER_WORD * 2 - 1.
1015 Doing this preserves semantics required by SHIFT_COUNT_TRUNCATED.
1016 In all other cases, shifts by values outside [0, BITS_PER_UNIT * 2)
1019 BINOPTAB, UNSIGNEDP and METHODS are as for expand_binop. This function
1020 may not use INTO_INPUT after modifying INTO_TARGET, and similarly for
1021 OUTOF_INPUT and OUTOF_TARGET. OUTOF_TARGET can be null if the parent
1022 function wants to calculate it itself.
1024 Return true if the shift could be successfully synthesized. */
1027 expand_doubleword_shift (enum machine_mode op1_mode, optab binoptab,
1028 rtx outof_input, rtx into_input, rtx op1,
1029 rtx outof_target, rtx into_target,
1030 int unsignedp, enum optab_methods methods,
1031 unsigned HOST_WIDE_INT shift_mask)
1033 rtx superword_op1, tmp, cmp1, cmp2;
1034 rtx subword_label, done_label;
1035 enum rtx_code cmp_code;
1037 /* See if word-mode shifts by BITS_PER_WORD...BITS_PER_WORD * 2 - 1 will
1038 fill the result with sign or zero bits as appropriate. If so, the value
1039 of OUTOF_TARGET will always be (SHIFT OUTOF_INPUT OP1). Recursively call
1040 this routine to calculate INTO_TARGET (which depends on both OUTOF_INPUT
1041 and INTO_INPUT), then emit code to set up OUTOF_TARGET.
1043 This isn't worthwhile for constant shifts since the optimizers will
1044 cope better with in-range shift counts. */
1045 if (shift_mask >= BITS_PER_WORD
1046 && outof_target != 0
1047 && !CONSTANT_P (op1))
1049 if (!expand_doubleword_shift (op1_mode, binoptab,
1050 outof_input, into_input, op1,
1052 unsignedp, methods, shift_mask))
1054 if (!force_expand_binop (word_mode, binoptab, outof_input, op1,
1055 outof_target, unsignedp, methods))
1060 /* Set CMP_CODE, CMP1 and CMP2 so that the rtx (CMP_CODE CMP1 CMP2)
1061 is true when the effective shift value is less than BITS_PER_WORD.
1062 Set SUPERWORD_OP1 to the shift count that should be used to shift
1063 OUTOF_INPUT into INTO_TARGET when the condition is false. */
1064 tmp = immed_double_const (BITS_PER_WORD, 0, op1_mode);
1065 if (!CONSTANT_P (op1) && shift_mask == BITS_PER_WORD - 1)
1067 /* Set CMP1 to OP1 & BITS_PER_WORD. The result is zero iff OP1
1068 is a subword shift count. */
1069 cmp1 = simplify_expand_binop (op1_mode, and_optab, op1, tmp,
1071 cmp2 = CONST0_RTX (op1_mode);
/* Counts are truncated, so OP1 itself works for the superword shift.  */
1073 superword_op1 = op1;
1077 /* Set CMP1 to OP1 - BITS_PER_WORD. */
1078 cmp1 = simplify_expand_binop (op1_mode, sub_optab, op1, tmp,
1080 cmp2 = CONST0_RTX (op1_mode);
1082 superword_op1 = cmp1;
1087 /* If we can compute the condition at compile time, pick the
1088 appropriate subroutine. */
1089 tmp = simplify_relational_operation (cmp_code, SImode, op1_mode, cmp1, cmp2);
1090 if (tmp != 0 && GET_CODE (tmp) == CONST_INT)
1092 if (tmp == const0_rtx)
1093 return expand_superword_shift (binoptab, outof_input, superword_op1,
1094 outof_target, into_target,
1095 unsignedp, methods);
1097 return expand_subword_shift (op1_mode, binoptab,
1098 outof_input, into_input, op1,
1099 outof_target, into_target,
1100 unsignedp, methods, shift_mask);
1103 #ifdef HAVE_conditional_move
1104 /* Try using conditional moves to generate straight-line code. */
1106 rtx start = get_last_insn ();
1107 if (expand_doubleword_shift_condmove (op1_mode, binoptab,
1108 cmp_code, cmp1, cmp2,
1109 outof_input, into_input,
1111 outof_target, into_target,
1112 unsignedp, methods, shift_mask))
/* On failure, roll back everything the condmove attempt emitted.  */
1114 delete_insns_since (start);
1118 /* As a last resort, use branches to select the correct alternative. */
1119 subword_label = gen_label_rtx ();
1120 done_label = gen_label_rtx ();
1123 do_compare_rtx_and_jump (cmp1, cmp2, cmp_code, false, op1_mode,
1124 0, 0, subword_label);
1127 if (!expand_superword_shift (binoptab, outof_input, superword_op1,
1128 outof_target, into_target,
1129 unsignedp, methods))
1132 emit_jump_insn (gen_jump (done_label));
1134 emit_label (subword_label);
1136 if (!expand_subword_shift (op1_mode, binoptab,
1137 outof_input, into_input, op1,
1138 outof_target, into_target,
1139 unsignedp, methods, shift_mask))
1142 emit_label (done_label);
1146 /* Subroutine of expand_binop. Perform a double word multiplication of
1147 operands OP0 and OP1 both of mode MODE, which is exactly twice as wide
1148 as the target's word_mode. This function return NULL_RTX if anything
1149 goes wrong, in which case it may have already emitted instructions
1150 which need to be deleted.
1152 If we want to multiply two two-word values and have normal and widening
1153 multiplies of single-word values, we can do this with three smaller
1156 The multiplication proceeds as follows:
1157 _______________________
1158 [__op0_high_|__op0_low__]
1159 _______________________
1160 * [__op1_high_|__op1_low__]
1161 _______________________________________________
1162 _______________________
1163 (1) [__op0_low__*__op1_low__]
1164 _______________________
1165 (2a) [__op0_low__*__op1_high_]
1166 _______________________
1167 (2b) [__op0_high_*__op1_low__]
1168 _______________________
1169 (3) [__op0_high_*__op1_high_]
1172 This gives a 4-word result. Since we are only interested in the
1173 lower 2 words, partial result (3) and the upper words of (2a) and
1174 (2b) don't need to be calculated. Hence (2a) and (2b) can be
1175 calculated using non-widening multiplication.
1177 (1), however, needs to be calculated with an unsigned widening
1178 multiplication. If this operation is not directly supported we
1179 try using a signed widening multiplication and adjust the result.
1180 This adjustment works as follows:
1182 If both operands are positive then no adjustment is needed.
1184 If the operands have different signs, for example op0_low < 0 and
1185 op1_low >= 0, the instruction treats the most significant bit of
1186 op0_low as a sign bit instead of a bit with significance
1187 2**(BITS_PER_WORD-1), i.e. the instruction multiplies op1_low
1188 with 2**BITS_PER_WORD - op0_low, and two's complements the
1189 result. Conclusion: We need to add op1_low * 2**BITS_PER_WORD to
1192 Similarly, if both operands are negative, we need to add
1193 (op0_low + op1_low) * 2**BITS_PER_WORD.
1195 We use a trick to adjust quickly. We logically shift op0_low right
1196 (op1_low) BITS_PER_WORD-1 steps to get 0 or 1, and add this to
1197 op0_high (op1_high) before it is used to calculate 2b (2a). If no
1198 logical shift exists, we do an arithmetic right shift and subtract
1202 expand_doubleword_mult (enum machine_mode mode, rtx op0, rtx op1, rtx target,
1203 bool umulp, enum optab_methods methods)
1205 int low = (WORDS_BIG_ENDIAN ? 1 : 0);
1206 int high = (WORDS_BIG_ENDIAN ? 0 : 1);
1207 rtx wordm1 = umulp ? NULL_RTX : GEN_INT (BITS_PER_WORD - 1);
1208 rtx product, adjust, product_high, temp;
1210 rtx op0_high = operand_subword_force (op0, high, mode);
1211 rtx op0_low = operand_subword_force (op0, low, mode);
1212 rtx op1_high = operand_subword_force (op1, high, mode);
1213 rtx op1_low = operand_subword_force (op1, low, mode);
1215 /* If we're using an unsigned multiply to directly compute the product
1216 of the low-order words of the operands and perform any required
1217 adjustments of the operands, we begin by trying two more multiplications
1218 and then computing the appropriate sum.
1220 We have checked above that the required addition is provided.
1221 Full-word addition will normally always succeed, especially if
1222 it is provided at all, so we don't worry about its failure. The
1223 multiplication may well fail, however, so we do handle that. */
1227 /* ??? This could be done with emit_store_flag where available. */
1228 temp = expand_binop (word_mode, lshr_optab, op0_low, wordm1,
1229 NULL_RTX, 1, methods);
1231 op0_high = expand_binop (word_mode, add_optab, op0_high, temp,
1232 NULL_RTX, 0, OPTAB_DIRECT);
1235 temp = expand_binop (word_mode, ashr_optab, op0_low, wordm1,
1236 NULL_RTX, 0, methods);
1239 op0_high = expand_binop (word_mode, sub_optab, op0_high, temp,
1240 NULL_RTX, 0, OPTAB_DIRECT);
1247 adjust = expand_binop (word_mode, smul_optab, op0_high, op1_low,
1248 NULL_RTX, 0, OPTAB_DIRECT);
1252 /* OP0_HIGH should now be dead. */
1256 /* ??? This could be done with emit_store_flag where available. */
1257 temp = expand_binop (word_mode, lshr_optab, op1_low, wordm1,
1258 NULL_RTX, 1, methods);
1260 op1_high = expand_binop (word_mode, add_optab, op1_high, temp,
1261 NULL_RTX, 0, OPTAB_DIRECT);
1264 temp = expand_binop (word_mode, ashr_optab, op1_low, wordm1,
1265 NULL_RTX, 0, methods);
1268 op1_high = expand_binop (word_mode, sub_optab, op1_high, temp,
1269 NULL_RTX, 0, OPTAB_DIRECT);
1276 temp = expand_binop (word_mode, smul_optab, op1_high, op0_low,
1277 NULL_RTX, 0, OPTAB_DIRECT);
1281 /* OP1_HIGH should now be dead. */
1283 adjust = expand_binop (word_mode, add_optab, adjust, temp,
1284 adjust, 0, OPTAB_DIRECT);
1286 if (target && !REG_P (target))
1290 product = expand_binop (mode, umul_widen_optab, op0_low, op1_low,
1291 target, 1, OPTAB_DIRECT);
1293 product = expand_binop (mode, smul_widen_optab, op0_low, op1_low,
1294 target, 1, OPTAB_DIRECT);
1299 product_high = operand_subword (product, high, 1, mode);
1300 adjust = expand_binop (word_mode, add_optab, product_high, adjust,
1301 REG_P (product_high) ? product_high : adjust,
1303 emit_move_insn (product_high, adjust);
1307 /* Wrapper around expand_binop which takes an rtx code to specify
1308 the operation to perform, not an optab pointer. All other
1309 arguments are the same. */
1311 expand_simple_binop (enum machine_mode mode, enum rtx_code code, rtx op0,
1312 rtx op1, rtx target, int unsignedp,
1313 enum optab_methods methods)
1315 optab binop = code_to_optab[(int) code];
1318 return expand_binop (mode, binop, op0, op1, target, unsignedp, methods);
1321 /* Return whether OP0 and OP1 should be swapped when expanding a commutative
1322 binop. Order them according to commutative_operand_precedence and, if
1323 possible, try to put TARGET or a pseudo first. */
1325 swap_commutative_operands_with_target (rtx target, rtx op0, rtx op1)
1327 int op0_prec = commutative_operand_precedence (op0);
1328 int op1_prec = commutative_operand_precedence (op1);
1330 if (op0_prec < op1_prec)
1333 if (op0_prec > op1_prec)
1336 /* With equal precedence, both orders are ok, but it is better if the
1337 first operand is TARGET, or if both TARGET and OP0 are pseudos. */
1338 if (target == 0 || REG_P (target))
1339 return (REG_P (op1) && !REG_P (op0)) || target == op1;
1341 return rtx_equal_p (op1, target);
1344 /* Return true if BINOPTAB implements a shift operation. */
1347 shift_optab_p (optab binoptab)
1349 switch (binoptab->code)
1365 /* Return true if BINOPTAB implements a commutative binary operation. */
1368 commutative_optab_p (optab binoptab)
1370 return (GET_RTX_CLASS (binoptab->code) == RTX_COMM_ARITH
1371 || binoptab == smul_widen_optab
1372 || binoptab == umul_widen_optab
1373 || binoptab == smul_highpart_optab
1374 || binoptab == umul_highpart_optab);
1377 /* X is to be used in mode MODE as an operand to BINOPTAB. If we're
1378 optimizing, and if the operand is a constant that costs more than
1379 1 instruction, force the constant into a register and return that
1380 register. Return X otherwise. UNSIGNEDP says whether X is unsigned. */
1383 avoid_expensive_constant (enum machine_mode mode, optab binoptab,
1384 rtx x, bool unsignedp)
1386 if (mode != VOIDmode
1389 && rtx_cost (x, binoptab->code) > COSTS_N_INSNS (1))
1391 if (GET_CODE (x) == CONST_INT)
1393 HOST_WIDE_INT intval = trunc_int_for_mode (INTVAL (x), mode);
1394 if (intval != INTVAL (x))
1395 x = GEN_INT (intval);
1398 x = convert_modes (mode, VOIDmode, x, unsignedp);
1399 x = force_reg (mode, x);
1404 /* Helper function for expand_binop: handle the case where there
1405 is an insn that directly implements the indicated operation.
1406 Returns null if this is not possible. */
1408 expand_binop_directly (enum machine_mode mode, optab binoptab,
1410 rtx target, int unsignedp, enum optab_methods methods,
1413 int icode = (int) optab_handler (binoptab, mode)->insn_code;
1414 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
1415 enum machine_mode mode1 = insn_data[icode].operand[2].mode;
1416 enum machine_mode tmp_mode;
1419 rtx xop0 = op0, xop1 = op1;
1426 temp = gen_reg_rtx (mode);
1428 /* If it is a commutative operator and the modes would match
1429 if we would swap the operands, we can save the conversions. */
1430 commutative_p = commutative_optab_p (binoptab);
1432 && GET_MODE (xop0) != mode0 && GET_MODE (xop1) != mode1
1433 && GET_MODE (xop0) == mode1 && GET_MODE (xop1) == mode1)
1440 /* If we are optimizing, force expensive constants into a register. */
1441 xop0 = avoid_expensive_constant (mode0, binoptab, xop0, unsignedp);
1442 if (!shift_optab_p (binoptab))
1443 xop1 = avoid_expensive_constant (mode1, binoptab, xop1, unsignedp);
1445 /* In case the insn wants input operands in modes different from
1446 those of the actual operands, convert the operands. It would
1447 seem that we don't need to convert CONST_INTs, but we do, so
1448 that they're properly zero-extended, sign-extended or truncated
1451 if (GET_MODE (xop0) != mode0 && mode0 != VOIDmode)
1452 xop0 = convert_modes (mode0,
1453 GET_MODE (xop0) != VOIDmode
1458 if (GET_MODE (xop1) != mode1 && mode1 != VOIDmode)
1459 xop1 = convert_modes (mode1,
1460 GET_MODE (xop1) != VOIDmode
1465 /* If operation is commutative,
1466 try to make the first operand a register.
1467 Even better, try to make it the same as the target.
1468 Also try to make the last operand a constant. */
1470 && swap_commutative_operands_with_target (target, xop0, xop1))
1477 /* Now, if insn's predicates don't allow our operands, put them into
1480 if (!insn_data[icode].operand[1].predicate (xop0, mode0)
1481 && mode0 != VOIDmode)
1482 xop0 = copy_to_mode_reg (mode0, xop0);
1484 if (!insn_data[icode].operand[2].predicate (xop1, mode1)
1485 && mode1 != VOIDmode)
1486 xop1 = copy_to_mode_reg (mode1, xop1);
1488 if (binoptab == vec_pack_trunc_optab
1489 || binoptab == vec_pack_usat_optab
1490 || binoptab == vec_pack_ssat_optab
1491 || binoptab == vec_pack_ufix_trunc_optab
1492 || binoptab == vec_pack_sfix_trunc_optab)
1494 /* The mode of the result is different then the mode of the
1496 tmp_mode = insn_data[icode].operand[0].mode;
1497 if (GET_MODE_NUNITS (tmp_mode) != 2 * GET_MODE_NUNITS (mode))
1503 if (!insn_data[icode].operand[0].predicate (temp, tmp_mode))
1504 temp = gen_reg_rtx (tmp_mode);
1506 pat = GEN_FCN (icode) (temp, xop0, xop1);
1509 /* If PAT is composed of more than one insn, try to add an appropriate
1510 REG_EQUAL note to it. If we can't because TEMP conflicts with an
1511 operand, call expand_binop again, this time without a target. */
1512 if (INSN_P (pat) && NEXT_INSN (pat) != NULL_RTX
1513 && ! add_equal_note (pat, temp, binoptab->code, xop0, xop1))
1515 delete_insns_since (last);
1516 return expand_binop (mode, binoptab, op0, op1, NULL_RTX,
1517 unsignedp, methods);
1524 delete_insns_since (last);
/* Generate code to perform an operation specified by BINOPTAB
   on operands OP0 and OP1, with result having machine-mode MODE.

   UNSIGNEDP is for the case where we have to widen the operands
   to perform the operation.  It says to use zero-extension.

   If TARGET is nonzero, the value
   is generated there, if it is convenient to do so.
   In all cases an rtx is returned for the locus of the value;
   this may or may not be TARGET.

   NOTE(review): this excerpt elides a number of lines (declarations,
   braces, early returns and some argument continuations), so several
   statements below appear outside their original blocks.  The comments
   added here state only what the visible code establishes.  */

rtx
expand_binop (enum machine_mode mode, optab binoptab, rtx op0, rtx op1,
	      rtx target, int unsignedp, enum optab_methods methods)
{
  /* Strategy for the recursive word-sized sub-expansions below: never
     let them fall back to a library call themselves.  */
  enum optab_methods next_methods
    = (methods == OPTAB_LIB || methods == OPTAB_LIB_WIDEN
       ? OPTAB_WIDEN : methods);
  enum mode_class class;
  enum machine_mode wider_mode;
  /* Deleting back to ENTRY_LAST abandons everything this call emitted.  */
  rtx entry_last = get_last_insn ();

  class = GET_MODE_CLASS (mode);

  /* If subtracting an integer constant, convert this into an addition of
     the negated constant.  */

  if (binoptab == sub_optab && GET_CODE (op1) == CONST_INT)
    op1 = negate_rtx (mode, op1);
    binoptab = add_optab;

  /* Record where to delete back to if we backtrack.  */
  last = get_last_insn ();

  /* If we can do it with a three-operand insn, do so.  */

  if (methods != OPTAB_MUST_WIDEN
      && optab_handler (binoptab, mode)->insn_code != CODE_FOR_nothing)
    temp = expand_binop_directly (mode, binoptab, op0, op1, target,
				  unsignedp, methods, last);

  /* If we were trying to rotate, and that didn't work, try rotating
     the other direction before falling back to shifts and bitwise-or.  */
  if (((binoptab == rotl_optab
	&& optab_handler (rotr_optab, mode)->insn_code != CODE_FOR_nothing)
       || (binoptab == rotr_optab
	   && optab_handler (rotl_optab, mode)->insn_code != CODE_FOR_nothing))
      && class == MODE_INT)
    optab otheroptab = (binoptab == rotl_optab ? rotr_optab : rotl_optab);
    unsigned int bits = GET_MODE_BITSIZE (mode);

    /* A rotate by N equals a rotate the other way by BITS - N.  */
    if (GET_CODE (op1) == CONST_INT)
      newop1 = GEN_INT (bits - INTVAL (op1));
    else if (targetm.shift_truncation_mask (mode) == bits - 1)
      newop1 = negate_rtx (mode, op1);
      newop1 = expand_binop (mode, sub_optab,
			     GEN_INT (bits), op1,
			     NULL_RTX, unsignedp, OPTAB_DIRECT);

    temp = expand_binop_directly (mode, otheroptab, op0, newop1,
				  target, unsignedp, methods, last);

  /* If this is a multiply, see if we can do a widening operation that
     takes operands of this mode and makes a wider mode.  */

  if (binoptab == smul_optab
      && GET_MODE_WIDER_MODE (mode) != VOIDmode
      && ((optab_handler ((unsignedp ? umul_widen_optab : smul_widen_optab),
			  GET_MODE_WIDER_MODE (mode))->insn_code)
	  != CODE_FOR_nothing))
    temp = expand_binop (GET_MODE_WIDER_MODE (mode),
			 unsignedp ? umul_widen_optab : smul_widen_optab,
			 op0, op1, NULL_RTX, unsignedp, OPTAB_DIRECT);

    /* Truncate the wide product back to MODE.  */
    if (GET_MODE_CLASS (mode) == MODE_INT
	&& TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode),
				  GET_MODE_BITSIZE (GET_MODE (temp))))
      return gen_lowpart (mode, temp);
      return convert_to_mode (mode, temp, unsignedp);

  /* Look for a wider mode of the same class for which we think we
     can open-code the operation.  Check for a widening multiply at the
     wider mode as well.  */

  if (CLASS_HAS_WIDER_MODES_P (class)
      && methods != OPTAB_DIRECT && methods != OPTAB_LIB)
    for (wider_mode = GET_MODE_WIDER_MODE (mode);
	 wider_mode != VOIDmode;
	 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
      if (optab_handler (binoptab, wider_mode)->insn_code != CODE_FOR_nothing
	  || (binoptab == smul_optab
	      && GET_MODE_WIDER_MODE (wider_mode) != VOIDmode
	      && ((optab_handler ((unsignedp ? umul_widen_optab
				   : smul_widen_optab),
				  GET_MODE_WIDER_MODE (wider_mode))->insn_code)
		  != CODE_FOR_nothing)))
	rtx xop0 = op0, xop1 = op1;

	/* For certain integer operations, we need not actually extend
	   the narrow operands, as long as we will truncate
	   the results to the same narrowness.  */

	if ((binoptab == ior_optab || binoptab == and_optab
	     || binoptab == xor_optab
	     || binoptab == add_optab || binoptab == sub_optab
	     || binoptab == smul_optab || binoptab == ashl_optab)
	    && class == MODE_INT)
	  /* NOTE(review): the argument continuations of the two calls
	     below are elided in this excerpt.  */
	  xop0 = avoid_expensive_constant (mode, binoptab,
	  if (binoptab != ashl_optab)
	    xop1 = avoid_expensive_constant (mode, binoptab,

	xop0 = widen_operand (xop0, wider_mode, mode, unsignedp, no_extend);

	/* The second operand of a shift must always be extended.  */
	xop1 = widen_operand (xop1, wider_mode, mode, unsignedp,
			      no_extend && binoptab != ashl_optab);

	temp = expand_binop (wider_mode, binoptab, xop0, xop1, NULL_RTX,
			     unsignedp, OPTAB_DIRECT);

	if (class != MODE_INT
	    || !TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode),
				       GET_MODE_BITSIZE (wider_mode)))
	  target = gen_reg_rtx (mode);
	  convert_move (target, temp, 0);
	  return gen_lowpart (mode, temp);
	delete_insns_since (last);

  /* If operation is commutative,
     try to make the first operand a register.
     Even better, try to make it the same as the target.
     Also try to make the last operand a constant.  */
  if (commutative_optab_p (binoptab)
      && swap_commutative_operands_with_target (target, op0, op1))

  /* These can be done a word at a time.  */
  if ((binoptab == and_optab || binoptab == ior_optab || binoptab == xor_optab)
      && class == MODE_INT
      && GET_MODE_SIZE (mode) > UNITS_PER_WORD
      && optab_handler (binoptab, word_mode)->insn_code != CODE_FOR_nothing)

      /* If TARGET is the same as one of the operands, the REG_EQUAL note
	 won't be accurate, so use a new target.  */
      if (target == 0 || target == op0 || target == op1)
	target = gen_reg_rtx (mode);

      /* Do the actual arithmetic.  */
      for (i = 0; i < GET_MODE_BITSIZE (mode) / BITS_PER_WORD; i++)
	  rtx target_piece = operand_subword (target, i, 1, mode);
	  rtx x = expand_binop (word_mode, binoptab,
				operand_subword_force (op0, i, mode),
				operand_subword_force (op1, i, mode),
				target_piece, unsignedp, next_methods);

	  if (target_piece != x)
	    emit_move_insn (target_piece, x);

      insns = get_insns ();

      /* Only emit the sequence if every word succeeded.  */
      if (i == GET_MODE_BITSIZE (mode) / BITS_PER_WORD)
	  if (binoptab->code != UNKNOWN)
	    = gen_rtx_fmt_ee (binoptab->code, mode,
			      copy_rtx (op0), copy_rtx (op1));

  /* Synthesize double word shifts from single word shifts.  */
  if ((binoptab == lshr_optab || binoptab == ashl_optab
       || binoptab == ashr_optab)
      && class == MODE_INT
      && (GET_CODE (op1) == CONST_INT || !optimize_size)
      && GET_MODE_SIZE (mode) == 2 * UNITS_PER_WORD
      && optab_handler (binoptab, word_mode)->insn_code != CODE_FOR_nothing
      && optab_handler (ashl_optab, word_mode)->insn_code != CODE_FOR_nothing
      && optab_handler (lshr_optab, word_mode)->insn_code != CODE_FOR_nothing)
      unsigned HOST_WIDE_INT shift_mask, double_shift_mask;
      enum machine_mode op1_mode;

      double_shift_mask = targetm.shift_truncation_mask (mode);
      shift_mask = targetm.shift_truncation_mask (word_mode);
      op1_mode = GET_MODE (op1) != VOIDmode ? GET_MODE (op1) : word_mode;

      /* Apply the truncation to constant shifts.  */
      if (double_shift_mask > 0 && GET_CODE (op1) == CONST_INT)
	op1 = GEN_INT (INTVAL (op1) & double_shift_mask);

      if (op1 == CONST0_RTX (op1_mode))

      /* Make sure that this is a combination that expand_doubleword_shift
	 can handle.  See the comments there for details.  */
      if (double_shift_mask == 0
	  || (shift_mask == BITS_PER_WORD - 1
	      && double_shift_mask == BITS_PER_WORD * 2 - 1))
	  rtx into_target, outof_target;
	  rtx into_input, outof_input;
	  int left_shift, outof_word;

	  /* If TARGET is the same as one of the operands, the REG_EQUAL note
	     won't be accurate, so use a new target.  */
	  if (target == 0 || target == op0 || target == op1)
	    target = gen_reg_rtx (mode);

	  /* OUTOF_* is the word we are shifting bits away from, and
	     INTO_* is the word that we are shifting bits towards, thus
	     they differ depending on the direction of the shift and
	     WORDS_BIG_ENDIAN.  */

	  left_shift = binoptab == ashl_optab;
	  outof_word = left_shift ^ ! WORDS_BIG_ENDIAN;

	  outof_target = operand_subword (target, outof_word, 1, mode);
	  into_target = operand_subword (target, 1 - outof_word, 1, mode);

	  outof_input = operand_subword_force (op0, outof_word, mode);
	  into_input = operand_subword_force (op0, 1 - outof_word, mode);

	  if (expand_doubleword_shift (op1_mode, binoptab,
				       outof_input, into_input, op1,
				       outof_target, into_target,
				       unsignedp, next_methods, shift_mask))
	      insns = get_insns ();

  /* Synthesize double word rotates from single word shifts.  */
  if ((binoptab == rotl_optab || binoptab == rotr_optab)
      && class == MODE_INT
      && GET_CODE (op1) == CONST_INT
      && GET_MODE_SIZE (mode) == 2 * UNITS_PER_WORD
      && optab_handler (ashl_optab, word_mode)->insn_code != CODE_FOR_nothing
      && optab_handler (lshr_optab, word_mode)->insn_code != CODE_FOR_nothing)
      rtx into_target, outof_target;
      rtx into_input, outof_input;
      int shift_count, left_shift, outof_word;

      /* If TARGET is the same as one of the operands, the REG_EQUAL note
	 won't be accurate, so use a new target.  Do this also if target is not
	 a REG, first because having a register instead may open optimization
	 opportunities, and second because if target and op0 happen to be MEMs
	 designating the same location, we would risk clobbering it too early
	 in the code sequence we generate below.  */
      if (target == 0 || target == op0 || target == op1 || ! REG_P (target))
	target = gen_reg_rtx (mode);

      shift_count = INTVAL (op1);

      /* OUTOF_* is the word we are shifting bits away from, and
	 INTO_* is the word that we are shifting bits towards, thus
	 they differ depending on the direction of the shift and
	 WORDS_BIG_ENDIAN.  */

      left_shift = (binoptab == rotl_optab);
      outof_word = left_shift ^ ! WORDS_BIG_ENDIAN;

      outof_target = operand_subword (target, outof_word, 1, mode);
      into_target = operand_subword (target, 1 - outof_word, 1, mode);

      outof_input = operand_subword_force (op0, outof_word, mode);
      into_input = operand_subword_force (op0, 1 - outof_word, mode);

      if (shift_count == BITS_PER_WORD)
	  /* This is just a word swap.  */
	  emit_move_insn (outof_target, into_input);
	  emit_move_insn (into_target, outof_input);
	  rtx into_temp1, into_temp2, outof_temp1, outof_temp2;
	  rtx first_shift_count, second_shift_count;
	  optab reverse_unsigned_shift, unsigned_shift;

	  reverse_unsigned_shift = (left_shift ^ (shift_count < BITS_PER_WORD)
				    ? lshr_optab : ashl_optab);

	  unsigned_shift = (left_shift ^ (shift_count < BITS_PER_WORD)
			    ? ashl_optab : lshr_optab);

	  if (shift_count > BITS_PER_WORD)
	      first_shift_count = GEN_INT (shift_count - BITS_PER_WORD);
	      second_shift_count = GEN_INT (2 * BITS_PER_WORD - shift_count);
	      first_shift_count = GEN_INT (BITS_PER_WORD - shift_count);
	      second_shift_count = GEN_INT (shift_count);

	  into_temp1 = expand_binop (word_mode, unsigned_shift,
				     outof_input, first_shift_count,
				     NULL_RTX, unsignedp, next_methods);
	  into_temp2 = expand_binop (word_mode, reverse_unsigned_shift,
				     into_input, second_shift_count,
				     NULL_RTX, unsignedp, next_methods);

	  if (into_temp1 != 0 && into_temp2 != 0)
	    inter = expand_binop (word_mode, ior_optab, into_temp1, into_temp2,
				  into_target, unsignedp, next_methods);

	  if (inter != 0 && inter != into_target)
	    emit_move_insn (into_target, inter);

	  outof_temp1 = expand_binop (word_mode, unsigned_shift,
				      into_input, first_shift_count,
				      NULL_RTX, unsignedp, next_methods);
	  outof_temp2 = expand_binop (word_mode, reverse_unsigned_shift,
				      outof_input, second_shift_count,
				      NULL_RTX, unsignedp, next_methods);

	  if (inter != 0 && outof_temp1 != 0 && outof_temp2 != 0)
	    inter = expand_binop (word_mode, ior_optab,
				  outof_temp1, outof_temp2,
				  outof_target, unsignedp, next_methods);

	  if (inter != 0 && inter != outof_target)
	    emit_move_insn (outof_target, inter);

      insns = get_insns ();

  /* These can be done a word at a time by propagating carries.  */
  if ((binoptab == add_optab || binoptab == sub_optab)
      && class == MODE_INT
      && GET_MODE_SIZE (mode) >= 2 * UNITS_PER_WORD
      && optab_handler (binoptab, word_mode)->insn_code != CODE_FOR_nothing)
      optab otheroptab = binoptab == add_optab ? sub_optab : add_optab;
      const unsigned int nwords = GET_MODE_BITSIZE (mode) / BITS_PER_WORD;
      rtx carry_in = NULL_RTX, carry_out = NULL_RTX;
      rtx xop0, xop1, xtarget;

      /* We can handle either a 1 or -1 value for the carry.  If STORE_FLAG
	 value is one of those, use it.  Otherwise, use 1 since it is the
	 one easiest to get.  */
#if STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1
      int normalizep = STORE_FLAG_VALUE;

      /* Prepare the operands.  */
      xop0 = force_reg (mode, op0);
      xop1 = force_reg (mode, op1);

      xtarget = gen_reg_rtx (mode);

      if (target == 0 || !REG_P (target))

      /* Indicate for flow that the entire target reg is being set.  */
      emit_insn (gen_rtx_CLOBBER (VOIDmode, xtarget));

      /* Do the actual arithmetic.  */
      for (i = 0; i < nwords; i++)
	  /* Words are processed least-significant first.  */
	  int index = (WORDS_BIG_ENDIAN ? nwords - i - 1 : i);
	  rtx target_piece = operand_subword (xtarget, index, 1, mode);
	  rtx op0_piece = operand_subword_force (xop0, index, mode);
	  rtx op1_piece = operand_subword_force (xop1, index, mode);

	  /* Main add/subtract of the input operands.  */
	  x = expand_binop (word_mode, binoptab,
			    op0_piece, op1_piece,
			    target_piece, unsignedp, next_methods);

	      /* Store carry from main add/subtract.  */
	      carry_out = gen_reg_rtx (word_mode);
	      carry_out = emit_store_flag_force (carry_out,
						 (binoptab == add_optab
						 word_mode, 1, normalizep);

	      /* Add/subtract previous carry to main result.  */
	      newx = expand_binop (word_mode,
				   normalizep == 1 ? binoptab : otheroptab,
				   NULL_RTX, 1, next_methods);

	      /* Get out carry from adding/subtracting carry in.  */
	      rtx carry_tmp = gen_reg_rtx (word_mode);
	      carry_tmp = emit_store_flag_force (carry_tmp,
						 (binoptab == add_optab
						 word_mode, 1, normalizep);

	      /* Logical-ior the two poss. carry together.  */
	      carry_out = expand_binop (word_mode, ior_optab,
					carry_out, carry_tmp,
					carry_out, 0, next_methods);

	      emit_move_insn (target_piece, newx);

	  if (x != target_piece)
	    emit_move_insn (target_piece, x);

	  carry_in = carry_out;

      /* Only commit if every word was expanded successfully.  */
      if (i == GET_MODE_BITSIZE (mode) / (unsigned) BITS_PER_WORD)
	  if (optab_handler (mov_optab, mode)->insn_code != CODE_FOR_nothing
	      || ! rtx_equal_p (target, xtarget))
	      rtx temp = emit_move_insn (target, xtarget);

	      set_unique_reg_note (temp,
				   gen_rtx_fmt_ee (binoptab->code, mode,

      delete_insns_since (last);

  /* Attempt to synthesize double word multiplies using a sequence of word
     mode multiplications.  We first attempt to generate a sequence using a
     more efficient unsigned widening multiply, and if that fails we then
     try using a signed widening multiply.  */

  if (binoptab == smul_optab
      && class == MODE_INT
      && GET_MODE_SIZE (mode) == 2 * UNITS_PER_WORD
      && optab_handler (smul_optab, word_mode)->insn_code != CODE_FOR_nothing
      && optab_handler (add_optab, word_mode)->insn_code != CODE_FOR_nothing)
      rtx product = NULL_RTX;

      if (optab_handler (umul_widen_optab, mode)->insn_code
	  != CODE_FOR_nothing)
	  product = expand_doubleword_mult (mode, op0, op1, target,
	    delete_insns_since (last);

      if (product == NULL_RTX
	  && optab_handler (smul_widen_optab, mode)->insn_code
	     != CODE_FOR_nothing)
	  product = expand_doubleword_mult (mode, op0, op1, target,
	    delete_insns_since (last);

      if (product != NULL_RTX)
	  if (optab_handler (mov_optab, mode)->insn_code != CODE_FOR_nothing)
	      temp = emit_move_insn (target ? target : product, product);
	      set_unique_reg_note (temp,
				   gen_rtx_fmt_ee (MULT, mode,

  /* It can't be open-coded in this mode.
     Use a library call if one is available and caller says that's ok.  */

  libfunc = optab_libfunc (binoptab, mode);
      && (methods == OPTAB_LIB || methods == OPTAB_LIB_WIDEN))
      enum machine_mode op1_mode = mode;

      if (shift_optab_p (binoptab))
	  op1_mode = targetm.libgcc_shift_count_mode ();
	  /* Specify unsigned here,
	     since negative shift counts are meaningless.  */
	  op1x = convert_to_mode (op1_mode, op1, 1);

      if (GET_MODE (op0) != VOIDmode
	  && GET_MODE (op0) != mode)
	op0 = convert_to_mode (mode, op0, unsignedp);

      /* Pass 1 for NO_QUEUE so we don't lose any increments
	 if the libcall is cse'd or moved.  */
      value = emit_library_call_value (libfunc,
				       NULL_RTX, LCT_CONST, mode, 2,
				       op0, mode, op1x, op1_mode);

      insns = get_insns ();

      target = gen_reg_rtx (mode);
      emit_libcall_block (insns, target, value,
			  gen_rtx_fmt_ee (binoptab->code, mode, op0, op1));

  delete_insns_since (last);

  /* It can't be done in this mode.  Can we do it in a wider mode?  */

  if (! (methods == OPTAB_WIDEN || methods == OPTAB_LIB_WIDEN
	 || methods == OPTAB_MUST_WIDEN))
      /* Caller says, don't even try.  */
      delete_insns_since (entry_last);

  /* Compute the value of METHODS to pass to recursive calls.
     Don't allow widening to be tried recursively.  */

  methods = (methods == OPTAB_LIB_WIDEN ? OPTAB_LIB : OPTAB_DIRECT);

  /* Look for a wider mode of the same class for which it appears we can do
     the operation.  */

  if (CLASS_HAS_WIDER_MODES_P (class))
      for (wider_mode = GET_MODE_WIDER_MODE (mode);
	   wider_mode != VOIDmode;
	   wider_mode = GET_MODE_WIDER_MODE (wider_mode))
	  if ((optab_handler (binoptab, wider_mode)->insn_code
	       != CODE_FOR_nothing)
	      || (methods == OPTAB_LIB
		  && optab_libfunc (binoptab, wider_mode)))
	      rtx xop0 = op0, xop1 = op1;

	      /* For certain integer operations, we need not actually extend
		 the narrow operands, as long as we will truncate
		 the results to the same narrowness.  */

	      if ((binoptab == ior_optab || binoptab == and_optab
		   || binoptab == xor_optab
		   || binoptab == add_optab || binoptab == sub_optab
		   || binoptab == smul_optab || binoptab == ashl_optab)
		  && class == MODE_INT)

	      xop0 = widen_operand (xop0, wider_mode, mode,
				    unsignedp, no_extend);

	      /* The second operand of a shift must always be extended.  */
	      xop1 = widen_operand (xop1, wider_mode, mode, unsignedp,
				    no_extend && binoptab != ashl_optab);

	      temp = expand_binop (wider_mode, binoptab, xop0, xop1, NULL_RTX,
				   unsignedp, methods);

	      if (class != MODE_INT
		  || !TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode),
					     GET_MODE_BITSIZE (wider_mode)))
		  target = gen_reg_rtx (mode);
		  convert_move (target, temp, 0);
		  return gen_lowpart (mode, temp);
	      delete_insns_since (last);

  delete_insns_since (entry_last);
2230 /* Expand a binary operator which has both signed and unsigned forms.
2231 UOPTAB is the optab for unsigned operations, and SOPTAB is for
2234 If we widen unsigned operands, we may use a signed wider operation instead
2235 of an unsigned wider operation, since the result would be the same. */
2238 sign_expand_binop (enum machine_mode mode, optab uoptab, optab soptab,
2239 rtx op0, rtx op1, rtx target, int unsignedp,
2240 enum optab_methods methods)
2243 optab direct_optab = unsignedp ? uoptab : soptab;
2244 struct optab wide_soptab;
2246 /* Do it without widening, if possible. */
2247 temp = expand_binop (mode, direct_optab, op0, op1, target,
2248 unsignedp, OPTAB_DIRECT);
2249 if (temp || methods == OPTAB_DIRECT)
2252 /* Try widening to a signed int. Make a fake signed optab that
2253 hides any signed insn for direct use. */
2254 wide_soptab = *soptab;
2255 optab_handler (&wide_soptab, mode)->insn_code = CODE_FOR_nothing;
2256 /* We don't want to generate new hash table entries from this fake
2258 wide_soptab.libcall_gen = NULL;
2260 temp = expand_binop (mode, &wide_soptab, op0, op1, target,
2261 unsignedp, OPTAB_WIDEN);
2263 /* For unsigned operands, try widening to an unsigned int. */
2264 if (temp == 0 && unsignedp)
2265 temp = expand_binop (mode, uoptab, op0, op1, target,
2266 unsignedp, OPTAB_WIDEN);
2267 if (temp || methods == OPTAB_WIDEN)
2270 /* Use the right width lib call if that exists. */
2271 temp = expand_binop (mode, direct_optab, op0, op1, target, unsignedp, OPTAB_LIB);
2272 if (temp || methods == OPTAB_LIB)
2275 /* Must widen and use a lib call, use either signed or unsigned. */
2276 temp = expand_binop (mode, &wide_soptab, op0, op1, target,
2277 unsignedp, methods);
2281 return expand_binop (mode, uoptab, op0, op1, target,
2282 unsignedp, methods);
2286 /* Generate code to perform an operation specified by UNOPPTAB
2287 on operand OP0, with two results to TARG0 and TARG1.
2288 We assume that the order of the operands for the instruction
2289 is TARG0, TARG1, OP0.
2291 Either TARG0 or TARG1 may be zero, but what that means is that
2292 the result is not actually wanted. We will generate it into
2293 a dummy pseudo-reg and discard it. They may not both be zero.
2295 Returns 1 if this operation can be performed; 0 if not. */
2298 expand_twoval_unop (optab unoptab, rtx op0, rtx targ0, rtx targ1,
2301 enum machine_mode mode = GET_MODE (targ0 ? targ0 : targ1);
2302 enum mode_class class;
2303 enum machine_mode wider_mode;
2304 rtx entry_last = get_last_insn ();
2307 class = GET_MODE_CLASS (mode);
/* Replace an unwanted (zero) output with a scratch pseudo.  */
2310 targ0 = gen_reg_rtx (mode);
2312 targ1 = gen_reg_rtx (mode);
2314 /* Record where to go back to if we fail. */
2315 last = get_last_insn ();
2317 if (optab_handler (unoptab, mode)->insn_code != CODE_FOR_nothing)
2319 int icode = (int) optab_handler (unoptab, mode)->insn_code;
/* Operand 2 is the input; operands 0 and 1 are the two outputs.  */
2320 enum machine_mode mode0 = insn_data[icode].operand[2].mode;
2324 if (GET_MODE (xop0) != VOIDmode
2325 && GET_MODE (xop0) != mode0)
2326 xop0 = convert_to_mode (mode0, xop0, unsignedp);
2328 /* Now, if insn doesn't accept these operands, put them into pseudos. */
2329 if (!insn_data[icode].operand[2].predicate (xop0, mode0))
2330 xop0 = copy_to_mode_reg (mode0, xop0);
2332 /* We could handle this, but we should always be called with a pseudo
2333 for our targets and all insns should take them as outputs. */
2334 gcc_assert (insn_data[icode].operand[0].predicate (targ0, mode));
2335 gcc_assert (insn_data[icode].operand[1].predicate (targ1, mode));
2337 pat = GEN_FCN (icode) (targ0, targ1, xop0);
/* Pattern generation failed: discard insns emitted while preparing.  */
2344 delete_insns_since (last);
2347 /* It can't be done in this mode. Can we do it in a wider mode? */
2349 if (CLASS_HAS_WIDER_MODES_P (class))
2351 for (wider_mode = GET_MODE_WIDER_MODE (mode);
2352 wider_mode != VOIDmode;
2353 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
2355 if (optab_handler (unoptab, wider_mode)->insn_code
2356 != CODE_FOR_nothing)
2358 rtx t0 = gen_reg_rtx (wider_mode);
2359 rtx t1 = gen_reg_rtx (wider_mode);
2360 rtx cop0 = convert_modes (wider_mode, mode, op0, unsignedp);
2362 if (expand_twoval_unop (unoptab, cop0, t0, t1, unsignedp))
/* Narrow the wider-mode results back into the caller's targets.  */
2364 convert_move (targ0, t0, unsignedp);
2365 convert_move (targ1, t1, unsignedp);
2369 delete_insns_since (last);
/* Total failure: roll back everything emitted since entry.  */
2374 delete_insns_since (entry_last);
2378 /* Generate code to perform an operation specified by BINOPTAB
2379 on operands OP0 and OP1, with two results to TARG1 and TARG2.
2380 We assume that the order of the operands for the instruction
2381 is TARG0, OP0, OP1, TARG1, which would fit a pattern like
2382 [(set TARG0 (operate OP0 OP1)) (set TARG1 (operate ...))].
2384 Either TARG0 or TARG1 may be zero, but what that means is that
2385 the result is not actually wanted. We will generate it into
2386 a dummy pseudo-reg and discard it. They may not both be zero.
2388 Returns 1 if this operation can be performed; 0 if not. */
2391 expand_twoval_binop (optab binoptab, rtx op0, rtx op1, rtx targ0, rtx targ1,
2394 enum machine_mode mode = GET_MODE (targ0 ? targ0 : targ1);
2395 enum mode_class class;
2396 enum machine_mode wider_mode;
2397 rtx entry_last = get_last_insn ();
2400 class = GET_MODE_CLASS (mode);
/* Replace an unwanted (zero) output with a scratch pseudo.  */
2403 targ0 = gen_reg_rtx (mode);
2405 targ1 = gen_reg_rtx (mode);
2407 /* Record where to go back to if we fail. */
2408 last = get_last_insn ();
2410 if (optab_handler (binoptab, mode)->insn_code != CODE_FOR_nothing)
2412 int icode = (int) optab_handler (binoptab, mode)->insn_code;
/* Operands 1 and 2 are the inputs; 0 and 3 are the two outputs.  */
2413 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
2414 enum machine_mode mode1 = insn_data[icode].operand[2].mode;
2416 rtx xop0 = op0, xop1 = op1;
2418 /* If we are optimizing, force expensive constants into a register. */
2419 xop0 = avoid_expensive_constant (mode0, binoptab, xop0, unsignedp);
2420 xop1 = avoid_expensive_constant (mode1, binoptab, xop1, unsignedp);
2422 /* In case the insn wants input operands in modes different from
2423 those of the actual operands, convert the operands. It would
2424 seem that we don't need to convert CONST_INTs, but we do, so
2425 that they're properly zero-extended, sign-extended or truncated
2428 if (GET_MODE (op0) != mode0 && mode0 != VOIDmode)
2429 xop0 = convert_modes (mode0,
2430 GET_MODE (op0) != VOIDmode
2435 if (GET_MODE (op1) != mode1 && mode1 != VOIDmode)
2436 xop1 = convert_modes (mode1,
2437 GET_MODE (op1) != VOIDmode
2442 /* Now, if insn doesn't accept these operands, put them into pseudos. */
2443 if (!insn_data[icode].operand[1].predicate (xop0, mode0))
2444 xop0 = copy_to_mode_reg (mode0, xop0)
2446 if (!insn_data[icode].operand[2].predicate (xop1, mode1))
2447 xop1 = copy_to_mode_reg (mode1, xop1);
2449 /* We could handle this, but we should always be called with a pseudo
2450 for our targets and all insns should take them as outputs. */
2451 gcc_assert (insn_data[icode].operand[0].predicate (targ0, mode));
2452 gcc_assert (insn_data[icode].operand[3].predicate (targ1, mode));
2454 pat = GEN_FCN (icode) (targ0, xop0, xop1, targ1);
/* Pattern generation failed: discard insns emitted while preparing.  */
2461 delete_insns_since (last);
2464 /* It can't be done in this mode. Can we do it in a wider mode? */
2466 if (CLASS_HAS_WIDER_MODES_P (class))
2468 for (wider_mode = GET_MODE_WIDER_MODE (mode);
2469 wider_mode != VOIDmode;
2470 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
2472 if (optab_handler (binoptab, wider_mode)->insn_code
2473 != CODE_FOR_nothing)
2475 rtx t0 = gen_reg_rtx (wider_mode);
2476 rtx t1 = gen_reg_rtx (wider_mode);
2477 rtx cop0 = convert_modes (wider_mode, mode, op0, unsignedp);
2478 rtx cop1 = convert_modes (wider_mode, mode, op1, unsignedp);
2480 if (expand_twoval_binop (binoptab, cop0, cop1,
/* Narrow the wider-mode results back into the caller's targets.  */
2483 convert_move (targ0, t0, unsignedp);
2484 convert_move (targ1, t1, unsignedp);
2488 delete_insns_since (last);
/* Total failure: roll back everything emitted since entry.  */
2493 delete_insns_since (entry_last);
2497 /* Expand the two-valued library call indicated by BINOPTAB, but
2498 preserve only one of the values. If TARG0 is non-NULL, the first
2499 value is placed into TARG0; otherwise the second value is placed
2500 into TARG1. Exactly one of TARG0 and TARG1 must be non-NULL. The
2501 value stored into TARG0 or TARG1 is equivalent to (CODE OP0 OP1).
2502 This routine assumes that the value returned by the library call is
2503 as if the return value was of an integral mode twice as wide as the
2504 mode of OP0. Returns 1 if the call was successful. */
2507 expand_twoval_binop_libfunc (optab binoptab, rtx op0, rtx op1,
2508 rtx targ0, rtx targ1, enum rtx_code code)
2510 enum machine_mode mode;
2511 enum machine_mode libval_mode;
2516 /* Exactly one of TARG0 or TARG1 should be non-NULL. */
2517 gcc_assert (!targ0 != !targ1);
2519 mode = GET_MODE (op0);
2520 libfunc = optab_libfunc (binoptab, mode);
2524 /* The value returned by the library function will have twice as
2525 many bits as the nominal MODE. */
2526 libval_mode = smallest_mode_for_size (2 * GET_MODE_BITSIZE (mode),
2529 libval = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
2533 /* Get the part of VAL containing the value that we want. */
/* First half for TARG0, second half (offset by one MODE) for TARG1.  */
2534 libval = simplify_gen_subreg (mode, libval, libval_mode,
2535 targ0 ? 0 : GET_MODE_SIZE (mode));
2536 insns = get_insns ();
2538 /* Move the into the desired location. */
2539 emit_libcall_block (insns, targ0 ? targ0 : targ1, libval,
2540 gen_rtx_fmt_ee (code, mode, op0, op1));
2546 /* Wrapper around expand_unop which takes an rtx code to specify
2547 the operation to perform, not an optab pointer. All other
2548 arguments are the same. */
2550 expand_simple_unop (enum machine_mode mode, enum rtx_code code, rtx op0,
2551 rtx target, int unsignedp)
/* Map the rtx code to its optab via the global translation table.  */
2553 optab unop = code_to_optab[(int) code];
2556 return expand_unop (mode, unop, op0, target, unsignedp);
2562 (clz:wide (zero_extend:wide x)) - ((width wide) - (width narrow)). */
/* Try to compute clz in MODE by doing it in the first wider mode that
   has a clz insn, then subtracting the extra bit count.  Returns the
   result rtx, or (presumably) 0 on failure -- the returns were lost in
   this extract; confirm against upstream.  */
2564 widen_clz (enum machine_mode mode, rtx op0, rtx target)
2566 enum mode_class class = GET_MODE_CLASS (mode);
2567 if (CLASS_HAS_WIDER_MODES_P (class))
2569 enum machine_mode wider_mode;
2570 for (wider_mode = GET_MODE_WIDER_MODE (mode);
2571 wider_mode != VOIDmode;
2572 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
2574 if (optab_handler (clz_optab, wider_mode)->insn_code
2575 != CODE_FOR_nothing)
2577 rtx xop0, temp, last;
2579 last = get_last_insn ();
2582 target = gen_reg_rtx (mode);
/* Zero-extend the operand: leading zeros of the wide value are the
   narrow value's leading zeros plus the width difference.  */
2583 xop0 = widen_operand (op0, wider_mode, mode, true, false);
2584 temp = expand_unop (wider_mode, clz_optab, xop0, NULL_RTX, true);
2586 temp = expand_binop (wider_mode, sub_optab, temp,
2587 GEN_INT (GET_MODE_BITSIZE (wider_mode)
2588 - GET_MODE_BITSIZE (mode)),
2589 target, true, OPTAB_DIRECT);
2591 delete_insns_since (last);
2600 /* Try calculating clz of a double-word quantity as two clz's of word-sized
2601 quantities, choosing which based on whether the high word is nonzero. */
2603 expand_doubleword_clz (enum machine_mode mode, rtx op0, rtx target)
2605 rtx xop0 = force_reg (mode, op0);
2606 rtx subhi = gen_highpart (word_mode, xop0);
2607 rtx sublo = gen_lowpart (word_mode, xop0);
2608 rtx hi0_label = gen_label_rtx ();
2609 rtx after_label = gen_label_rtx ();
2610 rtx seq, temp, result;
2612 /* If we were not given a target, use a word_mode register, not a
2613 'mode' register. The result will fit, and nobody is expecting
2614 anything bigger (the return type of __builtin_clz* is int). */
2616 target = gen_reg_rtx (word_mode);
2618 /* In any case, write to a word_mode scratch in both branches of the
2619 conditional, so we can ensure there is a single move insn setting
2620 'target' to tag a REG_EQUAL note on. */
2621 result = gen_reg_rtx (word_mode);
2625 /* If the high word is not equal to zero,
2626 then clz of the full value is clz of the high word. */
2627 emit_cmp_and_jump_insns (subhi, CONST0_RTX (word_mode), EQ, 0,
2628 word_mode, true, hi0_label);
2630 temp = expand_unop_direct (word_mode, clz_optab, subhi, result, true);
2635 convert_move (result, temp, true);
2637 emit_jump_insn (gen_jump (after_label));
2640 /* Else clz of the full value is clz of the low word plus the number
2641 of bits in the high word. */
2642 emit_label (hi0_label);
2644 temp = expand_unop_direct (word_mode, clz_optab, sublo, 0, true);
2647 temp = expand_binop (word_mode, add_optab, temp,
2648 GEN_INT (GET_MODE_BITSIZE (word_mode)),
2649 result, true, OPTAB_DIRECT);
2653 convert_move (result, temp, true);
2655 emit_label (after_label);
/* Single move into TARGET so the REG_EQUAL note attaches cleanly.  */
2656 convert_move (target, result, true);
2661 add_equal_note (seq, target, CLZ, xop0, 0);
2673 (lshiftrt:wide (bswap:wide x) ((width wide) - (width narrow))). */
/* Try to compute bswap in MODE via a wider mode that has a bswap insn:
   bswap the widened value, then shift the interesting bytes down.  */
2675 widen_bswap (enum machine_mode mode, rtx op0, rtx target)
2677 enum mode_class class = GET_MODE_CLASS (mode);
2678 enum machine_mode wider_mode;
2681 if (!CLASS_HAS_WIDER_MODES_P (class))
2684 for (wider_mode = GET_MODE_WIDER_MODE (mode);
2685 wider_mode != VOIDmode;
2686 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
2687 if (optab_handler (bswap_optab, wider_mode)->insn_code != CODE_FOR_nothing)
2692 last = get_last_insn ();
2694 x = widen_operand (op0, wider_mode, mode, true, true);
2695 x = expand_unop (wider_mode, bswap_optab, x, NULL_RTX, true);
/* After the wide bswap our bytes sit in the high part; shift right by
   the width difference to bring them to the low part.  */
2698 x = expand_shift (RSHIFT_EXPR, wider_mode, x,
2699 size_int (GET_MODE_BITSIZE (wider_mode)
2700 - GET_MODE_BITSIZE (mode)),
2706 target = gen_reg_rtx (mode);
2707 emit_move_insn (target, gen_lowpart (mode, x));
2710 delete_insns_since (last);
2715 /* Try calculating bswap as two bswaps of two word-sized operands. */
2718 expand_doubleword_bswap (enum machine_mode mode, rtx op, rtx target)
/* Byte-swap each word, then store them in swapped positions: the
   swapped low word becomes the high word of the result and vice versa
   (t1 from word 0 goes to word 1, t0 from word 1 goes to word 0).  */
2722 t1 = expand_unop (word_mode, bswap_optab,
2723 operand_subword_force (op, 0, mode), NULL_RTX, true);
2724 t0 = expand_unop (word_mode, bswap_optab,
2725 operand_subword_force (op, 1, mode), NULL_RTX, true);
2728 target = gen_reg_rtx (mode);
/* Clobber first so the two partial stores don't look like a partial
   update of an uninitialized pseudo.  */
2730 emit_insn (gen_rtx_CLOBBER (VOIDmode, target));
2731 emit_move_insn (operand_subword (target, 0, 1, mode), t0);
2732 emit_move_insn (operand_subword (target, 1, 1, mode), t1);
2737 /* Try calculating (parity x) as (and (popcount x) 1), where
2738 popcount can also be done in a wider mode. */
2740 expand_parity (enum machine_mode mode, rtx op0, rtx target)
2742 enum mode_class class = GET_MODE_CLASS (mode);
2743 if (CLASS_HAS_WIDER_MODES_P (class))
2745 enum machine_mode wider_mode;
/* Note: unlike widen_clz, this loop starts at MODE itself, so a
   same-mode popcount insn is also used.  */
2746 for (wider_mode = mode; wider_mode != VOIDmode;
2747 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
2749 if (optab_handler (popcount_optab, wider_mode)->insn_code
2750 != CODE_FOR_nothing)
2752 rtx xop0, temp, last;
2754 last = get_last_insn ();
2757 target = gen_reg_rtx (mode);
/* Zero-extend: added zero bits do not change the popcount.  */
2758 xop0 = widen_operand (op0, wider_mode, mode, true, false);
2759 temp = expand_unop (wider_mode, popcount_optab, xop0, NULL_RTX,
/* parity = popcount & 1.  */
2762 temp = expand_binop (wider_mode, and_optab, temp, const1_rtx,
2763 target, true, OPTAB_DIRECT);
2765 delete_insns_since (last);
2774 /* Try calculating ctz(x) as K - clz(x & -x) ,
2775 where K is GET_MODE_BITSIZE(mode) - 1.
2777 Both __builtin_ctz and __builtin_clz are undefined at zero, so we
2778 don't have to worry about what the hardware does in that case. (If
2779 the clz instruction produces the usual value at 0, which is K, the
2780 result of this code sequence will be -1; expand_ffs, below, relies
2781 on this. It might be nice to have it be K instead, for consistency
2782 with the (very few) processors that provide a ctz with a defined
2783 value, but that would take one more instruction, and it would be
2784 less convenient for expand_ffs anyway. */
2787 expand_ctz (enum machine_mode mode, rtx op0, rtx target)
/* Requires a clz insn in this very mode; no widening fallback here.  */
2791 if (optab_handler (clz_optab, mode)->insn_code == CODE_FOR_nothing)
/* x & -x isolates the lowest set bit.  */
2796 temp = expand_unop_direct (mode, neg_optab, op0, NULL_RTX, true);
2798 temp = expand_binop (mode, and_optab, op0, temp, NULL_RTX,
2799 true, OPTAB_DIRECT);
2801 temp = expand_unop_direct (mode, clz_optab, temp, NULL_RTX, true);
2803 temp = expand_binop (mode, sub_optab, GEN_INT (GET_MODE_BITSIZE (mode) - 1),
2805 true, OPTAB_DIRECT);
2815 add_equal_note (seq, temp, CTZ, op0, 0);
2821 /* Try calculating ffs(x) using ctz(x) if we have that instruction, or
2822 else with the sequence used by expand_clz.
2824 The ffs builtin promises to return zero for a zero value and ctz/clz
2825 may have an undefined value in that case. If they do not give us a
2826 convenient value, we have to generate a test and branch. */
2828 expand_ffs (enum machine_mode mode, rtx op0, rtx target)
2830 HOST_WIDE_INT val = 0;
2831 bool defined_at_zero = false;
2834 if (optab_handler (ctz_optab, mode)->insn_code != CODE_FOR_nothing)
2838 temp = expand_unop_direct (mode, ctz_optab, op0, 0, true);
/* ==2 means the hardware documents a specific value for ctz(0).  */
2842 defined_at_zero = (CTZ_DEFINED_VALUE_AT_ZERO (mode, val) == 2);
2844 else if (optab_handler (clz_optab, mode)->insn_code != CODE_FOR_nothing)
2847 temp = expand_ctz (mode, op0, 0);
2851 if (CLZ_DEFINED_VALUE_AT_ZERO (mode, val) == 2)
2853 defined_at_zero = true;
/* Translate the clz(0) value through the K - clz identity used by
   expand_ctz to get the effective ctz(0) value.  */
2854 val = (GET_MODE_BITSIZE (mode) - 1) - val;
2860 if (defined_at_zero && val == -1)
2861 /* No correction needed at zero. */;
2864 /* We don't try to do anything clever with the situation found
2865 on some processors (eg Alpha) where ctz(0:mode) ==
2866 bitsize(mode). If someone can think of a way to send N to -1
2867 and leave alone all values in the range 0..N-1 (where N is a
2868 power of two), cheaper than this test-and-branch, please add it.
2870 The test-and-branch is done after the operation itself, in case
2871 the operation sets condition codes that can be recycled for this.
2872 (This is true on i386, for instance.) */
2874 rtx nonzero_label = gen_label_rtx ();
2875 emit_cmp_and_jump_insns (op0, CONST0_RTX (mode), NE, 0,
2876 mode, true, nonzero_label);
/* OP0 was zero: force the intermediate result to -1 so the final
   +1 below yields ffs(0) == 0.  */
2878 convert_move (temp, GEN_INT (-1), false);
2879 emit_label (nonzero_label);
2882 /* temp now has a value in the range -1..bitsize-1. ffs is supposed
2883 to produce a value in the range 0..bitsize. */
2884 temp = expand_binop (mode, add_optab, temp, GEN_INT (1),
2885 target, false, OPTAB_DIRECT);
2892 add_equal_note (seq, temp, FFS, op0, 0);
2901 /* Extract the OMODE lowpart from VAL, which has IMODE. Under certain
2902 conditions, VAL may already be a SUBREG against which we cannot generate
2903 a further SUBREG. In this case, we expect forcing the value into a
2904 register will work around the situation. */
2907 lowpart_subreg_maybe_copy (enum machine_mode omode, rtx val,
2908 enum machine_mode imode)
2911 ret = lowpart_subreg (omode, val, imode);
/* First attempt failed (likely VAL was itself a SUBREG): copy VAL into
   a fresh register and retry; this time it must succeed.  */
2914 val = force_reg (imode, val);
2915 ret = lowpart_subreg (omode, val, imode);
2916 gcc_assert (ret != NULL);
2921 /* Expand a floating point absolute value or negation operation via a
2922 logical operation on the sign bit. */
2925 expand_absneg_bit (enum rtx_code code, enum machine_mode mode,
2926 rtx op0, rtx target)
2928 const struct real_format *fmt;
2929 int bitpos, word, nwords, i;
2930 enum machine_mode imode;
2931 HOST_WIDE_INT hi, lo;
2934 /* The format has to have a simple sign bit. */
2935 fmt = REAL_MODE_FORMAT (mode);
2939 bitpos = fmt->signbit_rw;
2943 /* Don't create negative zeros if the format doesn't support them. */
2944 if (code == NEG && !fmt->has_signed_zero)
/* Single-word case: operate on the whole value in an integer mode.  */
2947 if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
2949 imode = int_mode_for_mode (mode);
2950 if (imode == BLKmode)
/* Multi-word case: locate the word and the bit holding the sign.  */
2959 if (FLOAT_WORDS_BIG_ENDIAN)
2960 word = (GET_MODE_BITSIZE (mode) - bitpos) / BITS_PER_WORD;
2962 word = bitpos / BITS_PER_WORD;
2963 bitpos = bitpos % BITS_PER_WORD;
2964 nwords = (GET_MODE_BITSIZE (mode) + BITS_PER_WORD - 1) / BITS_PER_WORD;
/* Build the sign-bit mask as a (lo, hi) HOST_WIDE_INT pair.  */
2967 if (bitpos < HOST_BITS_PER_WIDE_INT)
2970 lo = (HOST_WIDE_INT) 1 << bitpos;
2974 hi = (HOST_WIDE_INT) 1 << (bitpos - HOST_BITS_PER_WIDE_INT);
2980 if (target == 0 || target == op0)
2981 target = gen_reg_rtx (mode);
2987 for (i = 0; i < nwords; ++i)
2989 rtx targ_piece = operand_subword (target, i, 1, mode);
2990 rtx op0_piece = operand_subword_force (op0, i, mode);
/* ABS clears the sign bit (AND with complement in the elided code
   path uses the mask); NEG flips it (XOR).  Only the word holding
   the sign bit is modified; other words are copied through.  */
2994 temp = expand_binop (imode, code == ABS ? and_optab : xor_optab,
2996 immed_double_const (lo, hi, imode),
2997 targ_piece, 1, OPTAB_LIB_WIDEN);
2998 if (temp != targ_piece)
2999 emit_move_insn (targ_piece, temp);
3002 emit_move_insn (targ_piece, op0_piece);
3005 insns = get_insns ();
/* Single-word path: one AND/XOR on the integer view of the value.  */
3012 temp = expand_binop (imode, code == ABS ? and_optab : xor_optab,
3013 gen_lowpart (imode, op0),
3014 immed_double_const (lo, hi, imode),
3015 gen_lowpart (imode, target), 1, OPTAB_LIB_WIDEN);
3016 target = lowpart_subreg_maybe_copy (mode, temp, imode);
3018 set_unique_reg_note (get_last_insn (), REG_EQUAL,
3019 gen_rtx_fmt_e (code, mode, copy_rtx (op0)));
3025 /* As expand_unop, but will fail rather than attempt the operation in a
3026 different mode or with a libcall. */
3028 expand_unop_direct (enum machine_mode mode, optab unoptab, rtx op0, rtx target,
3031 if (optab_handler (unoptab, mode)->insn_code != CODE_FOR_nothing)
3033 int icode = (int) optab_handler (unoptab, mode)->insn_code;
3034 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
3036 rtx last = get_last_insn ();
3042 temp = gen_reg_rtx (mode);
3044 if (GET_MODE (xop0) != VOIDmode
3045 && GET_MODE (xop0) != mode0)
3046 xop0 = convert_to_mode (mode0, xop0, unsignedp);
3048 /* Now, if insn doesn't accept our operand, put it into a pseudo. */
3050 if (!insn_data[icode].operand[1].predicate (xop0, mode0))
3051 xop0 = copy_to_mode_reg (mode0, xop0);
3053 if (!insn_data[icode].operand[0].predicate (temp, mode))
3054 temp = gen_reg_rtx (mode);
3056 pat = GEN_FCN (icode) (temp, xop0);
/* Multi-insn pattern: try to tag a REG_EQUAL note; if that fails,
   throw the sequence away and let the full expand_unop start over.  */
3059 if (INSN_P (pat) && NEXT_INSN (pat) != NULL_RTX
3060 && ! add_equal_note (pat, temp, unoptab->code, xop0, NULL_RTX))
3062 delete_insns_since (last);
3063 return expand_unop (mode, unoptab, op0, NULL_RTX, unsignedp);
3071 delete_insns_since (last);
3076 /* Generate code to perform an operation specified by UNOPTAB
3077 on operand OP0, with result having machine-mode MODE.
3079 UNSIGNEDP is for the case where we have to widen the operands
3080 to perform the operation. It says to use zero-extension.
3082 If TARGET is nonzero, the value
3083 is generated there, if it is convenient to do so.
3084 In all cases an rtx is returned for the locus of the value;
3085 this may or may not be TARGET. */
3088 expand_unop (enum machine_mode mode, optab unoptab, rtx op0, rtx target,
3091 enum mode_class class = GET_MODE_CLASS (mode);
3092 enum machine_mode wider_mode;
/* Strategy 1: a direct insn in this mode.  */
3096 temp = expand_unop_direct (mode, unoptab, op0, target, unsignedp);
3100 /* It can't be done in this mode. Can we open-code it in a wider mode? */
3102 /* Widening (or narrowing) clz needs special treatment. */
3103 if (unoptab == clz_optab)
3105 temp = widen_clz (mode, op0, target);
3109 if (GET_MODE_SIZE (mode) == 2 * UNITS_PER_WORD
3110 && optab_handler (unoptab, word_mode)->insn_code != CODE_FOR_nothing)
3112 temp = expand_doubleword_clz (mode, op0, target);
3120 /* Widening (or narrowing) bswap needs special treatment. */
3121 if (unoptab == bswap_optab)
3123 temp = widen_bswap (mode, op0, target);
3127 if (GET_MODE_SIZE (mode) == 2 * UNITS_PER_WORD
3128 && optab_handler (unoptab, word_mode)->insn_code != CODE_FOR_nothing)
3130 temp = expand_doubleword_bswap (mode, op0, target);
/* Strategy 2: open-code in the first wider mode with an insn.  */
3138 if (CLASS_HAS_WIDER_MODES_P (class))
3139 for (wider_mode = GET_MODE_WIDER_MODE (mode);
3140 wider_mode != VOIDmode;
3141 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
3143 if (optab_handler (unoptab, wider_mode)->insn_code != CODE_FOR_nothing)
3146 rtx last = get_last_insn ();
3148 /* For certain operations, we need not actually extend
3149 the narrow operand, as long as we will truncate the
3150 results to the same narrowness. */
3152 xop0 = widen_operand (xop0, wider_mode, mode, unsignedp,
3153 (unoptab == neg_optab
3154 || unoptab == one_cmpl_optab)
3155 && class == MODE_INT);
3157 temp = expand_unop (wider_mode, unoptab, xop0, NULL_RTX,
3162 if (class != MODE_INT
3163 || !TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode),
3164 GET_MODE_BITSIZE (wider_mode)))
3167 target = gen_reg_rtx (mode);
3168 convert_move (target, temp, 0);
3172 return gen_lowpart (mode, temp);
3175 delete_insns_since (last);
3179 /* These can be done a word at a time. */
3180 if (unoptab == one_cmpl_optab
3181 && class == MODE_INT
3182 && GET_MODE_SIZE (mode) > UNITS_PER_WORD
3183 && optab_handler (unoptab, word_mode)->insn_code != CODE_FOR_nothing)
3188 if (target == 0 || target == op0)
3189 target = gen_reg_rtx (mode);
3193 /* Do the actual arithmetic. */
3194 for (i = 0; i < GET_MODE_BITSIZE (mode) / BITS_PER_WORD; i++)
3196 rtx target_piece = operand_subword (target, i, 1, mode);
3197 rtx x = expand_unop (word_mode, unoptab,
3198 operand_subword_force (op0, i, mode),
3199 target_piece, unsignedp);
3201 if (target_piece != x)
3202 emit_move_insn (target_piece, x);
3205 insns = get_insns ();
/* Special fallbacks for specific operations.  */
3212 if (unoptab->code == NEG)
3214 /* Try negating floating point values by flipping the sign bit. */
3215 if (SCALAR_FLOAT_MODE_P (mode))
3217 temp = expand_absneg_bit (NEG, mode, op0, target);
3222 /* If there is no negation pattern, and we have no negative zero,
3223 try subtracting from zero. */
3224 if (!HONOR_SIGNED_ZEROS (mode))
3226 temp = expand_binop (mode, (unoptab == negv_optab
3227 ? subv_optab : sub_optab),
3228 CONST0_RTX (mode), op0, target,
3229 unsignedp, OPTAB_DIRECT);
3235 /* Try calculating parity (x) as popcount (x) % 2. */
3236 if (unoptab == parity_optab)
3238 temp = expand_parity (mode, op0, target);
3243 /* Try implementing ffs (x) in terms of clz (x). */
3244 if (unoptab == ffs_optab)
3246 temp = expand_ffs (mode, op0, target);
3251 /* Try implementing ctz (x) in terms of clz (x). */
3252 if (unoptab == ctz_optab)
3254 temp = expand_ctz (mode, op0, target);
3260 /* Now try a library call in this mode. */
3261 libfunc = optab_libfunc (unoptab, mode);
3267 enum machine_mode outmode = mode;
3269 /* All of these functions return small values. Thus we choose to
3270 have them return something that isn't a double-word. */
3271 if (unoptab == ffs_optab || unoptab == clz_optab || unoptab == ctz_optab
3272 || unoptab == popcount_optab || unoptab == parity_optab)
3274 = GET_MODE (hard_libcall_value (TYPE_MODE (integer_type_node)));
3278 /* Pass 1 for NO_QUEUE so we don't lose any increments
3279 if the libcall is cse'd or moved. */
3280 value = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST, outmode,
3282 insns = get_insns ();
3285 target = gen_reg_rtx (outmode);
/* Attach an equivalent rtx so CSE can fold the libcall result.  */
3286 eq_value = gen_rtx_fmt_e (unoptab->code, mode, op0);
3287 if (GET_MODE_SIZE (outmode) < GET_MODE_SIZE (mode))
3288 eq_value = simplify_gen_unary (TRUNCATE, outmode, eq_value, mode);
3289 else if (GET_MODE_SIZE (outmode) > GET_MODE_SIZE (mode))
3290 eq_value = simplify_gen_unary (ZERO_EXTEND, outmode, eq_value, mode);
3291 emit_libcall_block (insns, target, value, eq_value);
3296 /* It can't be done in this mode. Can we do it in a wider mode? */
/* Strategy 4: wider mode, allowing a wider-mode libcall too.  */
3298 if (CLASS_HAS_WIDER_MODES_P (class))
3300 for (wider_mode = GET_MODE_WIDER_MODE (mode);
3301 wider_mode != VOIDmode;
3302 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
3304 if ((optab_handler (unoptab, wider_mode)->insn_code
3305 != CODE_FOR_nothing)
3306 || optab_libfunc (unoptab, wider_mode))
3309 rtx last = get_last_insn ();
3311 /* For certain operations, we need not actually extend
3312 the narrow operand, as long as we will truncate the
3313 results to the same narrowness. */
3315 xop0 = widen_operand (xop0, wider_mode, mode, unsignedp,
3316 (unoptab == neg_optab
3317 || unoptab == one_cmpl_optab)
3318 && class == MODE_INT);
3320 temp = expand_unop (wider_mode, unoptab, xop0, NULL_RTX,
3323 /* If we are generating clz using wider mode, adjust the
3325 if (unoptab == clz_optab && temp != 0)
3326 temp = expand_binop (wider_mode, sub_optab, temp,
3327 GEN_INT (GET_MODE_BITSIZE (wider_mode)
3328 - GET_MODE_BITSIZE (mode)),
3329 target, true, OPTAB_DIRECT);
3333 if (class != MODE_INT)
3336 target = gen_reg_rtx (mode);
3337 convert_move (target, temp, 0);
3341 return gen_lowpart (mode, temp);
3344 delete_insns_since (last);
3349 /* One final attempt at implementing negation via subtraction,
3350 this time allowing widening of the operand. */
3351 if (unoptab->code == NEG && !HONOR_SIGNED_ZEROS (mode))
3354 temp = expand_binop (mode,
3355 unoptab == negv_optab ? subv_optab : sub_optab,
3356 CONST0_RTX (mode), op0,
3357 target, unsignedp, OPTAB_LIB_WIDEN);
3365 /* Emit code to compute the absolute value of OP0, with result to
3366 TARGET if convenient. (TARGET may be 0.) The return value says
3367 where the result actually is to be found.
3369 MODE is the mode of the operand; the mode of the result is
3370 different but can be deduced from MODE.
3375 expand_abs_nojump (enum machine_mode mode, rtx op0, rtx target,
3376 int result_unsignedp)
3381 result_unsignedp = 1;
3383 /* First try to do it with a special abs instruction. */
3384 temp = expand_unop (mode, result_unsignedp ? abs_optab : absv_optab,
3389 /* For floating point modes, try clearing the sign bit. */
3390 if (SCALAR_FLOAT_MODE_P (mode))
3392 temp = expand_absneg_bit (ABS, mode, op0, target);
3397 /* If we have a MAX insn, we can do this as MAX (x, -x). */
3398 if (optab_handler (smax_optab, mode)->insn_code != CODE_FOR_nothing
3399 && !HONOR_SIGNED_ZEROS (mode))
3401 rtx last = get_last_insn ();
3403 temp = expand_unop (mode, neg_optab, op0, NULL_RTX, 0);
3405 temp = expand_binop (mode, smax_optab, op0, temp, target, 0,
3411 delete_insns_since (last);
3414 /* If this machine has expensive jumps, we can do integer absolute
3415 value of X as (((signed) x >> (W-1)) ^ x) - ((signed) x >> (W-1)),
3416 where W is the width of MODE. */
3418 if (GET_MODE_CLASS (mode) == MODE_INT && BRANCH_COST >= 2)
/* EXTENDED is all-ones when OP0 is negative, zero otherwise.  */
3420 rtx extended = expand_shift (RSHIFT_EXPR, mode, op0,
3421 size_int (GET_MODE_BITSIZE (mode) - 1),
3424 temp = expand_binop (mode, xor_optab, extended, op0, target, 0,
3427 temp = expand_binop (mode, result_unsignedp ? sub_optab : subv_optab,
3428 temp, extended, target, 0, OPTAB_LIB_WIDEN);
/* As expand_abs_nojump, but falls back to a compare-and-branch sequence
   (copy OP0, skip if non-negative, else negate).  SAFE says whether
   TARGET may be reused as scratch.  */
3438 expand_abs (enum machine_mode mode, rtx op0, rtx target,
3439 int result_unsignedp, int safe)
3444 result_unsignedp = 1;
3446 temp = expand_abs_nojump (mode, op0, target, result_unsignedp);
3450 /* If that does not win, use conditional jump and negate. */
3452 /* It is safe to use the target if it is the same
3453 as the source if this is also a pseudo register */
3454 if (op0 == target && REG_P (op0)
3455 && REGNO (op0) >= FIRST_PSEUDO_REGISTER)
3458 op1 = gen_label_rtx ();
3459 if (target == 0 || ! safe
3460 || GET_MODE (target) != mode
3461 || (MEM_P (target) && MEM_VOLATILE_P (target))
3463 && REGNO (target) < FIRST_PSEUDO_REGISTER))
3464 target = gen_reg_rtx (mode);
3466 emit_move_insn (target, op0);
/* Skip the negation when the value is already non-negative.  */
3469 do_compare_rtx_and_jump (target, CONST0_RTX (mode), GE, 0, mode,
3470 NULL_RTX, NULL_RTX, op1);
3472 op0 = expand_unop (mode, result_unsignedp ? neg_optab : negv_optab,
3475 emit_move_insn (target, op0);
3481 /* A subroutine of expand_copysign, perform the copysign operation using the
3482 abs and neg primitives advertised to exist on the target. The assumption
3483 is that we have a split register file, and leaving op0 in fp registers,
3484 and not playing with subregs so much, will help the register allocator. */
3487 expand_copysign_absneg (enum machine_mode mode, rtx op0, rtx op1, rtx target,
3488 int bitpos, bool op0_is_abs)
3490 enum machine_mode imode;
3497 /* Check if the back end provides an insn that handles signbit for the
3499 icode = (int) signbit_optab->handlers [(int) mode].insn_code;
3500 if (icode != CODE_FOR_nothing)
3502 imode = insn_data[icode].operand[0].mode;
3503 sign = gen_reg_rtx (imode);
3504 emit_unop_insn (icode, sign, op1, UNKNOWN);
/* No signbit insn: extract OP1's sign bit by masking manually.  */
3508 HOST_WIDE_INT hi, lo;
3510 if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
3512 imode = int_mode_for_mode (mode);
3513 if (imode == BLKmode)
3515 op1 = gen_lowpart (imode, op1);
3522 if (FLOAT_WORDS_BIG_ENDIAN)
3523 word = (GET_MODE_BITSIZE (mode) - bitpos) / BITS_PER_WORD;
3525 word = bitpos / BITS_PER_WORD;
3526 bitpos = bitpos % BITS_PER_WORD;
3527 op1 = operand_subword_force (op1, word, mode);
/* Build the sign-bit mask as a (lo, hi) HOST_WIDE_INT pair.  */
3530 if (bitpos < HOST_BITS_PER_WIDE_INT)
3533 lo = (HOST_WIDE_INT) 1 << bitpos;
3537 hi = (HOST_WIDE_INT) 1 << (bitpos - HOST_BITS_PER_WIDE_INT);
3541 sign = gen_reg_rtx (imode);
3542 sign = expand_binop (imode, and_optab, op1,
3543 immed_double_const (lo, hi, imode),
3544 NULL_RTX, 1, OPTAB_LIB_WIDEN);
/* Start from |OP0| (skipped when the caller knows OP0 is already abs),
   then negate it when OP1's sign bit is set.  */
3549 op0 = expand_unop (mode, abs_optab, op0, target, 0);
3556 if (target == NULL_RTX)
3557 target = copy_to_reg (op0);
3559 emit_move_insn (target, op0);
3562 label = gen_label_rtx ();
3563 emit_cmp_and_jump_insns (sign, const0_rtx, EQ, NULL_RTX, imode, 1, label);
3565 if (GET_CODE (op0) == CONST_DOUBLE)
3566 op0 = simplify_unary_operation (NEG, mode, op0, mode);
3568 op0 = expand_unop (mode, neg_optab, op0, target, 0);
3570 emit_move_insn (target, op0);
3578 /* A subroutine of expand_copysign, perform the entire copysign operation
3579 with integer bitmasks. BITPOS is the position of the sign bit; OP0_IS_ABS
3580 is true if op0 is known to have its sign bit clear. */
3583 expand_copysign_bit (enum machine_mode mode, rtx op0, rtx op1, rtx target,
3584 int bitpos, bool op0_is_abs)
3586 enum machine_mode imode;
3587 HOST_WIDE_INT hi, lo;
3588 int word, nwords, i;
3591 if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
3593 imode = int_mode_for_mode (mode);
3594 if (imode == BLKmode)
/* Multi-word value: locate the word and bit holding the sign.  */
3603 if (FLOAT_WORDS_BIG_ENDIAN)
3604 word = (GET_MODE_BITSIZE (mode) - bitpos) / BITS_PER_WORD;
3606 word = bitpos / BITS_PER_WORD;
3607 bitpos = bitpos % BITS_PER_WORD;
3608 nwords = (GET_MODE_BITSIZE (mode) + BITS_PER_WORD - 1) / BITS_PER_WORD;
/* Build the sign-bit mask as a (lo, hi) HOST_WIDE_INT pair.  */
3611 if (bitpos < HOST_BITS_PER_WIDE_INT)
3614 lo = (HOST_WIDE_INT) 1 << bitpos;
3618 hi = (HOST_WIDE_INT) 1 << (bitpos - HOST_BITS_PER_WIDE_INT);
3622 if (target == 0 || target == op0 || target == op1)
3623 target = gen_reg_rtx (mode);
3629 for (i = 0; i < nwords; ++i)
3631 rtx targ_piece = operand_subword (target, i, 1, mode);
3632 rtx op0_piece = operand_subword_force (op0, i, mode);
/* For the word carrying the sign: (OP0 & ~mask) | (OP1 & mask).
   The AND with ~mask is skipped when OP0 is known to be abs.  */
3637 op0_piece = expand_binop (imode, and_optab, op0_piece,
3638 immed_double_const (~lo, ~hi, imode),
3639 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3641 op1 = expand_binop (imode, and_optab,
3642 operand_subword_force (op1, i, mode),
3643 immed_double_const (lo, hi, imode),
3644 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3646 temp = expand_binop (imode, ior_optab, op0_piece, op1,
3647 targ_piece, 1, OPTAB_LIB_WIDEN);
3648 if (temp != targ_piece)
3649 emit_move_insn (targ_piece, temp);
3652 emit_move_insn (targ_piece, op0_piece);
3655 insns = get_insns ();
/* Single-word path: same (OP0 & ~mask) | (OP1 & mask) on the whole
   integer view of the value.  */
3662 op1 = expand_binop (imode, and_optab, gen_lowpart (imode, op1),
3663 immed_double_const (lo, hi, imode),
3664 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3666 op0 = gen_lowpart (imode, op0);
3668 op0 = expand_binop (imode, and_optab, op0,
3669 immed_double_const (~lo, ~hi, imode),
3670 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3672 temp = expand_binop (imode, ior_optab, op0, op1,
3673 gen_lowpart (imode, target), 1, OPTAB_LIB_WIDEN);
3674 target = lowpart_subreg_maybe_copy (mode, temp, imode);
3680 /* Expand the C99 copysign operation. OP0 and OP1 must be the same
3681 scalar floating point mode. Return NULL if we do not know how to
3682 expand the operation inline. */
3685 expand_copysign (rtx op0, rtx op1, rtx target)
3687 enum machine_mode mode = GET_MODE (op0);
3688 const struct real_format *fmt;
3692 gcc_assert (SCALAR_FLOAT_MODE_P (mode));
3693 gcc_assert (GET_MODE (op1) == mode);
3695 /* First try to do it with a special instruction. */
3696 temp = expand_binop (mode, copysign_optab, op0, op1,
3697 target, 0, OPTAB_DIRECT);
/* Without a signed zero in this format, sign manipulation via bit
   tricks is meaningless; punt to the caller.  */
3701 fmt = REAL_MODE_FORMAT (mode);
3702 if (fmt == NULL || !fmt->has_signed_zero)
/* Fold the abs into a constant op0 up front so the bit/absneg helpers
   can treat it as known-nonnegative.  */
3706 if (GET_CODE (op0) == CONST_DOUBLE)
3708 if (real_isneg (CONST_DOUBLE_REAL_VALUE (op0)))
3709 op0 = simplify_unary_operation (ABS, mode, op0, mode);
/* Prefer the abs/neg strategy when the sign bit is readable and the
   target has both abs and neg patterns (or op0 is constant).  */
3713 if (fmt->signbit_ro >= 0
3714 && (GET_CODE (op0) == CONST_DOUBLE
3715 || (optab_handler (neg_optab, mode)->insn_code != CODE_FOR_nothing
3716 && optab_handler (abs_optab, mode)->insn_code != CODE_FOR_nothing)))
3718 temp = expand_copysign_absneg (mode, op0, op1, target,
3719 fmt->signbit_ro, op0_is_abs);
/* Fall back to explicit integer bit masking; requires a writable
   sign-bit position.  */
3724 if (fmt->signbit_rw < 0)
3726 return expand_copysign_bit (mode, op0, op1, target,
3727 fmt->signbit_rw, op0_is_abs);
3730 /* Generate an instruction whose insn-code is INSN_CODE,
3731 with two operands: an output TARGET and an input OP0.
3732 TARGET *must* be nonzero, and the output is always stored there.
3733 CODE is an rtx code such that (CODE OP0) is an rtx that describes
3734 the value that is stored into TARGET. */
3737 emit_unop_insn (int icode, rtx target, rtx op0, enum rtx_code code)
3740 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
3745 /* Now, if insn does not accept our operands, put them into pseudos. */
3747 if (!insn_data[icode].operand[1].predicate (op0, mode0))
3748 op0 = copy_to_mode_reg (mode0, op0);
3750 if (!insn_data[icode].operand[0].predicate (temp, GET_MODE (temp)))
3751 temp = gen_reg_rtx (GET_MODE (temp));
3753 pat = GEN_FCN (icode) (temp, op0);
/* If the generator emitted a multi-insn sequence, attach a REG_EQUAL
   note describing (CODE OP0) so later passes can simplify it.  */
3755 if (INSN_P (pat) && NEXT_INSN (pat) != NULL_RTX && code != UNKNOWN)
3756 add_equal_note (pat, temp, code, op0, NULL_RTX);
/* Copy into the caller-required TARGET when the pattern was emitted
   into a substitute pseudo.  */
3761 emit_move_insn (target, temp);
/* State passed to no_conflict_move_test via note_stores: TARGET is the
   destination of the libcall result, FIRST the first insn of the
   candidate block, INSN the insn currently under examination.  (The
   MUST_STAY flag set by the callback is declared in a line elided from
   this excerpt.)  */
3764 struct no_conflict_data
3766 rtx target, first, insn;
3770 /* Called via note_stores by emit_libcall_block. Set P->must_stay if
3771 the currently examined clobber / store has to stay in the list of
3772 insns that constitute the actual libcall block. */
3774 no_conflict_move_test (rtx dest, const_rtx set, void *p0)
3776 struct no_conflict_data *p= p0;
3778 /* If this insn directly contributes to setting the target, it must stay. */
3779 if (reg_overlap_mentioned_p (p->target, dest))
3780 p->must_stay = true;
3781 /* If we haven't committed to keeping any other insns in the list yet,
3782 there is nothing more to check. */
3783 else if (p->insn == p->first)
3785 /* If this insn sets / clobbers a register that feeds one of the insns
3786 already in the list, this insn has to stay too. */
3787 else if (reg_overlap_mentioned_p (dest, PATTERN (p->first))
3788 || (CALL_P (p->first) && (find_reg_fusage (p->first, USE, dest)))
3789 || reg_used_between_p (dest, p->first, p->insn)
3790 /* Likewise if this insn depends on a register set by a previous
3791 insn in the list, or if it sets a result (presumably a hard
3792 register) that is set or clobbered by a previous insn.
3793 N.B. the modified_*_p (SET_DEST...) tests applied to a MEM
3794 SET_DEST perform the former check on the address, and the latter
3795 check on the MEM. */
3796 || (GET_CODE (set) == SET
3797 && (modified_in_p (SET_SRC (set), p->first)
3798 || modified_in_p (SET_DEST (set), p->first)
3799 || modified_between_p (SET_SRC (set), p->first, p->insn)
3800 || modified_between_p (SET_DEST (set), p->first, p->insn))))
3801 p->must_stay = true;
3804 /* Encapsulate the block starting at FIRST and ending with LAST, which is
3805 logically equivalent to EQUIV, so it gets manipulated as a unit if it
3806 is possible to do so. */
3809 maybe_encapsulate_block (rtx first, rtx last, rtx equiv)
/* A trapping libcall under -fnon-call-exceptions may not be treated as
   an opaque unit, so only annotate when it cannot trap.  */
3811 if (!flag_non_call_exceptions || !may_trap_p (equiv))
3813 /* We can't attach the REG_LIBCALL and REG_RETVAL notes when the
3814 encapsulated region would not be in one basic block, i.e. when
3815 there is a control_flow_insn_p insn between FIRST and LAST. */
3816 bool attach_libcall_retval_notes = true;
3817 rtx insn, next = NEXT_INSN (last);
3819 for (insn = first; insn != next; insn = NEXT_INSN (insn))
3820 if (control_flow_insn_p (insn))
3822 attach_libcall_retval_notes = false;
/* Cross-link the region: REG_LIBCALL on FIRST points at LAST, and
   REG_RETVAL on LAST points back at FIRST.  */
3826 if (attach_libcall_retval_notes)
3828 REG_NOTES (first) = gen_rtx_INSN_LIST (REG_LIBCALL, last,
3830 REG_NOTES (last) = gen_rtx_INSN_LIST (REG_RETVAL, first,
3837 /* Emit code to make a call to a constant function or a library call.
3839 INSNS is a list containing all insns emitted in the call.
3840 These insns leave the result in RESULT. Our block is to copy RESULT
3841 to TARGET, which is logically equivalent to EQUIV.
3843 We first emit any insns that set a pseudo on the assumption that these are
3844 loading constants into registers; doing so allows them to be safely cse'ed
3845 between blocks. Then we emit all the other insns in the block, followed by
3846 an insn to move RESULT to TARGET. This last insn will have a REG_EQUAL
3847 note with an operand of EQUIV.
3849 Moving assignments to pseudos outside of the block is done to improve
3850 the generated code, but is not required to generate correct code,
3851 hence being unable to move an assignment is not grounds for not making
3852 a libcall block. There are two reasons why it is safe to leave these
3853 insns inside the block: First, we know that these pseudos cannot be
3854 used in generated RTL outside the block since they are created for
3855 temporary purposes within the block. Second, CSE will not record the
3856 values of anything set inside a libcall block, so we know they must
3857 be dead at the end of the block.
3859 Except for the first group of insns (the ones setting pseudos), the
3860 block is delimited by REG_RETVAL and REG_LIBCALL notes. */
3862 emit_libcall_block (rtx insns, rtx target, rtx result, rtx equiv)
3864 rtx final_dest = target;
3865 rtx prev, next, first, last, insn;
3867 /* If this is a reg with REG_USERVAR_P set, then it could possibly turn
3868 into a MEM later. Protect the libcall block from this change. */
3869 if (! REG_P (target) || REG_USERVAR_P (target))
3870 target = gen_reg_rtx (GET_MODE (target));
3872 /* If we're using non-call exceptions, a libcall corresponding to an
3873 operation that may trap may also trap. */
3874 if (flag_non_call_exceptions && may_trap_p (equiv))
3876 for (insn = insns; insn; insn = NEXT_INSN (insn))
3879 rtx note = find_reg_note (insn, REG_EH_REGION, NULL_RTX);
3881 if (note != 0 && INTVAL (XEXP (note, 0)) <= 0)
3882 remove_note (insn, note);
3886 /* look for any CALL_INSNs in this sequence, and attach a REG_EH_REGION
3887 reg note to indicate that this call cannot throw or execute a nonlocal
3888 goto (unless there is already a REG_EH_REGION note, in which case
3890 for (insn = insns; insn; insn = NEXT_INSN (insn))
3893 rtx note = find_reg_note (insn, REG_EH_REGION, NULL_RTX);
3896 XEXP (note, 0) = constm1_rtx;
3898 REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_EH_REGION, constm1_rtx,
3902 /* First emit all insns that set pseudos. Remove them from the list as
3903 we go. Avoid insns that set pseudos which were referenced in previous
3904 insns. These can be generated by move_by_pieces, for example,
3905 to update an address. Similarly, avoid insns that reference things
3906 set in previous insns. */
3908 for (insn = insns; insn; insn = next)
3910 rtx set = single_set (insn);
3913 /* Some ports (cris) create a libcall regions at their own. We must
3914 avoid any potential nesting of LIBCALLs. */
3915 if ((note = find_reg_note (insn, REG_LIBCALL, NULL)) != NULL)
3916 remove_note (insn, note);
3917 if ((note = find_reg_note (insn, REG_RETVAL, NULL)) != NULL)
3918 remove_note (insn, note);
3920 next = NEXT_INSN (insn);
3922 if (set != 0 && REG_P (SET_DEST (set))
3923 && REGNO (SET_DEST (set)) >= FIRST_PSEUDO_REGISTER)
3925 struct no_conflict_data data;
/* data.target is const0_rtx here (not the real target) so the callback
   only flags dependence hazards, not stores to the result.  */
3927 data.target = const0_rtx;
3931 note_stores (PATTERN (insn), no_conflict_move_test, &data);
3932 if (! data.must_stay)
/* Unlink INSN from the sequence so it can be re-emitted ahead of
   the libcall block.  */
3934 if (PREV_INSN (insn))
3935 NEXT_INSN (PREV_INSN (insn)) = next;
3940 PREV_INSN (next) = PREV_INSN (insn);
3946 /* Some ports use a loop to copy large arguments onto the stack.
3947 Don't move anything outside such a loop. */
3952 prev = get_last_insn ();
3954 /* Write the remaining insns followed by the final copy. */
3956 for (insn = insns; insn; insn = next)
3958 next = NEXT_INSN (insn);
3963 last = emit_move_insn (target, result);
3964 if (optab_handler (mov_optab, GET_MODE (target))->insn_code
3965 != CODE_FOR_nothing)
3966 set_unique_reg_note (last, REG_EQUAL, copy_rtx (equiv));
3969 /* Remove any existing REG_EQUAL note from "last", or else it will
3970 be mistaken for a note referring to the full contents of the
3971 libcall value when found together with the REG_RETVAL note added
3972 below. An existing note can come from an insn expansion at
3974 remove_note (last, find_reg_note (last, REG_EQUAL, NULL_RTX));
3977 if (final_dest != target)
3978 emit_move_insn (final_dest, target);
3981 first = get_insns ();
3983 first = NEXT_INSN (prev);
3985 maybe_encapsulate_block (first, last, equiv);
3988 /* Nonzero if we can perform a comparison of mode MODE straightforwardly.
3989 PURPOSE describes how this comparison will be used. CODE is the rtx
3990 comparison code we will be using.
3992 ??? Actually, CODE is slightly weaker than that. A target is still
3993 required to implement all of the normal bcc operations, but not
3994 required to implement all (or any) of the unordered bcc operations. */
3997 can_compare_p (enum rtx_code code, enum machine_mode mode,
3998 enum can_compare_purpose purpose)
/* Walk from MODE through successively wider modes (loop tail below),
   accepting the first mode the target can compare in.  */
4002 if (optab_handler (cmp_optab, mode)->insn_code != CODE_FOR_nothing)
4004 if (purpose == ccp_jump)
4005 return bcc_gen_fctn[(int) code] != NULL;
4006 else if (purpose == ccp_store_flag)
4007 return setcc_gen_code[(int) code] != CODE_FOR_nothing;
4009 /* There's only one cmov entry point, and it's allowed to fail. */
/* Combined compare-and-something patterns also satisfy the request.  */
4012 if (purpose == ccp_jump
4013 && optab_handler (cbranch_optab, mode)->insn_code != CODE_FOR_nothing)
4015 if (purpose == ccp_cmov
4016 && optab_handler (cmov_optab, mode)->insn_code != CODE_FOR_nothing)
4018 if (purpose == ccp_store_flag
4019 && optab_handler (cstore_optab, mode)->insn_code != CODE_FOR_nothing)
4021 mode = GET_MODE_WIDER_MODE (mode);
4023 while (mode != VOIDmode);
4028 /* This function is called when we are going to emit a compare instruction that
4029 compares the values found in *PX and *PY, using the rtl operator COMPARISON.
4031 *PMODE is the mode of the inputs (in case they are const_int).
4032 *PUNSIGNEDP nonzero says that the operands are unsigned;
4033 this matters if they need to be widened.
4035 If they have mode BLKmode, then SIZE specifies the size of both operands.
4037 This function performs all the setup necessary so that the caller only has
4038 to emit a single comparison insn. This setup can involve doing a BLKmode
4039 comparison or emitting a library call to perform the comparison if no insn
4040 is available to handle it.
4041 The values which are passed in through pointers can be modified; the caller
4042 should perform the comparison on the modified values. Constant
4043 comparisons must have already been folded. */
4046 prepare_cmp_insn (rtx *px, rtx *py, enum rtx_code *pcomparison, rtx size,
4047 enum machine_mode *pmode, int *punsignedp,
4048 enum can_compare_purpose purpose)
4050 enum machine_mode mode = *pmode;
4051 rtx x = *px, y = *py;
4052 int unsignedp = *punsignedp;
4055 /* If we are inside an appropriately-short loop and we are optimizing,
4056 force expensive constants into a register. */
4057 if (CONSTANT_P (x) && optimize
4058 && rtx_cost (x, COMPARE) > COSTS_N_INSNS (1))
4059 x = force_reg (mode, x);
4061 if (CONSTANT_P (y) && optimize
4062 && rtx_cost (y, COMPARE) > COSTS_N_INSNS (1))
4063 y = force_reg (mode, y);
4066 /* Make sure we have a canonical comparison. The RTL
4067 documentation states that canonical comparisons are required only
4068 for targets which have cc0. */
4069 gcc_assert (!CONSTANT_P (x) || CONSTANT_P (y))
4072 /* Don't let both operands fail to indicate the mode. */
4073 if (GET_MODE (x) == VOIDmode && GET_MODE (y) == VOIDmode)
4074 x = force_reg (mode, x);
4076 /* Handle all BLKmode compares. */
4078 if (mode == BLKmode)
4080 enum machine_mode cmp_mode, result_mode;
4081 enum insn_code cmp_code;
/* Alignment operand for the block-compare patterns, in bytes.  */
4086 = GEN_INT (MIN (MEM_ALIGN (x), MEM_ALIGN (y)) / BITS_PER_UNIT);
4090 /* Try to use a memory block compare insn - either cmpstr
4091 or cmpmem will do. */
4092 for (cmp_mode = GET_CLASS_NARROWEST_MODE (MODE_INT);
4093 cmp_mode != VOIDmode;
4094 cmp_mode = GET_MODE_WIDER_MODE (cmp_mode))
4096 cmp_code = cmpmem_optab[cmp_mode];
4097 if (cmp_code == CODE_FOR_nothing)
4098 cmp_code = cmpstr_optab[cmp_mode];
4099 if (cmp_code == CODE_FOR_nothing)
4100 cmp_code = cmpstrn_optab[cmp_mode];
4101 if (cmp_code == CODE_FOR_nothing)
4104 /* Must make sure the size fits the insn's mode. */
4105 if ((GET_CODE (size) == CONST_INT
4106 && INTVAL (size) >= (1 << GET_MODE_BITSIZE (cmp_mode)))
4107 || (GET_MODE_BITSIZE (GET_MODE (size))
4108 > GET_MODE_BITSIZE (cmp_mode)))
4111 result_mode = insn_data[cmp_code].operand[0].mode;
4112 result = gen_reg_rtx (result_mode);
4113 size = convert_to_mode (cmp_mode, size, 1);
4114 emit_insn (GEN_FCN (cmp_code) (result, x, y, size, opalign));
4118 *pmode = result_mode;
4122 /* Otherwise call a library function, memcmp. */
4123 libfunc = memcmp_libfunc;
4124 length_type = sizetype;
4125 result_mode = TYPE_MODE (integer_type_node);
4126 cmp_mode = TYPE_MODE (length_type);
4127 size = convert_to_mode (TYPE_MODE (length_type), size,
4128 TYPE_UNSIGNED (length_type));
4130 result = emit_library_call_value (libfunc, 0, LCT_PURE,
4137 *pmode = result_mode;
4141 /* Don't allow operands to the compare to trap, as that can put the
4142 compare and branch in different basic blocks. */
4143 if (flag_non_call_exceptions)
4146 x = force_reg (mode, x);
4148 y = force_reg (mode, y);
4153 if (can_compare_p (*pcomparison, mode, purpose))
4156 /* Handle a lib call just for the mode we are using. */
4158 libfunc = optab_libfunc (cmp_optab, mode);
4159 if (libfunc && !SCALAR_FLOAT_MODE_P (mode))
4163 /* If we want unsigned, and this mode has a distinct unsigned
4164 comparison routine, use that. */
4167 rtx ulibfunc = optab_libfunc (ucmp_optab, mode);
4172 result = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
4173 targetm.libgcc_cmp_return_mode (),
4174 2, x, mode, y, mode);
4176 /* There are two kinds of comparison routines. Biased routines
4177 return 0/1/2, and unbiased routines return -1/0/1. Other parts
4178 of gcc expect that the comparison operation is equivalent
4179 to the modified comparison. For signed comparisons compare the
4180 result against 1 in the biased case, and zero in the unbiased
4181 case. For unsigned comparisons always compare against 1 after
4182 biasing the unbiased result by adding 1. This gives us a way to
4188 if (!TARGET_LIB_INT_CMP_BIASED)
4191 *px = plus_constant (result, 1);
/* Floating-point compares with no direct support go through the
   float libcall helper instead.  */
4198 gcc_assert (SCALAR_FLOAT_MODE_P (mode));
4199 prepare_float_lib_cmp (px, py, pcomparison, pmode, punsignedp);
4202 /* Before emitting an insn with code ICODE, make sure that X, which is going
4203 to be used for operand OPNUM of the insn, is converted from mode MODE to
4204 WIDER_MODE (UNSIGNEDP determines whether it is an unsigned conversion), and
4205 that it is accepted by the operand predicate. Return the new value. */
4208 prepare_operand (int icode, rtx x, int opnum, enum machine_mode mode,
4209 enum machine_mode wider_mode, int unsignedp)
4211 if (mode != wider_mode)
4212 x = convert_modes (wider_mode, mode, x, unsignedp);
4214 if (!insn_data[icode].operand[opnum].predicate
4215 (x, insn_data[icode].operand[opnum].mode))
/* After reload we may no longer create new pseudos, so the failure
   branch here differs from the pre-reload copy below.  */
4217 if (reload_completed)
4219 x = copy_to_mode_reg (insn_data[icode].operand[opnum].mode, x);
4225 /* Subroutine of emit_cmp_and_jump_insns; this function is called when we know
4226 we can do the comparison.
4227 The arguments are the same as for emit_cmp_and_jump_insns; but LABEL may
4228 be NULL_RTX which indicates that only a comparison is to be generated. */
4231 emit_cmp_and_jump_insn_1 (rtx x, rtx y, enum machine_mode mode,
4232 enum rtx_code comparison, int unsignedp, rtx label)
4234 rtx test = gen_rtx_fmt_ee (comparison, mode, x, y);
4235 enum mode_class class = GET_MODE_CLASS (mode);
4236 enum machine_mode wider_mode = mode;
4238 /* Try combined insns first. */
/* Loop body runs once per mode, widening at the bottom until a usable
   pattern is found or modes run out.  */
4241 enum insn_code icode;
4242 PUT_MODE (test, wider_mode);
4246 icode = optab_handler (cbranch_optab, wider_mode)->insn_code;
4248 if (icode != CODE_FOR_nothing
4249 && insn_data[icode].operand[0].predicate (test, wider_mode))
4251 x = prepare_operand (icode, x, 1, mode, wider_mode, unsignedp);
4252 y = prepare_operand (icode, y, 2, mode, wider_mode, unsignedp);
4253 emit_jump_insn (GEN_FCN (icode) (test, x, y, label));
4258 /* Handle some compares against zero. */
4259 icode = (int) optab_handler (tst_optab, wider_mode)->insn_code;
4260 if (y == CONST0_RTX (mode) && icode != CODE_FOR_nothing)
4262 x = prepare_operand (icode, x, 0, mode, wider_mode, unsignedp);
4263 emit_insn (GEN_FCN (icode) (x));
4265 emit_jump_insn (bcc_gen_fctn[(int) comparison] (label));
4269 /* Handle compares for which there is a directly suitable insn. */
4271 icode = (int) optab_handler (cmp_optab, wider_mode)->insn_code;
4272 if (icode != CODE_FOR_nothing)
4274 x = prepare_operand (icode, x, 0, mode, wider_mode, unsignedp);
4275 y = prepare_operand (icode, y, 1, mode, wider_mode, unsignedp);
4276 emit_insn (GEN_FCN (icode) (x, y));
4278 emit_jump_insn (bcc_gen_fctn[(int) comparison] (label));
4282 if (!CLASS_HAS_WIDER_MODES_P (class))
4285 wider_mode = GET_MODE_WIDER_MODE (wider_mode);
4287 while (wider_mode != VOIDmode);
4292 /* Generate code to compare X with Y so that the condition codes are
4293 set and to jump to LABEL if the condition is true. If X is a
4294 constant and Y is not a constant, then the comparison is swapped to
4295 ensure that the comparison RTL has the canonical form.
4297 UNSIGNEDP nonzero says that X and Y are unsigned; this matters if they
4298 need to be widened by emit_cmp_insn. UNSIGNEDP is also used to select
4299 the proper branch condition code.
4301 If X and Y have mode BLKmode, then SIZE specifies the size of both X and Y.
4303 MODE is the mode of the inputs (in case they are const_int).
4305 COMPARISON is the rtl operator to compare with (EQ, NE, GT, etc.). It will
4306 be passed unchanged to emit_cmp_insn, then potentially converted into an
4307 unsigned variant based on UNSIGNEDP to select a proper jump instruction. */
4310 emit_cmp_and_jump_insns (rtx x, rtx y, enum rtx_code comparison, rtx size,
4311 enum machine_mode mode, int unsignedp, rtx label)
4313 rtx op0 = x, op1 = y;
4315 /* Swap operands and condition to ensure canonical RTL. */
4316 if (swap_commutative_operands_p (x, y))
4318 /* If we're not emitting a branch, callers are required to pass
4319 operands in an order conforming to canonical RTL. We relax this
4320 for commutative comparisons so callers using EQ don't need to do
4321 swapping by hand. */
4322 gcc_assert (label || (comparison == swap_condition (comparison)));
4325 comparison = swap_condition (comparison);
4329 /* If OP0 is still a constant, then both X and Y must be constants.
4330 Force X into a register to create canonical RTL. */
4331 if (CONSTANT_P (op0))
4332 op0 = force_reg (mode, op0);
/* Select the unsigned variant of the condition when requested (the
   guarding test is in a line elided from this excerpt).  */
4336 comparison = unsigned_condition (comparison);
4338 prepare_cmp_insn (&op0, &op1, &comparison, size, &mode, &unsignedp,
4340 emit_cmp_and_jump_insn_1 (op0, op1, mode, comparison, unsignedp, label);
4343 /* Like emit_cmp_and_jump_insns, but generate only the comparison. */
4346 emit_cmp_insn (rtx x, rtx y, enum rtx_code comparison, rtx size,
4347 enum machine_mode mode, int unsignedp)
/* A null LABEL tells the worker to emit the compare without a branch.  */
4349 emit_cmp_and_jump_insns (x, y, comparison, size, mode, unsignedp, 0);
4352 /* Emit a library call comparison between floating point X and Y.
4353 COMPARISON is the rtl operator to compare with (EQ, NE, GT, etc.). */
4356 prepare_float_lib_cmp (rtx *px, rtx *py, enum rtx_code *pcomparison,
4357 enum machine_mode *pmode, int *punsignedp)
4359 enum rtx_code comparison = *pcomparison;
4360 enum rtx_code swapped = swap_condition (comparison);
4361 enum rtx_code reversed = reverse_condition_maybe_unordered (comparison);
4364 enum machine_mode orig_mode = GET_MODE (x);
4365 enum machine_mode mode, cmp_mode;
4366 rtx value, target, insns, equiv;
4368 bool reversed_p = false;
4369 cmp_mode = targetm.libgcc_cmp_return_mode ();
/* Search widening modes for a libfunc implementing the comparison
   directly, its swapped form (exchanging X and Y), or its reversal
   (when the result can be inverted afterwards).  */
4371 for (mode = orig_mode;
4373 mode = GET_MODE_WIDER_MODE (mode))
4375 if ((libfunc = optab_libfunc (code_to_optab[comparison], mode)))
4378 if ((libfunc = optab_libfunc (code_to_optab[swapped] , mode)))
4381 tmp = x; x = y; y = tmp;
4382 comparison = swapped;
4386 if ((libfunc = optab_libfunc (code_to_optab[reversed], mode))
4387 && FLOAT_LIB_COMPARE_RETURNS_BOOL (mode, reversed))
4389 comparison = reversed;
4395 gcc_assert (mode != VOIDmode);
4397 if (mode != orig_mode)
4399 x = convert_to_mode (mode, x, 0);
4400 y = convert_to_mode (mode, y, 0);
4403 /* Attach a REG_EQUAL note describing the semantics of the libcall to
4404 the RTL. The allows the RTL optimizers to delete the libcall if the
4405 condition can be determined at compile-time. */
4406 if (comparison == UNORDERED)
/* UNORDERED (x, y) is equivalent to (x != x) || (y != y).  */
4408 rtx temp = simplify_gen_relational (NE, cmp_mode, mode, x, x);
4409 equiv = simplify_gen_relational (NE, cmp_mode, mode, y, y);
4410 equiv = simplify_gen_ternary (IF_THEN_ELSE, cmp_mode, cmp_mode,
4411 temp, const_true_rtx, equiv);
4415 equiv = simplify_gen_relational (comparison, cmp_mode, mode, x, y);
4416 if (! FLOAT_LIB_COMPARE_RETURNS_BOOL (mode, comparison))
/* Encode the comparison-specific true/false return values of the
   non-boolean libgcc routines (switch cases elided in this excerpt).  */
4418 rtx true_rtx, false_rtx;
4423 true_rtx = const0_rtx;
4424 false_rtx = const_true_rtx;
4428 true_rtx = const_true_rtx;
4429 false_rtx = const0_rtx;
4433 true_rtx = const1_rtx;
4434 false_rtx = const0_rtx;
4438 true_rtx = const0_rtx;
4439 false_rtx = constm1_rtx;
4443 true_rtx = constm1_rtx;
4444 false_rtx = const0_rtx;
4448 true_rtx = const0_rtx;
4449 false_rtx = const1_rtx;
4455 equiv = simplify_gen_ternary (IF_THEN_ELSE, cmp_mode, cmp_mode,
4456 equiv, true_rtx, false_rtx);
4461 value = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
4462 cmp_mode, 2, x, mode, y, mode);
4463 insns = get_insns ();
4466 target = gen_reg_rtx (cmp_mode);
4467 emit_libcall_block (insns, target, value, equiv);
4469 if (comparison == UNORDERED
4470 || FLOAT_LIB_COMPARE_RETURNS_BOOL (mode, comparison))
4471 comparison = reversed_p ? EQ : NE;
4476 *pcomparison = comparison;
4480 /* Generate code to indirectly jump to a location given in the rtx LOC. */
4483 emit_indirect_jump (rtx loc)
/* Copy LOC into a Pmode register if the indirect_jump pattern's operand
   predicate rejects it as-is.  */
4485 if (!insn_data[(int) CODE_FOR_indirect_jump].operand[0].predicate
4487 loc = copy_to_mode_reg (Pmode, loc);
4489 emit_jump_insn (gen_indirect_jump (loc));
4493 #ifdef HAVE_conditional_move
4495 /* Emit a conditional move instruction if the machine supports one for that
4496 condition and machine mode.
4498 OP0 and OP1 are the operands that should be compared using CODE. CMODE is
4499 the mode to use should they be constants. If it is VOIDmode, they cannot
4502 OP2 should be stored in TARGET if the comparison is true, otherwise OP3
4503 should be stored there. MODE is the mode to use should they be constants.
4504 If it is VOIDmode, they cannot both be constants.
4506 The result is either TARGET (perhaps modified) or NULL_RTX if the operation
4507 is not supported. */
4510 emit_conditional_move (rtx target, enum rtx_code code, rtx op0, rtx op1,
4511 enum machine_mode cmode, rtx op2, rtx op3,
4512 enum machine_mode mode, int unsignedp)
4514 rtx tem, subtarget, comparison, insn;
4515 enum insn_code icode;
4516 enum rtx_code reversed;
4518 /* If one operand is constant, make it the second one. Only do this
4519 if the other operand is not constant as well. */
4521 if (swap_commutative_operands_p (op0, op1))
4526 code = swap_condition (code);
4529 /* get_condition will prefer to generate LT and GT even if the old
4530 comparison was against zero, so undo that canonicalization here since
4531 comparisons against zero are cheaper. */
4532 if (code == LT && op1 == const1_rtx)
4533 code = LE, op1 = const0_rtx;
4534 else if (code == GT && op1 == constm1_rtx)
4535 code = GE, op1 = const0_rtx;
4537 if (cmode == VOIDmode)
4538 cmode = GET_MODE (op0);
/* If swapping OP2/OP3 is profitable and the condition is reversible,
   exchange them (the swap itself is in lines elided from this excerpt).  */
4540 if (swap_commutative_operands_p (op2, op3)
4541 && ((reversed = reversed_comparison_code_parts (code, op0, op1, NULL))
4550 if (mode == VOIDmode)
4551 mode = GET_MODE (op2);
4553 icode = movcc_gen_code[mode];
4555 if (icode == CODE_FOR_nothing)
4559 target = gen_reg_rtx (mode);
4563 /* If the insn doesn't accept these operands, put them in pseudos. */
4565 if (!insn_data[icode].operand[0].predicate
4566 (subtarget, insn_data[icode].operand[0].mode))
4567 subtarget = gen_reg_rtx (insn_data[icode].operand[0].mode);
4569 if (!insn_data[icode].operand[2].predicate
4570 (op2, insn_data[icode].operand[2].mode))
4571 op2 = copy_to_mode_reg (insn_data[icode].operand[2].mode, op2);
4573 if (!insn_data[icode].operand[3].predicate
4574 (op3, insn_data[icode].operand[3].mode))
4575 op3 = copy_to_mode_reg (insn_data[icode].operand[3].mode, op3);
4577 /* Everything should now be in the suitable form, so emit the compare insn
4578 and then the conditional move. */
4581 = compare_from_rtx (op0, op1, code, unsignedp, cmode, NULL_RTX);
4583 /* ??? Watch for const0_rtx (nop) and const_true_rtx (unconditional)? */
4584 /* We can get const0_rtx or const_true_rtx in some circumstances. Just
4585 return NULL and let the caller figure out how best to deal with this
4587 if (GET_CODE (comparison) != code)
4590 insn = GEN_FCN (icode) (subtarget, comparison, op2, op3);
4592 /* If that failed, then give up. */
4598 if (subtarget != target)
4599 convert_move (target, subtarget, 0);
4604 /* Return nonzero if a conditional move of mode MODE is supported.
4606 This function is for combine so it can tell whether an insn that looks
4607 like a conditional move is actually supported by the hardware. If we
4608 guess wrong we lose a bit on optimization, but that's it. */
4609 /* ??? sparc64 supports conditionally moving integers values based on fp
4610 comparisons, and vice versa. How do we handle them? */
4613 can_conditionally_move_p (enum machine_mode mode)
4615 if (movcc_gen_code[mode] != CODE_FOR_nothing)
4621 #endif /* HAVE_conditional_move */
4623 /* Emit a conditional addition instruction if the machine supports one for that
4624 condition and machine mode.
4626 OP0 and OP1 are the operands that should be compared using CODE. CMODE is
4627 the mode to use should they be constants. If it is VOIDmode, they cannot
4630 OP2 should be stored in TARGET if the comparison is true, otherwise OP2+OP3
4631 should be stored there. MODE is the mode to use should they be constants.
4632 If it is VOIDmode, they cannot both be constants.
4634 The result is either TARGET (perhaps modified) or NULL_RTX if the operation
4635 is not supported. */
4638 emit_conditional_add (rtx target, enum rtx_code code, rtx op0, rtx op1,
4639 enum machine_mode cmode, rtx op2, rtx op3,
4640 enum machine_mode mode, int unsignedp)
4642 rtx tem, subtarget, comparison, insn;
4643 enum insn_code icode;
4644 enum rtx_code reversed;
4646 /* If one operand is constant, make it the second one. Only do this
4647 if the other operand is not constant as well. */
4649 if (swap_commutative_operands_p (op0, op1))
4654 code = swap_condition (code);
4657 /* get_condition will prefer to generate LT and GT even if the old
4658 comparison was against zero, so undo that canonicalization here since
4659 comparisons against zero are cheaper. */
4660 if (code == LT && op1 == const1_rtx)
4661 code = LE, op1 = const0_rtx;
4662 else if (code == GT && op1 == constm1_rtx)
4663 code = GE, op1 = const0_rtx;
4665 if (cmode == VOIDmode)
4666 cmode = GET_MODE (op0);
/* Mirrors emit_conditional_move: swap OP2/OP3 when profitable and the
   condition is reversible (swap body elided in this excerpt).  */
4668 if (swap_commutative_operands_p (op2, op3)
4669 && ((reversed = reversed_comparison_code_parts (code, op0, op1, NULL))
4678 if (mode == VOIDmode)
4679 mode = GET_MODE (op2);
4681 icode = optab_handler (addcc_optab, mode)->insn_code;
4683 if (icode == CODE_FOR_nothing)
4687 target = gen_reg_rtx (mode);
4689 /* If the insn doesn't accept these operands, put them in pseudos. */
4691 if (!insn_data[icode].operand[0].predicate
4692 (target, insn_data[icode].operand[0].mode))
4693 subtarget = gen_reg_rtx (insn_data[icode].operand[0].mode);
4697 if (!insn_data[icode].operand[2].predicate
4698 (op2, insn_data[icode].operand[2].mode))
4699 op2 = copy_to_mode_reg (insn_data[icode].operand[2].mode, op2);
4701 if (!insn_data[icode].operand[3].predicate
4702 (op3, insn_data[icode].operand[3].mode))
4703 op3 = copy_to_mode_reg (insn_data[icode].operand[3].mode, op3);
4705 /* Everything should now be in the suitable form, so emit the compare insn
4706 and then the conditional move. */
4709 = compare_from_rtx (op0, op1, code, unsignedp, cmode, NULL_RTX);
4711 /* ??? Watch for const0_rtx (nop) and const_true_rtx (unconditional)? */
4712 /* We can get const0_rtx or const_true_rtx in some circumstances. Just
4713 return NULL and let the caller figure out how best to deal with this
4715 if (GET_CODE (comparison) != code)
4718 insn = GEN_FCN (icode) (subtarget, comparison, op2, op3);
4720 /* If that failed, then give up. */
4726 if (subtarget != target)
4727 convert_move (target, subtarget, 0);
4732 /* These functions attempt to generate an insn body, rather than
4733 emitting the insn, but if the gen function already emits them, we
4734 make no attempt to turn them back into naked patterns. */
4736 /* Generate and return an insn body to add Y to X. */
4739 gen_add2_insn (rtx x, rtx y)
4741 int icode = (int) optab_handler (add_optab, GET_MODE (x))->insn_code;
/* Unlike gen_add3_insn, operand-predicate failures here are hard
   errors: callers must pre-validate with have_add2_insn.  */
4743 gcc_assert (insn_data[icode].operand[0].predicate
4744 (x, insn_data[icode].operand[0].mode));
4745 gcc_assert (insn_data[icode].operand[1].predicate
4746 (x, insn_data[icode].operand[1].mode));
4747 gcc_assert (insn_data[icode].operand[2].predicate
4748 (y, insn_data[icode].operand[2].mode));
4750 return GEN_FCN (icode) (x, x, y);
4753 /* Generate and return an insn body to add r1 and c,
4754 storing the result in r0. */
4757 gen_add3_insn (rtx r0, rtx r1, rtx c)
4759 int icode = (int) optab_handler (add_optab, GET_MODE (r0))->insn_code;
/* Returns NULL (in an elided line) rather than asserting when the
   target lacks a suitable add pattern or rejects an operand.  */
4761 if (icode == CODE_FOR_nothing
4762 || !(insn_data[icode].operand[0].predicate
4763 (r0, insn_data[icode].operand[0].mode))
4764 || !(insn_data[icode].operand[1].predicate
4765 (r1, insn_data[icode].operand[1].mode))
4766 || !(insn_data[icode].operand[2].predicate
4767 (c, insn_data[icode].operand[2].mode)))
4770 return GEN_FCN (icode) (r0, r1, c);
/* Return nonzero if the target has a two-address add pattern that
   accepts X as the in/out operand and Y as the addend.  */
4774 have_add2_insn (rtx x, rtx y)
4778 gcc_assert (GET_MODE (x) != VOIDmode);
4780 icode = (int) optab_handler (add_optab, GET_MODE (x))->insn_code;
4782 if (icode == CODE_FOR_nothing)
4785 if (!(insn_data[icode].operand[0].predicate
4786 (x, insn_data[icode].operand[0].mode))
4787 || !(insn_data[icode].operand[1].predicate
4788 (x, insn_data[icode].operand[1].mode))
4789 || !(insn_data[icode].operand[2].predicate
4790 (y, insn_data[icode].operand[2].mode)))
4796 /* Generate and return an insn body to subtract Y from X. */
4799 gen_sub2_insn (rtx x, rtx y)
4801 int icode = (int) optab_handler (sub_optab, GET_MODE (x))->insn_code;
/* As with gen_add2_insn, callers must pre-validate via have_sub2_insn;
   predicate failures here abort.  */
4803 gcc_assert (insn_data[icode].operand[0].predicate
4804 (x, insn_data[icode].operand[0].mode));
4805 gcc_assert (insn_data[icode].operand[1].predicate
4806 (x, insn_data[icode].operand[1].mode));
4807 gcc_assert (insn_data[icode].operand[2].predicate
4808 (y, insn_data[icode].operand[2].mode));
4810 return GEN_FCN (icode) (x, x, y);
4813 /* Generate and return an insn body to subtract r1 and c,
4814 storing the result in r0. */
4817 gen_sub3_insn (rtx r0, rtx r1, rtx c)
4819 int icode = (int) optab_handler (sub_optab, GET_MODE (r0))->insn_code;
4821 if (icode == CODE_FOR_nothing
4822 || !(insn_data[icode].operand[0].predicate
4823 (r0, insn_data[icode].operand[0].mode))
4824 || !(insn_data[icode].operand[1].predicate
4825 (r1, insn_data[icode].operand[1].mode))
4826 || !(insn_data[icode].operand[2].predicate
4827 (c, insn_data[icode].operand[2].mode)))
4830 return GEN_FCN (icode) (r0, r1, c);
4834 have_sub2_insn (rtx x, rtx y)
4838 gcc_assert (GET_MODE (x) != VOIDmode);
4840 icode = (int) optab_handler (sub_optab, GET_MODE (x))->insn_code;
4842 if (icode == CODE_FOR_nothing)
4845 if (!(insn_data[icode].operand[0].predicate
4846 (x, insn_data[icode].operand[0].mode))
4847 || !(insn_data[icode].operand[1].predicate
4848 (x, insn_data[icode].operand[1].mode))
4849 || !(insn_data[icode].operand[2].predicate
4850 (y, insn_data[icode].operand[2].mode)))
4856 /* Generate the body of an instruction to copy Y into X.
4857 It may be a list of insns, if one insn isn't enough. */
4860 gen_move_insn (rtx x, rtx y)
4865 emit_move_insn_1 (x, y);
4871 /* Return the insn code used to extend FROM_MODE to TO_MODE.
4872 UNSIGNEDP specifies zero-extension instead of sign-extension. If
4873 no such operation exists, CODE_FOR_nothing will be returned. */
4876 can_extend_p (enum machine_mode to_mode, enum machine_mode from_mode,
4880 #ifdef HAVE_ptr_extend
4882 return CODE_FOR_ptr_extend;
4885 tab = unsignedp ? zext_optab : sext_optab;
4886 return convert_optab_handler (tab, to_mode, from_mode)->insn_code;
4889 /* Generate the body of an insn to extend Y (with mode MFROM)
4890 into X (with mode MTO). Do zero-extension if UNSIGNEDP is nonzero. */
4893 gen_extend_insn (rtx x, rtx y, enum machine_mode mto,
4894 enum machine_mode mfrom, int unsignedp)
4896 enum insn_code icode = can_extend_p (mto, mfrom, unsignedp);
4897 return GEN_FCN (icode) (x, y);
4900 /* can_fix_p and can_float_p say whether the target machine
4901 can directly convert a given fixed point type to
4902 a given floating point type, or vice versa.
4903 The returned value is the CODE_FOR_... value to use,
4904 or CODE_FOR_nothing if these modes cannot be directly converted.
4906 *TRUNCP_PTR is set to 1 if it is necessary to output
4907 an explicit FTRUNC insn before the fix insn; otherwise 0. */
4909 static enum insn_code
4910 can_fix_p (enum machine_mode fixmode, enum machine_mode fltmode,
4911 int unsignedp, int *truncp_ptr)
4914 enum insn_code icode;
4916 tab = unsignedp ? ufixtrunc_optab : sfixtrunc_optab;
4917 icode = convert_optab_handler (tab, fixmode, fltmode)->insn_code;
4918 if (icode != CODE_FOR_nothing)
4924 /* FIXME: This requires a port to define both FIX and FTRUNC pattern
4925 for this to work. We need to rework the fix* and ftrunc* patterns
4926 and documentation. */
4927 tab = unsignedp ? ufix_optab : sfix_optab;
4928 icode = convert_optab_handler (tab, fixmode, fltmode)->insn_code;
4929 if (icode != CODE_FOR_nothing
4930 && optab_handler (ftrunc_optab, fltmode)->insn_code != CODE_FOR_nothing)
4937 return CODE_FOR_nothing;
4940 static enum insn_code
4941 can_float_p (enum machine_mode fltmode, enum machine_mode fixmode,
4946 tab = unsignedp ? ufloat_optab : sfloat_optab;
4947 return convert_optab_handler (tab, fltmode, fixmode)->insn_code;
4950 /* Generate code to convert FROM to floating point
4951 and store in TO. FROM must be fixed point and not VOIDmode.
4952 UNSIGNEDP nonzero means regard FROM as unsigned.
4953 Normally this is done by correcting the final value
4954 if it is negative. */
4957 expand_float (rtx to, rtx from, int unsignedp)
4959 enum insn_code icode;
4961 enum machine_mode fmode, imode;
4962 bool can_do_signed = false;
4964 /* Crash now, because we won't be able to decide which mode to use. */
4965 gcc_assert (GET_MODE (from) != VOIDmode);
4967 /* Look for an insn to do the conversion. Do it in the specified
4968 modes if possible; otherwise convert either input, output or both to
4969 wider mode. If the integer mode is wider than the mode of FROM,
4970 we can do the conversion signed even if the input is unsigned. */
4972 for (fmode = GET_MODE (to); fmode != VOIDmode;
4973 fmode = GET_MODE_WIDER_MODE (fmode))
4974 for (imode = GET_MODE (from); imode != VOIDmode;
4975 imode = GET_MODE_WIDER_MODE (imode))
4977 int doing_unsigned = unsignedp;
4979 if (fmode != GET_MODE (to)
4980 && significand_size (fmode) < GET_MODE_BITSIZE (GET_MODE (from)))
4983 icode = can_float_p (fmode, imode, unsignedp);
4984 if (icode == CODE_FOR_nothing && unsignedp)
4986 enum insn_code scode = can_float_p (fmode, imode, 0);
4987 if (scode != CODE_FOR_nothing)
4988 can_do_signed = true;
4989 if (imode != GET_MODE (from))
4990 icode = scode, doing_unsigned = 0;
4993 if (icode != CODE_FOR_nothing)
4995 if (imode != GET_MODE (from))
4996 from = convert_to_mode (imode, from, unsignedp);
4998 if (fmode != GET_MODE (to))
4999 target = gen_reg_rtx (fmode);
5001 emit_unop_insn (icode, target, from,
5002 doing_unsigned ? UNSIGNED_FLOAT : FLOAT);
5005 convert_move (to, target, 0);
5010 /* Unsigned integer, and no way to convert directly. Convert as signed,
5011 then unconditionally adjust the result. */
5012 if (unsignedp && can_do_signed)
5014 rtx label = gen_label_rtx ();
5016 REAL_VALUE_TYPE offset;
5018 /* Look for a usable floating mode FMODE wider than the source and at
5019 least as wide as the target. Using FMODE will avoid rounding woes
5020 with unsigned values greater than the signed maximum value. */
5022 for (fmode = GET_MODE (to); fmode != VOIDmode;
5023 fmode = GET_MODE_WIDER_MODE (fmode))
5024 if (GET_MODE_BITSIZE (GET_MODE (from)) < GET_MODE_BITSIZE (fmode)
5025 && can_float_p (fmode, GET_MODE (from), 0) != CODE_FOR_nothing)
5028 if (fmode == VOIDmode)
5030 /* There is no such mode. Pretend the target is wide enough. */
5031 fmode = GET_MODE (to);
5033 /* Avoid double-rounding when TO is narrower than FROM. */
5034 if ((significand_size (fmode) + 1)
5035 < GET_MODE_BITSIZE (GET_MODE (from)))
5038 rtx neglabel = gen_label_rtx ();
5040 /* Don't use TARGET if it isn't a register, is a hard register,
5041 or is the wrong mode. */
5043 || REGNO (target) < FIRST_PSEUDO_REGISTER
5044 || GET_MODE (target) != fmode)
5045 target = gen_reg_rtx (fmode);
5047 imode = GET_MODE (from);
5048 do_pending_stack_adjust ();
5050 /* Test whether the sign bit is set. */
5051 emit_cmp_and_jump_insns (from, const0_rtx, LT, NULL_RTX, imode,
5054 /* The sign bit is not set. Convert as signed. */
5055 expand_float (target, from, 0);
5056 emit_jump_insn (gen_jump (label));
5059 /* The sign bit is set.
5060 Convert to a usable (positive signed) value by shifting right
5061 one bit, while remembering if a nonzero bit was shifted
5062 out; i.e., compute (from & 1) | (from >> 1). */
5064 emit_label (neglabel);
5065 temp = expand_binop (imode, and_optab, from, const1_rtx,
5066 NULL_RTX, 1, OPTAB_LIB_WIDEN);
5067 temp1 = expand_shift (RSHIFT_EXPR, imode, from, integer_one_node,
5069 temp = expand_binop (imode, ior_optab, temp, temp1, temp, 1,
5071 expand_float (target, temp, 0);
5073 /* Multiply by 2 to undo the shift above. */
5074 temp = expand_binop (fmode, add_optab, target, target,
5075 target, 0, OPTAB_LIB_WIDEN);
5077 emit_move_insn (target, temp);
5079 do_pending_stack_adjust ();
5085 /* If we are about to do some arithmetic to correct for an
5086 unsigned operand, do it in a pseudo-register. */
5088 if (GET_MODE (to) != fmode
5089 || !REG_P (to) || REGNO (to) < FIRST_PSEUDO_REGISTER)
5090 target = gen_reg_rtx (fmode);
5092 /* Convert as signed integer to floating. */
5093 expand_float (target, from, 0);
5095 /* If FROM is negative (and therefore TO is negative),
5096 correct its value by 2**bitwidth. */
5098 do_pending_stack_adjust ();
5099 emit_cmp_and_jump_insns (from, const0_rtx, GE, NULL_RTX, GET_MODE (from),
5103 real_2expN (&offset, GET_MODE_BITSIZE (GET_MODE (from)), fmode);
5104 temp = expand_binop (fmode, add_optab, target,
5105 CONST_DOUBLE_FROM_REAL_VALUE (offset, fmode),
5106 target, 0, OPTAB_LIB_WIDEN);
5108 emit_move_insn (target, temp);
5110 do_pending_stack_adjust ();
5115 /* No hardware instruction available; call a library routine. */
5120 convert_optab tab = unsignedp ? ufloat_optab : sfloat_optab;
5122 if (GET_MODE_SIZE (GET_MODE (from)) < GET_MODE_SIZE (SImode))
5123 from = convert_to_mode (SImode, from, unsignedp);
5125 libfunc = convert_optab_libfunc (tab, GET_MODE (to), GET_MODE (from));
5126 gcc_assert (libfunc);
5130 value = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
5131 GET_MODE (to), 1, from,
5133 insns = get_insns ();
5136 emit_libcall_block (insns, target, value,
5137 gen_rtx_fmt_e (unsignedp ? UNSIGNED_FLOAT : FLOAT,
5138 GET_MODE (to), from));
5143 /* Copy result to requested destination
5144 if we have been computing in a temp location. */
5148 if (GET_MODE (target) == GET_MODE (to))
5149 emit_move_insn (to, target);
5151 convert_move (to, target, 0);
5155 /* Generate code to convert FROM to fixed point and store in TO. FROM
5156 must be floating point. */
5159 expand_fix (rtx to, rtx from, int unsignedp)
5161 enum insn_code icode;
5163 enum machine_mode fmode, imode;
5166 /* We first try to find a pair of modes, one real and one integer, at
5167 least as wide as FROM and TO, respectively, in which we can open-code
5168 this conversion. If the integer mode is wider than the mode of TO,
5169 we can do the conversion either signed or unsigned. */
5171 for (fmode = GET_MODE (from); fmode != VOIDmode;
5172 fmode = GET_MODE_WIDER_MODE (fmode))
5173 for (imode = GET_MODE (to); imode != VOIDmode;
5174 imode = GET_MODE_WIDER_MODE (imode))
5176 int doing_unsigned = unsignedp;
5178 icode = can_fix_p (imode, fmode, unsignedp, &must_trunc);
5179 if (icode == CODE_FOR_nothing && imode != GET_MODE (to) && unsignedp)
5180 icode = can_fix_p (imode, fmode, 0, &must_trunc), doing_unsigned = 0;
5182 if (icode != CODE_FOR_nothing)
5184 if (fmode != GET_MODE (from))
5185 from = convert_to_mode (fmode, from, 0);
5189 rtx temp = gen_reg_rtx (GET_MODE (from));
5190 from = expand_unop (GET_MODE (from), ftrunc_optab, from,
5194 if (imode != GET_MODE (to))
5195 target = gen_reg_rtx (imode);
5197 emit_unop_insn (icode, target, from,
5198 doing_unsigned ? UNSIGNED_FIX : FIX);
5200 convert_move (to, target, unsignedp);
5205 /* For an unsigned conversion, there is one more way to do it.
5206 If we have a signed conversion, we generate code that compares
5207 the real value to the largest representable positive number. If if
5208 is smaller, the conversion is done normally. Otherwise, subtract
5209 one plus the highest signed number, convert, and add it back.
5211 We only need to check all real modes, since we know we didn't find
5212 anything with a wider integer mode.
5214 This code used to extend FP value into mode wider than the destination.
5215 This is needed for decimal float modes which cannot accurately
5216 represent one plus the highest signed number of the same size, but
5217 not for binary modes. Consider, for instance conversion from SFmode
5220 The hot path through the code is dealing with inputs smaller than 2^63
5221 and doing just the conversion, so there is no bits to lose.
5223 In the other path we know the value is positive in the range 2^63..2^64-1
5224 inclusive. (as for other input overflow happens and result is undefined)
5225 So we know that the most important bit set in mantissa corresponds to
5226 2^63. The subtraction of 2^63 should not generate any rounding as it
5227 simply clears out that bit. The rest is trivial. */
5229 if (unsignedp && GET_MODE_BITSIZE (GET_MODE (to)) <= HOST_BITS_PER_WIDE_INT)
5230 for (fmode = GET_MODE (from); fmode != VOIDmode;
5231 fmode = GET_MODE_WIDER_MODE (fmode))
5232 if (CODE_FOR_nothing != can_fix_p (GET_MODE (to), fmode, 0, &must_trunc)
5233 && (!DECIMAL_FLOAT_MODE_P (fmode)
5234 || GET_MODE_BITSIZE (fmode) > GET_MODE_BITSIZE (GET_MODE (to))))
5237 REAL_VALUE_TYPE offset;
5238 rtx limit, lab1, lab2, insn;
5240 bitsize = GET_MODE_BITSIZE (GET_MODE (to));
5241 real_2expN (&offset, bitsize - 1, fmode);
5242 limit = CONST_DOUBLE_FROM_REAL_VALUE (offset, fmode);
5243 lab1 = gen_label_rtx ();
5244 lab2 = gen_label_rtx ();
5246 if (fmode != GET_MODE (from))
5247 from = convert_to_mode (fmode, from, 0);
5249 /* See if we need to do the subtraction. */
5250 do_pending_stack_adjust ();
5251 emit_cmp_and_jump_insns (from, limit, GE, NULL_RTX, GET_MODE (from),
5254 /* If not, do the signed "fix" and branch around fixup code. */
5255 expand_fix (to, from, 0);
5256 emit_jump_insn (gen_jump (lab2));
5259 /* Otherwise, subtract 2**(N-1), convert to signed number,
5260 then add 2**(N-1). Do the addition using XOR since this
5261 will often generate better code. */
5263 target = expand_binop (GET_MODE (from), sub_optab, from, limit,
5264 NULL_RTX, 0, OPTAB_LIB_WIDEN);
5265 expand_fix (to, target, 0);
5266 target = expand_binop (GET_MODE (to), xor_optab, to,
5268 ((HOST_WIDE_INT) 1 << (bitsize - 1),
5270 to, 1, OPTAB_LIB_WIDEN);
5273 emit_move_insn (to, target);
5277 if (optab_handler (mov_optab, GET_MODE (to))->insn_code
5278 != CODE_FOR_nothing)
5280 /* Make a place for a REG_NOTE and add it. */
5281 insn = emit_move_insn (to, to);
5282 set_unique_reg_note (insn,
5284 gen_rtx_fmt_e (UNSIGNED_FIX,
5292 /* We can't do it with an insn, so use a library call. But first ensure
5293 that the mode of TO is at least as wide as SImode, since those are the
5294 only library calls we know about. */
5296 if (GET_MODE_SIZE (GET_MODE (to)) < GET_MODE_SIZE (SImode))
5298 target = gen_reg_rtx (SImode);
5300 expand_fix (target, from, unsignedp);
5308 convert_optab tab = unsignedp ? ufix_optab : sfix_optab;
5309 libfunc = convert_optab_libfunc (tab, GET_MODE (to), GET_MODE (from));
5310 gcc_assert (libfunc);
5314 value = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
5315 GET_MODE (to), 1, from,
5317 insns = get_insns ();
5320 emit_libcall_block (insns, target, value,
5321 gen_rtx_fmt_e (unsignedp ? UNSIGNED_FIX : FIX,
5322 GET_MODE (to), from));
5327 if (GET_MODE (to) == GET_MODE (target))
5328 emit_move_insn (to, target);
5330 convert_move (to, target, 0);
5334 /* Generate code to convert FROM or TO a fixed-point.
5335 If UINTP is true, either TO or FROM is an unsigned integer.
5336 If SATP is true, we need to saturate the result. */
5339 expand_fixed_convert (rtx to, rtx from, int uintp, int satp)
5341 enum machine_mode to_mode = GET_MODE (to);
5342 enum machine_mode from_mode = GET_MODE (from);
5344 enum rtx_code this_code;
5345 enum insn_code code;
5349 if (to_mode == from_mode)
5351 emit_move_insn (to, from);
5357 tab = satp ? satfractuns_optab : fractuns_optab;
5358 this_code = satp ? UNSIGNED_SAT_FRACT : UNSIGNED_FRACT_CONVERT;
5362 tab = satp ? satfract_optab : fract_optab;
5363 this_code = satp ? SAT_FRACT : FRACT_CONVERT;
5365 code = tab->handlers[to_mode][from_mode].insn_code;
5366 if (code != CODE_FOR_nothing)
5368 emit_unop_insn (code, to, from, this_code);
5372 libfunc = convert_optab_libfunc (tab, to_mode, from_mode);
5373 gcc_assert (libfunc);
5376 value = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST, to_mode,
5377 1, from, from_mode);
5378 insns = get_insns ();
5381 emit_libcall_block (insns, to, value,
5382 gen_rtx_fmt_e (tab->code, to_mode, from));
5385 /* Generate code to convert FROM to fixed point and store in TO. FROM
5386 must be floating point, TO must be signed. Use the conversion optab
5387 TAB to do the conversion. */
5390 expand_sfix_optab (rtx to, rtx from, convert_optab tab)
5392 enum insn_code icode;
5394 enum machine_mode fmode, imode;
5396 /* We first try to find a pair of modes, one real and one integer, at
5397 least as wide as FROM and TO, respectively, in which we can open-code
5398 this conversion. If the integer mode is wider than the mode of TO,
5399 we can do the conversion either signed or unsigned. */
5401 for (fmode = GET_MODE (from); fmode != VOIDmode;
5402 fmode = GET_MODE_WIDER_MODE (fmode))
5403 for (imode = GET_MODE (to); imode != VOIDmode;
5404 imode = GET_MODE_WIDER_MODE (imode))
5406 icode = convert_optab_handler (tab, imode, fmode)->insn_code;
5407 if (icode != CODE_FOR_nothing)
5409 if (fmode != GET_MODE (from))
5410 from = convert_to_mode (fmode, from, 0);
5412 if (imode != GET_MODE (to))
5413 target = gen_reg_rtx (imode);
5415 emit_unop_insn (icode, target, from, UNKNOWN);
5417 convert_move (to, target, 0);
5425 /* Report whether we have an instruction to perform the operation
5426 specified by CODE on operands of mode MODE. */
5428 have_insn_for (enum rtx_code code, enum machine_mode mode)
5430 return (code_to_optab[(int) code] != 0
5431 && (optab_handler (code_to_optab[(int) code], mode)->insn_code
5432 != CODE_FOR_nothing));
5435 /* Set all insn_code fields to CODE_FOR_nothing. */
5438 init_insn_codes (void)
5442 for (i = 0; i < (unsigned int) OTI_MAX; i++)
5447 op = &optab_table[i];
5448 for (j = 0; j < NUM_MACHINE_MODES; j++)
5449 optab_handler (op, j)->insn_code = CODE_FOR_nothing;
5451 for (i = 0; i < (unsigned int) COI_MAX; i++)
5456 op = &convert_optab_table[i];
5457 for (j = 0; j < NUM_MACHINE_MODES; j++)
5458 for (k = 0; k < NUM_MACHINE_MODES; k++)
5459 convert_optab_handler (op, j, k)->insn_code = CODE_FOR_nothing;
5463 /* Initialize OP's code to CODE, and write it into the code_to_optab table. */
5465 init_optab (optab op, enum rtx_code code)
5468 code_to_optab[(int) code] = op;
5471 /* Same, but fill in its code as CODE, and do _not_ write it into
5472 the code_to_optab table. */
5474 init_optabv (optab op, enum rtx_code code)
5479 /* Conversion optabs never go in the code_to_optab table. */
5481 init_convert_optab (convert_optab op, enum rtx_code code)
5486 /* Initialize the libfunc fields of an entire group of entries in some
5487 optab. Each entry is set equal to a string consisting of a leading
5488 pair of underscores followed by a generic operation name followed by
5489 a mode name (downshifted to lowercase) followed by a single character
5490 representing the number of operands for the given operation (which is
5491 usually one of the characters '2', '3', or '4').
5493 OPTABLE is the table in which libfunc fields are to be initialized.
5494 OPNAME is the generic (string) name of the operation.
5495 SUFFIX is the character which specifies the number of operands for
5496 the given generic operation.
5497 MODE is the mode to generate for.
5501 gen_libfunc (optab optable, const char *opname, int suffix, enum machine_mode mode)
5503 unsigned opname_len = strlen (opname);
5504 const char *mname = GET_MODE_NAME (mode);
5505 unsigned mname_len = strlen (mname);
5506 char *libfunc_name = alloca (2 + opname_len + mname_len + 1 + 1);
5513 for (q = opname; *q; )
5515 for (q = mname; *q; q++)
5516 *p++ = TOLOWER (*q);
5520 set_optab_libfunc (optable, mode,
5521 ggc_alloc_string (libfunc_name, p - libfunc_name));
5524 /* Like gen_libfunc, but verify that integer operation is involved. */
5527 gen_int_libfunc (optab optable, const char *opname, char suffix,
5528 enum machine_mode mode)
5530 int maxsize = 2 * BITS_PER_WORD;
5532 if (GET_MODE_CLASS (mode) != MODE_INT)
5534 if (maxsize < LONG_LONG_TYPE_SIZE)
5535 maxsize = LONG_LONG_TYPE_SIZE;
5536 if (GET_MODE_CLASS (mode) != MODE_INT
5537 || mode < word_mode || GET_MODE_BITSIZE (mode) > maxsize)
5539 gen_libfunc (optable, opname, suffix, mode);
5542 /* Like gen_libfunc, but verify that FP and set decimal prefix if needed. */
5545 gen_fp_libfunc (optab optable, const char *opname, char suffix,
5546 enum machine_mode mode)
5550 if (GET_MODE_CLASS (mode) == MODE_FLOAT)
5551 gen_libfunc (optable, opname, suffix, mode);
5552 if (DECIMAL_FLOAT_MODE_P (mode))
5554 dec_opname = alloca (sizeof (DECIMAL_PREFIX) + strlen (opname));
5555 /* For BID support, change the name to have either a bid_ or dpd_ prefix
5556 depending on the low level floating format used. */
5557 memcpy (dec_opname, DECIMAL_PREFIX, sizeof (DECIMAL_PREFIX) - 1);
5558 strcpy (dec_opname + sizeof (DECIMAL_PREFIX) - 1, opname);
5559 gen_libfunc (optable, dec_opname, suffix, mode);
5563 /* Like gen_libfunc, but verify that fixed-point operation is involved. */
5566 gen_fixed_libfunc (optab optable, const char *opname, char suffix,
5567 enum machine_mode mode)
5569 if (!ALL_FIXED_POINT_MODE_P (mode))
5571 gen_libfunc (optable, opname, suffix, mode);
5574 /* Like gen_libfunc, but verify that signed fixed-point operation is
5578 gen_signed_fixed_libfunc (optab optable, const char *opname, char suffix,
5579 enum machine_mode mode)
5581 if (!SIGNED_FIXED_POINT_MODE_P (mode))
5583 gen_libfunc (optable, opname, suffix, mode);
5586 /* Like gen_libfunc, but verify that unsigned fixed-point operation is
5590 gen_unsigned_fixed_libfunc (optab optable, const char *opname, char suffix,
5591 enum machine_mode mode)
5593 if (!UNSIGNED_FIXED_POINT_MODE_P (mode))
5595 gen_libfunc (optable, opname, suffix, mode);
5598 /* Like gen_libfunc, but verify that FP or INT operation is involved. */
5601 gen_int_fp_libfunc (optab optable, const char *name, char suffix,
5602 enum machine_mode mode)
5604 if (DECIMAL_FLOAT_MODE_P (mode) || GET_MODE_CLASS (mode) == MODE_FLOAT)
5605 gen_fp_libfunc (optable, name, suffix, mode);
5606 if (INTEGRAL_MODE_P (mode))
5607 gen_int_libfunc (optable, name, suffix, mode);
5610 /* Like gen_libfunc, but verify that FP or INT operation is involved
5611 and add 'v' suffix for integer operation. */
5614 gen_intv_fp_libfunc (optab optable, const char *name, char suffix,
5615 enum machine_mode mode)
5617 if (DECIMAL_FLOAT_MODE_P (mode) || GET_MODE_CLASS (mode) == MODE_FLOAT)
5618 gen_fp_libfunc (optable, name, suffix, mode);
5619 if (GET_MODE_CLASS (mode) == MODE_INT)
5621 int len = strlen (name);
5622 char *v_name = alloca (len + 2);
5623 strcpy (v_name, name);
5625 v_name[len + 1] = 0;
5626 gen_int_libfunc (optable, v_name, suffix, mode);
5630 /* Like gen_libfunc, but verify that FP or INT or FIXED operation is
5634 gen_int_fp_fixed_libfunc (optab optable, const char *name, char suffix,
5635 enum machine_mode mode)
5637 if (DECIMAL_FLOAT_MODE_P (mode) || GET_MODE_CLASS (mode) == MODE_FLOAT)
5638 gen_fp_libfunc (optable, name, suffix, mode);
5639 if (INTEGRAL_MODE_P (mode))
5640 gen_int_libfunc (optable, name, suffix, mode);
5641 if (ALL_FIXED_POINT_MODE_P (mode))
5642 gen_fixed_libfunc (optable, name, suffix, mode);
5645 /* Like gen_libfunc, but verify that FP or INT or signed FIXED operation is
5649 gen_int_fp_signed_fixed_libfunc (optab optable, const char *name, char suffix,
5650 enum machine_mode mode)
5652 if (DECIMAL_FLOAT_MODE_P (mode) || GET_MODE_CLASS (mode) == MODE_FLOAT)
5653 gen_fp_libfunc (optable, name, suffix, mode);
5654 if (INTEGRAL_MODE_P (mode))
5655 gen_int_libfunc (optable, name, suffix, mode);
5656 if (SIGNED_FIXED_POINT_MODE_P (mode))
5657 gen_signed_fixed_libfunc (optable, name, suffix, mode);
5660 /* Like gen_libfunc, but verify that INT or FIXED operation is
5664 gen_int_fixed_libfunc (optab optable, const char *name, char suffix,
5665 enum machine_mode mode)
5667 if (INTEGRAL_MODE_P (mode))
5668 gen_int_libfunc (optable, name, suffix, mode);
5669 if (ALL_FIXED_POINT_MODE_P (mode))
5670 gen_fixed_libfunc (optable, name, suffix, mode);
5673 /* Like gen_libfunc, but verify that INT or signed FIXED operation is
5677 gen_int_signed_fixed_libfunc (optab optable, const char *name, char suffix,
5678 enum machine_mode mode)
5680 if (INTEGRAL_MODE_P (mode))
5681 gen_int_libfunc (optable, name, suffix, mode);
5682 if (SIGNED_FIXED_POINT_MODE_P (mode))
5683 gen_signed_fixed_libfunc (optable, name, suffix, mode);
5686 /* Like gen_libfunc, but verify that INT or unsigned FIXED operation is
5690 gen_int_unsigned_fixed_libfunc (optab optable, const char *name, char suffix,
5691 enum machine_mode mode)
5693 if (INTEGRAL_MODE_P (mode))
5694 gen_int_libfunc (optable, name, suffix, mode);
5695 if (UNSIGNED_FIXED_POINT_MODE_P (mode))
5696 gen_unsigned_fixed_libfunc (optable, name, suffix, mode);
5699 /* Initialize the libfunc fields of an entire group of entries of an
5700 inter-mode-class conversion optab. The string formation rules are
5701 similar to the ones for init_libfuncs, above, but instead of having
5702 a mode name and an operand count these functions have two mode names
5703 and no operand count. */
5706 gen_interclass_conv_libfunc (convert_optab tab,
5708 enum machine_mode tmode,
5709 enum machine_mode fmode)
5711 size_t opname_len = strlen (opname);
5712 size_t mname_len = 0;
5714 const char *fname, *tname;
5716 char *libfunc_name, *suffix;
5717 char *nondec_name, *dec_name, *nondec_suffix, *dec_suffix;
5720 /* If this is a decimal conversion, add the current BID vs. DPD prefix that
5721 depends on which underlying decimal floating point format is used. */
5722 const size_t dec_len = sizeof (DECIMAL_PREFIX) - 1;
5724 mname_len = strlen (GET_MODE_NAME (tmode)) + strlen (GET_MODE_NAME (fmode));
5726 nondec_name = alloca (2 + opname_len + mname_len + 1 + 1);
5727 nondec_name[0] = '_';
5728 nondec_name[1] = '_';
5729 memcpy (&nondec_name[2], opname, opname_len);
5730 nondec_suffix = nondec_name + opname_len + 2;
5732 dec_name = alloca (2 + dec_len + opname_len + mname_len + 1 + 1);
5735 memcpy (&dec_name[2], DECIMAL_PREFIX, dec_len);
5736 memcpy (&dec_name[2+dec_len], opname, opname_len);
5737 dec_suffix = dec_name + dec_len + opname_len + 2;
5739 fname = GET_MODE_NAME (fmode);
5740 tname = GET_MODE_NAME (tmode);
5742 if (DECIMAL_FLOAT_MODE_P(fmode) || DECIMAL_FLOAT_MODE_P(tmode))
5744 libfunc_name = dec_name;
5745 suffix = dec_suffix;
5749 libfunc_name = nondec_name;
5750 suffix = nondec_suffix;
5754 for (q = fname; *q; p++, q++)
5756 for (q = tname; *q; p++, q++)
5761 set_conv_libfunc (tab, tmode, fmode,
5762 ggc_alloc_string (libfunc_name, p - libfunc_name));
5765 /* Same as gen_interclass_conv_libfunc but verify that we are producing
5766 int->fp conversion. */
5769 gen_int_to_fp_conv_libfunc (convert_optab tab,
5771 enum machine_mode tmode,
5772 enum machine_mode fmode)
5774 if (GET_MODE_CLASS (fmode) != MODE_INT)
5776 if (GET_MODE_CLASS (tmode) != MODE_FLOAT && !DECIMAL_FLOAT_MODE_P (tmode))
5778 gen_interclass_conv_libfunc (tab, opname, tmode, fmode);
5781 /* ufloat_optab is special by using floatun for FP and floatuns decimal fp
5785 gen_ufloat_conv_libfunc (convert_optab tab,
5786 const char *opname ATTRIBUTE_UNUSED,
5787 enum machine_mode tmode,
5788 enum machine_mode fmode)
5790 if (DECIMAL_FLOAT_MODE_P (tmode))
5791 gen_int_to_fp_conv_libfunc (tab, "floatuns", tmode, fmode);
5793 gen_int_to_fp_conv_libfunc (tab, "floatun", tmode, fmode);
5796 /* Same as gen_interclass_conv_libfunc but verify that we are producing
5797 fp->int conversion. */
5800 gen_int_to_fp_nondecimal_conv_libfunc (convert_optab tab,
5802 enum machine_mode tmode,
5803 enum machine_mode fmode)
5805 if (GET_MODE_CLASS (fmode) != MODE_INT)
5807 if (GET_MODE_CLASS (tmode) != MODE_FLOAT)
5809 gen_interclass_conv_libfunc (tab, opname, tmode, fmode);
5812 /* Same as gen_interclass_conv_libfunc but verify that we are producing
5813 fp->int conversion with no decimal floating point involved. */
5816 gen_fp_to_int_conv_libfunc (convert_optab tab,
5818 enum machine_mode tmode,
5819 enum machine_mode fmode)
5821 if (GET_MODE_CLASS (fmode) != MODE_FLOAT && !DECIMAL_FLOAT_MODE_P (fmode))
5823 if (GET_MODE_CLASS (tmode) != MODE_INT)
5825 gen_interclass_conv_libfunc (tab, opname, tmode, fmode);
5828 /* Initialize the libfunc fiels of an of an intra-mode-class conversion optab.
5829 The string formation rules are
5830 similar to the ones for init_libfunc, above. */
5833 gen_intraclass_conv_libfunc (convert_optab tab, const char *opname,
5834 enum machine_mode tmode, enum machine_mode fmode)
5836 size_t opname_len = strlen (opname);
5837 size_t mname_len = 0;
5839 const char *fname, *tname;
5841 char *nondec_name, *dec_name, *nondec_suffix, *dec_suffix;
5842 char *libfunc_name, *suffix;
5845 /* If this is a decimal conversion, add the current BID vs. DPD prefix that
5846 depends on which underlying decimal floating point format is used. */
5847 const size_t dec_len = sizeof (DECIMAL_PREFIX) - 1;
5849 mname_len = strlen (GET_MODE_NAME (tmode)) + strlen (GET_MODE_NAME (fmode));
5851 nondec_name = alloca (2 + opname_len + mname_len + 1 + 1);
5852 nondec_name[0] = '_';
5853 nondec_name[1] = '_';
5854 memcpy (&nondec_name[2], opname, opname_len);
5855 nondec_suffix = nondec_name + opname_len + 2;
5857 dec_name = alloca (2 + dec_len + opname_len + mname_len + 1 + 1);
5860 memcpy (&dec_name[2], DECIMAL_PREFIX, dec_len);
5861 memcpy (&dec_name[2 + dec_len], opname, opname_len);
5862 dec_suffix = dec_name + dec_len + opname_len + 2;
5864 fname = GET_MODE_NAME (fmode);
5865 tname = GET_MODE_NAME (tmode);
5867 if (DECIMAL_FLOAT_MODE_P(fmode) || DECIMAL_FLOAT_MODE_P(tmode))
5869 libfunc_name = dec_name;
5870 suffix = dec_suffix;
5874 libfunc_name = nondec_name;
5875 suffix = nondec_suffix;
5879 for (q = fname; *q; p++, q++)
5881 for (q = tname; *q; p++, q++)
5887 set_conv_libfunc (tab, tmode, fmode,
5888 ggc_alloc_string (libfunc_name, p - libfunc_name));
5891 /* Pick proper libcall for trunc_optab. We need to chose if we do
5892 truncation or extension and interclass or intraclass. */
5895 gen_trunc_conv_libfunc (convert_optab tab,
5897 enum machine_mode tmode,
5898 enum machine_mode fmode)
5900 if (GET_MODE_CLASS (tmode) != MODE_FLOAT && !DECIMAL_FLOAT_MODE_P (tmode))
5902 if (GET_MODE_CLASS (fmode) != MODE_FLOAT && !DECIMAL_FLOAT_MODE_P (fmode))
5907 if ((GET_MODE_CLASS (tmode) == MODE_FLOAT && DECIMAL_FLOAT_MODE_P (fmode))
5908 || (GET_MODE_CLASS (fmode) == MODE_FLOAT && DECIMAL_FLOAT_MODE_P (tmode)))
5909 gen_interclass_conv_libfunc (tab, opname, tmode, fmode);
5911 if (GET_MODE_PRECISION (fmode) <= GET_MODE_PRECISION (tmode))
5914 if ((GET_MODE_CLASS (tmode) == MODE_FLOAT
5915 && GET_MODE_CLASS (fmode) == MODE_FLOAT)
5916 || (DECIMAL_FLOAT_MODE_P (fmode) && DECIMAL_FLOAT_MODE_P (tmode)))
5917 gen_intraclass_conv_libfunc (tab, opname, tmode, fmode);
5920 /* Pick proper libcall for extend_optab. We need to chose if we do
5921 truncation or extension and interclass or intraclass. */
5924 gen_extend_conv_libfunc (convert_optab tab,
5925 const char *opname ATTRIBUTE_UNUSED,
5926 enum machine_mode tmode,
5927 enum machine_mode fmode)
5929 if (GET_MODE_CLASS (tmode) != MODE_FLOAT && !DECIMAL_FLOAT_MODE_P (tmode))
5931 if (GET_MODE_CLASS (fmode) != MODE_FLOAT && !DECIMAL_FLOAT_MODE_P (fmode))
5936 if ((GET_MODE_CLASS (tmode) == MODE_FLOAT && DECIMAL_FLOAT_MODE_P (fmode))
5937 || (GET_MODE_CLASS (fmode) == MODE_FLOAT && DECIMAL_FLOAT_MODE_P (tmode)))
5938 gen_interclass_conv_libfunc (tab, opname, tmode, fmode);
5940 if (GET_MODE_PRECISION (fmode) > GET_MODE_PRECISION (tmode))
5943 if ((GET_MODE_CLASS (tmode) == MODE_FLOAT
5944 && GET_MODE_CLASS (fmode) == MODE_FLOAT)
5945 || (DECIMAL_FLOAT_MODE_P (fmode) && DECIMAL_FLOAT_MODE_P (tmode)))
5946 gen_intraclass_conv_libfunc (tab, opname, tmode, fmode);
5949 /* Pick proper libcall for fract_optab. We need to chose if we do
5950 interclass or intraclass. */
5953 gen_fract_conv_libfunc (convert_optab tab,
5955 enum machine_mode tmode,
5956 enum machine_mode fmode)
5960 if (!(ALL_FIXED_POINT_MODE_P (tmode) || ALL_FIXED_POINT_MODE_P (fmode)))
5963 if (GET_MODE_CLASS (tmode) == GET_MODE_CLASS (fmode))
5964 gen_intraclass_conv_libfunc (tab, opname, tmode, fmode);
5966 gen_interclass_conv_libfunc (tab, opname, tmode, fmode);
5969 /* Pick proper libcall for fractuns_optab. */
5972 gen_fractuns_conv_libfunc (convert_optab tab,
5974 enum machine_mode tmode,
5975 enum machine_mode fmode)
5979 /* One mode must be a fixed-point mode, and the other must be an integer
5981 if (!((ALL_FIXED_POINT_MODE_P (tmode) && GET_MODE_CLASS (fmode) == MODE_INT)
5982 || (ALL_FIXED_POINT_MODE_P (fmode)
5983 && GET_MODE_CLASS (tmode) == MODE_INT)))
5986 gen_interclass_conv_libfunc (tab, opname, tmode, fmode);
5989 /* Pick proper libcall for satfract_optab. We need to chose if we do
5990 interclass or intraclass. */
5993 gen_satfract_conv_libfunc (convert_optab tab,
5995 enum machine_mode tmode,
5996 enum machine_mode fmode)
6000 /* TMODE must be a fixed-point mode. */
6001 if (!ALL_FIXED_POINT_MODE_P (tmode))
6004 if (GET_MODE_CLASS (tmode) == GET_MODE_CLASS (fmode))
6005 gen_intraclass_conv_libfunc (tab, opname, tmode, fmode);
6007 gen_interclass_conv_libfunc (tab, opname, tmode, fmode);
6010 /* Pick proper libcall for satfractuns_optab. */
6013 gen_satfractuns_conv_libfunc (convert_optab tab,
6015 enum machine_mode tmode,
6016 enum machine_mode fmode)
6020 /* TMODE must be a fixed-point mode, and FMODE must be an integer mode. */
6021 if (!(ALL_FIXED_POINT_MODE_P (tmode) && GET_MODE_CLASS (fmode) == MODE_INT))
6024 gen_interclass_conv_libfunc (tab, opname, tmode, fmode);
6028 init_one_libfunc (const char *name)
6032 /* Create a FUNCTION_DECL that can be passed to
6033 targetm.encode_section_info. */
6034 /* ??? We don't have any type information except for this is
6035 a function. Pretend this is "int foo()". */
6036 tree decl = build_decl (FUNCTION_DECL, get_identifier (name),
6037 build_function_type (integer_type_node, NULL_TREE));
6038 DECL_ARTIFICIAL (decl) = 1;
6039 DECL_EXTERNAL (decl) = 1;
6040 TREE_PUBLIC (decl) = 1;
6042 symbol = XEXP (DECL_RTL (decl), 0);
6044 /* Zap the nonsensical SYMBOL_REF_DECL for this. What we're left with
6045 are the flags assigned by targetm.encode_section_info. */
6046 SET_SYMBOL_REF_DECL (symbol, 0);
6051 /* Call this to reset the function entry for one optab (OPTABLE) in mode
6052 MODE to NAME, which should be either 0 or a string constant. */
6054 set_optab_libfunc (optab optable, enum machine_mode mode, const char *name)
6057 struct libfunc_entry e;
6058 struct libfunc_entry **slot;
6059 e.optab = (size_t) (optable - &optab_table[0]);
6064 val = init_one_libfunc (name);
6067 slot = (struct libfunc_entry **) htab_find_slot (libfunc_hash, &e, INSERT);
6069 *slot = ggc_alloc (sizeof (struct libfunc_entry));
6070 (*slot)->optab = (size_t) (optable - &optab_table[0]);
6071 (*slot)->mode1 = mode;
6072 (*slot)->mode2 = VOIDmode;
6073 (*slot)->libfunc = val;
6076 /* Call this to reset the function entry for one conversion optab
6077 (OPTABLE) from mode FMODE to mode TMODE to NAME, which should be
6078 either 0 or a string constant. */
6080 set_conv_libfunc (convert_optab optable, enum machine_mode tmode,
6081 enum machine_mode fmode, const char *name)
6084 struct libfunc_entry e;
6085 struct libfunc_entry **slot;
6086 e.optab = (size_t) (optable - &convert_optab_table[0]);
6091 val = init_one_libfunc (name);
6094 slot = (struct libfunc_entry **) htab_find_slot (libfunc_hash, &e, INSERT);
6096 *slot = ggc_alloc (sizeof (struct libfunc_entry));
6097 (*slot)->optab = (size_t) (optable - &convert_optab_table[0]);
6098 (*slot)->mode1 = tmode;
6099 (*slot)->mode2 = fmode;
6100 (*slot)->libfunc = val;
6103 /* Call this to initialize the contents of the optabs
6104 appropriately for the current target machine. */
6110 enum machine_mode int_mode;
6113 libfunc_hash = htab_create_ggc (10, hash_libfunc, eq_libfunc, NULL);
6114 /* Start by initializing all tables to contain CODE_FOR_nothing. */
6116 for (i = 0; i < NUM_RTX_CODE; i++)
6117 setcc_gen_code[i] = CODE_FOR_nothing;
6119 #ifdef HAVE_conditional_move
6120 for (i = 0; i < NUM_MACHINE_MODES; i++)
6121 movcc_gen_code[i] = CODE_FOR_nothing;
6124 for (i = 0; i < NUM_MACHINE_MODES; i++)
6126 vcond_gen_code[i] = CODE_FOR_nothing;
6127 vcondu_gen_code[i] = CODE_FOR_nothing;
6130 #if GCC_VERSION >= 4000
6131 /* We statically initialize the insn_codes with CODE_FOR_nothing. */
/* Arithmetic optabs: record the RTL code (or UNKNOWN) each one implements. */
6138 init_optab (add_optab, PLUS);
6139 init_optabv (addv_optab, PLUS);
6140 init_optab (sub_optab, MINUS);
6141 init_optabv (subv_optab, MINUS);
6142 init_optab (ssadd_optab, SS_PLUS);
6143 init_optab (usadd_optab, US_PLUS);
6144 init_optab (sssub_optab, SS_MINUS);
6145 init_optab (ussub_optab, US_MINUS);
6146 init_optab (smul_optab, MULT);
6147 init_optab (ssmul_optab, SS_MULT);
6148 init_optab (usmul_optab, US_MULT);
6149 init_optabv (smulv_optab, MULT);
6150 init_optab (smul_highpart_optab, UNKNOWN);
6151 init_optab (umul_highpart_optab, UNKNOWN);
6152 init_optab (smul_widen_optab, UNKNOWN);
6153 init_optab (umul_widen_optab, UNKNOWN);
6154 init_optab (usmul_widen_optab, UNKNOWN);
6155 init_optab (smadd_widen_optab, UNKNOWN);
6156 init_optab (umadd_widen_optab, UNKNOWN);
6157 init_optab (ssmadd_widen_optab, UNKNOWN);
6158 init_optab (usmadd_widen_optab, UNKNOWN);
6159 init_optab (smsub_widen_optab, UNKNOWN);
6160 init_optab (umsub_widen_optab, UNKNOWN);
6161 init_optab (ssmsub_widen_optab, UNKNOWN);
6162 init_optab (usmsub_widen_optab, UNKNOWN);
6163 init_optab (sdiv_optab, DIV);
6164 init_optab (ssdiv_optab, SS_DIV);
6165 init_optab (usdiv_optab, US_DIV);
6166 init_optabv (sdivv_optab, DIV);
6167 init_optab (sdivmod_optab, UNKNOWN);
6168 init_optab (udiv_optab, UDIV);
6169 init_optab (udivmod_optab, UNKNOWN);
6170 init_optab (smod_optab, MOD);
6171 init_optab (umod_optab, UMOD);
6172 init_optab (fmod_optab, UNKNOWN);
6173 init_optab (remainder_optab, UNKNOWN);
6174 init_optab (ftrunc_optab, UNKNOWN);
6175 init_optab (and_optab, AND);
6176 init_optab (ior_optab, IOR);
6177 init_optab (xor_optab, XOR);
6178 init_optab (ashl_optab, ASHIFT);
6179 init_optab (ssashl_optab, SS_ASHIFT);
6180 init_optab (usashl_optab, US_ASHIFT);
6181 init_optab (ashr_optab, ASHIFTRT);
6182 init_optab (lshr_optab, LSHIFTRT);
6183 init_optab (rotl_optab, ROTATE);
6184 init_optab (rotr_optab, ROTATERT);
6185 init_optab (smin_optab, SMIN);
6186 init_optab (smax_optab, SMAX);
6187 init_optab (umin_optab, UMIN);
6188 init_optab (umax_optab, UMAX);
6189 init_optab (pow_optab, UNKNOWN);
6190 init_optab (atan2_optab, UNKNOWN);
6192 /* These three have codes assigned exclusively for the sake of
6194 init_optab (mov_optab, SET);
6195 init_optab (movstrict_optab, STRICT_LOW_PART);
6196 init_optab (cmp_optab, COMPARE);
6198 init_optab (storent_optab, UNKNOWN);
6200 init_optab (ucmp_optab, UNKNOWN);
6201 init_optab (tst_optab, UNKNOWN);
/* Comparison and unary optabs.  */
6203 init_optab (eq_optab, EQ);
6204 init_optab (ne_optab, NE);
6205 init_optab (gt_optab, GT);
6206 init_optab (ge_optab, GE);
6207 init_optab (lt_optab, LT);
6208 init_optab (le_optab, LE);
6209 init_optab (unord_optab, UNORDERED);
6211 init_optab (neg_optab, NEG);
6212 init_optab (ssneg_optab, SS_NEG);
6213 init_optab (usneg_optab, US_NEG);
6214 init_optabv (negv_optab, NEG);
6215 init_optab (abs_optab, ABS);
6216 init_optabv (absv_optab, ABS);
6217 init_optab (addcc_optab, UNKNOWN);
6218 init_optab (one_cmpl_optab, NOT);
6219 init_optab (bswap_optab, BSWAP);
6220 init_optab (ffs_optab, FFS);
6221 init_optab (clz_optab, CLZ);
6222 init_optab (ctz_optab, CTZ);
6223 init_optab (popcount_optab, POPCOUNT);
6224 init_optab (parity_optab, PARITY);
6225 init_optab (sqrt_optab, SQRT);
6226 init_optab (floor_optab, UNKNOWN);
6227 init_optab (ceil_optab, UNKNOWN);
6228 init_optab (round_optab, UNKNOWN);
6229 init_optab (btrunc_optab, UNKNOWN);
6230 init_optab (nearbyint_optab, UNKNOWN);
6231 init_optab (rint_optab, UNKNOWN);
6232 init_optab (sincos_optab, UNKNOWN);
6233 init_optab (sin_optab, UNKNOWN);
6234 init_optab (asin_optab, UNKNOWN);
6235 init_optab (cos_optab, UNKNOWN);
6236 init_optab (acos_optab, UNKNOWN);
6237 init_optab (exp_optab, UNKNOWN);
6238 init_optab (exp10_optab, UNKNOWN);
6239 init_optab (exp2_optab, UNKNOWN);
6240 init_optab (expm1_optab, UNKNOWN);
6241 init_optab (ldexp_optab, UNKNOWN);
6242 init_optab (scalb_optab, UNKNOWN);
6243 init_optab (logb_optab, UNKNOWN);
6244 init_optab (ilogb_optab, UNKNOWN);
6245 init_optab (log_optab, UNKNOWN);
6246 init_optab (log10_optab, UNKNOWN);
6247 init_optab (log2_optab, UNKNOWN);
6248 init_optab (log1p_optab, UNKNOWN);
6249 init_optab (tan_optab, UNKNOWN);
6250 init_optab (atan_optab, UNKNOWN);
6251 init_optab (copysign_optab, UNKNOWN);
6252 init_optab (signbit_optab, UNKNOWN);
6254 init_optab (isinf_optab, UNKNOWN);
6256 init_optab (strlen_optab, UNKNOWN);
6257 init_optab (cbranch_optab, UNKNOWN);
6258 init_optab (cmov_optab, UNKNOWN);
6259 init_optab (cstore_optab, UNKNOWN);
6260 init_optab (push_optab, UNKNOWN);
/* Vector reduction and widening optabs.  */
6262 init_optab (reduc_smax_optab, UNKNOWN);
6263 init_optab (reduc_umax_optab, UNKNOWN);
6264 init_optab (reduc_smin_optab, UNKNOWN);
6265 init_optab (reduc_umin_optab, UNKNOWN);
6266 init_optab (reduc_splus_optab, UNKNOWN);
6267 init_optab (reduc_uplus_optab, UNKNOWN);
6269 init_optab (ssum_widen_optab, UNKNOWN);
6270 init_optab (usum_widen_optab, UNKNOWN);
6271 init_optab (sdot_prod_optab, UNKNOWN);
6272 init_optab (udot_prod_optab, UNKNOWN);
/* Vector permute/pack/unpack optabs.  */
6274 init_optab (vec_extract_optab, UNKNOWN);
6275 init_optab (vec_extract_even_optab, UNKNOWN);
6276 init_optab (vec_extract_odd_optab, UNKNOWN);
6277 init_optab (vec_interleave_high_optab, UNKNOWN);
6278 init_optab (vec_interleave_low_optab, UNKNOWN);
6279 init_optab (vec_set_optab, UNKNOWN);
6280 init_optab (vec_init_optab, UNKNOWN);
6281 init_optab (vec_shl_optab, UNKNOWN);
6282 init_optab (vec_shr_optab, UNKNOWN);
6283 init_optab (vec_realign_load_optab, UNKNOWN);
6284 init_optab (movmisalign_optab, UNKNOWN);
6285 init_optab (vec_widen_umult_hi_optab, UNKNOWN);
6286 init_optab (vec_widen_umult_lo_optab, UNKNOWN);
6287 init_optab (vec_widen_smult_hi_optab, UNKNOWN);
6288 init_optab (vec_widen_smult_lo_optab, UNKNOWN);
6289 init_optab (vec_unpacks_hi_optab, UNKNOWN);
6290 init_optab (vec_unpacks_lo_optab, UNKNOWN);
6291 init_optab (vec_unpacku_hi_optab, UNKNOWN);
6292 init_optab (vec_unpacku_lo_optab, UNKNOWN);
6293 init_optab (vec_unpacks_float_hi_optab, UNKNOWN);
6294 init_optab (vec_unpacks_float_lo_optab, UNKNOWN);
6295 init_optab (vec_unpacku_float_hi_optab, UNKNOWN);
6296 init_optab (vec_unpacku_float_lo_optab, UNKNOWN);
6297 init_optab (vec_pack_trunc_optab, UNKNOWN);
6298 init_optab (vec_pack_usat_optab, UNKNOWN);
6299 init_optab (vec_pack_ssat_optab, UNKNOWN);
6300 init_optab (vec_pack_ufix_trunc_optab, UNKNOWN);
6301 init_optab (vec_pack_sfix_trunc_optab, UNKNOWN);
6303 init_optab (powi_optab, UNKNOWN);
/* Conversion optabs (two-mode tables).  */
6306 init_convert_optab (sext_optab, SIGN_EXTEND);
6307 init_convert_optab (zext_optab, ZERO_EXTEND);
6308 init_convert_optab (trunc_optab, TRUNCATE);
6309 init_convert_optab (sfix_optab, FIX);
6310 init_convert_optab (ufix_optab, UNSIGNED_FIX);
6311 init_convert_optab (sfixtrunc_optab, UNKNOWN);
6312 init_convert_optab (ufixtrunc_optab, UNKNOWN);
6313 init_convert_optab (sfloat_optab, FLOAT);
6314 init_convert_optab (ufloat_optab, UNSIGNED_FLOAT);
6315 init_convert_optab (lrint_optab, UNKNOWN);
6316 init_convert_optab (lround_optab, UNKNOWN);
6317 init_convert_optab (lfloor_optab, UNKNOWN);
6318 init_convert_optab (lceil_optab, UNKNOWN);
6320 init_convert_optab (fract_optab, FRACT_CONVERT);
6321 init_convert_optab (fractuns_optab, UNSIGNED_FRACT_CONVERT);
6322 init_convert_optab (satfract_optab, SAT_FRACT);
6323 init_convert_optab (satfractuns_optab, UNSIGNED_SAT_FRACT);
/* Per-mode block-move/compare and synchronization insn-code tables.  */
6325 for (i = 0; i < NUM_MACHINE_MODES; i++)
6327 movmem_optab[i] = CODE_FOR_nothing;
6328 cmpstr_optab[i] = CODE_FOR_nothing;
6329 cmpstrn_optab[i] = CODE_FOR_nothing;
6330 cmpmem_optab[i] = CODE_FOR_nothing;
6331 setmem_optab[i] = CODE_FOR_nothing;
6333 sync_add_optab[i] = CODE_FOR_nothing;
6334 sync_sub_optab[i] = CODE_FOR_nothing;
6335 sync_ior_optab[i] = CODE_FOR_nothing;
6336 sync_and_optab[i] = CODE_FOR_nothing;
6337 sync_xor_optab[i] = CODE_FOR_nothing;
6338 sync_nand_optab[i] = CODE_FOR_nothing;
6339 sync_old_add_optab[i] = CODE_FOR_nothing;
6340 sync_old_sub_optab[i] = CODE_FOR_nothing;
6341 sync_old_ior_optab[i] = CODE_FOR_nothing;
6342 sync_old_and_optab[i] = CODE_FOR_nothing;
6343 sync_old_xor_optab[i] = CODE_FOR_nothing;
6344 sync_old_nand_optab[i] = CODE_FOR_nothing;
6345 sync_new_add_optab[i] = CODE_FOR_nothing;
6346 sync_new_sub_optab[i] = CODE_FOR_nothing;
6347 sync_new_ior_optab[i] = CODE_FOR_nothing;
6348 sync_new_and_optab[i] = CODE_FOR_nothing;
6349 sync_new_xor_optab[i] = CODE_FOR_nothing;
6350 sync_new_nand_optab[i] = CODE_FOR_nothing;
6351 sync_compare_and_swap[i] = CODE_FOR_nothing;
6352 sync_compare_and_swap_cc[i] = CODE_FOR_nothing;
6353 sync_lock_test_and_set[i] = CODE_FOR_nothing;
6354 sync_lock_release[i] = CODE_FOR_nothing;
6356 reload_in_optab[i] = reload_out_optab[i] = CODE_FOR_nothing;
6359 /* Fill in the optabs with the insns we support. */
6362 /* Initialize the optabs with the names of the library functions. */
6363 add_optab->libcall_basename = "add";
6364 add_optab->libcall_suffix = '3';
6365 add_optab->libcall_gen = gen_int_fp_fixed_libfunc;
6366 addv_optab->libcall_basename = "add";
6367 addv_optab->libcall_suffix = '3';
6368 addv_optab->libcall_gen = gen_intv_fp_libfunc;
6369 ssadd_optab->libcall_basename = "ssadd";
6370 ssadd_optab->libcall_suffix = '3';
6371 ssadd_optab->libcall_gen = gen_signed_fixed_libfunc;
6372 usadd_optab->libcall_basename = "usadd";
6373 usadd_optab->libcall_suffix = '3';
6374 usadd_optab->libcall_gen = gen_unsigned_fixed_libfunc;
6375 sub_optab->libcall_basename = "sub";
6376 sub_optab->libcall_suffix = '3';
6377 sub_optab->libcall_gen = gen_int_fp_fixed_libfunc;
6378 subv_optab->libcall_basename = "sub";
6379 subv_optab->libcall_suffix = '3';
6380 subv_optab->libcall_gen = gen_intv_fp_libfunc;
6381 sssub_optab->libcall_basename = "sssub";
6382 sssub_optab->libcall_suffix = '3';
6383 sssub_optab->libcall_gen = gen_signed_fixed_libfunc;
6384 ussub_optab->libcall_basename = "ussub";
6385 ussub_optab->libcall_suffix = '3';
6386 ussub_optab->libcall_gen = gen_unsigned_fixed_libfunc;
6387 smul_optab->libcall_basename = "mul";
6388 smul_optab->libcall_suffix = '3';
6389 smul_optab->libcall_gen = gen_int_fp_fixed_libfunc;
6390 smulv_optab->libcall_basename = "mul";
6391 smulv_optab->libcall_suffix = '3';
6392 smulv_optab->libcall_gen = gen_intv_fp_libfunc;
6393 ssmul_optab->libcall_basename = "ssmul";
6394 ssmul_optab->libcall_suffix = '3';
6395 ssmul_optab->libcall_gen = gen_signed_fixed_libfunc;
6396 usmul_optab->libcall_basename = "usmul";
6397 usmul_optab->libcall_suffix = '3';
6398 usmul_optab->libcall_gen = gen_unsigned_fixed_libfunc;
6399 sdiv_optab->libcall_basename = "div";
6400 sdiv_optab->libcall_suffix = '3';
6401 sdiv_optab->libcall_gen = gen_int_fp_signed_fixed_libfunc;
6402 sdivv_optab->libcall_basename = "divv";
6403 sdivv_optab->libcall_suffix = '3';
6404 sdivv_optab->libcall_gen = gen_int_libfunc;
6405 ssdiv_optab->libcall_basename = "ssdiv";
6406 ssdiv_optab->libcall_suffix = '3';
6407 ssdiv_optab->libcall_gen = gen_signed_fixed_libfunc;
6408 udiv_optab->libcall_basename = "udiv";
6409 udiv_optab->libcall_suffix = '3';
6410 udiv_optab->libcall_gen = gen_int_unsigned_fixed_libfunc;
6411 usdiv_optab->libcall_basename = "usdiv";
6412 usdiv_optab->libcall_suffix = '3';
6413 usdiv_optab->libcall_gen = gen_unsigned_fixed_libfunc;
6414 sdivmod_optab->libcall_basename = "divmod";
6415 sdivmod_optab->libcall_suffix = '4';
6416 sdivmod_optab->libcall_gen = gen_int_libfunc;
6417 udivmod_optab->libcall_basename = "udivmod";
6418 udivmod_optab->libcall_suffix = '4';
6419 udivmod_optab->libcall_gen = gen_int_libfunc;
6420 smod_optab->libcall_basename = "mod";
6421 smod_optab->libcall_suffix = '3';
6422 smod_optab->libcall_gen = gen_int_libfunc;
6423 umod_optab->libcall_basename = "umod";
6424 umod_optab->libcall_suffix = '3';
6425 umod_optab->libcall_gen = gen_int_libfunc;
6426 ftrunc_optab->libcall_basename = "ftrunc";
6427 ftrunc_optab->libcall_suffix = '2';
6428 ftrunc_optab->libcall_gen = gen_fp_libfunc;
6429 and_optab->libcall_basename = "and";
6430 and_optab->libcall_suffix = '3';
6431 and_optab->libcall_gen = gen_int_libfunc;
6432 ior_optab->libcall_basename = "ior";
6433 ior_optab->libcall_suffix = '3';
6434 ior_optab->libcall_gen = gen_int_libfunc;
6435 xor_optab->libcall_basename = "xor";
6436 xor_optab->libcall_suffix = '3';
6437 xor_optab->libcall_gen = gen_int_libfunc;
6438 ashl_optab->libcall_basename = "ashl";
6439 ashl_optab->libcall_suffix = '3';
6440 ashl_optab->libcall_gen = gen_int_fixed_libfunc;
6441 ssashl_optab->libcall_basename = "ssashl";
6442 ssashl_optab->libcall_suffix = '3';
6443 ssashl_optab->libcall_gen = gen_signed_fixed_libfunc;
6444 usashl_optab->libcall_basename = "usashl";
6445 usashl_optab->libcall_suffix = '3';
6446 usashl_optab->libcall_gen = gen_unsigned_fixed_libfunc;
6447 ashr_optab->libcall_basename = "ashr";
6448 ashr_optab->libcall_suffix = '3';
6449 ashr_optab->libcall_gen = gen_int_signed_fixed_libfunc;
6450 lshr_optab->libcall_basename = "lshr";
6451 lshr_optab->libcall_suffix = '3';
6452 lshr_optab->libcall_gen = gen_int_unsigned_fixed_libfunc;
6453 smin_optab->libcall_basename = "min";
6454 smin_optab->libcall_suffix = '3';
6455 smin_optab->libcall_gen = gen_int_fp_libfunc;
6456 smax_optab->libcall_basename = "max";
6457 smax_optab->libcall_suffix = '3';
6458 smax_optab->libcall_gen = gen_int_fp_libfunc;
6459 umin_optab->libcall_basename = "umin";
6460 umin_optab->libcall_suffix = '3';
6461 umin_optab->libcall_gen = gen_int_libfunc;
6462 umax_optab->libcall_basename = "umax";
6463 umax_optab->libcall_suffix = '3';
6464 umax_optab->libcall_gen = gen_int_libfunc;
6465 neg_optab->libcall_basename = "neg";
6466 neg_optab->libcall_suffix = '2';
6467 neg_optab->libcall_gen = gen_int_fp_fixed_libfunc;
6468 ssneg_optab->libcall_basename = "ssneg";
6469 ssneg_optab->libcall_suffix = '2';
6470 ssneg_optab->libcall_gen = gen_signed_fixed_libfunc;
6471 usneg_optab->libcall_basename = "usneg";
6472 usneg_optab->libcall_suffix = '2';
6473 usneg_optab->libcall_gen = gen_unsigned_fixed_libfunc;
6474 negv_optab->libcall_basename = "neg";
6475 negv_optab->libcall_suffix = '2';
6476 negv_optab->libcall_gen = gen_intv_fp_libfunc;
6477 one_cmpl_optab->libcall_basename = "one_cmpl";
6478 one_cmpl_optab->libcall_suffix = '2';
6479 one_cmpl_optab->libcall_gen = gen_int_libfunc;
6480 ffs_optab->libcall_basename = "ffs";
6481 ffs_optab->libcall_suffix = '2';
6482 ffs_optab->libcall_gen = gen_int_libfunc;
6483 clz_optab->libcall_basename = "clz";
6484 clz_optab->libcall_suffix = '2';
6485 clz_optab->libcall_gen = gen_int_libfunc;
6486 ctz_optab->libcall_basename = "ctz";
6487 ctz_optab->libcall_suffix = '2';
6488 ctz_optab->libcall_gen = gen_int_libfunc;
6489 popcount_optab->libcall_basename = "popcount";
6490 popcount_optab->libcall_suffix = '2';
6491 popcount_optab->libcall_gen = gen_int_libfunc;
6492 parity_optab->libcall_basename = "parity";
6493 parity_optab->libcall_suffix = '2';
6494 parity_optab->libcall_gen = gen_int_libfunc;
6496 /* Comparison libcalls for integers MUST come in pairs,
6498 cmp_optab->libcall_basename = "cmp";
6499 cmp_optab->libcall_suffix = '2';
6500 cmp_optab->libcall_gen = gen_int_fp_fixed_libfunc;
6501 ucmp_optab->libcall_basename = "ucmp";
6502 ucmp_optab->libcall_suffix = '2';
6503 ucmp_optab->libcall_gen = gen_int_libfunc;
6505 /* EQ etc are floating point only. */
6506 eq_optab->libcall_basename = "eq";
6507 eq_optab->libcall_suffix = '2';
6508 eq_optab->libcall_gen = gen_fp_libfunc;
6509 ne_optab->libcall_basename = "ne";
6510 ne_optab->libcall_suffix = '2';
6511 ne_optab->libcall_gen = gen_fp_libfunc;
6512 gt_optab->libcall_basename = "gt";
6513 gt_optab->libcall_suffix = '2';
6514 gt_optab->libcall_gen = gen_fp_libfunc;
6515 ge_optab->libcall_basename = "ge";
6516 ge_optab->libcall_suffix = '2';
6517 ge_optab->libcall_gen = gen_fp_libfunc;
6518 lt_optab->libcall_basename = "lt";
6519 lt_optab->libcall_suffix = '2';
6520 lt_optab->libcall_gen = gen_fp_libfunc;
6521 le_optab->libcall_basename = "le";
6522 le_optab->libcall_suffix = '2';
6523 le_optab->libcall_gen = gen_fp_libfunc;
6524 unord_optab->libcall_basename = "unord";
6525 unord_optab->libcall_suffix = '2';
6526 unord_optab->libcall_gen = gen_fp_libfunc;
6528 powi_optab->libcall_basename = "powi";
6529 powi_optab->libcall_suffix = '2';
6530 powi_optab->libcall_gen = gen_fp_libfunc;
/* Names and generators for the conversion libcalls.  */
6533 sfloat_optab->libcall_basename = "float";
6534 sfloat_optab->libcall_gen = gen_int_to_fp_conv_libfunc;
6535 ufloat_optab->libcall_gen = gen_ufloat_conv_libfunc;
6536 sfix_optab->libcall_basename = "fix";
6537 sfix_optab->libcall_gen = gen_fp_to_int_conv_libfunc;
6538 ufix_optab->libcall_basename = "fixuns";
6539 ufix_optab->libcall_gen = gen_fp_to_int_conv_libfunc;
6540 lrint_optab->libcall_basename = "lrint";
6541 lrint_optab->libcall_gen = gen_int_to_fp_nondecimal_conv_libfunc;
6542 lround_optab->libcall_basename = "lround";
6543 lround_optab->libcall_gen = gen_int_to_fp_nondecimal_conv_libfunc;
6544 lfloor_optab->libcall_basename = "lfloor";
6545 lfloor_optab->libcall_gen = gen_int_to_fp_nondecimal_conv_libfunc;
6546 lceil_optab->libcall_basename = "lceil";
6547 lceil_optab->libcall_gen = gen_int_to_fp_nondecimal_conv_libfunc;
6549 /* trunc_optab is also used for FLOAT_EXTEND. */
6550 sext_optab->libcall_basename = "extend";
6551 sext_optab->libcall_gen = gen_extend_conv_libfunc;
6552 trunc_optab->libcall_basename = "trunc";
6553 trunc_optab->libcall_gen = gen_trunc_conv_libfunc;
6555 /* Conversions for fixed-point modes and other modes. */
6556 fract_optab->libcall_basename = "fract";
6557 fract_optab->libcall_gen = gen_fract_conv_libfunc;
6558 satfract_optab->libcall_basename = "satfract";
6559 satfract_optab->libcall_gen = gen_satfract_conv_libfunc;
6560 fractuns_optab->libcall_basename = "fractuns";
6561 fractuns_optab->libcall_gen = gen_fractuns_conv_libfunc;
6562 satfractuns_optab->libcall_basename = "satfractuns";
6563 satfractuns_optab->libcall_gen = gen_satfractuns_conv_libfunc;
6565 /* The ffs function operates on `int'. Fall back on it if we do not
6566 have a libgcc2 function for that width. */
6567 if (INT_TYPE_SIZE < BITS_PER_WORD)
6569 int_mode = mode_for_size (INT_TYPE_SIZE, MODE_INT, 0);
6570 set_optab_libfunc (ffs_optab, mode_for_size (INT_TYPE_SIZE, MODE_INT, 0),
6574 /* Explicitly initialize the bswap libfuncs since we need them to be
6575 valid for things other than word_mode. */
6576 set_optab_libfunc (bswap_optab, SImode, "__bswapsi2");
6577 set_optab_libfunc (bswap_optab, DImode, "__bswapdi2");
6579 /* Use cabs for double complex abs, since systems generally have cabs.
6580 Don't define any libcall for float complex, so that cabs will be used. */
6581 if (complex_double_type_node)
6582 set_optab_libfunc (abs_optab, TYPE_MODE (complex_double_type_node), "cabs");
/* Miscellaneous support libcalls referenced by name.  */
6584 abort_libfunc = init_one_libfunc ("abort");
6585 memcpy_libfunc = init_one_libfunc ("memcpy");
6586 memmove_libfunc = init_one_libfunc ("memmove");
6587 memcmp_libfunc = init_one_libfunc ("memcmp");
6588 memset_libfunc = init_one_libfunc ("memset");
6589 setbits_libfunc = init_one_libfunc ("__setbits");
6591 #ifndef DONT_USE_BUILTIN_SETJMP
6592 setjmp_libfunc = init_one_libfunc ("__builtin_setjmp");
6593 longjmp_libfunc = init_one_libfunc ("__builtin_longjmp");
6595 setjmp_libfunc = init_one_libfunc ("setjmp");
6596 longjmp_libfunc = init_one_libfunc ("longjmp");
6598 unwind_sjlj_register_libfunc = init_one_libfunc ("_Unwind_SjLj_Register");
6599 unwind_sjlj_unregister_libfunc
6600 = init_one_libfunc ("_Unwind_SjLj_Unregister");
6602 /* For function entry/exit instrumentation. */
6603 profile_function_entry_libfunc
6604 = init_one_libfunc ("__cyg_profile_func_enter");
6605 profile_function_exit_libfunc
6606 = init_one_libfunc ("__cyg_profile_func_exit");
6608 gcov_flush_libfunc = init_one_libfunc ("__gcov_flush");
6610 if (HAVE_conditional_trap)
6611 trap_rtx = gen_rtx_fmt_ee (EQ, VOIDmode, NULL_RTX, NULL_RTX);
6613 /* Allow the target to add more libcalls or rename some, etc. */
6614 targetm.init_libfuncs ();
6619 /* Print information about the current contents of the optabs on
6623 debug_optab_libfuncs (void)
6629 /* Dump the arithmetic optabs. */
6630 for (i = 0; i != (int) OTI_MAX; i++)
6631 for (j = 0; j < NUM_MACHINE_MODES; ++j)
6636 o = &optab_table[i];
6637 l = optab_libfunc (o, j);
/* Every installed libfunc must be a SYMBOL_REF.  */
6640 gcc_assert (GET_CODE (l) == SYMBOL_REF);
6641 fprintf (stderr, "%s\t%s:\t%s\n",
6642 GET_RTX_NAME (o->code),
6648 /* Dump the conversion optabs. */
6649 for (i = 0; i < (int) COI_MAX; ++i)
6650 for (j = 0; j < NUM_MACHINE_MODES; ++j)
/* Conversion optabs are indexed by a pair of modes (to, from).  */
6651 for (k = 0; k < NUM_MACHINE_MODES; ++k)
6656 o = &convert_optab_table[i];
6657 l = convert_optab_libfunc (o, j, k);
6660 gcc_assert (GET_CODE (l) == SYMBOL_REF);
6661 fprintf (stderr, "%s\t%s\t%s:\t%s\n",
6662 GET_RTX_NAME (o->code),
6671 /* Generate insns to trap with code TCODE if OP1 and OP2 satisfy condition
6672 CODE. Return 0 on failure. */
6675 gen_cond_trap (enum rtx_code code ATTRIBUTE_UNUSED, rtx op1,
6676 rtx op2 ATTRIBUTE_UNUSED, rtx tcode ATTRIBUTE_UNUSED)
6678 enum machine_mode mode = GET_MODE (op1);
6679 enum insn_code icode;
/* Bail out when the target has no conditional-trap pattern at all.  */
6682 if (!HAVE_conditional_trap)
/* VOIDmode means there is no usable mode to compare in.  */
6685 if (mode == VOIDmode)
/* A cmp pattern for MODE is needed to set the condition first.  */
6688 icode = optab_handler (cmp_optab, mode)->insn_code;
6689 if (icode == CODE_FOR_nothing)
/* Legitimize both operands for the compare pattern's predicates.  */
6693 op1 = prepare_operand (icode, op1, 0, mode, mode, 0);
6694 op2 = prepare_operand (icode, op2, 1, mode, mode, 0);
6700 emit_insn (GEN_FCN (icode) (op1, op2));
/* Reuse the shared trap_rtx, patching in the requested comparison CODE.  */
6702 PUT_CODE (trap_rtx, code);
6703 gcc_assert (HAVE_conditional_trap);
6704 insn = gen_conditional_trap (trap_rtx, tcode);
6708 insn = get_insns ();
6715 /* Return rtx code for TCODE. Use UNSIGNEDP to select signed
6716 or unsigned operation code. */
6718 static enum rtx_code
6719 get_rtx_code (enum tree_code tcode, bool unsignedp)
/* Map each tree comparison code onto the matching RTL comparison,
   choosing the unsigned variant when UNSIGNEDP is set.  */
6731 code = unsignedp ? LTU : LT;
6734 code = unsignedp ? LEU : LE;
6737 code = unsignedp ? GTU : GT;
6740 code = unsignedp ? GEU : GE;
6743 case UNORDERED_EXPR:
6774 /* Return comparison rtx for COND. Use UNSIGNEDP to select signed or
6775 unsigned operators. Do not generate compare instruction. */
6778 vector_compare_rtx (tree cond, bool unsignedp, enum insn_code icode)
6780 enum rtx_code rcode;
6782 rtx rtx_op0, rtx_op1;
6784 /* This is unlikely. While generating VEC_COND_EXPR, auto vectorizer
6785 ensures that condition is a relational operation. */
6786 gcc_assert (COMPARISON_CLASS_P (cond));
6788 rcode = get_rtx_code (TREE_CODE (cond), unsignedp);
6789 t_op0 = TREE_OPERAND (cond, 0);
6790 t_op1 = TREE_OPERAND (cond, 1);
6792 /* Expand operands. */
6793 rtx_op0 = expand_expr (t_op0, NULL_RTX, TYPE_MODE (TREE_TYPE (t_op0)),
6795 rtx_op1 = expand_expr (t_op1, NULL_RTX, TYPE_MODE (TREE_TYPE (t_op1)),
/* Force the operands into registers when they do not satisfy the vcond
   pattern's comparison-operand predicates (operands 4 and 5).  */
6798 if (!insn_data[icode].operand[4].predicate (rtx_op0, GET_MODE (rtx_op0))
6799 && GET_MODE (rtx_op0) != VOIDmode)
6800 rtx_op0 = force_reg (GET_MODE (rtx_op0), rtx_op0);
6802 if (!insn_data[icode].operand[5].predicate (rtx_op1, GET_MODE (rtx_op1))
6803 && GET_MODE (rtx_op1) != VOIDmode)
6804 rtx_op1 = force_reg (GET_MODE (rtx_op1), rtx_op1);
/* Build the bare comparison rtx; no compare insn is emitted here.  */
6806 return gen_rtx_fmt_ee (rcode, VOIDmode, rtx_op0, rtx_op1);
6809 /* Return insn code for VEC_COND_EXPR EXPR. */
6811 static inline enum insn_code
6812 get_vcond_icode (tree expr, enum machine_mode mode)
6814 enum insn_code icode = CODE_FOR_nothing;
6816 if (TYPE_UNSIGNED (TREE_TYPE (expr)))
6817 icode = vcondu_gen_code[mode];
6819 icode = vcond_gen_code[mode];
6823 /* Return TRUE iff, appropriate vector insns are available
6824 for vector cond expr expr in VMODE mode. */
6827 expand_vec_cond_expr_p (tree expr, enum machine_mode vmode)
6829 if (get_vcond_icode (expr, vmode) == CODE_FOR_nothing)
6834 /* Generate insns for VEC_COND_EXPR. */
6837 expand_vec_cond_expr (tree vec_cond_expr, rtx target)
6839 enum insn_code icode;
6840 rtx comparison, rtx_op1, rtx_op2, cc_op0, cc_op1;
6841 enum machine_mode mode = TYPE_MODE (TREE_TYPE (vec_cond_expr));
6842 bool unsignedp = TYPE_UNSIGNED (TREE_TYPE (vec_cond_expr));
/* Give up when the target has no vcond/vcondu pattern for MODE.  */
6844 icode = get_vcond_icode (vec_cond_expr, mode);
6845 if (icode == CODE_FOR_nothing)
/* Use a fresh pseudo when TARGET is absent or rejected by the pattern.  */
6848 if (!target || !insn_data[icode].operand[0].predicate (target, mode))
6849 target = gen_reg_rtx (mode);
6851 /* Get comparison rtx. First expand both cond expr operands. */
6852 comparison = vector_compare_rtx (TREE_OPERAND (vec_cond_expr, 0),
6854 cc_op0 = XEXP (comparison, 0);
6855 cc_op1 = XEXP (comparison, 1);
6856 /* Expand both operands and force them in reg, if required. */
6857 rtx_op1 = expand_normal (TREE_OPERAND (vec_cond_expr, 1));
6858 if (!insn_data[icode].operand[1].predicate (rtx_op1, mode)
6859 && mode != VOIDmode)
6860 rtx_op1 = force_reg (mode, rtx_op1);
6862 rtx_op2 = expand_normal (TREE_OPERAND (vec_cond_expr, 2));
6863 if (!insn_data[icode].operand[2].predicate (rtx_op2, mode)
6864 && mode != VOIDmode)
6865 rtx_op2 = force_reg (mode, rtx_op2);
6867 /* Emit instruction! */
6868 emit_insn (GEN_FCN (icode) (target, rtx_op1, rtx_op2,
6869 comparison, cc_op0, cc_op1));
6875 /* This is an internal subroutine of the other compare_and_swap expanders.
6876 MEM, OLD_VAL and NEW_VAL are as you'd expect for a compare-and-swap
6877 operation. TARGET is an optional place to store the value result of
6878 the operation. ICODE is the particular instruction to expand. Return
6879 the result of the operation. */
6882 expand_val_compare_and_swap_1 (rtx mem, rtx old_val, rtx new_val,
6883 rtx target, enum insn_code icode)
6885 enum machine_mode mode = GET_MODE (mem);
/* Use a fresh pseudo when TARGET is absent or rejected by the pattern.  */
6888 if (!target || !insn_data[icode].operand[0].predicate (target, mode))
6889 target = gen_reg_rtx (mode);
/* Widen/narrow OLD_VAL to MEM's mode, then satisfy operand 2's predicate.  */
6891 if (GET_MODE (old_val) != VOIDmode && GET_MODE (old_val) != mode)
6892 old_val = convert_modes (mode, GET_MODE (old_val), old_val, 1);
6893 if (!insn_data[icode].operand[2].predicate (old_val, mode))
6894 old_val = force_reg (mode, old_val);
/* Likewise for NEW_VAL against operand 3.  */
6896 if (GET_MODE (new_val) != VOIDmode && GET_MODE (new_val) != mode)
6897 new_val = convert_modes (mode, GET_MODE (new_val), new_val, 1);
6898 if (!insn_data[icode].operand[3].predicate (new_val, mode))
6899 new_val = force_reg (mode, new_val);
/* Generate the pattern; it may decline by returning NULL_RTX.  */
6901 insn = GEN_FCN (icode) (target, mem, old_val, new_val);
6902 if (insn == NULL_RTX)
6909 /* Expand a compare-and-swap operation and return its value. */
6912 expand_val_compare_and_swap (rtx mem, rtx old_val, rtx new_val, rtx target)
6914 enum machine_mode mode = GET_MODE (mem);
6915 enum insn_code icode = sync_compare_and_swap[mode];
6917 if (icode == CODE_FOR_nothing)
6920 return expand_val_compare_and_swap_1 (mem, old_val, new_val, target, icode);
6923 /* Expand a compare-and-swap operation and store true into the result if
6924 the operation was successful and false otherwise. Return the result.
6925 Unlike other routines, TARGET is not optional. */
6928 expand_bool_compare_and_swap (rtx mem, rtx old_val, rtx new_val, rtx target)
6930 enum machine_mode mode = GET_MODE (mem);
6931 enum insn_code icode;
6932 rtx subtarget, label0, label1;
6934 /* If the target supports a compare-and-swap pattern that simultaneously
6935 sets some flag for success, then use it. Otherwise use the regular
6936 compare-and-swap and follow that immediately with a compare insn. */
6937 icode = sync_compare_and_swap_cc[mode];
6941 subtarget = expand_val_compare_and_swap_1 (mem, old_val, new_val,
6943 if (subtarget != NULL_RTX)
/* No flag-setting variant: fall back to the plain CAS pattern.  */
6947 case CODE_FOR_nothing:
6948 icode = sync_compare_and_swap[mode];
6949 if (icode == CODE_FOR_nothing)
6952 /* Ensure that if old_val == mem, that we're not comparing
6953 against an old value. */
6954 if (MEM_P (old_val))
6955 old_val = force_reg (mode, old_val);
6957 subtarget = expand_val_compare_and_swap_1 (mem, old_val, new_val,
6959 if (subtarget == NULL_RTX)
/* Compare the CAS result against OLD_VAL to derive the success flag.  */
6962 emit_cmp_insn (subtarget, old_val, EQ, const0_rtx, mode, true);
6965 /* If the target has a sane STORE_FLAG_VALUE, then go ahead and use a
6966 setcc instruction from the beginning. We don't work too hard here,
6967 but it's nice to not be stupid about initial code gen either. */
6968 if (STORE_FLAG_VALUE == 1)
6970 icode = setcc_gen_code[EQ];
6971 if (icode != CODE_FOR_nothing)
6973 enum machine_mode cmode = insn_data[icode].operand[0].mode;
/* The setcc pattern may demand a mode other than TARGET's.  */
6977 if (!insn_data[icode].operand[0].predicate (target, cmode))
6978 subtarget = gen_reg_rtx (cmode);
6980 insn = GEN_FCN (icode) (subtarget);
/* Convert the flag into TARGET's mode when they differ.  */
6984 if (GET_MODE (target) != GET_MODE (subtarget))
6986 convert_move (target, subtarget, 1);
6994 /* Without an appropriate setcc instruction, use a set of branches to
6995 get 1 and 0 stored into target. Presumably if the target has a
6996 STORE_FLAG_VALUE that isn't 1, then this will get cleaned up by ifcvt. */
6998 label0 = gen_label_rtx ();
6999 label1 = gen_label_rtx ();
7001 emit_jump_insn (bcc_gen_fctn[EQ] (label0));
7002 emit_move_insn (target, const0_rtx);
7003 emit_jump_insn (gen_jump (label1));
7005 emit_label (label0);
7006 emit_move_insn (target, const1_rtx);
7007 emit_label (label1);
7012 /* This is a helper function for the other atomic operations. This function
7013 emits a loop that contains SEQ that iterates until a compare-and-swap
7014 operation at the end succeeds. MEM is the memory to be modified. SEQ is
7015 a set of instructions that takes a value from OLD_REG as an input and
7016 produces a value in NEW_REG as an output. Before SEQ, OLD_REG will be
7017 set to the current contents of MEM. After SEQ, a compare-and-swap will
7018 attempt to update MEM with NEW_REG. The function returns true when the
7019 loop was generated successfully. */
7022 expand_compare_and_swap_loop (rtx mem, rtx old_reg, rtx new_reg, rtx seq)
7024 enum machine_mode mode = GET_MODE (mem);
7025 enum insn_code icode;
7026 rtx label, cmp_reg, subtarget;
7028 /* The loop we want to generate looks like
7034 cmp_reg = compare-and-swap(mem, old_reg, new_reg)
7035 if (cmp_reg != old_reg)
7038 Note that we only do the plain load from memory once. Subsequent
7039 iterations use the value loaded by the compare-and-swap pattern. */
7041 label = gen_label_rtx ();
7042 cmp_reg = gen_reg_rtx (mode);
/* Initial (non-atomic) load of MEM; later iterations reuse the value
   the compare-and-swap itself returned.  */
7044 emit_move_insn (cmp_reg, mem);
7046 emit_move_insn (old_reg, cmp_reg);
7050 /* If the target supports a compare-and-swap pattern that simultaneously
7051 sets some flag for success, then use it. Otherwise use the regular
7052 compare-and-swap and follow that immediately with a compare insn. */
7053 icode = sync_compare_and_swap_cc[mode];
7057 subtarget = expand_val_compare_and_swap_1 (mem, old_reg, new_reg,
7059 if (subtarget != NULL_RTX)
/* The _cc worker was asked to write into cmp_reg directly.  */
7061 gcc_assert (subtarget == cmp_reg);
7066 case CODE_FOR_nothing:
7067 icode = sync_compare_and_swap[mode];
7068 if (icode == CODE_FOR_nothing)
7071 subtarget = expand_val_compare_and_swap_1 (mem, old_reg, new_reg,
7073 if (subtarget == NULL_RTX)
/* The pattern may have produced its result elsewhere; copy it into
   cmp_reg so the loop-back compare below is uniform.  */
7075 if (subtarget != cmp_reg)
7076 emit_move_insn (cmp_reg, subtarget);
/* Compare the value actually found in MEM with what we expected;
   inequality means another thread intervened and we must retry.  */
7078 emit_cmp_insn (cmp_reg, old_reg, EQ, const0_rtx, mode, true);
7081 /* ??? Mark this jump predicted not taken? */
7082 emit_jump_insn (bcc_gen_fctn[NE] (label));
7087 /* This function generates the atomic operation MEM CODE= VAL. In this
7088 case, we do not care about any resulting value. Returns NULL if we
7089 cannot generate the operation. */
7092 expand_sync_operation (rtx mem, rtx val, enum rtx_code code)
7094 enum machine_mode mode = GET_MODE (mem);
7095 enum insn_code icode;
7098 /* Look to see if the target supports the operation directly. */
7102 icode = sync_add_optab[mode];
7105 icode = sync_ior_optab[mode];
7108 icode = sync_xor_optab[mode];
7111 icode = sync_and_optab[mode];
7114 icode = sync_nand_optab[mode];
/* MINUS: prefer the target's sync_sub pattern, but when it is absent
   (or VAL is a constant, where negation is free) rewrite the
   subtraction as an atomic addition of -VAL.  */
7118 icode = sync_sub_optab[mode];
7119 if (icode == CODE_FOR_nothing || CONST_INT_P (val))
7121 icode = sync_add_optab[mode];
7122 if (icode != CODE_FOR_nothing)
7124 val = expand_simple_unop (mode, NEG, val, NULL_RTX, 1);
7134 /* Generate the direct operation, if present. */
7135 if (icode != CODE_FOR_nothing)
/* Coerce VAL to MEM's mode and to something the pattern's operand
   predicate accepts before constructing the insn.  */
7137 if (GET_MODE (val) != VOIDmode && GET_MODE (val) != mode)
7138 val = convert_modes (mode, GET_MODE (val), val, 1);
7139 if (!insn_data[icode].operand[1].predicate (val, mode))
7140 val = force_reg (mode, val);
7142 insn = GEN_FCN (icode) (mem, val);
7150 /* Failing that, generate a compare-and-swap loop in which we perform the
7151 operation with normal arithmetic instructions. */
7152 if (sync_compare_and_swap[mode] != CODE_FOR_nothing)
7154 rtx t0 = gen_reg_rtx (mode), t1;
/* NAND is open-coded as NOT then the binary op.  NOTE(review): the
   surrounding sequence start/end and the NAND/other-op branching are
   not fully visible in this view -- confirm against full source.  */
7161 t1 = expand_simple_unop (mode, NOT, t1, NULL_RTX, true);
7164 t1 = expand_simple_binop (mode, code, t1, val, NULL_RTX,
7165 true, OPTAB_LIB_WIDEN);
7167 insn = get_insns ();
7170 if (t1 != NULL && expand_compare_and_swap_loop (mem, t0, t1, insn))
7177 /* This function generates the atomic operation MEM CODE= VAL. In this
7178 case, we do care about the resulting value: if AFTER is true then
7179 return the value MEM holds after the operation, if AFTER is false
7180 then return the value MEM holds before the operation. TARGET is an
7181 optional place for the result value to be stored. */
7184 expand_sync_fetch_operation (rtx mem, rtx val, enum rtx_code code,
7185 bool after, rtx target)
7187 enum machine_mode mode = GET_MODE (mem);
7188 enum insn_code old_code, new_code, icode;
7192 /* Look to see if the target supports the operation directly. */
/* For each rtx code collect both the fetch-old (sync_old_*) and
   fetch-new (sync_new_*) pattern codes; which one we use depends on
   AFTER and on what the target provides.  */
7196 old_code = sync_old_add_optab[mode];
7197 new_code = sync_new_add_optab[mode];
7200 old_code = sync_old_ior_optab[mode];
7201 new_code = sync_new_ior_optab[mode];
7204 old_code = sync_old_xor_optab[mode];
7205 new_code = sync_new_xor_optab[mode];
7208 old_code = sync_old_and_optab[mode];
7209 new_code = sync_new_and_optab[mode];
7212 old_code = sync_old_nand_optab[mode];
7213 new_code = sync_new_nand_optab[mode];
/* MINUS: as in expand_sync_operation, fall back to adding -VAL when
   neither subtract pattern exists or VAL is a constant.  */
7217 old_code = sync_old_sub_optab[mode];
7218 new_code = sync_new_sub_optab[mode];
7219 if ((old_code == CODE_FOR_nothing && new_code == CODE_FOR_nothing)
7220 || CONST_INT_P (val))
7222 old_code = sync_old_add_optab[mode];
7223 new_code = sync_new_add_optab[mode];
7224 if (old_code != CODE_FOR_nothing || new_code != CODE_FOR_nothing)
7226 val = expand_simple_unop (mode, NEG, val, NULL_RTX, 1);
7236 /* If the target does supports the proper new/old operation, great. But
7237 if we only support the opposite old/new operation, check to see if we
7238 can compensate. In the case in which the old value is supported, then
7239 we can always perform the operation again with normal arithmetic. In
7240 the case in which the new value is supported, then we can only handle
7241 this in the case the operation is reversible. */
7246 if (icode == CODE_FOR_nothing)
7249 if (icode != CODE_FOR_nothing)
/* Only PLUS, MINUS and XOR can be undone to recover the pre-op value
   from the post-op value.  */
7256 if (icode == CODE_FOR_nothing
7257 && (code == PLUS || code == MINUS || code == XOR))
7260 if (icode != CODE_FOR_nothing)
7265 /* If we found something supported, great. */
7266 if (icode != CODE_FOR_nothing)
7268 if (!target || !insn_data[icode].operand[0].predicate (target, mode))
7269 target = gen_reg_rtx (mode);
7271 if (GET_MODE (val) != VOIDmode && GET_MODE (val) != mode)
7272 val = convert_modes (mode, GET_MODE (val), val, 1);
7273 if (!insn_data[icode].operand[2].predicate (val, mode))
7274 val = force_reg (mode, val);
7276 insn = GEN_FCN (icode) (target, mem, val);
7281 /* If we need to compensate for using an operation with the
7282 wrong return value, do so now. */
/* NOTE(review): the compensation branching (redo vs. reverse) is only
   partially visible here; MINUS reversal presumably flips to PLUS.  */
7289 else if (code == MINUS)
7294 target = expand_simple_unop (mode, NOT, target, NULL_RTX, true);
7295 target = expand_simple_binop (mode, code, target, val, NULL_RTX,
7296 true, OPTAB_LIB_WIDEN);
7303 /* Failing that, generate a compare-and-swap loop in which we perform the
7304 operation with normal arithmetic instructions. */
7305 if (sync_compare_and_swap[mode] != CODE_FOR_nothing)
7307 rtx t0 = gen_reg_rtx (mode), t1;
7309 if (!target || !register_operand (target, mode))
7310 target = gen_reg_rtx (mode);
/* For the fetch-old case, capture the pre-operation value before SEQ
   computes the new one.  */
7315 emit_move_insn (target, t0);
7319 t1 = expand_simple_unop (mode, NOT, t1, NULL_RTX, true);
7322 t1 = expand_simple_binop (mode, code, t1, val, NULL_RTX,
7323 true, OPTAB_LIB_WIDEN);
/* For the fetch-new case, the result is the freshly computed value.  */
7325 emit_move_insn (target, t1);
7327 insn = get_insns ();
7330 if (t1 != NULL && expand_compare_and_swap_loop (mem, t0, t1, insn))
7337 /* This function expands a test-and-set operation. Ideally we atomically
7338 store VAL in MEM and return the previous value in MEM. Some targets
7339 may not support this operation and only support VAL with the constant 1;
7340 in this case while the return value will be 0/1, but the exact value
7341 stored in MEM is target defined. TARGET is an option place to stick
7342 the return value. */
7345 expand_sync_lock_test_and_set (rtx mem, rtx val, rtx target)
7347 enum machine_mode mode = GET_MODE (mem);
7348 enum insn_code icode;
7351 /* If the target supports the test-and-set directly, great. */
7352 icode = sync_lock_test_and_set[mode];
7353 if (icode != CODE_FOR_nothing)
/* Make TARGET and VAL acceptable to the pattern's operand predicates,
   converting VAL to MEM's mode first.  */
7355 if (!target || !insn_data[icode].operand[0].predicate (target, mode))
7356 target = gen_reg_rtx (mode);
7358 if (GET_MODE (val) != VOIDmode && GET_MODE (val) != mode)
7359 val = convert_modes (mode, GET_MODE (val), val, 1);
7360 if (!insn_data[icode].operand[2].predicate (val, mode))
7361 val = force_reg (mode, val);
7363 insn = GEN_FCN (icode) (target, mem, val);
7371 /* Otherwise, use a compare-and-swap loop for the exchange. */
/* An exchange is a CAS loop whose "new value" is simply VAL; the loop
   leaves the previous contents of MEM in TARGET.  */
7372 if (sync_compare_and_swap[mode] != CODE_FOR_nothing)
7374 if (!target || !register_operand (target, mode))
7375 target = gen_reg_rtx (mode);
7376 if (GET_MODE (val) != VOIDmode && GET_MODE (val) != mode)
7377 val = convert_modes (mode, GET_MODE (val), val, 1);
7378 if (expand_compare_and_swap_loop (mem, target, val, NULL_RTX))
7385 #include "gt-optabs.h"