1 /* Expand the basic unary and binary arithmetic operations, for GNU compiler.
2 Copyright (C) 1987, 1988, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
3 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008
4 Free Software Foundation, Inc.
6 This file is part of GCC.
8 GCC is free software; you can redistribute it and/or modify it under
9 the terms of the GNU General Public License as published by the Free
10 Software Foundation; either version 3, or (at your option) any later
13 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
14 WARRANTY; without even the implied warranty of MERCHANTABILITY or
15 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING3. If not see
20 <http://www.gnu.org/licenses/>. */
25 #include "coretypes.h"
29 /* Include insn-config.h before expr.h so that HAVE_conditional_move
30 is properly defined. */
31 #include "insn-config.h"
45 #include "basic-block.h"
48 /* Each optab contains info on how this target machine
49 can perform a particular operation
50 for all sizes and kinds of operands.
52 The operation to be performed is often specified
53 by passing one of these optabs as an argument.
55 See expr.h for documentation of these optabs. */
57 #if GCC_VERSION >= 4000
/* With GCC >= 4.0 the tables can be statically initialized using the
   designated range-initializer extension (the initializer value itself is
   elided in this listing -- presumably CODE_FOR_nothing; verify against
   the full file).  */
58 __extension__ struct optab optab_table[OTI_MAX]
59 = { [0 ... OTI_MAX - 1].handlers[0 ... NUM_MACHINE_MODES - 1].insn_code
62 /* init_insn_codes will do runtime initialization otherwise. */
63 struct optab optab_table[OTI_MAX];
/* Table of libcall rtxes, one slot per LTI_* index.  */
66 rtx libfunc_table[LTI_MAX];
68 /* Tables of patterns for converting one mode to another. */
69 #if GCC_VERSION >= 4000
70 __extension__ struct convert_optab convert_optab_table[COI_MAX]
71 = { [0 ... COI_MAX - 1].handlers[0 ... NUM_MACHINE_MODES - 1]
72 [0 ... NUM_MACHINE_MODES - 1].insn_code
75 /* init_convert_optab will do runtime initialization otherwise. */
76 struct convert_optab convert_optab_table[COI_MAX];
79 /* Contains the optab used for each rtx code. */
80 optab code_to_optab[NUM_RTX_CODE + 1];
82 /* Indexed by the rtx-code for a conditional (eg. EQ, LT,...)
83 gives the gen_function to make a branch to test that condition. */
85 rtxfun bcc_gen_fctn[NUM_RTX_CODE];
87 /* Indexed by the rtx-code for a conditional (eg. EQ, LT,...)
88 gives the insn code to make a store-condition insn
89 to test that condition. */
91 enum insn_code setcc_gen_code[NUM_RTX_CODE];
93 #ifdef HAVE_conditional_move
94 /* Indexed by the machine mode, gives the insn code to make a conditional
95 move insn. This is not indexed by the rtx-code like bcc_gen_fctn and
96 setcc_gen_code to cut down on the number of named patterns. Consider a day
97 when a lot more rtx codes are conditional (eg: for the ARM). */
99 enum insn_code movcc_gen_code[NUM_MACHINE_MODES];
102 /* Indexed by the machine mode, gives the insn code for vector conditional
105 enum insn_code vcond_gen_code[NUM_MACHINE_MODES];
106 enum insn_code vcondu_gen_code[NUM_MACHINE_MODES];
108 /* The insn generating function can not take an rtx_code argument.
109 TRAP_RTX is used as an rtx argument. Its code is replaced with
110 the code to be used in the trap insn and all other fields are ignored. */
111 static GTY(()) rtx trap_rtx;
/* Forward declarations of file-local helpers.  */
113 static void prepare_float_lib_cmp (rtx *, rtx *, enum rtx_code *,
114 enum machine_mode *, int *);
115 static rtx expand_unop_direct (enum machine_mode, optab, rtx, rtx, int);
117 /* Debug facility for use in GDB. */
118 void debug_optab_libfuncs (void);
/* Dummy fallbacks so references compile on targets with no conditional
   trap pattern; the generator must never actually be invoked there.  */
120 #ifndef HAVE_conditional_trap
121 #define HAVE_conditional_trap 0
122 #define gen_conditional_trap(a,b) (gcc_unreachable (), NULL_RTX)
125 /* Prefixes for the current version of decimal floating point (BID vs. DPD) */
126 #if ENABLE_DECIMAL_BID_FORMAT
127 #define DECIMAL_PREFIX "bid_"
129 #define DECIMAL_PREFIX "dpd_"
133 /* Info about libfunc. We use same hashtable for normal optabs and conversion
134 optab. In the first case mode2 is unused. */
135 struct libfunc_entry GTY(())
138 enum machine_mode mode1, mode2;
142 /* Hash table mapping (optab index, mode1, mode2) keys to cached
   libfunc_entry nodes.  */
143 static GTY((param_is (struct libfunc_entry))) htab_t libfunc_hash;
145 /* Hash function for libfunc_hash.  Combines the entry's two machine modes;
   the elided continuation presumably folds in e->optab as well -- verify
   against the full file. */
148 hash_libfunc (const void *p)
150 const struct libfunc_entry *const e = (const struct libfunc_entry *) p;
152 return (((int) e->mode1 + (int) e->mode2 * NUM_MACHINE_MODES)
156 /* Equality function for libfunc_hash. */
159 eq_libfunc (const void *p, const void *q)
161 const struct libfunc_entry *const e1 = (const struct libfunc_entry *) p;
162 const struct libfunc_entry *const e2 = (const struct libfunc_entry *) q;
/* Two entries are equal iff they describe the same optab applied to the
   same pair of machine modes.  */
164 return (e1->optab == e2->optab
165 && e1->mode1 == e2->mode1
166 && e1->mode2 == e2->mode2);
169 /* Return the libfunc for the operation defined by OPTAB when converting
170 from MODE2 to MODE1. Trigger lazy initialization if needed, return NULL
171 if no libfunc is available. */
173 convert_optab_libfunc (convert_optab optab, enum machine_mode mode1,
174 enum machine_mode mode2)
176 struct libfunc_entry e;
177 struct libfunc_entry **slot;
/* Key the lookup on the optab's index within convert_optab_table.  */
179 e.optab = (size_t) (optab - &convert_optab_table[0]);
182 slot = (struct libfunc_entry **) htab_find_slot (libfunc_hash, &e, NO_INSERT);
/* Cache miss: let the optab generate its libfuncs lazily, then repeat
   the lookup.  */
185 if (optab->libcall_gen)
187 optab->libcall_gen (optab, optab->libcall_basename, mode1, mode2);
188 slot = (struct libfunc_entry **) htab_find_slot (libfunc_hash, &e, NO_INSERT);
190 return (*slot)->libfunc;
196 return (*slot)->libfunc;
199 /* Return the libfunc for the operation defined by OPTAB in MODE.
200 Trigger lazy initialization if needed, return NULL if no libfunc is
203 optab_libfunc (optab optab, enum machine_mode mode)
205 struct libfunc_entry e;
206 struct libfunc_entry **slot;
/* Key the lookup on the optab's index within optab_table; for a
   non-conversion optab only mode1 of the key is meaningful.  */
208 e.optab = (size_t) (optab - &optab_table[0]);
211 slot = (struct libfunc_entry **) htab_find_slot (libfunc_hash, &e, NO_INSERT);
/* Cache miss: generate the libfuncs lazily and retry the lookup.  */
214 if (optab->libcall_gen)
216 optab->libcall_gen (optab, optab->libcall_basename,
217 optab->libcall_suffix, mode);
218 slot = (struct libfunc_entry **) htab_find_slot (libfunc_hash,
221 return (*slot)->libfunc;
227 return (*slot)->libfunc;
231 /* Add a REG_EQUAL note to the last insn in INSNS. TARGET is being set to
232 the result of operation CODE applied to OP0 (and OP1 if it is a binary
235 If the last insn does not set TARGET, don't do anything, but return 1.
237 If a previous insn sets TARGET and TARGET is one of OP0 or OP1,
238 don't add the REG_EQUAL note but return 0. Our caller can then try
239 again, ensuring that TARGET is not one of the operands. */
242 add_equal_note (rtx insns, rtx target, enum rtx_code code, rtx op0, rtx op1)
244 rtx last_insn, insn, set;
247 gcc_assert (insns && INSN_P (insns) && NEXT_INSN (insns));
/* Only arithmetic, comparison and unary rtx classes can be described by
   a REG_EQUAL note built below.  */
249 if (GET_RTX_CLASS (code) != RTX_COMM_ARITH
250 && GET_RTX_CLASS (code) != RTX_BIN_ARITH
251 && GET_RTX_CLASS (code) != RTX_COMM_COMPARE
252 && GET_RTX_CLASS (code) != RTX_COMPARE
253 && GET_RTX_CLASS (code) != RTX_UNARY)
256 if (GET_CODE (target) == ZERO_EXTRACT)
/* Walk forward to the last insn of the sequence.  */
259 for (last_insn = insns;
260 NEXT_INSN (last_insn) != NULL_RTX;
261 last_insn = NEXT_INSN (last_insn))
264 set = single_set (last_insn);
268 if (! rtx_equal_p (SET_DEST (set), target)
269 /* For a STRICT_LOW_PART, the REG_NOTE applies to what is inside it. */
270 && (GET_CODE (SET_DEST (set)) != STRICT_LOW_PART
271 || ! rtx_equal_p (XEXP (SET_DEST (set), 0), target)))
274 /* If TARGET is in OP0 or OP1, check if anything in SEQ sets TARGET
275 besides the last insn. */
276 if (reg_overlap_mentioned_p (target, op0)
277 || (op1 && reg_overlap_mentioned_p (target, op1)))
279 insn = PREV_INSN (last_insn);
280 while (insn != NULL_RTX)
282 if (reg_set_p (target, insn))
285 insn = PREV_INSN (insn);
/* Build the note expression from CODE and copies of the operands.  */
289 if (GET_RTX_CLASS (code) == RTX_UNARY)
290 note = gen_rtx_fmt_e (code, GET_MODE (target), copy_rtx (op0));
292 note = gen_rtx_fmt_ee (code, GET_MODE (target), copy_rtx (op0), copy_rtx (op1));
294 set_unique_reg_note (last_insn, REG_EQUAL, note);
299 /* Widen OP to MODE and return the rtx for the widened operand. UNSIGNEDP
300 says whether OP is signed or unsigned. NO_EXTEND is nonzero if we need
301 not actually do a sign-extend or zero-extend, but can leave the
302 higher-order bits of the result rtx undefined, for example, in the case
303 of logical operations, but not right shifts. */
306 widen_operand (rtx op, enum machine_mode mode, enum machine_mode oldmode,
307 int unsignedp, int no_extend)
311 /* If we don't have to extend and this is a constant, return it. */
312 if (no_extend && GET_MODE (op) == VOIDmode)
315 /* If we must extend do so. If OP is a SUBREG for a promoted object, also
316 extend since it will be more efficient to do so unless the signedness of
317 a promoted object differs from our extension. */
319 || (GET_CODE (op) == SUBREG && SUBREG_PROMOTED_VAR_P (op)
320 && SUBREG_PROMOTED_UNSIGNED_P (op) == unsignedp))
321 return convert_modes (mode, oldmode, op, unsignedp);
323 /* If MODE is no wider than a single word, we return a paradoxical
325 if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
326 return gen_rtx_SUBREG (mode, force_reg (GET_MODE (op), op), 0);
328 /* Otherwise, get an object of MODE, clobber it, and set the low-order
/* The CLOBBER tells later passes the high-order bits of RESULT are
   undefined before the low part is written.  */
331 result = gen_reg_rtx (mode);
332 emit_insn (gen_rtx_CLOBBER (VOIDmode, result));
333 emit_move_insn (gen_lowpart (GET_MODE (op), result), op);
337 /* Return the optab used for computing the operation given by the tree code,
338 CODE and the tree EXP. This function is not always usable (for example, it
339 cannot give complete results for multiplication or division) but probably
340 ought to be relied on more widely throughout the expander. */
342 optab_for_tree_code (enum tree_code code, const_tree type,
343 enum optab_subtype subtype)
355 return one_cmpl_optab;
364 return TYPE_UNSIGNED (type) ? umod_optab : smod_optab;
/* Division: prefer the saturating variants when the type saturates,
   otherwise pick by signedness.  */
372 if (TYPE_SATURATING(type))
373 return TYPE_UNSIGNED(type) ? usdiv_optab : ssdiv_optab;
374 return TYPE_UNSIGNED (type) ? udiv_optab : sdiv_optab;
/* Shifts distinguish vector-by-vector (optab_vector) from
   vector-by-scalar (optab_scalar) shift amounts.  */
377 if (VECTOR_MODE_P (TYPE_MODE (type)))
379 if (subtype == optab_vector)
380 return TYPE_SATURATING (type) ? NULL : vashl_optab;
382 gcc_assert (subtype == optab_scalar);
384 if (TYPE_SATURATING(type))
385 return TYPE_UNSIGNED(type) ? usashl_optab : ssashl_optab;
389 if (VECTOR_MODE_P (TYPE_MODE (type)))
391 if (subtype == optab_vector)
392 return TYPE_UNSIGNED (type) ? vlshr_optab : vashr_optab;
394 gcc_assert (subtype == optab_scalar);
396 return TYPE_UNSIGNED (type) ? lshr_optab : ashr_optab;
399 if (VECTOR_MODE_P (TYPE_MODE (type)))
401 if (subtype == optab_vector)
404 gcc_assert (subtype == optab_scalar);
409 if (VECTOR_MODE_P (TYPE_MODE (type)))
411 if (subtype == optab_vector)
414 gcc_assert (subtype == optab_scalar);
419 return TYPE_UNSIGNED (type) ? umax_optab : smax_optab;
422 return TYPE_UNSIGNED (type) ? umin_optab : smin_optab;
424 case REALIGN_LOAD_EXPR:
425 return vec_realign_load_optab;
428 return TYPE_UNSIGNED (type) ? usum_widen_optab : ssum_widen_optab;
431 return TYPE_UNSIGNED (type) ? udot_prod_optab : sdot_prod_optab;
434 return TYPE_UNSIGNED (type) ? reduc_umax_optab : reduc_smax_optab;
437 return TYPE_UNSIGNED (type) ? reduc_umin_optab : reduc_smin_optab;
439 case REDUC_PLUS_EXPR:
440 return TYPE_UNSIGNED (type) ? reduc_uplus_optab : reduc_splus_optab;
442 case VEC_LSHIFT_EXPR:
443 return vec_shl_optab;
445 case VEC_RSHIFT_EXPR:
446 return vec_shr_optab;
448 case VEC_WIDEN_MULT_HI_EXPR:
449 return TYPE_UNSIGNED (type) ?
450 vec_widen_umult_hi_optab : vec_widen_smult_hi_optab;
452 case VEC_WIDEN_MULT_LO_EXPR:
453 return TYPE_UNSIGNED (type) ?
454 vec_widen_umult_lo_optab : vec_widen_smult_lo_optab;
456 case VEC_UNPACK_HI_EXPR:
457 return TYPE_UNSIGNED (type) ?
458 vec_unpacku_hi_optab : vec_unpacks_hi_optab;
460 case VEC_UNPACK_LO_EXPR:
461 return TYPE_UNSIGNED (type) ?
462 vec_unpacku_lo_optab : vec_unpacks_lo_optab;
464 case VEC_UNPACK_FLOAT_HI_EXPR:
465 /* The signedness is determined from input operand. */
466 return TYPE_UNSIGNED (type) ?
467 vec_unpacku_float_hi_optab : vec_unpacks_float_hi_optab;
469 case VEC_UNPACK_FLOAT_LO_EXPR:
470 /* The signedness is determined from input operand. */
471 return TYPE_UNSIGNED (type) ?
472 vec_unpacku_float_lo_optab : vec_unpacks_float_lo_optab;
474 case VEC_PACK_TRUNC_EXPR:
475 return vec_pack_trunc_optab;
477 case VEC_PACK_SAT_EXPR:
478 return TYPE_UNSIGNED (type) ? vec_pack_usat_optab : vec_pack_ssat_optab;
480 case VEC_PACK_FIX_TRUNC_EXPR:
481 /* The signedness is determined from output operand. */
482 return TYPE_UNSIGNED (type) ?
483 vec_pack_ufix_trunc_optab : vec_pack_sfix_trunc_optab;
/* Codes below this point select trapping-on-overflow variants when the
   integral type requires overflow traps.  */
489 trapv = INTEGRAL_TYPE_P (type) && TYPE_OVERFLOW_TRAPS (type);
492 case POINTER_PLUS_EXPR:
494 if (TYPE_SATURATING(type))
495 return TYPE_UNSIGNED(type) ? usadd_optab : ssadd_optab;
496 return trapv ? addv_optab : add_optab;
499 if (TYPE_SATURATING(type))
500 return TYPE_UNSIGNED(type) ? ussub_optab : sssub_optab;
501 return trapv ? subv_optab : sub_optab;
504 if (TYPE_SATURATING(type))
505 return TYPE_UNSIGNED(type) ? usmul_optab : ssmul_optab;
506 return trapv ? smulv_optab : smul_optab;
509 if (TYPE_SATURATING(type))
510 return TYPE_UNSIGNED(type) ? usneg_optab : ssneg_optab;
511 return trapv ? negv_optab : neg_optab;
514 return trapv ? absv_optab : abs_optab;
516 case VEC_EXTRACT_EVEN_EXPR:
517 return vec_extract_even_optab;
519 case VEC_EXTRACT_ODD_EXPR:
520 return vec_extract_odd_optab;
522 case VEC_INTERLEAVE_HIGH_EXPR:
523 return vec_interleave_high_optab;
525 case VEC_INTERLEAVE_LOW_EXPR:
526 return vec_interleave_low_optab;
534 /* Expand vector widening operations.
536 There are two different classes of operations handled here:
537 1) Operations whose result is wider than all the arguments to the operation.
538 Examples: VEC_UNPACK_HI/LO_EXPR, VEC_WIDEN_MULT_HI/LO_EXPR
539 In this case OP0 and optionally OP1 would be initialized,
540 but WIDE_OP wouldn't (not relevant for this case).
541 2) Operations whose result is of the same size as the last argument to the
542 operation, but wider than all the other arguments to the operation.
543 Examples: WIDEN_SUM_EXPR, VEC_DOT_PROD_EXPR.
544 In the case WIDE_OP, OP0 and optionally OP1 would be initialized.
546 E.g, when called to expand the following operations, this is how
547 the arguments will be initialized:
549 widening-sum 2 oprnd0 - oprnd1
550 widening-dot-product 3 oprnd0 oprnd1 oprnd2
551 widening-mult 2 oprnd0 oprnd1 -
552 type-promotion (vec-unpack) 1 oprnd0 - - */
555 expand_widen_pattern_expr (tree exp, rtx op0, rtx op1, rtx wide_op, rtx target,
558 tree oprnd0, oprnd1, oprnd2;
559 enum machine_mode wmode = 0, tmode0, tmode1 = 0;
560 optab widen_pattern_optab;
562 enum machine_mode xmode0, xmode1 = 0, wxmode = 0;
565 rtx xop0, xop1, wxop;
566 int nops = TREE_OPERAND_LENGTH (exp);
/* Look up the insn pattern for this widening operation based on the
   mode of the first (narrow) operand.  */
568 oprnd0 = TREE_OPERAND (exp, 0);
569 tmode0 = TYPE_MODE (TREE_TYPE (oprnd0));
570 widen_pattern_optab =
571 optab_for_tree_code (TREE_CODE (exp), TREE_TYPE (oprnd0), optab_default);
572 icode = (int) optab_handler (widen_pattern_optab, tmode0)->insn_code;
573 gcc_assert (icode != CODE_FOR_nothing);
574 xmode0 = insn_data[icode].operand[1].mode;
578 oprnd1 = TREE_OPERAND (exp, 1);
579 tmode1 = TYPE_MODE (TREE_TYPE (oprnd1));
580 xmode1 = insn_data[icode].operand[2].mode;
583 /* The last operand is of a wider mode than the rest of the operands. */
591 gcc_assert (tmode1 == tmode0);
593 oprnd2 = TREE_OPERAND (exp, 2);
594 wmode = TYPE_MODE (TREE_TYPE (oprnd2));
595 wxmode = insn_data[icode].operand[3].mode;
599 wmode = wxmode = insn_data[icode].operand[0].mode;
602 || ! (*insn_data[icode].operand[0].predicate) (target, wmode))
603 temp = gen_reg_rtx (wmode);
611 /* In case the insn wants input operands in modes different from
612 those of the actual operands, convert the operands. It would
613 seem that we don't need to convert CONST_INTs, but we do, so
614 that they're properly zero-extended, sign-extended or truncated
617 if (GET_MODE (op0) != xmode0 && xmode0 != VOIDmode)
618 xop0 = convert_modes (xmode0,
619 GET_MODE (op0) != VOIDmode
625 if (GET_MODE (op1) != xmode1 && xmode1 != VOIDmode)
626 xop1 = convert_modes (xmode1,
627 GET_MODE (op1) != VOIDmode
633 if (GET_MODE (wide_op) != wxmode && wxmode != VOIDmode)
634 wxop = convert_modes (wxmode,
635 GET_MODE (wide_op) != VOIDmode
640 /* Now, if insn's predicates don't allow our operands, put them into
643 if (! (*insn_data[icode].operand[1].predicate) (xop0, xmode0)
644 && xmode0 != VOIDmode)
645 xop0 = copy_to_mode_reg (xmode0, xop0);
649 if (! (*insn_data[icode].operand[2].predicate) (xop1, xmode1)
650 && xmode1 != VOIDmode)
651 xop1 = copy_to_mode_reg (xmode1, xop1);
655 if (! (*insn_data[icode].operand[3].predicate) (wxop, wxmode)
656 && wxmode != VOIDmode)
657 wxop = copy_to_mode_reg (wxmode, wxop);
/* Generate the insn with however many operands this pattern takes.  */
659 pat = GEN_FCN (icode) (temp, xop0, xop1, wxop);
662 pat = GEN_FCN (icode) (temp, xop0, xop1);
668 if (! (*insn_data[icode].operand[2].predicate) (wxop, wxmode)
669 && wxmode != VOIDmode)
670 wxop = copy_to_mode_reg (wxmode, wxop);
672 pat = GEN_FCN (icode) (temp, xop0, wxop);
675 pat = GEN_FCN (icode) (temp, xop0);
682 /* Generate code to perform an operation specified by TERNARY_OPTAB
683 on operands OP0, OP1 and OP2, with result having machine-mode MODE.
685 UNSIGNEDP is for the case where we have to widen the operands
686 to perform the operation. It says to use zero-extension.
688 If TARGET is nonzero, the value
689 is generated there, if it is convenient to do so.
690 In all cases an rtx is returned for the locus of the value;
691 this may or may not be TARGET. */
694 expand_ternary_op (enum machine_mode mode, optab ternary_optab, rtx op0,
695 rtx op1, rtx op2, rtx target, int unsignedp)
697 int icode = (int) optab_handler (ternary_optab, mode)->insn_code;
698 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
699 enum machine_mode mode1 = insn_data[icode].operand[2].mode;
700 enum machine_mode mode2 = insn_data[icode].operand[3].mode;
703 rtx xop0 = op0, xop1 = op1, xop2 = op2;
/* The caller must have verified a handler exists for this mode.  */
705 gcc_assert (optab_handler (ternary_optab, mode)->insn_code
706 != CODE_FOR_nothing);
/* Use TARGET only when the pattern's output predicate accepts it.  */
708 if (!target || !insn_data[icode].operand[0].predicate (target, mode))
709 temp = gen_reg_rtx (mode);
713 /* In case the insn wants input operands in modes different from
714 those of the actual operands, convert the operands. It would
715 seem that we don't need to convert CONST_INTs, but we do, so
716 that they're properly zero-extended, sign-extended or truncated
719 if (GET_MODE (op0) != mode0 && mode0 != VOIDmode)
720 xop0 = convert_modes (mode0,
721 GET_MODE (op0) != VOIDmode
726 if (GET_MODE (op1) != mode1 && mode1 != VOIDmode)
727 xop1 = convert_modes (mode1,
728 GET_MODE (op1) != VOIDmode
733 if (GET_MODE (op2) != mode2 && mode2 != VOIDmode)
734 xop2 = convert_modes (mode2,
735 GET_MODE (op2) != VOIDmode
740 /* Now, if insn's predicates don't allow our operands, put them into
743 if (!insn_data[icode].operand[1].predicate (xop0, mode0)
744 && mode0 != VOIDmode)
745 xop0 = copy_to_mode_reg (mode0, xop0);
747 if (!insn_data[icode].operand[2].predicate (xop1, mode1)
748 && mode1 != VOIDmode)
749 xop1 = copy_to_mode_reg (mode1, xop1);
751 if (!insn_data[icode].operand[3].predicate (xop2, mode2)
752 && mode2 != VOIDmode)
753 xop2 = copy_to_mode_reg (mode2, xop2);
755 pat = GEN_FCN (icode) (temp, xop0, xop1, xop2);
762 /* Like expand_binop, but return a constant rtx if the result can be
763 calculated at compile time. The arguments and return value are
764 otherwise the same as for expand_binop. */
767 simplify_expand_binop (enum machine_mode mode, optab binoptab,
768 rtx op0, rtx op1, rtx target, int unsignedp,
769 enum optab_methods methods)
/* Fold entirely constant operands at compile time when possible.  */
771 if (CONSTANT_P (op0) && CONSTANT_P (op1))
773 rtx x = simplify_binary_operation (binoptab->code, mode, op0, op1);
/* Otherwise (or if folding failed) emit the operation normally.  */
779 return expand_binop (mode, binoptab, op0, op1, target, unsignedp, methods);
782 /* Like simplify_expand_binop, but always put the result in TARGET.
783 Return true if the expansion succeeded. */
786 force_expand_binop (enum machine_mode mode, optab binoptab,
787 rtx op0, rtx op1, rtx target, int unsignedp,
788 enum optab_methods methods)
790 rtx x = simplify_expand_binop (mode, binoptab, op0, op1,
791 target, unsignedp, methods);
/* The expansion may have placed the result elsewhere (or folded to a
   constant); copy it into TARGET in that case.  */
795 emit_move_insn (target, x);
799 /* Generate insns for VEC_LSHIFT_EXPR, VEC_RSHIFT_EXPR. */
802 expand_vec_shift_expr (tree vec_shift_expr, rtx target)
804 enum insn_code icode;
805 rtx rtx_op1, rtx_op2;
806 enum machine_mode mode1;
807 enum machine_mode mode2;
808 enum machine_mode mode = TYPE_MODE (TREE_TYPE (vec_shift_expr));
809 tree vec_oprnd = TREE_OPERAND (vec_shift_expr, 0);
810 tree shift_oprnd = TREE_OPERAND (vec_shift_expr, 1);
/* Select the whole-vector shift optab matching the tree code.  */
814 switch (TREE_CODE (vec_shift_expr))
816 case VEC_RSHIFT_EXPR:
817 shift_optab = vec_shr_optab;
819 case VEC_LSHIFT_EXPR:
820 shift_optab = vec_shl_optab;
826 icode = (int) optab_handler (shift_optab, mode)->insn_code;
827 gcc_assert (icode != CODE_FOR_nothing);
829 mode1 = insn_data[icode].operand[1].mode;
830 mode2 = insn_data[icode].operand[2].mode;
/* Expand both operands and force them into registers if the pattern's
   predicates reject the expanded forms.  */
832 rtx_op1 = expand_normal (vec_oprnd);
833 if (!(*insn_data[icode].operand[1].predicate) (rtx_op1, mode1)
834 && mode1 != VOIDmode)
835 rtx_op1 = force_reg (mode1, rtx_op1);
837 rtx_op2 = expand_normal (shift_oprnd)
854 /* This subroutine of expand_doubleword_shift handles the cases in which
855 the effective shift value is >= BITS_PER_WORD. The arguments and return
856 value are the same as for the parent routine, except that SUPERWORD_OP1
857 is the shift count to use when shifting OUTOF_INPUT into INTO_TARGET.
858 INTO_TARGET may be null if the caller has decided to calculate it. */
861 expand_superword_shift (optab binoptab, rtx outof_input, rtx superword_op1,
862 rtx outof_target, rtx into_target,
863 int unsignedp, enum optab_methods methods)
/* The into-half is simply OUTOF_INPUT shifted by SUPERWORD_OP1.  */
865 if (into_target != 0)
866 if (!force_expand_binop (word_mode, binoptab, outof_input, superword_op1,
867 into_target, unsignedp, methods))
870 if (outof_target != 0)
872 /* For a signed right shift, we must fill OUTOF_TARGET with copies
873 of the sign bit, otherwise we must fill it with zeros. */
874 if (binoptab != ashr_optab)
875 emit_move_insn (outof_target, CONST0_RTX (word_mode))
885 /* This subroutine of expand_doubleword_shift handles the cases in which
886 the effective shift value is < BITS_PER_WORD. The arguments and return
887 value are the same as for the parent routine. */
890 expand_subword_shift (enum machine_mode op1_mode, optab binoptab,
891 rtx outof_input, rtx into_input, rtx op1,
892 rtx outof_target, rtx into_target,
893 int unsignedp, enum optab_methods methods,
894 unsigned HOST_WIDE_INT shift_mask)
896 optab reverse_unsigned_shift, unsigned_shift;
/* REVERSE_UNSIGNED_SHIFT moves carried-over bits the opposite way to
   BINOPTAB; UNSIGNED_SHIFT is the logical shift in the same direction.  */
899 reverse_unsigned_shift = (binoptab == ashl_optab ? lshr_optab : ashl_optab);
900 unsigned_shift = (binoptab == ashl_optab ? ashl_optab : lshr_optab);
902 /* The low OP1 bits of INTO_TARGET come from the high bits of OUTOF_INPUT.
903 We therefore need to shift OUTOF_INPUT by (BITS_PER_WORD - OP1) bits in
904 the opposite direction to BINOPTAB. */
905 if (CONSTANT_P (op1) || shift_mask >= BITS_PER_WORD)
907 carries = outof_input;
908 tmp = immed_double_const (BITS_PER_WORD, 0, op1_mode);
909 tmp = simplify_expand_binop (op1_mode, sub_optab, tmp, op1,
914 /* We must avoid shifting by BITS_PER_WORD bits since that is either
915 the same as a zero shift (if shift_mask == BITS_PER_WORD - 1) or
916 has unknown behavior. Do a single shift first, then shift by the
917 remainder. It's OK to use ~OP1 as the remainder if shift counts
918 are truncated to the mode size. */
919 carries = expand_binop (word_mode, reverse_unsigned_shift,
920 outof_input, const1_rtx, 0, unsignedp, methods);
921 if (shift_mask == BITS_PER_WORD - 1)
/* Compute ~OP1 via XOR with all-ones.  */
923 tmp = immed_double_const (-1, -1, op1_mode);
924 tmp = simplify_expand_binop (op1_mode, xor_optab, op1, tmp,
929 tmp = immed_double_const (BITS_PER_WORD - 1, 0, op1_mode);
930 tmp = simplify_expand_binop (op1_mode, sub_optab, tmp, op1,
934 if (tmp == 0 || carries == 0)
936 carries = expand_binop (word_mode, reverse_unsigned_shift,
937 carries, tmp, 0, unsignedp, methods);
941 /* Shift INTO_INPUT logically by OP1. This is the last use of INTO_INPUT
942 so the result can go directly into INTO_TARGET if convenient. */
943 tmp = expand_binop (word_mode, unsigned_shift, into_input, op1,
944 into_target, unsignedp, methods);
948 /* Now OR in the bits carried over from OUTOF_INPUT. */
949 if (!force_expand_binop (word_mode, ior_optab, tmp, carries,
950 into_target, unsignedp, methods))
953 /* Use a standard word_mode shift for the out-of half. */
954 if (outof_target != 0)
955 if (!force_expand_binop (word_mode, binoptab, outof_input, op1,
956 outof_target, unsignedp, methods))
963 #ifdef HAVE_conditional_move
964 /* Try implementing expand_doubleword_shift using conditional moves.
965 The shift is by < BITS_PER_WORD if (CMP_CODE CMP1 CMP2) is true,
966 otherwise it is by >= BITS_PER_WORD. SUBWORD_OP1 and SUPERWORD_OP1
967 are the shift counts to use in the former and latter case. All other
968 arguments are the same as the parent routine. */
971 expand_doubleword_shift_condmove (enum machine_mode op1_mode, optab binoptab,
972 enum rtx_code cmp_code, rtx cmp1, rtx cmp2,
973 rtx outof_input, rtx into_input,
974 rtx subword_op1, rtx superword_op1,
975 rtx outof_target, rtx into_target,
976 int unsignedp, enum optab_methods methods,
977 unsigned HOST_WIDE_INT shift_mask)
979 rtx outof_superword, into_superword;
981 /* Put the superword version of the output into OUTOF_SUPERWORD and
983 outof_superword = outof_target != 0 ? gen_reg_rtx (word_mode) : 0;
984 if (outof_target != 0 && subword_op1 == superword_op1)
986 /* The value INTO_TARGET >> SUBWORD_OP1, which we later store in
987 OUTOF_TARGET, is the same as the value of INTO_SUPERWORD. */
988 into_superword = outof_target;
989 if (!expand_superword_shift (binoptab, outof_input, superword_op1,
990 outof_superword, 0, unsignedp, methods))
/* General case: the superword into-half needs its own register.  */
995 into_superword = gen_reg_rtx (word_mode);
996 if (!expand_superword_shift (binoptab, outof_input, superword_op1,
997 outof_superword, into_superword,
1002 /* Put the subword version directly in OUTOF_TARGET and INTO_TARGET. */
1003 if (!expand_subword_shift (op1_mode, binoptab,
1004 outof_input, into_input, subword_op1,
1005 outof_target, into_target,
1006 unsignedp, methods, shift_mask))
1009 /* Select between them. Do the INTO half first because INTO_SUPERWORD
1010 might be the current value of OUTOF_TARGET. */
1011 if (!emit_conditional_move (into_target, cmp_code, cmp1, cmp2, op1_mode,
1012 into_target, into_superword, word_mode, false))
1015 if (outof_target != 0)
1016 if (!emit_conditional_move (outof_target, cmp_code, cmp1, cmp2, op1_mode,
1017 outof_target, outof_superword,
1025 /* Expand a doubleword shift (ashl, ashr or lshr) using word-mode shifts.
1026 OUTOF_INPUT and INTO_INPUT are the two word-sized halves of the first
1027 input operand; the shift moves bits in the direction OUTOF_INPUT->
1028 INTO_TARGET. OUTOF_TARGET and INTO_TARGET are the equivalent words
1029 of the target. OP1 is the shift count and OP1_MODE is its mode.
1030 If OP1 is constant, it will have been truncated as appropriate
1031 and is known to be nonzero.
1033 If SHIFT_MASK is zero, the result of word shifts is undefined when the
1034 shift count is outside the range [0, BITS_PER_WORD). This routine must
1035 avoid generating such shifts for OP1s in the range [0, BITS_PER_WORD * 2).
1037 If SHIFT_MASK is nonzero, all word-mode shift counts are effectively
1038 masked by it and shifts in the range [BITS_PER_WORD, SHIFT_MASK) will
1039 fill with zeros or sign bits as appropriate.
1041 If SHIFT_MASK is BITS_PER_WORD - 1, this routine will synthesize
1042 a doubleword shift whose equivalent mask is BITS_PER_WORD * 2 - 1.
1043 Doing this preserves semantics required by SHIFT_COUNT_TRUNCATED.
1044 In all other cases, shifts by values outside [0, BITS_PER_WORD * 2)
1047 BINOPTAB, UNSIGNEDP and METHODS are as for expand_binop. This function
1048 may not use INTO_INPUT after modifying INTO_TARGET, and similarly for
1049 OUTOF_INPUT and OUTOF_TARGET. OUTOF_TARGET can be null if the parent
1050 function wants to calculate it itself.
1052 Return true if the shift could be successfully synthesized. */
1055 expand_doubleword_shift (enum machine_mode op1_mode, optab binoptab,
1056 rtx outof_input, rtx into_input, rtx op1,
1057 rtx outof_target, rtx into_target,
1058 int unsignedp, enum optab_methods methods,
1059 unsigned HOST_WIDE_INT shift_mask)
1061 rtx superword_op1, tmp, cmp1, cmp2;
1062 rtx subword_label, done_label;
1063 enum rtx_code cmp_code;
1065 /* See if word-mode shifts by BITS_PER_WORD...BITS_PER_WORD * 2 - 1 will
1066 fill the result with sign or zero bits as appropriate. If so, the value
1067 of OUTOF_TARGET will always be (SHIFT OUTOF_INPUT OP1). Recursively call
1068 this routine to calculate INTO_TARGET (which depends on both OUTOF_INPUT
1069 and INTO_INPUT), then emit code to set up OUTOF_TARGET.
1071 This isn't worthwhile for constant shifts since the optimizers will
1072 cope better with in-range shift counts. */
1073 if (shift_mask >= BITS_PER_WORD
1074 && outof_target != 0
1075 && !CONSTANT_P (op1))
1077 if (!expand_doubleword_shift (op1_mode, binoptab,
1078 outof_input, into_input, op1,
1080 unsignedp, methods, shift_mask))
1082 if (!force_expand_binop (word_mode, binoptab, outof_input, op1,
1083 outof_target, unsignedp, methods))
1088 /* Set CMP_CODE, CMP1 and CMP2 so that the rtx (CMP_CODE CMP1 CMP2)
1089 is true when the effective shift value is less than BITS_PER_WORD.
1090 Set SUPERWORD_OP1 to the shift count that should be used to shift
1091 OUTOF_INPUT into INTO_TARGET when the condition is false. */
1092 tmp = immed_double_const (BITS_PER_WORD, 0, op1_mode);
1093 if (!CONSTANT_P (op1) && shift_mask == BITS_PER_WORD - 1)
1095 /* Set CMP1 to OP1 & BITS_PER_WORD. The result is zero iff OP1
1096 is a subword shift count. */
1097 cmp1 = simplify_expand_binop (op1_mode, and_optab, op1, tmp,
1099 cmp2 = CONST0_RTX (op1_mode);
1101 superword_op1 = op1;
1105 /* Set CMP1 to OP1 - BITS_PER_WORD. */
1106 cmp1 = simplify_expand_binop (op1_mode, sub_optab, op1, tmp,
1108 cmp2 = CONST0_RTX (op1_mode);
1110 superword_op1 = cmp1;
1115 /* If we can compute the condition at compile time, pick the
1116 appropriate subroutine. */
1117 tmp = simplify_relational_operation (cmp_code, SImode, op1_mode, cmp1, cmp2);
1118 if (tmp != 0 && GET_CODE (tmp) == CONST_INT)
1120 if (tmp == const0_rtx)
1121 return expand_superword_shift (binoptab, outof_input, superword_op1,
1122 outof_target, into_target,
1123 unsignedp, methods);
1125 return expand_subword_shift (op1_mode, binoptab,
1126 outof_input, into_input, op1,
1127 outof_target, into_target,
1128 unsignedp, methods, shift_mask);
1131 #ifdef HAVE_conditional_move
1132 /* Try using conditional moves to generate straight-line code. */
1134 rtx start = get_last_insn ();
1135 if (expand_doubleword_shift_condmove (op1_mode, binoptab,
1136 cmp_code, cmp1, cmp2,
1137 outof_input, into_input,
1139 outof_target, into_target,
1140 unsignedp, methods, shift_mask))
/* The condmove attempt failed: discard any insns it emitted.  */
1142 delete_insns_since (start);
1146 /* As a last resort, use branches to select the correct alternative. */
1147 subword_label = gen_label_rtx ();
1148 done_label = gen_label_rtx ();
1151 do_compare_rtx_and_jump (cmp1, cmp2, cmp_code, false, op1_mode,
1152 0, 0, subword_label);
/* Fall-through path: effective shift count is >= BITS_PER_WORD.  */
1155 if (!expand_superword_shift (binoptab, outof_input, superword_op1,
1156 outof_target, into_target,
1157 unsignedp, methods))
1160 emit_jump_insn (gen_jump (done_label));
1162 emit_label (subword_label);
/* Branch target: effective shift count is < BITS_PER_WORD.  */
1164 if (!expand_subword_shift (op1_mode, binoptab,
1165 outof_input, into_input, op1,
1166 outof_target, into_target,
1167 unsignedp, methods, shift_mask))
1170 emit_label (done_label);
1174 /* Subroutine of expand_binop. Perform a double word multiplication of
1175 operands OP0 and OP1 both of mode MODE, which is exactly twice as wide
1176 as the target's word_mode. This function returns NULL_RTX if anything
1177 goes wrong, in which case it may have already emitted instructions
1178 which need to be deleted.
1180 If we want to multiply two two-word values and have normal and widening
1181 multiplies of single-word values, we can do this with three smaller
1184 The multiplication proceeds as follows:
1185 _______________________
1186 [__op0_high_|__op0_low__]
1187 _______________________
1188 * [__op1_high_|__op1_low__]
1189 _______________________________________________
1190 _______________________
1191 (1) [__op0_low__*__op1_low__]
1192 _______________________
1193 (2a) [__op0_low__*__op1_high_]
1194 _______________________
1195 (2b) [__op0_high_*__op1_low__]
1196 _______________________
1197 (3) [__op0_high_*__op1_high_]
1200 This gives a 4-word result. Since we are only interested in the
1201 lower 2 words, partial result (3) and the upper words of (2a) and
1202 (2b) don't need to be calculated. Hence (2a) and (2b) can be
1203 calculated using non-widening multiplication.
1205 (1), however, needs to be calculated with an unsigned widening
1206 multiplication. If this operation is not directly supported we
1207 try using a signed widening multiplication and adjust the result.
1208 This adjustment works as follows:
1210 If both operands are positive then no adjustment is needed.
1212 If the operands have different signs, for example op0_low < 0 and
1213 op1_low >= 0, the instruction treats the most significant bit of
1214 op0_low as a sign bit instead of a bit with significance
1215 2**(BITS_PER_WORD-1), i.e. the instruction multiplies op1_low
1216 with 2**BITS_PER_WORD - op0_low, and two's complements the
1217 result. Conclusion: We need to add op1_low * 2**BITS_PER_WORD to
1220 Similarly, if both operands are negative, we need to add
1221 (op0_low + op1_low) * 2**BITS_PER_WORD.
1223 We use a trick to adjust quickly. We logically shift op0_low right
1224 (op1_low) BITS_PER_WORD-1 steps to get 0 or 1, and add this to
1225 op0_high (op1_high) before it is used to calculate 2b (2a). If no
1226 logical shift exists, we do an arithmetic right shift and subtract
1230 expand_doubleword_mult (enum machine_mode mode, rtx op0, rtx op1, rtx target,
1231 bool umulp, enum optab_methods methods)
/* NOTE(review): this excerpt elides several original lines (braces, failure
   checks and some statements); code below is kept verbatim.  Confirm against
   the full source before modifying.  */
/* Word indices of the low/high halves of a double-word value, chosen
   according to WORDS_BIG_ENDIAN.  */
1233 int low = (WORDS_BIG_ENDIAN ? 1 : 0);
1234 int high = (WORDS_BIG_ENDIAN ? 0 : 1);
/* Shift count BITS_PER_WORD-1, used only on the signed-widening fallback
   path (umulp == false) to extract the operands' sign bits.  */
1235 rtx wordm1 = umulp ? NULL_RTX : GEN_INT (BITS_PER_WORD - 1);
1236 rtx product, adjust, product_high, temp;
1238 rtx op0_high = operand_subword_force (op0, high, mode);
1239 rtx op0_low = operand_subword_force (op0, low, mode);
1240 rtx op1_high = operand_subword_force (op1, high, mode);
1241 rtx op1_low = operand_subword_force (op1, low, mode);
1243 /* If we're using an unsigned multiply to directly compute the product
1244 of the low-order words of the operands and perform any required
1245 adjustments of the operands, we begin by trying two more multiplications
1246 and then computing the appropriate sum.
1248 We have checked above that the required addition is provided.
1249 Full-word addition will normally always succeed, especially if
1250 it is provided at all, so we don't worry about its failure. The
1251 multiplication may well fail, however, so we do handle that. */
/* Signed path: fold OP0_LOW's sign bit into OP0_HIGH so the later
   non-widening multiply yields the correctly adjusted partial product
   (see the sign-adjustment discussion in the comment above).  */
1255 /* ??? This could be done with emit_store_flag where available. */
1256 temp = expand_binop (word_mode, lshr_optab, op0_low, wordm1,
1257 NULL_RTX, 1, methods);
1259 op0_high = expand_binop (word_mode, add_optab, op0_high, temp,
1260 NULL_RTX, 0, OPTAB_DIRECT);
/* Alternative when no logical shift exists: arithmetic shift gives 0/-1,
   so subtract instead of add.  */
1263 temp = expand_binop (word_mode, ashr_optab, op0_low, wordm1,
1264 NULL_RTX, 0, methods);
1267 op0_high = expand_binop (word_mode, sub_optab, op0_high, temp,
1268 NULL_RTX, 0, OPTAB_DIRECT);
/* Partial product (2b): op0_high * op1_low, non-widening.  */
1275 adjust = expand_binop (word_mode, smul_optab, op0_high, op1_low,
1276 NULL_RTX, 0, OPTAB_DIRECT);
1280 /* OP0_HIGH should now be dead. */
/* Same sign-bit folding for OP1, feeding partial product (2a).  */
1284 /* ??? This could be done with emit_store_flag where available. */
1285 temp = expand_binop (word_mode, lshr_optab, op1_low, wordm1,
1286 NULL_RTX, 1, methods);
1288 op1_high = expand_binop (word_mode, add_optab, op1_high, temp,
1289 NULL_RTX, 0, OPTAB_DIRECT);
1292 temp = expand_binop (word_mode, ashr_optab, op1_low, wordm1,
1293 NULL_RTX, 0, methods);
1296 op1_high = expand_binop (word_mode, sub_optab, op1_high, temp,
1297 NULL_RTX, 0, OPTAB_DIRECT);
/* Partial product (2a): op1_high * op0_low, non-widening.  */
1304 temp = expand_binop (word_mode, smul_optab, op1_high, op0_low,
1305 NULL_RTX, 0, OPTAB_DIRECT);
1309 /* OP1_HIGH should now be dead. */
/* Sum of the two middle partial products; only their low words matter.  */
1311 adjust = expand_binop (word_mode, add_optab, adjust, temp,
1312 adjust, 0, OPTAB_DIRECT);
1314 if (target && !REG_P (target))
/* Partial product (1): widening multiply of the two low words, unsigned
   or signed according to UMULP.  */
1318 product = expand_binop (mode, umul_widen_optab, op0_low, op1_low,
1319 target, 1, OPTAB_DIRECT);
1321 product = expand_binop (mode, smul_widen_optab, op0_low, op1_low,
1322 target, 1, OPTAB_DIRECT);
/* Fold ADJUST into the high word of the widened product.  */
1327 product_high = operand_subword (product, high, 1, mode);
1328 adjust = expand_binop (word_mode, add_optab, product_high, adjust,
1329 REG_P (product_high) ? product_high : adjust,
1331 emit_move_insn (product_high, adjust);
1335 /* Wrapper around expand_binop which takes an rtx code to specify
1336 the operation to perform, not an optab pointer. All other
1337 arguments are the same. */
1339 expand_simple_binop (enum machine_mode mode, enum rtx_code code, rtx op0,
1340 rtx op1, rtx target, int unsignedp,
1341 enum optab_methods methods)
/* Map the rtx code to its optab and delegate; see the comment above.
   NOTE(review): a guard between these two statements is elided in this
   excerpt — confirm against the full source.  */
1343 optab binop = code_to_optab[(int) code];
1346 return expand_binop (mode, binop, op0, op1, target, unsignedp, methods);
1349 /* Return whether OP0 and OP1 should be swapped when expanding a commutative
1350 binop. Order them according to commutative_operand_precedence and, if
1351 possible, try to put TARGET or a pseudo first. */
1353 swap_commutative_operands_with_target (rtx target, rtx op0, rtx op1)
/* Compare the canonical-ordering precedence of the two operands; the
   higher-precedence operand should come first.  NOTE(review): the return
   statements of the two precedence branches are elided in this excerpt.  */
1355 int op0_prec = commutative_operand_precedence (op0);
1356 int op1_prec = commutative_operand_precedence (op1);
1358 if (op0_prec < op1_prec)
1361 if (op0_prec > op1_prec)
1364 /* With equal precedence, both orders are ok, but it is better if the
1365 first operand is TARGET, or if both TARGET and OP0 are pseudos. */
1366 if (target == 0 || REG_P (target))
1367 return (REG_P (op1) && !REG_P (op0)) || target == op1;
/* TARGET is a non-register; swap only if OP1 is that same location.  */
1369 return rtx_equal_p (op1, target);
1372 /* Return true if BINOPTAB implements a shift operation. */
1375 shift_optab_p (optab binoptab)
/* Dispatch on the optab's rtx code.  NOTE(review): the case labels and
   return statements of this switch are elided in this excerpt; presumably
   the shift/rotate codes return true and the default returns false —
   confirm against the full source.  */
1377 switch (binoptab->code)
1393 /* Return true if BINOPTAB implements a commutative binary operation. */
1396 commutative_optab_p (optab binoptab)
/* An operation is commutative if its rtx code is classified as
   commutative arithmetic, or if it is one of the widening/highpart
   multiplies, which are commutative but not classified as such.  */
1398 return (GET_RTX_CLASS (binoptab->code) == RTX_COMM_ARITH
1399 || binoptab == smul_widen_optab
1400 || binoptab == umul_widen_optab
1401 || binoptab == smul_highpart_optab
1402 || binoptab == umul_highpart_optab);
1405 /* X is to be used in mode MODE as an operand to BINOPTAB. If we're
1406 optimizing, and if the operand is a constant that costs more than
1407 1 instruction, force the constant into a register and return that
1408 register. Return X otherwise. UNSIGNEDP says whether X is unsigned. */
1411 avoid_expensive_constant (enum machine_mode mode, optab binoptab,
1412 rtx x, bool unsignedp)
/* NOTE(review): parts of the guard condition (lines between 1414 and 1417)
   are elided in this excerpt; code kept verbatim.  */
1414 if (mode != VOIDmode
1417 && rtx_cost (x, binoptab->code) > COSTS_N_INSNS (1))
/* Canonicalize a CONST_INT to MODE's width before forcing it into a
   register, so the register holds a properly truncated value.  */
1419 if (GET_CODE (x) == CONST_INT)
1421 HOST_WIDE_INT intval = trunc_int_for_mode (INTVAL (x), mode);
1422 if (intval != INTVAL (x))
1423 x = GEN_INT (intval);
/* Non-CONST_INT constants: convert to MODE (honoring signedness), then
   load into a fresh register.  */
1426 x = convert_modes (mode, VOIDmode, x, unsignedp);
1427 x = force_reg (mode, x);
1432 /* Helper function for expand_binop: handle the case where there
1433 is an insn that directly implements the indicated operation.
1434 Returns null if this is not possible. */
1436 expand_binop_directly (enum machine_mode mode, optab binoptab,
1438 rtx target, int unsignedp, enum optab_methods methods,
/* NOTE(review): this excerpt elides several original lines (parameter
   lines, braces, some conditions); code kept verbatim.  */
/* Look up the insn implementing BINOPTAB in MODE and the operand modes
   its predicates expect.  */
1441 int icode = (int) optab_handler (binoptab, mode)->insn_code;
1442 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
1443 enum machine_mode mode1 = insn_data[icode].operand[2].mode;
1444 enum machine_mode tmp_mode;
1447 rtx xop0 = op0, xop1 = op1;
1454 temp = gen_reg_rtx (mode);
1456 /* If it is a commutative operator and the modes would match
1457 if we would swap the operands, we can save the conversions. */
1458 commutative_p = commutative_optab_p (binoptab);
1460 && GET_MODE (xop0) != mode0 && GET_MODE (xop1) != mode1
1461 && GET_MODE (xop0) == mode1 && GET_MODE (xop1) == mode1
1468 /* If we are optimizing, force expensive constants into a register. */
1469 xop0 = avoid_expensive_constant (mode0, binoptab, xop0, unsignedp);
/* A shift count is cheap however wide it is; leave it as a constant.  */
1470 if (!shift_optab_p (binoptab))
1471 xop1 = avoid_expensive_constant (mode1, binoptab, xop1, unsignedp);
1473 /* In case the insn wants input operands in modes different from
1474 those of the actual operands, convert the operands. It would
1475 seem that we don't need to convert CONST_INTs, but we do, so
1476 that they're properly zero-extended, sign-extended or truncated
1479 if (GET_MODE (xop0) != mode0 && mode0 != VOIDmode)
1480 xop0 = convert_modes (mode0,
1481 GET_MODE (xop0) != VOIDmode
1486 if (GET_MODE (xop1) != mode1 && mode1 != VOIDmode)
1487 xop1 = convert_modes (mode1,
1488 GET_MODE (xop1) != VOIDmode
1493 /* If operation is commutative,
1494 try to make the first operand a register.
1495 Even better, try to make it the same as the target.
1496 Also try to make the last operand a constant. */
1498 && swap_commutative_operands_with_target (target, xop0, xop1))
1505 /* Now, if insn's predicates don't allow our operands, put them into
1508 if (!insn_data[icode].operand[1].predicate (xop0, mode0)
1509 && mode0 != VOIDmode)
1510 xop0 = copy_to_mode_reg (mode0, xop0);
1512 if (!insn_data[icode].operand[2].predicate (xop1, mode1)
1513 && mode1 != VOIDmode)
1514 xop1 = copy_to_mode_reg (mode1, xop1);
/* Vector pack insns produce a result whose mode differs from the
   operand mode; pick up the output mode from the insn itself.  */
1516 if (binoptab == vec_pack_trunc_optab
1517 || binoptab == vec_pack_usat_optab
1518 || binoptab == vec_pack_ssat_optab
1519 || binoptab == vec_pack_ufix_trunc_optab
1520 || binoptab == vec_pack_sfix_trunc_optab)
1522 /* The mode of the result is different than the mode of the
1524 tmp_mode = insn_data[icode].operand[0].mode;
/* Packing must exactly halve the element width (double the count).  */
1525 if (GET_MODE_NUNITS (tmp_mode) != 2 * GET_MODE_NUNITS (mode))
1531 if (!insn_data[icode].operand[0].predicate (temp, tmp_mode))
1532 temp = gen_reg_rtx (tmp_mode);
/* Emit the instruction proper.  */
1534 pat = GEN_FCN (icode) (temp, xop0, xop1);
1537 /* If PAT is composed of more than one insn, try to add an appropriate
1538 REG_EQUAL note to it. If we can't because TEMP conflicts with an
1539 operand, call expand_binop again, this time without a target. */
1540 if (INSN_P (pat) && NEXT_INSN (pat) != NULL_RTX
1541 && ! add_equal_note (pat, temp, binoptab->code, xop0, xop1))
1543 delete_insns_since (last);
1544 return expand_binop (mode, binoptab, op0, op1, NULL_RTX,
1545 unsignedp, methods);
/* Failure path: discard anything emitted since LAST.  */
1552 delete_insns_since (last);
1556 /* Generate code to perform an operation specified by BINOPTAB
1557 on operands OP0 and OP1, with result having machine-mode MODE.
1559 UNSIGNEDP is for the case where we have to widen the operands
1560 to perform the operation. It says to use zero-extension.
1562 If TARGET is nonzero, the value
1563 is generated there, if it is convenient to do so.
1564 In all cases an rtx is returned for the locus of the value;
1565 this may or may not be TARGET. */
1568 expand_binop (enum machine_mode mode, optab binoptab, rtx op0, rtx op1,
1569 rtx target, int unsignedp, enum optab_methods methods)
/* NOTE(review): this excerpt elides many original lines (braces, some
   declarations and statements); all code below is kept verbatim.  The
   function tries a sequence of strategies, backtracking with
   delete_insns_since between attempts.  */
/* Recursive attempts may not themselves use library calls; degrade
   LIB/LIB_WIDEN to WIDEN for them.  */
1571 enum optab_methods next_methods
1572 = (methods == OPTAB_LIB || methods == OPTAB_LIB_WIDEN
1573 ? OPTAB_WIDEN : methods);
1574 enum mode_class class;
1575 enum machine_mode wider_mode;
1578 rtx entry_last = get_last_insn ();
1581 class = GET_MODE_CLASS (mode);
1583 /* If subtracting an integer constant, convert this into an addition of
1584 the negated constant. */
1586 if (binoptab == sub_optab && GET_CODE (op1) == CONST_INT)
1588 op1 = negate_rtx (mode, op1);
1589 binoptab = add_optab;
1592 /* Record where to delete back to if we backtrack. */
1593 last = get_last_insn ();
1595 /* If we can do it with a three-operand insn, do so. */
1597 if (methods != OPTAB_MUST_WIDEN
1598 && optab_handler (binoptab, mode)->insn_code != CODE_FOR_nothing)
1600 temp = expand_binop_directly (mode, binoptab, op0, op1, target,
1601 unsignedp, methods, last);
1606 /* If we were trying to rotate, and that didn't work, try rotating
1607 the other direction before falling back to shifts and bitwise-or. */
1608 if (((binoptab == rotl_optab
1609 && optab_handler (rotr_optab, mode)->insn_code != CODE_FOR_nothing)
1610 || (binoptab == rotr_optab
1611 && optab_handler (rotl_optab, mode)->insn_code != CODE_FOR_nothing))
1612 && class == MODE_INT)
1614 optab otheroptab = (binoptab == rotl_optab ? rotr_optab : rotl_optab);
1616 unsigned int bits = GET_MODE_BITSIZE (mode);
/* The opposite rotation uses count BITS - OP1; a plain negate suffices
   when the target truncates shift counts modulo the width.  */
1618 if (GET_CODE (op1) == CONST_INT)
1619 newop1 = GEN_INT (bits - INTVAL (op1));
1620 else if (targetm.shift_truncation_mask (mode) == bits - 1)
1621 newop1 = negate_rtx (mode, op1);
1623 newop1 = expand_binop (mode, sub_optab,
1624 GEN_INT (bits), op1,
1625 NULL_RTX, unsignedp, OPTAB_DIRECT);
1627 temp = expand_binop_directly (mode, otheroptab, op0, newop1,
1628 target, unsignedp, methods, last);
1633 /* If this is a multiply, see if we can do a widening operation that
1634 takes operands of this mode and makes a wider mode. */
1636 if (binoptab == smul_optab
1637 && GET_MODE_WIDER_MODE (mode) != VOIDmode
1638 && ((optab_handler ((unsignedp ? umul_widen_optab : smul_widen_optab),
1639 GET_MODE_WIDER_MODE (mode))->insn_code)
1640 != CODE_FOR_nothing))
1642 temp = expand_binop (GET_MODE_WIDER_MODE (mode),
1643 unsignedp ? umul_widen_optab : smul_widen_optab,
1644 op0, op1, NULL_RTX, unsignedp, OPTAB_DIRECT);
/* Take back only the low part of the widened product.  */
1648 if (GET_MODE_CLASS (mode) == MODE_INT
1649 && TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode),
1650 GET_MODE_BITSIZE (GET_MODE (temp))))
1651 return gen_lowpart (mode, temp);
1653 return convert_to_mode (mode, temp, unsignedp);
1657 /* Look for a wider mode of the same class for which we think we
1658 can open-code the operation. Check for a widening multiply at the
1659 wider mode as well. */
1661 if (CLASS_HAS_WIDER_MODES_P (class)
1662 && methods != OPTAB_DIRECT && methods != OPTAB_LIB)
1663 for (wider_mode = GET_MODE_WIDER_MODE (mode);
1664 wider_mode != VOIDmode;
1665 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
1667 if (optab_handler (binoptab, wider_mode)->insn_code != CODE_FOR_nothing
1668 || (binoptab == smul_optab
1669 && GET_MODE_WIDER_MODE (wider_mode) != VOIDmode
1670 && ((optab_handler ((unsignedp ? umul_widen_optab
1671 : smul_widen_optab),
1672 GET_MODE_WIDER_MODE (wider_mode))->insn_code)
1673 != CODE_FOR_nothing)))
1675 rtx xop0 = op0, xop1 = op1;
1678 /* For certain integer operations, we need not actually extend
1679 the narrow operands, as long as we will truncate
1680 the results to the same narrowness. */
1682 if ((binoptab == ior_optab || binoptab == and_optab
1683 || binoptab == xor_optab
1684 || binoptab == add_optab || binoptab == sub_optab
1685 || binoptab == smul_optab || binoptab == ashl_optab)
1686 && class == MODE_INT)
1689 xop0 = avoid_expensive_constant (mode, binoptab,
1691 if (binoptab != ashl_optab)
1692 xop1 = avoid_expensive_constant (mode, binoptab,
1696 xop0 = widen_operand (xop0, wider_mode, mode, unsignedp, no_extend);
1698 /* The second operand of a shift must always be extended. */
1699 xop1 = widen_operand (xop1, wider_mode, mode, unsignedp,
1700 no_extend && binoptab != ashl_optab);
1702 temp = expand_binop (wider_mode, binoptab, xop0, xop1, NULL_RTX,
1703 unsignedp, OPTAB_DIRECT);
/* Narrow the wider result back to MODE, via convert_move when a plain
   lowpart truncation would not be a no-op.  */
1706 if (class != MODE_INT
1707 || !TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode),
1708 GET_MODE_BITSIZE (wider_mode)))
1711 target = gen_reg_rtx (mode);
1712 convert_move (target, temp, 0);
1716 return gen_lowpart (mode, temp);
1719 delete_insns_since (last);
1723 /* If operation is commutative,
1724 try to make the first operand a register.
1725 Even better, try to make it the same as the target.
1726 Also try to make the last operand a constant. */
1727 if (commutative_optab_p (binoptab)
1728 && swap_commutative_operands_with_target (target, op0, op1))
1735 /* These can be done a word at a time. */
1736 if ((binoptab == and_optab || binoptab == ior_optab || binoptab == xor_optab)
1737 && class == MODE_INT
1738 && GET_MODE_SIZE (mode) > UNITS_PER_WORD
1739 && optab_handler (binoptab, word_mode)->insn_code != CODE_FOR_nothing)
1745 /* If TARGET is the same as one of the operands, the REG_EQUAL note
1746 won't be accurate, so use a new target. */
1747 if (target == 0 || target == op0 || target == op1)
1748 target = gen_reg_rtx (mode);
1752 /* Do the actual arithmetic. */
1753 for (i = 0; i < GET_MODE_BITSIZE (mode) / BITS_PER_WORD; i++)
1755 rtx target_piece = operand_subword (target, i, 1, mode);
1756 rtx x = expand_binop (word_mode, binoptab,
1757 operand_subword_force (op0, i, mode),
1758 operand_subword_force (op1, i, mode),
1759 target_piece, unsignedp, next_methods);
1764 if (target_piece != x)
1765 emit_move_insn (target_piece, x);
1768 insns = get_insns ();
/* Only succeed if every word was expanded.  */
1771 if (i == GET_MODE_BITSIZE (mode) / BITS_PER_WORD)
1773 if (binoptab->code != UNKNOWN)
1775 = gen_rtx_fmt_ee (binoptab->code, mode,
1776 copy_rtx (op0), copy_rtx (op1));
1785 /* Synthesize double word shifts from single word shifts. */
1786 if ((binoptab == lshr_optab || binoptab == ashl_optab
1787 || binoptab == ashr_optab)
1788 && class == MODE_INT
1789 && (GET_CODE (op1) == CONST_INT || !optimize_size)
1790 && GET_MODE_SIZE (mode) == 2 * UNITS_PER_WORD
1791 && optab_handler (binoptab, word_mode)->insn_code != CODE_FOR_nothing
1792 && optab_handler (ashl_optab, word_mode)->insn_code != CODE_FOR_nothing
1793 && optab_handler (lshr_optab, word_mode)->insn_code != CODE_FOR_nothing)
1795 unsigned HOST_WIDE_INT shift_mask, double_shift_mask;
1796 enum machine_mode op1_mode;
1798 double_shift_mask = targetm.shift_truncation_mask (mode);
1799 shift_mask = targetm.shift_truncation_mask (word_mode);
1800 op1_mode = GET_MODE (op1) != VOIDmode ? GET_MODE (op1) : word_mode;
1802 /* Apply the truncation to constant shifts. */
1803 if (double_shift_mask > 0 && GET_CODE (op1) == CONST_INT)
1804 op1 = GEN_INT (INTVAL (op1) & double_shift_mask);
1806 if (op1 == CONST0_RTX (op1_mode))
1809 /* Make sure that this is a combination that expand_doubleword_shift
1810 can handle. See the comments there for details. */
1811 if (double_shift_mask == 0
1812 || (shift_mask == BITS_PER_WORD - 1
1813 && double_shift_mask == BITS_PER_WORD * 2 - 1))
1816 rtx into_target, outof_target;
1817 rtx into_input, outof_input;
1818 int left_shift, outof_word;
1820 /* If TARGET is the same as one of the operands, the REG_EQUAL note
1821 won't be accurate, so use a new target. */
1822 if (target == 0 || target == op0 || target == op1)
1823 target = gen_reg_rtx (mode);
1827 /* OUTOF_* is the word we are shifting bits away from, and
1828 INTO_* is the word that we are shifting bits towards, thus
1829 they differ depending on the direction of the shift and
1830 WORDS_BIG_ENDIAN. */
1832 left_shift = binoptab == ashl_optab;
1833 outof_word = left_shift ^ ! WORDS_BIG_ENDIAN;
1835 outof_target = operand_subword (target, outof_word, 1, mode);
1836 into_target = operand_subword (target, 1 - outof_word, 1, mode);
1838 outof_input = operand_subword_force (op0, outof_word, mode);
1839 into_input = operand_subword_force (op0, 1 - outof_word, mode);
1841 if (expand_doubleword_shift (op1_mode, binoptab,
1842 outof_input, into_input, op1,
1843 outof_target, into_target,
1844 unsignedp, next_methods, shift_mask))
1846 insns = get_insns ();
1856 /* Synthesize double word rotates from single word shifts. */
1857 if ((binoptab == rotl_optab || binoptab == rotr_optab)
1858 && class == MODE_INT
1859 && GET_CODE (op1) == CONST_INT
1860 && GET_MODE_SIZE (mode) == 2 * UNITS_PER_WORD
1861 && optab_handler (ashl_optab, word_mode)->insn_code != CODE_FOR_nothing
1862 && optab_handler (lshr_optab, word_mode)->insn_code != CODE_FOR_nothing)
1865 rtx into_target, outof_target;
1866 rtx into_input, outof_input;
1868 int shift_count, left_shift, outof_word;
1870 /* If TARGET is the same as one of the operands, the REG_EQUAL note
1871 won't be accurate, so use a new target. Do this also if target is not
1872 a REG, first because having a register instead may open optimization
1873 opportunities, and second because if target and op0 happen to be MEMs
1874 designating the same location, we would risk clobbering it too early
1875 in the code sequence we generate below. */
1876 if (target == 0 || target == op0 || target == op1 || ! REG_P (target))
1877 target = gen_reg_rtx (mode);
1881 shift_count = INTVAL (op1);
1883 /* OUTOF_* is the word we are shifting bits away from, and
1884 INTO_* is the word that we are shifting bits towards, thus
1885 they differ depending on the direction of the shift and
1886 WORDS_BIG_ENDIAN. */
1888 left_shift = (binoptab == rotl_optab);
1889 outof_word = left_shift ^ ! WORDS_BIG_ENDIAN;
1891 outof_target = operand_subword (target, outof_word, 1, mode);
1892 into_target = operand_subword (target, 1 - outof_word, 1, mode);
1894 outof_input = operand_subword_force (op0, outof_word, mode);
1895 into_input = operand_subword_force (op0, 1 - outof_word, mode);
1897 if (shift_count == BITS_PER_WORD)
1899 /* This is just a word swap. */
1900 emit_move_insn (outof_target, into_input);
1901 emit_move_insn (into_target, outof_input);
/* General case: each result word is the IOR of two opposite shifts.  */
1906 rtx into_temp1, into_temp2, outof_temp1, outof_temp2;
1907 rtx first_shift_count, second_shift_count;
1908 optab reverse_unsigned_shift, unsigned_shift;
1910 reverse_unsigned_shift = (left_shift ^ (shift_count < BITS_PER_WORD)
1911 ? lshr_optab : ashl_optab);
1913 unsigned_shift = (left_shift ^ (shift_count < BITS_PER_WORD)
1914 ? ashl_optab : lshr_optab);
1916 if (shift_count > BITS_PER_WORD)
1918 first_shift_count = GEN_INT (shift_count - BITS_PER_WORD);
1919 second_shift_count = GEN_INT (2 * BITS_PER_WORD - shift_count);
1923 first_shift_count = GEN_INT (BITS_PER_WORD - shift_count);
1924 second_shift_count = GEN_INT (shift_count);
1927 into_temp1 = expand_binop (word_mode, unsigned_shift,
1928 outof_input, first_shift_count,
1929 NULL_RTX, unsignedp, next_methods);
1930 into_temp2 = expand_binop (word_mode, reverse_unsigned_shift,
1931 into_input, second_shift_count,
1932 NULL_RTX, unsignedp, next_methods);
1934 if (into_temp1 != 0 && into_temp2 != 0)
1935 inter = expand_binop (word_mode, ior_optab, into_temp1, into_temp2,
1936 into_target, unsignedp, next_methods);
1940 if (inter != 0 && inter != into_target)
1941 emit_move_insn (into_target, inter);
1943 outof_temp1 = expand_binop (word_mode, unsigned_shift,
1944 into_input, first_shift_count,
1945 NULL_RTX, unsignedp, next_methods);
1946 outof_temp2 = expand_binop (word_mode, reverse_unsigned_shift,
1947 outof_input, second_shift_count,
1948 NULL_RTX, unsignedp, next_methods);
1950 if (inter != 0 && outof_temp1 != 0 && outof_temp2 != 0)
1951 inter = expand_binop (word_mode, ior_optab,
1952 outof_temp1, outof_temp2,
1953 outof_target, unsignedp, next_methods);
1955 if (inter != 0 && inter != outof_target)
1956 emit_move_insn (outof_target, inter);
1959 insns = get_insns ();
1969 /* These can be done a word at a time by propagating carries. */
1970 if ((binoptab == add_optab || binoptab == sub_optab)
1971 && class == MODE_INT
1972 && GET_MODE_SIZE (mode) >= 2 * UNITS_PER_WORD
1973 && optab_handler (binoptab, word_mode)->insn_code != CODE_FOR_nothing)
1976 optab otheroptab = binoptab == add_optab ? sub_optab : add_optab;
1977 const unsigned int nwords = GET_MODE_BITSIZE (mode) / BITS_PER_WORD;
1978 rtx carry_in = NULL_RTX, carry_out = NULL_RTX;
1979 rtx xop0, xop1, xtarget;
1981 /* We can handle either a 1 or -1 value for the carry. If STORE_FLAG
1982 value is one of those, use it. Otherwise, use 1 since it is the
1983 one easiest to get. */
1984 #if STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1
1985 int normalizep = STORE_FLAG_VALUE;
1990 /* Prepare the operands. */
1991 xop0 = force_reg (mode, op0);
1992 xop1 = force_reg (mode, op1);
1994 xtarget = gen_reg_rtx (mode);
1996 if (target == 0 || !REG_P (target))
1999 /* Indicate for flow that the entire target reg is being set. */
2001 emit_insn (gen_rtx_CLOBBER (VOIDmode, xtarget));
2003 /* Do the actual arithmetic. */
2004 for (i = 0; i < nwords; i++)
/* Words are processed from least to most significant so the carry can
   propagate; INDEX maps iteration order to storage order.  */
2006 int index = (WORDS_BIG_ENDIAN ? nwords - i - 1 : i);
2007 rtx target_piece = operand_subword (xtarget, index, 1, mode);
2008 rtx op0_piece = operand_subword_force (xop0, index, mode);
2009 rtx op1_piece = operand_subword_force (xop1, index, mode);
2012 /* Main add/subtract of the input operands. */
2013 x = expand_binop (word_mode, binoptab,
2014 op0_piece, op1_piece,
2015 target_piece, unsignedp, next_methods);
2021 /* Store carry from main add/subtract. */
2022 carry_out = gen_reg_rtx (word_mode);
2023 carry_out = emit_store_flag_force (carry_out,
2024 (binoptab == add_optab
2027 word_mode, 1, normalizep);
2034 /* Add/subtract previous carry to main result. */
2035 newx = expand_binop (word_mode,
2036 normalizep == 1 ? binoptab : otheroptab,
2038 NULL_RTX, 1, next_methods);
2042 /* Get out carry from adding/subtracting carry in. */
2043 rtx carry_tmp = gen_reg_rtx (word_mode);
2044 carry_tmp = emit_store_flag_force (carry_tmp,
2045 (binoptab == add_optab
2048 word_mode, 1, normalizep);
2050 /* Logical-ior the two poss. carry together. */
2051 carry_out = expand_binop (word_mode, ior_optab,
2052 carry_out, carry_tmp,
2053 carry_out, 0, next_methods);
2057 emit_move_insn (target_piece, newx);
2061 if (x != target_piece)
2062 emit_move_insn (target_piece, x);
2065 carry_in = carry_out;
/* Only succeed if the loop completed for every word.  */
2068 if (i == GET_MODE_BITSIZE (mode) / (unsigned) BITS_PER_WORD)
2070 if (optab_handler (mov_optab, mode)->insn_code != CODE_FOR_nothing
2071 || ! rtx_equal_p (target, xtarget))
2073 rtx temp = emit_move_insn (target, xtarget);
2075 set_unique_reg_note (temp,
2077 gen_rtx_fmt_ee (binoptab->code, mode,
2088 delete_insns_since (last);
2091 /* Attempt to synthesize double word multiplies using a sequence of word
2092 mode multiplications. We first attempt to generate a sequence using a
2093 more efficient unsigned widening multiply, and if that fails we then
2094 try using a signed widening multiply. */
2096 if (binoptab == smul_optab
2097 && class == MODE_INT
2098 && GET_MODE_SIZE (mode) == 2 * UNITS_PER_WORD
2099 && optab_handler (smul_optab, word_mode)->insn_code != CODE_FOR_nothing
2100 && optab_handler (add_optab, word_mode)->insn_code != CODE_FOR_nothing)
2102 rtx product = NULL_RTX;
2104 if (optab_handler (umul_widen_optab, mode)->insn_code
2105 != CODE_FOR_nothing)
2107 product = expand_doubleword_mult (mode, op0, op1, target,
2110 delete_insns_since (last);
2113 if (product == NULL_RTX
2114 && optab_handler (smul_widen_optab, mode)->insn_code
2115 != CODE_FOR_nothing)
2117 product = expand_doubleword_mult (mode, op0, op1, target,
2120 delete_insns_since (last);
2123 if (product != NULL_RTX)
2125 if (optab_handler (mov_optab, mode)->insn_code != CODE_FOR_nothing)
2127 temp = emit_move_insn (target ? target : product, product);
2128 set_unique_reg_note (temp,
2130 gen_rtx_fmt_ee (MULT, mode,
2138 /* It can't be open-coded in this mode.
2139 Use a library call if one is available and caller says that's ok. */
2141 libfunc = optab_libfunc (binoptab, mode);
2143 && (methods == OPTAB_LIB || methods == OPTAB_LIB_WIDEN))
2147 enum machine_mode op1_mode = mode;
/* Shift libcalls take the count in the target's designated mode.  */
2152 if (shift_optab_p (binoptab))
2154 op1_mode = targetm.libgcc_shift_count_mode ();
2155 /* Specify unsigned here,
2156 since negative shift counts are meaningless. */
2157 op1x = convert_to_mode (op1_mode, op1, 1);
2160 if (GET_MODE (op0) != VOIDmode
2161 && GET_MODE (op0) != mode)
2162 op0 = convert_to_mode (mode, op0, unsignedp);
2164 /* Pass 1 for NO_QUEUE so we don't lose any increments
2165 if the libcall is cse'd or moved. */
2166 value = emit_library_call_value (libfunc,
2167 NULL_RTX, LCT_CONST, mode, 2,
2168 op0, mode, op1x, op1_mode);
2170 insns = get_insns ();
2173 target = gen_reg_rtx (mode);
2174 emit_libcall_block (insns, target, value,
2175 gen_rtx_fmt_ee (binoptab->code, mode, op0, op1));
2180 delete_insns_since (last);
2182 /* It can't be done in this mode. Can we do it in a wider mode? */
2184 if (! (methods == OPTAB_WIDEN || methods == OPTAB_LIB_WIDEN
2185 || methods == OPTAB_MUST_WIDEN))
2187 /* Caller says, don't even try. */
2188 delete_insns_since (entry_last);
2192 /* Compute the value of METHODS to pass to recursive calls.
2193 Don't allow widening to be tried recursively. */
2195 methods = (methods == OPTAB_LIB_WIDEN ? OPTAB_LIB : OPTAB_DIRECT);
2197 /* Look for a wider mode of the same class for which it appears we can do
2200 if (CLASS_HAS_WIDER_MODES_P (class))
2202 for (wider_mode = GET_MODE_WIDER_MODE (mode);
2203 wider_mode != VOIDmode;
2204 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
2206 if ((optab_handler (binoptab, wider_mode)->insn_code
2207 != CODE_FOR_nothing)
2208 || (methods == OPTAB_LIB
2209 && optab_libfunc (binoptab, wider_mode)))
2211 rtx xop0 = op0, xop1 = op1;
2214 /* For certain integer operations, we need not actually extend
2215 the narrow operands, as long as we will truncate
2216 the results to the same narrowness. */
2218 if ((binoptab == ior_optab || binoptab == and_optab
2219 || binoptab == xor_optab
2220 || binoptab == add_optab || binoptab == sub_optab
2221 || binoptab == smul_optab || binoptab == ashl_optab)
2222 && class == MODE_INT)
2225 xop0 = widen_operand (xop0, wider_mode, mode,
2226 unsignedp, no_extend);
2228 /* The second operand of a shift must always be extended. */
2229 xop1 = widen_operand (xop1, wider_mode, mode, unsignedp,
2230 no_extend && binoptab != ashl_optab);
2232 temp = expand_binop (wider_mode, binoptab, xop0, xop1, NULL_RTX,
2233 unsignedp, methods);
2236 if (class != MODE_INT
2237 || !TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode),
2238 GET_MODE_BITSIZE (wider_mode)))
2241 target = gen_reg_rtx (mode);
2242 convert_move (target, temp, 0);
2246 return gen_lowpart (mode, temp);
2249 delete_insns_since (last);
/* All strategies failed; discard everything emitted by this call.  */
2254 delete_insns_since (entry_last);
2258 /* Expand a binary operator which has both signed and unsigned forms.
2259 UOPTAB is the optab for unsigned operations, and SOPTAB is for
2262 If we widen unsigned operands, we may use a signed wider operation instead
2263 of an unsigned wider operation, since the result would be the same. */
2266 sign_expand_binop (enum machine_mode mode, optab uoptab, optab soptab,
2267 rtx op0, rtx op1, rtx target, int unsignedp,
2268 enum optab_methods methods)
/* NOTE(review): braces and a few statements are elided in this excerpt;
   code kept verbatim.  Strategy: try the natural optab directly, then
   widened signed, then widened unsigned, then libcalls, escalating only
   as far as METHODS permits.  */
2271 optab direct_optab = unsignedp ? uoptab : soptab;
2272 struct optab wide_soptab;
2274 /* Do it without widening, if possible. */
2275 temp = expand_binop (mode, direct_optab, op0, op1, target,
2276 unsignedp, OPTAB_DIRECT);
2277 if (temp || methods == OPTAB_DIRECT)
2280 /* Try widening to a signed int. Make a fake signed optab that
2281 hides any signed insn for direct use. */
2282 wide_soptab = *soptab;
/* Disable the mode's own insn in the copy so expand_binop is forced to
   widen rather than retry the direct insn we already tried.  */
2283 optab_handler (&wide_soptab, mode)->insn_code = CODE_FOR_nothing;
2284 /* We don't want to generate new hash table entries from this fake
2286 wide_soptab.libcall_gen = NULL;
2288 temp = expand_binop (mode, &wide_soptab, op0, op1, target,
2289 unsignedp, OPTAB_WIDEN);
2291 /* For unsigned operands, try widening to an unsigned int. */
2292 if (temp == 0 && unsignedp)
2293 temp = expand_binop (mode, uoptab, op0, op1, target,
2294 unsignedp, OPTAB_WIDEN);
2295 if (temp || methods == OPTAB_WIDEN)
2298 /* Use the right width lib call if that exists. */
2299 temp = expand_binop (mode, direct_optab, op0, op1, target, unsignedp, OPTAB_LIB);
2300 if (temp || methods == OPTAB_LIB)
2303 /* Must widen and use a lib call, use either signed or unsigned. */
2304 temp = expand_binop (mode, &wide_soptab, op0, op1, target,
2305 unsignedp, methods);
2309 return expand_binop (mode, uoptab, op0, op1, target,
2310 unsignedp, methods);
2314 /* Generate code to perform an operation specified by UNOPPTAB
2315 on operand OP0, with two results to TARG0 and TARG1.
2316 We assume that the order of the operands for the instruction
2317 is TARG0, TARG1, OP0.
2319 Either TARG0 or TARG1 may be zero, but what that means is that
2320 the result is not actually wanted. We will generate it into
2321 a dummy pseudo-reg and discard it. They may not both be zero.
2323 Returns 1 if this operation can be performed; 0 if not. */
2326 expand_twoval_unop (optab unoptab, rtx op0, rtx targ0, rtx targ1,
2329 enum machine_mode mode = GET_MODE (targ0 ? targ0 : targ1);
2330 enum mode_class class;
2331 enum machine_mode wider_mode;
2332 rtx entry_last = get_last_insn ();
2335 class = GET_MODE_CLASS (mode);
/* Replace a missing target with a scratch pseudo of MODE.  */
2338 targ0 = gen_reg_rtx (mode);
2340 targ1 = gen_reg_rtx (mode);
2342 /* Record where to go back to if we fail. */
2343 last = get_last_insn ();
/* First choice: a direct insn for MODE.  Operand 2 is the input;
   operands 0 and 1 are the two outputs.  */
2345 if (optab_handler (unoptab, mode)->insn_code != CODE_FOR_nothing)
2347 int icode = (int) optab_handler (unoptab, mode)->insn_code;
2348 enum machine_mode mode0 = insn_data[icode].operand[2].mode;
2352 if (GET_MODE (xop0) != VOIDmode
2353 && GET_MODE (xop0) != mode0)
2354 xop0 = convert_to_mode (mode0, xop0, unsignedp);
2356 /* Now, if insn doesn't accept these operands, put them into pseudos. */
2357 if (!insn_data[icode].operand[2].predicate (xop0, mode0))
2358 xop0 = copy_to_mode_reg (mode0, xop0);
2360 /* We could handle this, but we should always be called with a pseudo
2361 for our targets and all insns should take them as outputs. */
2362 gcc_assert (insn_data[icode].operand[0].predicate (targ0, mode));
2363 gcc_assert (insn_data[icode].operand[1].predicate (targ1, mode));
2365 pat = GEN_FCN (icode) (targ0, targ1, xop0);
/* Direct attempt failed: discard the insns emitted since LAST.  */
2372 delete_insns_since (last);
2375 /* It can't be done in this mode. Can we do it in a wider mode? */
2377 if (CLASS_HAS_WIDER_MODES_P (class))
2379 for (wider_mode = GET_MODE_WIDER_MODE (mode);
2380 wider_mode != VOIDmode;
2381 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
2383 if (optab_handler (unoptab, wider_mode)->insn_code
2384 != CODE_FOR_nothing)
/* Recurse in the wider mode, then narrow both results back.  */
2386 rtx t0 = gen_reg_rtx (wider_mode);
2387 rtx t1 = gen_reg_rtx (wider_mode);
2388 rtx cop0 = convert_modes (wider_mode, mode, op0, unsignedp);
2390 if (expand_twoval_unop (unoptab, cop0, t0, t1, unsignedp))
2392 convert_move (targ0, t0, unsignedp);
2393 convert_move (targ1, t1, unsignedp);
2397 delete_insns_since (last);
/* Every attempt failed: remove all emitted insns and report failure.  */
2402 delete_insns_since (entry_last);
2406 /* Generate code to perform an operation specified by BINOPTAB
2407 on operands OP0 and OP1, with two results to TARG1 and TARG2.
2408 We assume that the order of the operands for the instruction
2409 is TARG0, OP0, OP1, TARG1, which would fit a pattern like
2410 [(set TARG0 (operate OP0 OP1)) (set TARG1 (operate ...))].
2412 Either TARG0 or TARG1 may be zero, but what that means is that
2413 the result is not actually wanted. We will generate it into
2414 a dummy pseudo-reg and discard it. They may not both be zero.
2416 Returns 1 if this operation can be performed; 0 if not. */
2419 expand_twoval_binop (optab binoptab, rtx op0, rtx op1, rtx targ0, rtx targ1,
2422 enum machine_mode mode = GET_MODE (targ0 ? targ0 : targ1);
2423 enum mode_class class;
2424 enum machine_mode wider_mode;
2425 rtx entry_last = get_last_insn ();
2428 class = GET_MODE_CLASS (mode);
/* Replace a missing target with a scratch pseudo of MODE.  */
2431 targ0 = gen_reg_rtx (mode);
2433 targ1 = gen_reg_rtx (mode);
2435 /* Record where to go back to if we fail. */
2436 last = get_last_insn ();
/* First choice: a direct insn.  Operands 1 and 2 are the inputs,
   operands 0 and 3 the two outputs.  */
2438 if (optab_handler (binoptab, mode)->insn_code != CODE_FOR_nothing)
2440 int icode = (int) optab_handler (binoptab, mode)->insn_code;
2441 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
2442 enum machine_mode mode1 = insn_data[icode].operand[2].mode;
2444 rtx xop0 = op0, xop1 = op1;
2446 /* If we are optimizing, force expensive constants into a register. */
2447 xop0 = avoid_expensive_constant (mode0, binoptab, xop0, unsignedp);
2448 xop1 = avoid_expensive_constant (mode1, binoptab, xop1, unsignedp);
2450 /* In case the insn wants input operands in modes different from
2451 those of the actual operands, convert the operands. It would
2452 seem that we don't need to convert CONST_INTs, but we do, so
2453 that they're properly zero-extended, sign-extended or truncated
2456 if (GET_MODE (op0) != mode0 && mode0 != VOIDmode)
2457 xop0 = convert_modes (mode0,
2458 GET_MODE (op0) != VOIDmode
2463 if (GET_MODE (op1) != mode1 && mode1 != VOIDmode)
2464 xop1 = convert_modes (mode1,
2465 GET_MODE (op1) != VOIDmode
2470 /* Now, if insn doesn't accept these operands, put them into pseudos. */
2471 if (!insn_data[icode].operand[1].predicate (xop0, mode0))
2472 xop0 = copy_to_mode_reg (mode0, xop0);
2474 if (!insn_data[icode].operand[2].predicate (xop1, mode1))
2475 xop1 = copy_to_mode_reg (mode1, xop1);
2477 /* We could handle this, but we should always be called with a pseudo
2478 for our targets and all insns should take them as outputs. */
2479 gcc_assert (insn_data[icode].operand[0].predicate (targ0, mode));
2480 gcc_assert (insn_data[icode].operand[3].predicate (targ1, mode));
2482 pat = GEN_FCN (icode) (targ0, xop0, xop1, targ1);
/* Direct attempt failed: discard the insns emitted since LAST.  */
2489 delete_insns_since (last);
2492 /* It can't be done in this mode. Can we do it in a wider mode? */
2494 if (CLASS_HAS_WIDER_MODES_P (class))
2496 for (wider_mode = GET_MODE_WIDER_MODE (mode);
2497 wider_mode != VOIDmode;
2498 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
2500 if (optab_handler (binoptab, wider_mode)->insn_code
2501 != CODE_FOR_nothing)
/* Recurse in the wider mode, then narrow both results back.  */
2503 rtx t0 = gen_reg_rtx (wider_mode);
2504 rtx t1 = gen_reg_rtx (wider_mode);
2505 rtx cop0 = convert_modes (wider_mode, mode, op0, unsignedp);
2506 rtx cop1 = convert_modes (wider_mode, mode, op1, unsignedp);
2508 if (expand_twoval_binop (binoptab, cop0, cop1,
2511 convert_move (targ0, t0, unsignedp);
2512 convert_move (targ1, t1, unsignedp);
2516 delete_insns_since (last);
/* Every attempt failed: remove all emitted insns and report failure.  */
2521 delete_insns_since (entry_last);
2525 /* Expand the two-valued library call indicated by BINOPTAB, but
2526 preserve only one of the values. If TARG0 is non-NULL, the first
2527 value is placed into TARG0; otherwise the second value is placed
2528 into TARG1. Exactly one of TARG0 and TARG1 must be non-NULL. The
2529 value stored into TARG0 or TARG1 is equivalent to (CODE OP0 OP1).
2530 This routine assumes that the value returned by the library call is
2531 as if the return value was of an integral mode twice as wide as the
2532 mode of OP0. Returns 1 if the call was successful. */
2535 expand_twoval_binop_libfunc (optab binoptab, rtx op0, rtx op1,
2536 rtx targ0, rtx targ1, enum rtx_code code)
2538 enum machine_mode mode;
2539 enum machine_mode libval_mode;
2544 /* Exactly one of TARG0 or TARG1 should be non-NULL. */
2545 gcc_assert (!targ0 != !targ1);
2547 mode = GET_MODE (op0);
2548 libfunc = optab_libfunc (binoptab, mode);
2552 /* The value returned by the library function will have twice as
2553 many bits as the nominal MODE. */
2554 libval_mode = smallest_mode_for_size (2 * GET_MODE_BITSIZE (mode),
2557 libval = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
2561 /* Get the part of VAL containing the value that we want. */
/* Subreg offset 0 selects the first value (TARG0); offset
   GET_MODE_SIZE (mode) selects the second (TARG1).  */
2562 libval = simplify_gen_subreg (mode, libval, libval_mode,
2563 targ0 ? 0 : GET_MODE_SIZE (mode));
2564 insns = get_insns ();
2566 /* Move the into the desired location. */
/* emit_libcall_block also attaches a REG_EQUAL note describing the
   result as (CODE OP0 OP1), for later CSE.  */
2567 emit_libcall_block (insns, targ0 ? targ0 : targ1, libval,
2568 gen_rtx_fmt_ee (code, mode, op0, op1));
2574 /* Wrapper around expand_unop which takes an rtx code to specify
2575 the operation to perform, not an optab pointer. All other
2576 arguments are the same. */
2578 expand_simple_unop (enum machine_mode mode, enum rtx_code code, rtx op0,
2579 rtx target, int unsignedp)
/* Map the rtx CODE to its optab through the code_to_optab table,
   then delegate all the real work to expand_unop.  */
2581 optab unop = code_to_optab[(int) code];
2584 return expand_unop (mode, unop, op0, target, unsignedp);
/* Widening clz: zero-extend X into a wider mode that has a clz insn,
   take clz there, and subtract the count of extra high-order bits
   introduced by the extension (per the formula below).  */
2590 (clz:wide (zero_extend:wide x)) - ((width wide) - (width narrow)). */
2592 widen_clz (enum machine_mode mode, rtx op0, rtx target)
2594 enum mode_class class = GET_MODE_CLASS (mode);
2595 if (CLASS_HAS_WIDER_MODES_P (class))
2597 enum machine_mode wider_mode;
2598 for (wider_mode = GET_MODE_WIDER_MODE (mode);
2599 wider_mode != VOIDmode;
2600 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
2602 if (optab_handler (clz_optab, wider_mode)->insn_code
2603 != CODE_FOR_nothing)
2605 rtx xop0, temp, last;
2607 last = get_last_insn ();
2610 target = gen_reg_rtx (mode);
/* Zero-extend (unsignedp == true, no_extend == false).  */
2611 xop0 = widen_operand (op0, wider_mode, mode, true, false);
2612 temp = expand_unop (wider_mode, clz_optab, xop0, NULL_RTX, true);
/* Correct for the extra leading zeros of the wider mode.  */
2614 temp = expand_binop (wider_mode, sub_optab, temp,
2615 GEN_INT (GET_MODE_BITSIZE (wider_mode)
2616 - GET_MODE_BITSIZE (mode)),
2617 target, true, OPTAB_DIRECT);
2619 delete_insns_since (last);
2628 /* Try calculating clz of a double-word quantity as two clz's of word-sized
2629 quantities, choosing which based on whether the high word is nonzero. */
2631 expand_doubleword_clz (enum machine_mode mode, rtx op0, rtx target)
2633 rtx xop0 = force_reg (mode, op0);
2634 rtx subhi = gen_highpart (word_mode, xop0);
2635 rtx sublo = gen_lowpart (word_mode, xop0);
2636 rtx hi0_label = gen_label_rtx ();
2637 rtx after_label = gen_label_rtx ();
2638 rtx seq, temp, result;
2640 /* If we were not given a target, use a word_mode register, not a
2641 'mode' register. The result will fit, and nobody is expecting
2642 anything bigger (the return type of __builtin_clz* is int). */
2644 target = gen_reg_rtx (word_mode);
2646 /* In any case, write to a word_mode scratch in both branches of the
2647 conditional, so we can ensure there is a single move insn setting
2648 'target' to tag a REG_EQUAL note on. */
2649 result = gen_reg_rtx (word_mode);
2653 /* If the high word is not equal to zero,
2654 then clz of the full value is clz of the high word. */
2655 emit_cmp_and_jump_insns (subhi, CONST0_RTX (word_mode), EQ, 0,
2656 word_mode, true, hi0_label);
2658 temp = expand_unop_direct (word_mode, clz_optab, subhi, result, true);
2663 convert_move (result, temp, true);
2665 emit_jump_insn (gen_jump (after_label));
2668 /* Else clz of the full value is clz of the low word plus the number
2669 of bits in the high word. */
2670 emit_label (hi0_label);
2672 temp = expand_unop_direct (word_mode, clz_optab, sublo, 0, true);
2675 temp = expand_binop (word_mode, add_optab, temp,
2676 GEN_INT (GET_MODE_BITSIZE (word_mode)),
2677 result, true, OPTAB_DIRECT);
2681 convert_move (result, temp, true);
/* Join point: copy the scratch into TARGET with a single move, which
   carries the REG_EQUAL (CLZ ...) note added below.  */
2683 emit_label (after_label);
2684 convert_move (target, result, true);
2689 add_equal_note (seq, target, CLZ, xop0, 0);
/* Widening bswap: byteswap in a wider mode leaves the interesting
   bytes in the high end of the wide result, so shift them back down
   and take the lowpart (per the formula below).  */
2701 (lshiftrt:wide (bswap:wide x) ((width wide) - (width narrow))). */
2703 widen_bswap (enum machine_mode mode, rtx op0, rtx target)
2705 enum mode_class class = GET_MODE_CLASS (mode);
2706 enum machine_mode wider_mode;
2709 if (!CLASS_HAS_WIDER_MODES_P (class))
2712 for (wider_mode = GET_MODE_WIDER_MODE (mode);
2713 wider_mode != VOIDmode;
2714 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
2715 if (optab_handler (bswap_optab, wider_mode)->insn_code != CODE_FOR_nothing)
2720 last = get_last_insn ();
/* The high bytes are don't-care here (no_extend == true).  */
2722 x = widen_operand (op0, wider_mode, mode, true, true);
2723 x = expand_unop (wider_mode, bswap_optab, x, NULL_RTX, true);
2726 x = expand_shift (RSHIFT_EXPR, wider_mode, x,
2727 size_int (GET_MODE_BITSIZE (wider_mode)
2728 - GET_MODE_BITSIZE (mode)),
2734 target = gen_reg_rtx (mode);
2735 emit_move_insn (target, gen_lowpart (mode, x));
2738 delete_insns_since (last);
2743 /* Try calculating bswap as two bswaps of two word-sized operands. */
2746 expand_doubleword_bswap (enum machine_mode mode, rtx op, rtx target)
/* Byteswap each word of OP; note the cross: the swapped word 0 (T1)
   becomes word 1 of the result and the swapped word 1 (T0) becomes
   word 0, which swaps the two words as a full-width bswap requires.  */
2750 t1 = expand_unop (word_mode, bswap_optab,
2751 operand_subword_force (op, 0, mode), NULL_RTX, true);
2752 t0 = expand_unop (word_mode, bswap_optab,
2753 operand_subword_force (op, 1, mode), NULL_RTX, true);
2756 target = gen_reg_rtx (mode);
/* Clobber TARGET first so the per-word stores are not treated as a
   partial update of a live value.  */
2758 emit_insn (gen_rtx_CLOBBER (VOIDmode, target));
2759 emit_move_insn (operand_subword (target, 0, 1, mode), t0);
2760 emit_move_insn (operand_subword (target, 1, 1, mode), t1);
2765 /* Try calculating (parity x) as (and (popcount x) 1), where
2766 popcount can also be done in a wider mode. */
2768 expand_parity (enum machine_mode mode, rtx op0, rtx target)
2770 enum mode_class class = GET_MODE_CLASS (mode);
2771 if (CLASS_HAS_WIDER_MODES_P (class))
2773 enum machine_mode wider_mode;
/* Unlike widen_clz, this loop starts at MODE itself, so a same-mode
   popcount insn is also acceptable.  */
2774 for (wider_mode = mode; wider_mode != VOIDmode;
2775 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
2777 if (optab_handler (popcount_optab, wider_mode)->insn_code
2778 != CODE_FOR_nothing)
2780 rtx xop0, temp, last;
2782 last = get_last_insn ();
2785 target = gen_reg_rtx (mode);
2786 xop0 = widen_operand (op0, wider_mode, mode, true, false);
2787 temp = expand_unop (wider_mode, popcount_optab, xop0, NULL_RTX,
/* Parity is the low bit of the population count.  */
2790 temp = expand_binop (wider_mode, and_optab, temp, const1_rtx,
2791 target, true, OPTAB_DIRECT);
2793 delete_insns_since (last);
2802 /* Try calculating ctz(x) as K - clz(x & -x) ,
2803 where K is GET_MODE_BITSIZE(mode) - 1.
2805 Both __builtin_ctz and __builtin_clz are undefined at zero, so we
2806 don't have to worry about what the hardware does in that case. (If
2807 the clz instruction produces the usual value at 0, which is K, the
2808 result of this code sequence will be -1; expand_ffs, below, relies
2809 on this. It might be nice to have it be K instead, for consistency
2810 with the (very few) processors that provide a ctz with a defined
2811 value, but that would take one more instruction, and it would be
2812 less convenient for expand_ffs anyway. */
2815 expand_ctz (enum machine_mode mode, rtx op0, rtx target)
/* Requires a clz insn in this very mode; no widening fallback here.  */
2819 if (optab_handler (clz_optab, mode)->insn_code == CODE_FOR_nothing)
/* x & -x isolates the lowest set bit of X ...  */
2824 temp = expand_unop_direct (mode, neg_optab, op0, NULL_RTX, true);
2826 temp = expand_binop (mode, and_optab, op0, temp, NULL_RTX,
2827 true, OPTAB_DIRECT);
/* ... and K - clz of that single bit is its index.  */
2829 temp = expand_unop_direct (mode, clz_optab, temp, NULL_RTX, true);
2831 temp = expand_binop (mode, sub_optab, GEN_INT (GET_MODE_BITSIZE (mode) - 1),
2833 true, OPTAB_DIRECT);
2843 add_equal_note (seq, temp, CTZ, op0, 0);
2849 /* Try calculating ffs(x) using ctz(x) if we have that instruction, or
2850 else with the sequence used by expand_clz.
2852 The ffs builtin promises to return zero for a zero value and ctz/clz
2853 may have an undefined value in that case. If they do not give us a
2854 convenient value, we have to generate a test and branch. */
2856 expand_ffs (enum machine_mode mode, rtx op0, rtx target)
2858 HOST_WIDE_INT val = 0;
2859 bool defined_at_zero = false;
/* Prefer a real ctz insn; otherwise synthesize ctz from clz.  */
2862 if (optab_handler (ctz_optab, mode)->insn_code != CODE_FOR_nothing)
2866 temp = expand_unop_direct (mode, ctz_optab, op0, 0, true);
2870 defined_at_zero = (CTZ_DEFINED_VALUE_AT_ZERO (mode, val) == 2);
2872 else if (optab_handler (clz_optab, mode)->insn_code != CODE_FOR_nothing)
2875 temp = expand_ctz (mode, op0, 0);
2879 if (CLZ_DEFINED_VALUE_AT_ZERO (mode, val) == 2)
/* expand_ctz computed K - clz, so translate clz's value at zero
   into the corresponding ctz-sequence value.  */
2881 defined_at_zero = true;
2882 val = (GET_MODE_BITSIZE (mode) - 1) - val;
2888 if (defined_at_zero && val == -1)
2889 /* No correction needed at zero. */;
2892 /* We don't try to do anything clever with the situation found
2893 on some processors (eg Alpha) where ctz(0:mode) ==
2894 bitsize(mode). If someone can think of a way to send N to -1
2895 and leave alone all values in the range 0..N-1 (where N is a
2896 power of two), cheaper than this test-and-branch, please add it.
2898 The test-and-branch is done after the operation itself, in case
2899 the operation sets condition codes that can be recycled for this.
2900 (This is true on i386, for instance.) */
2902 rtx nonzero_label = gen_label_rtx ();
2903 emit_cmp_and_jump_insns (op0, CONST0_RTX (mode), NE, 0,
2904 mode, true, nonzero_label);
/* OP0 was zero: force the pre-increment value to -1.  */
2906 convert_move (temp, GEN_INT (-1), false);
2907 emit_label (nonzero_label);
2910 /* temp now has a value in the range -1..bitsize-1. ffs is supposed
2911 to produce a value in the range 0..bitsize. */
2912 temp = expand_binop (mode, add_optab, temp, GEN_INT (1),
2913 target, false, OPTAB_DIRECT);
2920 add_equal_note (seq, temp, FFS, op0, 0);
2929 /* Extract the OMODE lowpart from VAL, which has IMODE. Under certain
2930 conditions, VAL may already be a SUBREG against which we cannot generate
2931 a further SUBREG. In this case, we expect forcing the value into a
2932 register will work around the situation. */
2935 lowpart_subreg_maybe_copy (enum machine_mode omode, rtx val,
2936 enum machine_mode imode)
2939 ret = lowpart_subreg (omode, val, imode);
/* NOTE(review): the retry below presumably runs only when the first
   lowpart_subreg returned NULL -- the guard line is elided in this
   excerpt; confirm against the full file.  */
2942 val = force_reg (imode, val);
2943 ret = lowpart_subreg (omode, val, imode);
2944 gcc_assert (ret != NULL);
2949 /* Expand a floating point absolute value or negation operation via a
2950 logical operation on the sign bit. */
/* CODE is ABS (clear the sign bit with AND ~mask) or NEG (flip it
   with XOR mask).  Works on the bit image of OP0 in an integer mode,
   either whole (if it fits in a word) or word by word.  */
2953 expand_absneg_bit (enum rtx_code code, enum machine_mode mode,
2954 rtx op0, rtx target)
2956 const struct real_format *fmt;
2957 int bitpos, word, nwords, i;
2958 enum machine_mode imode;
2959 HOST_WIDE_INT hi, lo;
2962 /* The format has to have a simple sign bit. */
2963 fmt = REAL_MODE_FORMAT (mode);
2967 bitpos = fmt->signbit_rw;
2971 /* Don't create negative zeros if the format doesn't support them. */
2972 if (code == NEG && !fmt->has_signed_zero)
/* Single-word case: operate on the whole value in integer mode IMODE.  */
2975 if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
2977 imode = int_mode_for_mode (mode);
2978 if (imode == BLKmode)
/* Multi-word case: locate the word and bit holding the sign.  */
2987 if (FLOAT_WORDS_BIG_ENDIAN)
2988 word = (GET_MODE_BITSIZE (mode) - bitpos) / BITS_PER_WORD;
2990 word = bitpos / BITS_PER_WORD;
2991 bitpos = bitpos % BITS_PER_WORD;
2992 nwords = (GET_MODE_BITSIZE (mode) + BITS_PER_WORD - 1) / BITS_PER_WORD;
/* Build the sign-bit mask as a HOST_WIDE_INT pair (LO, HI).  */
2995 if (bitpos < HOST_BITS_PER_WIDE_INT)
2998 lo = (HOST_WIDE_INT) 1 << bitpos;
3002 hi = (HOST_WIDE_INT) 1 << (bitpos - HOST_BITS_PER_WIDE_INT);
3008 if (target == 0 || target == op0)
3009 target = gen_reg_rtx (mode);
3015 for (i = 0; i < nwords; ++i)
3017 rtx targ_piece = operand_subword (target, i, 1, mode);
3018 rtx op0_piece = operand_subword_force (op0, i, mode);
/* NOTE(review): presumably only the word containing the sign bit
   takes the AND/XOR branch; the others are copied unchanged.  The
   selecting condition is elided in this excerpt.  */
3022 temp = expand_binop (imode, code == ABS ? and_optab : xor_optab,
3024 immed_double_const (lo, hi, imode),
3025 targ_piece, 1, OPTAB_LIB_WIDEN);
3026 if (temp != targ_piece)
3027 emit_move_insn (targ_piece, temp);
3030 emit_move_insn (targ_piece, op0_piece);
3033 insns = get_insns ();
/* Single-word path: one AND/XOR on the integer image, then view the
   result back in MODE.  */
3040 temp = expand_binop (imode, code == ABS ? and_optab : xor_optab,
3041 gen_lowpart (imode, op0),
3042 immed_double_const (lo, hi, imode),
3043 gen_lowpart (imode, target), 1, OPTAB_LIB_WIDEN);
3044 target = lowpart_subreg_maybe_copy (mode, temp, imode);
3046 set_unique_reg_note (get_last_insn (), REG_EQUAL,
3047 gen_rtx_fmt_e (code, mode, copy_rtx (op0)));
3053 /* As expand_unop, but will fail rather than attempt the operation in a
3054 different mode or with a libcall. */
3056 expand_unop_direct (enum machine_mode mode, optab unoptab, rtx op0, rtx target,
3059 if (optab_handler (unoptab, mode)->insn_code != CODE_FOR_nothing)
3061 int icode = (int) optab_handler (unoptab, mode)->insn_code;
3062 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
3064 rtx last = get_last_insn ();
3070 temp = gen_reg_rtx (mode);
3072 if (GET_MODE (xop0) != VOIDmode
3073 && GET_MODE (xop0) != mode0)
3074 xop0 = convert_to_mode (mode0, xop0, unsignedp);
3076 /* Now, if insn doesn't accept our operand, put it into a pseudo. */
3078 if (!insn_data[icode].operand[1].predicate (xop0, mode0))
3079 xop0 = copy_to_mode_reg (mode0, xop0);
3081 if (!insn_data[icode].operand[0].predicate (temp, mode))
3082 temp = gen_reg_rtx (mode);
3084 pat = GEN_FCN (icode) (temp, xop0);
/* If the generated sequence is more than one insn and we cannot tag a
   REG_EQUAL note on it, throw it away and let expand_unop start over
   rather than emit an unannotated multi-insn expansion.  */
3087 if (INSN_P (pat) && NEXT_INSN (pat) != NULL_RTX
3088 && ! add_equal_note (pat, temp, unoptab->code, xop0, NULL_RTX))
3090 delete_insns_since (last);
3091 return expand_unop (mode, unoptab, op0, NULL_RTX, unsignedp);
/* No usable insn: roll back anything emitted and fail.  */
3099 delete_insns_since (last);
3104 /* Generate code to perform an operation specified by UNOPTAB
3105 on operand OP0, with result having machine-mode MODE.
3107 UNSIGNEDP is for the case where we have to widen the operands
3108 to perform the operation. It says to use zero-extension.
3110 If TARGET is nonzero, the value
3111 is generated there, if it is convenient to do so.
3112 In all cases an rtx is returned for the locus of the value;
3113 this may or may not be TARGET. */
3116 expand_unop (enum machine_mode mode, optab unoptab, rtx op0, rtx target,
3119 enum mode_class class = GET_MODE_CLASS (mode);
3120 enum machine_mode wider_mode;
/* Step 1: a direct insn in this very mode.  */
3124 temp = expand_unop_direct (mode, unoptab, op0, target, unsignedp);
3128 /* It can't be done in this mode. Can we open-code it in a wider mode? */
3130 /* Widening (or narrowing) clz needs special treatment. */
3131 if (unoptab == clz_optab)
3133 temp = widen_clz (mode, op0, target);
/* Double-word clz can be pieced together from word-mode clz.  */
3137 if (GET_MODE_SIZE (mode) == 2 * UNITS_PER_WORD
3138 && optab_handler (unoptab, word_mode)->insn_code != CODE_FOR_nothing)
3140 temp = expand_doubleword_clz (mode, op0, target);
3148 /* Widening (or narrowing) bswap needs special treatment. */
3149 if (unoptab == bswap_optab)
3151 temp = widen_bswap (mode, op0, target);
3155 if (GET_MODE_SIZE (mode) == 2 * UNITS_PER_WORD
3156 && optab_handler (unoptab, word_mode)->insn_code != CODE_FOR_nothing)
3158 temp = expand_doubleword_bswap (mode, op0, target);
/* Step 2: open-code in a wider mode, truncating the result back.  */
3166 if (CLASS_HAS_WIDER_MODES_P (class))
3167 for (wider_mode = GET_MODE_WIDER_MODE (mode);
3168 wider_mode != VOIDmode;
3169 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
3171 if (optab_handler (unoptab, wider_mode)->insn_code != CODE_FOR_nothing)
3174 rtx last = get_last_insn ();
3176 /* For certain operations, we need not actually extend
3177 the narrow operand, as long as we will truncate the
3178 results to the same narrowness. */
3180 xop0 = widen_operand (xop0, wider_mode, mode, unsignedp,
3181 (unoptab == neg_optab
3182 || unoptab == one_cmpl_optab)
3183 && class == MODE_INT);
3185 temp = expand_unop (wider_mode, unoptab, xop0, NULL_RTX,
3190 if (class != MODE_INT
3191 || !TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode),
3192 GET_MODE_BITSIZE (wider_mode)))
3195 target = gen_reg_rtx (mode);
3196 convert_move (target, temp, 0);
3200 return gen_lowpart (mode, temp);
3203 delete_insns_since (last);
3207 /* These can be done a word at a time. */
/* Step 3: multi-word one's complement decomposes into independent
   word-mode complements.  */
3208 if (unoptab == one_cmpl_optab
3209 && class == MODE_INT
3210 && GET_MODE_SIZE (mode) > UNITS_PER_WORD
3211 && optab_handler (unoptab, word_mode)->insn_code != CODE_FOR_nothing)
3216 if (target == 0 || target == op0)
3217 target = gen_reg_rtx (mode);
3221 /* Do the actual arithmetic. */
3222 for (i = 0; i < GET_MODE_BITSIZE (mode) / BITS_PER_WORD; i++)
3224 rtx target_piece = operand_subword (target, i, 1, mode);
3225 rtx x = expand_unop (word_mode, unoptab,
3226 operand_subword_force (op0, i, mode),
3227 target_piece, unsignedp);
3229 if (target_piece != x)
3230 emit_move_insn (target_piece, x);
3233 insns = get_insns ();
/* Step 4: operation-specific synthetic expansions.  */
3240 if (unoptab->code == NEG)
3242 /* Try negating floating point values by flipping the sign bit. */
3243 if (SCALAR_FLOAT_MODE_P (mode))
3245 temp = expand_absneg_bit (NEG, mode, op0, target);
3250 /* If there is no negation pattern, and we have no negative zero,
3251 try subtracting from zero. */
3252 if (!HONOR_SIGNED_ZEROS (mode))
3254 temp = expand_binop (mode, (unoptab == negv_optab
3255 ? subv_optab : sub_optab),
3256 CONST0_RTX (mode), op0, target,
3257 unsignedp, OPTAB_DIRECT);
3263 /* Try calculating parity (x) as popcount (x) % 2. */
3264 if (unoptab == parity_optab)
3266 temp = expand_parity (mode, op0, target);
3271 /* Try implementing ffs (x) in terms of clz (x). */
3272 if (unoptab == ffs_optab)
3274 temp = expand_ffs (mode, op0, target);
3279 /* Try implementing ctz (x) in terms of clz (x). */
3280 if (unoptab == ctz_optab)
3282 temp = expand_ctz (mode, op0, target);
3288 /* Now try a library call in this mode. */
3289 libfunc = optab_libfunc (unoptab, mode);
3295 enum machine_mode outmode = mode;
3297 /* All of these functions return small values. Thus we choose to
3298 have them return something that isn't a double-word. */
3299 if (unoptab == ffs_optab || unoptab == clz_optab || unoptab == ctz_optab
3300 || unoptab == popcount_optab || unoptab == parity_optab)
3302 = GET_MODE (hard_libcall_value (TYPE_MODE (integer_type_node)));
3306 /* Pass 1 for NO_QUEUE so we don't lose any increments
3307 if the libcall is cse'd or moved. */
3308 value = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST, outmode,
3310 insns = get_insns ();
/* Describe the libcall result with a REG_EQUAL-style expression,
   adjusted for any mismatch between OUTMODE and MODE.  */
3313 target = gen_reg_rtx (outmode);
3314 eq_value = gen_rtx_fmt_e (unoptab->code, mode, op0);
3315 if (GET_MODE_SIZE (outmode) < GET_MODE_SIZE (mode))
3316 eq_value = simplify_gen_unary (TRUNCATE, outmode, eq_value, mode);
3317 else if (GET_MODE_SIZE (outmode) > GET_MODE_SIZE (mode))
3318 eq_value = simplify_gen_unary (ZERO_EXTEND, outmode, eq_value, mode);
3319 emit_libcall_block (insns, target, value, eq_value);
3324 /* It can't be done in this mode. Can we do it in a wider mode? */
/* Step 5: like step 2, but also accepting a wider-mode libcall.  */
3326 if (CLASS_HAS_WIDER_MODES_P (class))
3328 for (wider_mode = GET_MODE_WIDER_MODE (mode);
3329 wider_mode != VOIDmode;
3330 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
3332 if ((optab_handler (unoptab, wider_mode)->insn_code
3333 != CODE_FOR_nothing)
3334 || optab_libfunc (unoptab, wider_mode))
3337 rtx last = get_last_insn ();
3339 /* For certain operations, we need not actually extend
3340 the narrow operand, as long as we will truncate the
3341 results to the same narrowness. */
3343 xop0 = widen_operand (xop0, wider_mode, mode, unsignedp,
3344 (unoptab == neg_optab
3345 || unoptab == one_cmpl_optab)
3346 && class == MODE_INT);
3348 temp = expand_unop (wider_mode, unoptab, xop0, NULL_RTX,
3351 /* If we are generating clz using wider mode, adjust the
3353 if (unoptab == clz_optab && temp != 0)
3354 temp = expand_binop (wider_mode, sub_optab, temp,
3355 GEN_INT (GET_MODE_BITSIZE (wider_mode)
3356 - GET_MODE_BITSIZE (mode)),
3357 target, true, OPTAB_DIRECT);
3361 if (class != MODE_INT)
3364 target = gen_reg_rtx (mode);
3365 convert_move (target, temp, 0);
3369 return gen_lowpart (mode, temp);
3372 delete_insns_since (last);
3377 /* One final attempt at implementing negation via subtraction,
3378 this time allowing widening of the operand. */
3379 if (unoptab->code == NEG && !HONOR_SIGNED_ZEROS (mode))
3382 temp = expand_binop (mode,
3383 unoptab == negv_optab ? subv_optab : sub_optab,
3384 CONST0_RTX (mode), op0,
3385 target, unsignedp, OPTAB_LIB_WIDEN);
3393 /* Emit code to compute the absolute value of OP0, with result to
3394 TARGET if convenient. (TARGET may be 0.) The return value says
3395 where the result actually is to be found.
3397 MODE is the mode of the operand; the mode of the result is
3398 different but can be deduced from MODE.
/* Branch-free strategies only; expand_abs below adds the
   compare-and-negate fallback that needs a jump.  */
3403 expand_abs_nojump (enum machine_mode mode, rtx op0, rtx target,
3404 int result_unsignedp)
3409 result_unsignedp = 1;
3411 /* First try to do it with a special abs instruction. */
3412 temp = expand_unop (mode, result_unsignedp ? abs_optab : absv_optab,
3417 /* For floating point modes, try clearing the sign bit. */
3418 if (SCALAR_FLOAT_MODE_P (mode))
3420 temp = expand_absneg_bit (ABS, mode, op0, target);
3425 /* If we have a MAX insn, we can do this as MAX (x, -x). */
3426 if (optab_handler (smax_optab, mode)->insn_code != CODE_FOR_nothing
3427 && !HONOR_SIGNED_ZEROS (mode))
3429 rtx last = get_last_insn ();
3431 temp = expand_unop (mode, neg_optab, op0, NULL_RTX, 0);
3433 temp = expand_binop (mode, smax_optab, op0, temp, target, 0,
3439 delete_insns_since (last);
3442 /* If this machine has expensive jumps, we can do integer absolute
3443 value of X as (((signed) x >> (W-1)) ^ x) - ((signed) x >> (W-1)),
3444 where W is the width of MODE. */
3446 if (GET_MODE_CLASS (mode) == MODE_INT && BRANCH_COST >= 2)
/* EXTENDED is the all-zeros/all-ones sign mask of OP0.  */
3448 rtx extended = expand_shift (RSHIFT_EXPR, mode, op0,
3449 size_int (GET_MODE_BITSIZE (mode) - 1),
3452 temp = expand_binop (mode, xor_optab, extended, op0, target, 0,
3455 temp = expand_binop (mode, result_unsignedp ? sub_optab : subv_optab,
3456 temp, extended, target, 0, OPTAB_LIB_WIDEN);
/* Like expand_abs_nojump, but falls back to a compare-and-negate
   sequence when no branch-free expansion works.  SAFE nonzero means
   TARGET may be reused even though it might overlap OP0.  */
3466 expand_abs (enum machine_mode mode, rtx op0, rtx target,
3467 int result_unsignedp, int safe)
3472 result_unsignedp = 1;
3474 temp = expand_abs_nojump (mode, op0, target, result_unsignedp);
3478 /* If that does not win, use conditional jump and negate. */
3480 /* It is safe to use the target if it is the same
3481 as the source if this is also a pseudo register */
3482 if (op0 == target && REG_P (op0)
3483 && REGNO (op0) >= FIRST_PSEUDO_REGISTER)
/* OP1 is the label skipped when OP0 is already non-negative.  */
3486 op1 = gen_label_rtx ();
3487 if (target == 0 || ! safe
3488 || GET_MODE (target) != mode
3489 || (MEM_P (target) && MEM_VOLATILE_P (target))
3491 && REGNO (target) < FIRST_PSEUDO_REGISTER))
3492 target = gen_reg_rtx (mode);
3494 emit_move_insn (target, op0);
3497 do_compare_rtx_and_jump (target, CONST0_RTX (mode), GE, 0, mode,
3498 NULL_RTX, NULL_RTX, op1);
/* Negative path: negate in place, then fall through to the label.  */
3500 op0 = expand_unop (mode, result_unsignedp ? neg_optab : negv_optab,
3503 emit_move_insn (target, op0);
3509 /* A subroutine of expand_copysign, perform the copysign operation using the
3510 abs and neg primitives advertised to exist on the target. The assumption
3511 is that we have a split register file, and leaving op0 in fp registers,
3512 and not playing with subregs so much, will help the register allocator. */
3515 expand_copysign_absneg (enum machine_mode mode, rtx op0, rtx op1, rtx target,
3516 int bitpos, bool op0_is_abs)
3518 enum machine_mode imode;
3525 /* Check if the back end provides an insn that handles signbit for the
3527 icode = (int) signbit_optab->handlers [(int) mode].insn_code;
3528 if (icode != CODE_FOR_nothing)
/* SIGN receives the extracted sign of OP1, via the signbit insn ...  */
3530 imode = insn_data[icode].operand[0].mode;
3531 sign = gen_reg_rtx (imode);
3532 emit_unop_insn (icode, sign, op1, UNKNOWN);
/* ... or, lacking that, by ANDing OP1's bit image with a sign mask.  */
3536 HOST_WIDE_INT hi, lo;
3538 if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
3540 imode = int_mode_for_mode (mode);
3541 if (imode == BLKmode)
3543 op1 = gen_lowpart (imode, op1);
/* Multi-word: narrow to the one word holding the sign bit.  */
3550 if (FLOAT_WORDS_BIG_ENDIAN)
3551 word = (GET_MODE_BITSIZE (mode) - bitpos) / BITS_PER_WORD;
3553 word = bitpos / BITS_PER_WORD;
3554 bitpos = bitpos % BITS_PER_WORD;
3555 op1 = operand_subword_force (op1, word, mode);
3558 if (bitpos < HOST_BITS_PER_WIDE_INT)
3561 lo = (HOST_WIDE_INT) 1 << bitpos;
3565 hi = (HOST_WIDE_INT) 1 << (bitpos - HOST_BITS_PER_WIDE_INT);
3569 sign = gen_reg_rtx (imode);
3570 sign = expand_binop (imode, and_optab, op1,
3571 immed_double_const (lo, hi, imode),
3572 NULL_RTX, 1, OPTAB_LIB_WIDEN);
/* Clear OP0's sign unless the caller guarantees it is already clear.  */
3577 op0 = expand_unop (mode, abs_optab, op0, target, 0);
3584 if (target == NULL_RTX)
3585 target = copy_to_reg (op0);
3587 emit_move_insn (target, op0);
/* Negate TARGET only when OP1's sign bit was set.  */
3590 label = gen_label_rtx ();
3591 emit_cmp_and_jump_insns (sign, const0_rtx, EQ, NULL_RTX, imode, 1, label);
3593 if (GET_CODE (op0) == CONST_DOUBLE)
3594 op0 = simplify_unary_operation (NEG, mode, op0, mode);
3596 op0 = expand_unop (mode, neg_optab, op0, target, 0);
3598 emit_move_insn (target, op0);
3606 /* A subroutine of expand_copysign, perform the entire copysign operation
3607 with integer bitmasks. BITPOS is the position of the sign bit; OP0_IS_ABS
3608 is true if op0 is known to have its sign bit clear. */
/* Computes (OP0 & ~signmask) | (OP1 & signmask) on the integer bit
   images, whole-value or word by word depending on MODE's size.  */
3611 expand_copysign_bit (enum machine_mode mode, rtx op0, rtx op1, rtx target,
3612 int bitpos, bool op0_is_abs)
3614 enum machine_mode imode;
3615 HOST_WIDE_INT hi, lo;
3616 int word, nwords, i;
3619 if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
3621 imode = int_mode_for_mode (mode);
3622 if (imode == BLKmode)
/* Multi-word: find the word and bit position of the sign.  */
3631 if (FLOAT_WORDS_BIG_ENDIAN)
3632 word = (GET_MODE_BITSIZE (mode) - bitpos) / BITS_PER_WORD;
3634 word = bitpos / BITS_PER_WORD;
3635 bitpos = bitpos % BITS_PER_WORD;
3636 nwords = (GET_MODE_BITSIZE (mode) + BITS_PER_WORD - 1) / BITS_PER_WORD;
/* Build the sign-bit mask as a HOST_WIDE_INT pair (LO, HI).  */
3639 if (bitpos < HOST_BITS_PER_WIDE_INT)
3642 lo = (HOST_WIDE_INT) 1 << bitpos;
3646 hi = (HOST_WIDE_INT) 1 << (bitpos - HOST_BITS_PER_WIDE_INT);
3650 if (target == 0 || target == op0 || target == op1)
3651 target = gen_reg_rtx (mode);
3657 for (i = 0; i < nwords; ++i)
3659 rtx targ_piece = operand_subword (target, i, 1, mode);
3660 rtx op0_piece = operand_subword_force (op0, i, mode);
/* NOTE(review): presumably only the word holding the sign bit is
   masked and merged; the others are copied unchanged.  The selecting
   condition is elided in this excerpt.  */
3665 op0_piece = expand_binop (imode, and_optab, op0_piece,
3666 immed_double_const (~lo, ~hi, imode),
3667 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3669 op1 = expand_binop (imode, and_optab,
3670 operand_subword_force (op1, i, mode),
3671 immed_double_const (lo, hi, imode),
3672 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3674 temp = expand_binop (imode, ior_optab, op0_piece, op1,
3675 targ_piece, 1, OPTAB_LIB_WIDEN);
3676 if (temp != targ_piece)
3677 emit_move_insn (targ_piece, temp);
3680 emit_move_insn (targ_piece, op0_piece);
3683 insns = get_insns ();
/* Single-word path: mask both operands and OR them together.  */
3690 op1 = expand_binop (imode, and_optab, gen_lowpart (imode, op1),
3691 immed_double_const (lo, hi, imode),
3692 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3694 op0 = gen_lowpart (imode, op0);
3696 op0 = expand_binop (imode, and_optab, op0,
3697 immed_double_const (~lo, ~hi, imode),
3698 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3700 temp = expand_binop (imode, ior_optab, op0, op1,
3701 gen_lowpart (imode, target), 1, OPTAB_LIB_WIDEN);
3702 target = lowpart_subreg_maybe_copy (mode, temp, imode);
3708 /* Expand the C99 copysign operation. OP0 and OP1 must be the same
3709 scalar floating point mode. Return NULL if we do not know how to
3710 expand the operation inline. */
3713 expand_copysign (rtx op0, rtx op1, rtx target)
3715 enum machine_mode mode = GET_MODE (op0);
3716 const struct real_format *fmt;
3720 gcc_assert (SCALAR_FLOAT_MODE_P (mode));
3721 gcc_assert (GET_MODE (op1) == mode);
3723 /* First try to do it with a special instruction. */
3724 temp = expand_binop (mode, copysign_optab, op0, op1,
3725 target, 0, OPTAB_DIRECT);
/* Bit-twiddling fallbacks only make sense for formats with a signed
   zero (i.e. a dedicated sign bit).  */
3729 fmt = REAL_MODE_FORMAT (mode);
3730 if (fmt == NULL || !fmt->has_signed_zero)
/* If OP0 is a constant, fold |OP0| at compile time.  */
3734 if (GET_CODE (op0) == CONST_DOUBLE)
3736 if (real_isneg (CONST_DOUBLE_REAL_VALUE (op0)))
3737 op0 = simplify_unary_operation (ABS, mode, op0, mode);
/* Prefer the abs/neg based sequence when the sign bit is readable
   (signbit_ro >= 0) and either OP0 is constant or the target has
   both neg and abs insns for MODE.  */
3741 if (fmt->signbit_ro >= 0
3742 && (GET_CODE (op0) == CONST_DOUBLE
3743 || (optab_handler (neg_optab, mode)->insn_code != CODE_FOR_nothing
3744 && optab_handler (abs_optab, mode)->insn_code != CODE_FOR_nothing)))
3746 temp = expand_copysign_absneg (mode, op0, op1, target,
3747 fmt->signbit_ro, op0_is_abs);
/* Otherwise fall back to integer bitmask manipulation, which needs a
   writable sign bit position.  */
3752 if (fmt->signbit_rw < 0)
3754 return expand_copysign_bit (mode, op0, op1, target,
3755 fmt->signbit_rw, op0_is_abs);
3758 /* Generate an instruction whose insn-code is INSN_CODE,
3759 with two operands: an output TARGET and an input OP0.
3760 TARGET *must* be nonzero, and the output is always stored there.
3761 CODE is an rtx code such that (CODE OP0) is an rtx that describes
3762 the value that is stored into TARGET. */
3765 emit_unop_insn (int icode, rtx target, rtx op0, enum rtx_code code)
3768 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
3773 /* Now, if insn does not accept our operands, put them into pseudos. */
3775 if (!insn_data[icode].operand[1].predicate (op0, mode0))
3776 op0 = copy_to_mode_reg (mode0, op0);
3778 if (!insn_data[icode].operand[0].predicate (temp, GET_MODE (temp)))
3779 temp = gen_reg_rtx (GET_MODE (temp));
3781 pat = GEN_FCN (icode) (temp, op0);
/* If the generator produced a multi-insn sequence, attach a REG_EQUAL
   note describing (CODE OP0) so later passes know the net effect.  */
3783 if (INSN_P (pat) && NEXT_INSN (pat) != NULL_RTX && code != UNKNOWN)
3784 add_equal_note (pat, temp, code, op0, NULL_RTX);
/* Copy the result to TARGET if the insn could not write it directly.  */
3789 emit_move_insn (target, temp);
/* Bookkeeping passed to no_conflict_move_test via note_stores while
   emit_libcall_block decides which insns may be hoisted out of a
   libcall block.  TARGET is the libcall result; FIRST/INSN delimit
   the insns examined so far.
   NOTE(review): a `bool must_stay` member is written by
   no_conflict_move_test below but its declaration is not visible in
   this chunk — confirm against the full file.  */
3792 struct no_conflict_data
3794 rtx target, first, insn;
3798 /* Called via note_stores by emit_libcall_block. Set P->must_stay if
3799 the currently examined clobber / store has to stay in the list of
3800 insns that constitute the actual libcall block. */
3802 no_conflict_move_test (rtx dest, const_rtx set, void *p0)
3804 struct no_conflict_data *p= p0;
3806 /* If this insn directly contributes to setting the target, it must stay. */
3807 if (reg_overlap_mentioned_p (p->target, dest))
3808 p->must_stay = true;
3809 /* If we haven't committed to keeping any other insns in the list yet,
3810 there is nothing more to check. */
3811 else if (p->insn == p->first)
3813 /* If this insn sets / clobbers a register that feeds one of the insns
3814 already in the list, this insn has to stay too. */
3815 else if (reg_overlap_mentioned_p (dest, PATTERN (p->first))
3816 || (CALL_P (p->first) && (find_reg_fusage (p->first, USE, dest)))
3817 || reg_used_between_p (dest, p->first, p->insn)
3818 /* Likewise if this insn depends on a register set by a previous
3819 insn in the list, or if it sets a result (presumably a hard
3820 register) that is set or clobbered by a previous insn.
3821 N.B. the modified_*_p (SET_DEST...) tests applied to a MEM
3822 SET_DEST perform the former check on the address, and the latter
3823 check on the MEM. */
3824 || (GET_CODE (set) == SET
3825 && (modified_in_p (SET_SRC (set), p->first)
3826 || modified_in_p (SET_DEST (set), p->first)
3827 || modified_between_p (SET_SRC (set), p->first, p->insn)
3828 || modified_between_p (SET_DEST (set), p->first, p->insn))))
3829 p->must_stay = true;
3832 /* Encapsulate the block starting at FIRST and ending with LAST, which is
3833 logically equivalent to EQUIV, so it gets manipulated as a unit if it
3834 is possible to do so. */
3837 maybe_encapsulate_block (rtx first, rtx last, rtx equiv)
/* Only encapsulate when the libcall cannot trap under non-call
   exceptions; a trapping region must keep its EH structure visible.  */
3839 if (!flag_non_call_exceptions || !may_trap_p (equiv))
3841 /* We can't attach the REG_LIBCALL and REG_RETVAL notes when the
3842 encapsulated region would not be in one basic block, i.e. when
3843 there is a control_flow_insn_p insn between FIRST and LAST. */
3844 bool attach_libcall_retval_notes = true;
3845 rtx insn, next = NEXT_INSN (last);
3847 for (insn = first; insn != next; insn = NEXT_INSN (insn))
3848 if (control_flow_insn_p (insn))
3850 attach_libcall_retval_notes = false;
/* Mark the region: REG_LIBCALL on FIRST points at LAST, and
   REG_RETVAL on LAST points back at FIRST.  */
3854 if (attach_libcall_retval_notes)
3856 REG_NOTES (first) = gen_rtx_INSN_LIST (REG_LIBCALL, last,
3858 REG_NOTES (last) = gen_rtx_INSN_LIST (REG_RETVAL, first,
3865 /* Emit code to make a call to a constant function or a library call.
3867 INSNS is a list containing all insns emitted in the call.
3868 These insns leave the result in RESULT. Our block is to copy RESULT
3869 to TARGET, which is logically equivalent to EQUIV.
3871 We first emit any insns that set a pseudo on the assumption that these are
3872 loading constants into registers; doing so allows them to be safely cse'ed
3873 between blocks. Then we emit all the other insns in the block, followed by
3874 an insn to move RESULT to TARGET. This last insn will have a REG_EQUAL
3875 note with an operand of EQUIV.
3877 Moving assignments to pseudos outside of the block is done to improve
3878 the generated code, but is not required to generate correct code,
3879 hence being unable to move an assignment is not grounds for not making
3880 a libcall block. There are two reasons why it is safe to leave these
3881 insns inside the block: First, we know that these pseudos cannot be
3882 used in generated RTL outside the block since they are created for
3883 temporary purposes within the block. Second, CSE will not record the
3884 values of anything set inside a libcall block, so we know they must
3885 be dead at the end of the block.
3887 Except for the first group of insns (the ones setting pseudos), the
3888 block is delimited by REG_RETVAL and REG_LIBCALL notes. */
3890 emit_libcall_block (rtx insns, rtx target, rtx result, rtx equiv)
3892 rtx final_dest = target;
3893 rtx prev, next, first, last, insn;
3895 /* If this is a reg with REG_USERVAR_P set, then it could possibly turn
3896 into a MEM later. Protect the libcall block from this change. */
3897 if (! REG_P (target) || REG_USERVAR_P (target))
3898 target = gen_reg_rtx (GET_MODE (target));
3900 /* If we're using non-call exceptions, a libcall corresponding to an
3901 operation that may trap may also trap. */
3902 if (flag_non_call_exceptions && may_trap_p (equiv))
3904 for (insn = insns; insn; insn = NEXT_INSN (insn))
3907 rtx note = find_reg_note (insn, REG_EH_REGION, NULL_RTX);
3909 if (note != 0 && INTVAL (XEXP (note, 0)) <= 0)
3910 remove_note (insn, note);
3914 /* look for any CALL_INSNs in this sequence, and attach a REG_EH_REGION
3915 reg note to indicate that this call cannot throw or execute a nonlocal
3916 goto (unless there is already a REG_EH_REGION note, in which case
3918 for (insn = insns; insn; insn = NEXT_INSN (insn))
3921 rtx note = find_reg_note (insn, REG_EH_REGION, NULL_RTX);
3924 XEXP (note, 0) = constm1_rtx;
3926 REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_EH_REGION, constm1_rtx,
3930 /* First emit all insns that set pseudos. Remove them from the list as
3931 we go. Avoid insns that set pseudos which were referenced in previous
3932 insns. These can be generated by move_by_pieces, for example,
3933 to update an address. Similarly, avoid insns that reference things
3934 set in previous insns. */
3936 for (insn = insns; insn; insn = next)
3938 rtx set = single_set (insn);
3941 /* Some ports (cris) create libcall regions on their own. We must
3942 avoid any potential nesting of LIBCALLs. */
3943 if ((note = find_reg_note (insn, REG_LIBCALL, NULL)) != NULL)
3944 remove_note (insn, note);
3945 if ((note = find_reg_note (insn, REG_RETVAL, NULL)) != NULL)
3946 remove_note (insn, note);
3948 next = NEXT_INSN (insn);
3950 if (set != 0 && REG_P (SET_DEST (set))
3951 && REGNO (SET_DEST (set)) >= FIRST_PSEUDO_REGISTER)
3953 struct no_conflict_data data;
3955 data.target = const0_rtx;
3959 note_stores (PATTERN (insn), no_conflict_move_test, &data);
/* Hoistable: splice the insn out of the sequence so it can be
   emitted ahead of the libcall block.  */
3960 if (! data.must_stay)
3962 if (PREV_INSN (insn))
3963 NEXT_INSN (PREV_INSN (insn)) = next;
3968 PREV_INSN (next) = PREV_INSN (insn);
3974 /* Some ports use a loop to copy large arguments onto the stack.
3975 Don't move anything outside such a loop. */
3980 prev = get_last_insn ();
3982 /* Write the remaining insns followed by the final copy. */
3984 for (insn = insns; insn; insn = next)
3986 next = NEXT_INSN (insn);
3991 last = emit_move_insn (target, result);
3992 if (optab_handler (mov_optab, GET_MODE (target))->insn_code
3993 != CODE_FOR_nothing)
3994 set_unique_reg_note (last, REG_EQUAL, copy_rtx (equiv));
3997 /* Remove any existing REG_EQUAL note from "last", or else it will
3998 be mistaken for a note referring to the full contents of the
3999 libcall value when found together with the REG_RETVAL note added
4000 below. An existing note can come from an insn expansion at
4002 remove_note (last, find_reg_note (last, REG_EQUAL, NULL_RTX));
4005 if (final_dest != target)
4006 emit_move_insn (final_dest, target);
4009 first = get_insns ();
4011 first = NEXT_INSN (prev);
4013 maybe_encapsulate_block (first, last, equiv);
4016 /* Nonzero if we can perform a comparison of mode MODE straightforwardly.
4017 PURPOSE describes how this comparison will be used. CODE is the rtx
4018 comparison code we will be using.
4020 ??? Actually, CODE is slightly weaker than that. A target is still
4021 required to implement all of the normal bcc operations, but not
4022 required to implement all (or any) of the unordered bcc operations. */
4025 can_compare_p (enum rtx_code code, enum machine_mode mode,
4026 enum can_compare_purpose purpose)
/* Walk from MODE through its wider modes until some insn pattern can
   carry out the comparison for PURPOSE.  */
4030 if (optab_handler (cmp_optab, mode)->insn_code != CODE_FOR_nothing)
4032 if (purpose == ccp_jump)
4033 return bcc_gen_fctn[(int) code] != NULL;
4034 else if (purpose == ccp_store_flag)
4035 return setcc_gen_code[(int) code] != CODE_FOR_nothing;
4037 /* There's only one cmov entry point, and it's allowed to fail. */
4040 if (purpose == ccp_jump
4041 && optab_handler (cbranch_optab, mode)->insn_code != CODE_FOR_nothing)
4043 if (purpose == ccp_cmov
4044 && optab_handler (cmov_optab, mode)->insn_code != CODE_FOR_nothing)
4046 if (purpose == ccp_store_flag
4047 && optab_handler (cstore_optab, mode)->insn_code != CODE_FOR_nothing)
4049 mode = GET_MODE_WIDER_MODE (mode);
4051 while (mode != VOIDmode);
4056 /* This function is called when we are going to emit a compare instruction that
4057 compares the values found in *PX and *PY, using the rtl operator COMPARISON.
4059 *PMODE is the mode of the inputs (in case they are const_int).
4060 *PUNSIGNEDP nonzero says that the operands are unsigned;
4061 this matters if they need to be widened.
4063 If they have mode BLKmode, then SIZE specifies the size of both operands.
4065 This function performs all the setup necessary so that the caller only has
4066 to emit a single comparison insn. This setup can involve doing a BLKmode
4067 comparison or emitting a library call to perform the comparison if no insn
4068 is available to handle it.
4069 The values which are passed in through pointers can be modified; the caller
4070 should perform the comparison on the modified values. Constant
4071 comparisons must have already been folded. */
4074 prepare_cmp_insn (rtx *px, rtx *py, enum rtx_code *pcomparison, rtx size,
4075 enum machine_mode *pmode, int *punsignedp,
4076 enum can_compare_purpose purpose)
4078 enum machine_mode mode = *pmode;
4079 rtx x = *px, y = *py;
4080 int unsignedp = *punsignedp;
4083 /* If we are inside an appropriately-short loop and we are optimizing,
4084 force expensive constants into a register. */
4085 if (CONSTANT_P (x) && optimize
4086 && rtx_cost (x, COMPARE) > COSTS_N_INSNS (1))
4087 x = force_reg (mode, x);
4089 if (CONSTANT_P (y) && optimize
4090 && rtx_cost (y, COMPARE) > COSTS_N_INSNS (1))
4091 y = force_reg (mode, y);
4094 /* Make sure we have a canonical comparison. The RTL
4095 documentation states that canonical comparisons are required only
4096 for targets which have cc0. */
4097 gcc_assert (!CONSTANT_P (x) || CONSTANT_P (y))
4100 /* Don't let both operands fail to indicate the mode. */
4101 if (GET_MODE (x) == VOIDmode && GET_MODE (y) == VOIDmode)
4102 x = force_reg (mode, x);
4104 /* Handle all BLKmode compares. */
4106 if (mode == BLKmode)
4108 enum machine_mode cmp_mode, result_mode;
4109 enum insn_code cmp_code;
/* Alignment operand for the block-compare patterns: the smaller of
   the two operands' known alignments, in bytes.  */
4114 = GEN_INT (MIN (MEM_ALIGN (x), MEM_ALIGN (y)) / BITS_PER_UNIT);
4118 /* Try to use a memory block compare insn - either cmpstr
4119 or cmpmem will do. */
4120 for (cmp_mode = GET_CLASS_NARROWEST_MODE (MODE_INT);
4121 cmp_mode != VOIDmode;
4122 cmp_mode = GET_MODE_WIDER_MODE (cmp_mode))
4124 cmp_code = cmpmem_optab[cmp_mode];
4125 if (cmp_code == CODE_FOR_nothing)
4126 cmp_code = cmpstr_optab[cmp_mode];
4127 if (cmp_code == CODE_FOR_nothing)
4128 cmp_code = cmpstrn_optab[cmp_mode];
4129 if (cmp_code == CODE_FOR_nothing)
4132 /* Must make sure the size fits the insn's mode. */
4133 if ((GET_CODE (size) == CONST_INT
4134 && INTVAL (size) >= (1 << GET_MODE_BITSIZE (cmp_mode)))
4135 || (GET_MODE_BITSIZE (GET_MODE (size))
4136 > GET_MODE_BITSIZE (cmp_mode)))
4139 result_mode = insn_data[cmp_code].operand[0].mode;
4140 result = gen_reg_rtx (result_mode);
4141 size = convert_to_mode (cmp_mode, size, 1);
4142 emit_insn (GEN_FCN (cmp_code) (result, x, y, size, opalign));
4146 *pmode = result_mode;
4150 /* Otherwise call a library function, memcmp. */
4151 libfunc = memcmp_libfunc;
4152 length_type = sizetype;
4153 result_mode = TYPE_MODE (integer_type_node);
4154 cmp_mode = TYPE_MODE (length_type);
4155 size = convert_to_mode (TYPE_MODE (length_type), size,
4156 TYPE_UNSIGNED (length_type));
4158 result = emit_library_call_value (libfunc, 0, LCT_PURE,
4165 *pmode = result_mode;
4169 /* Don't allow operands to the compare to trap, as that can put the
4170 compare and branch in different basic blocks. */
4171 if (flag_non_call_exceptions)
4174 x = force_reg (mode, x);
4176 y = force_reg (mode, y);
4181 if (can_compare_p (*pcomparison, mode, purpose))
4184 /* Handle a lib call just for the mode we are using. */
4186 libfunc = optab_libfunc (cmp_optab, mode);
4187 if (libfunc && !SCALAR_FLOAT_MODE_P (mode))
4191 /* If we want unsigned, and this mode has a distinct unsigned
4192 comparison routine, use that. */
4195 rtx ulibfunc = optab_libfunc (ucmp_optab, mode);
4200 result = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
4201 targetm.libgcc_cmp_return_mode (),
4202 2, x, mode, y, mode);
4204 /* There are two kinds of comparison routines. Biased routines
4205 return 0/1/2, and unbiased routines return -1/0/1. Other parts
4206 of gcc expect that the comparison operation is equivalent
4207 to the modified comparison. For signed comparisons compare the
4208 result against 1 in the biased case, and zero in the unbiased
4209 case. For unsigned comparisons always compare against 1 after
4210 biasing the unbiased result by adding 1. This gives us a way to
4216 if (!TARGET_LIB_INT_CMP_BIASED)
4219 *px = plus_constant (result, 1);
/* Floating-point compares with no direct insn go through the FP
   libcall helper instead.  */
4226 gcc_assert (SCALAR_FLOAT_MODE_P (mode));
4227 prepare_float_lib_cmp (px, py, pcomparison, pmode, punsignedp);
4230 /* Before emitting an insn with code ICODE, make sure that X, which is going
4231 to be used for operand OPNUM of the insn, is converted from mode MODE to
4232 WIDER_MODE (UNSIGNEDP determines whether it is an unsigned conversion), and
4233 that it is accepted by the operand predicate. Return the new value. */
4236 prepare_operand (int icode, rtx x, int opnum, enum machine_mode mode,
4237 enum machine_mode wider_mode, int unsignedp)
4239 if (mode != wider_mode)
4240 x = convert_modes (wider_mode, mode, x, unsignedp);
/* If X still fails the operand predicate, force it into a register of
   the operand's mode — unless reload has already run, in which case
   creating new pseudos is no longer possible.  */
4242 if (!insn_data[icode].operand[opnum].predicate
4243 (x, insn_data[icode].operand[opnum].mode))
4245 if (reload_completed)
4247 x = copy_to_mode_reg (insn_data[icode].operand[opnum].mode, x);
4253 /* Subroutine of emit_cmp_and_jump_insns; this function is called when we know
4254 we can do the comparison.
4255 The arguments are the same as for emit_cmp_and_jump_insns; but LABEL may
4256 be NULL_RTX which indicates that only a comparison is to be generated. */
4259 emit_cmp_and_jump_insn_1 (rtx x, rtx y, enum machine_mode mode,
4260 enum rtx_code comparison, int unsignedp, rtx label)
4262 rtx test = gen_rtx_fmt_ee (comparison, mode, x, y);
4263 enum mode_class class = GET_MODE_CLASS (mode);
4264 enum machine_mode wider_mode = mode;
/* Widening loop: try MODE first, then each wider mode of the same
   class, with three strategies per mode (cbranch, tst, cmp).  */
4266 /* Try combined insns first. */
4269 enum insn_code icode;
4270 PUT_MODE (test, wider_mode);
4274 icode = optab_handler (cbranch_optab, wider_mode)->insn_code;
4276 if (icode != CODE_FOR_nothing
4277 && insn_data[icode].operand[0].predicate (test, wider_mode))
4279 x = prepare_operand (icode, x, 1, mode, wider_mode, unsignedp);
4280 y = prepare_operand (icode, y, 2, mode, wider_mode, unsignedp);
4281 emit_jump_insn (GEN_FCN (icode) (test, x, y, label));
4286 /* Handle some compares against zero. */
4287 icode = (int) optab_handler (tst_optab, wider_mode)->insn_code;
4288 if (y == CONST0_RTX (mode) && icode != CODE_FOR_nothing)
4290 x = prepare_operand (icode, x, 0, mode, wider_mode, unsignedp);
4291 emit_insn (GEN_FCN (icode) (x));
4293 emit_jump_insn (bcc_gen_fctn[(int) comparison] (label));
4297 /* Handle compares for which there is a directly suitable insn. */
4299 icode = (int) optab_handler (cmp_optab, wider_mode)->insn_code;
4300 if (icode != CODE_FOR_nothing)
4302 x = prepare_operand (icode, x, 0, mode, wider_mode, unsignedp);
4303 y = prepare_operand (icode, y, 1, mode, wider_mode, unsignedp);
4304 emit_insn (GEN_FCN (icode) (x, y));
4306 emit_jump_insn (bcc_gen_fctn[(int) comparison] (label));
4310 if (!CLASS_HAS_WIDER_MODES_P (class))
4313 wider_mode = GET_MODE_WIDER_MODE (wider_mode);
4315 while (wider_mode != VOIDmode);
4320 /* Generate code to compare X with Y so that the condition codes are
4321 set and to jump to LABEL if the condition is true. If X is a
4322 constant and Y is not a constant, then the comparison is swapped to
4323 ensure that the comparison RTL has the canonical form.
4325 UNSIGNEDP nonzero says that X and Y are unsigned; this matters if they
4326 need to be widened by emit_cmp_insn. UNSIGNEDP is also used to select
4327 the proper branch condition code.
4329 If X and Y have mode BLKmode, then SIZE specifies the size of both X and Y.
4331 MODE is the mode of the inputs (in case they are const_int).
4333 COMPARISON is the rtl operator to compare with (EQ, NE, GT, etc.). It will
4334 be passed unchanged to emit_cmp_insn, then potentially converted into an
4335 unsigned variant based on UNSIGNEDP to select a proper jump instruction. */
4338 emit_cmp_and_jump_insns (rtx x, rtx y, enum rtx_code comparison, rtx size,
4339 enum machine_mode mode, int unsignedp, rtx label)
4341 rtx op0 = x, op1 = y;
4343 /* Swap operands and condition to ensure canonical RTL. */
4344 if (swap_commutative_operands_p (x, y))
4346 /* If we're not emitting a branch, callers are required to pass
4347 operands in an order conforming to canonical RTL. We relax this
4348 for commutative comparisons so callers using EQ don't need to do
4349 swapping by hand. */
4350 gcc_assert (label || (comparison == swap_condition (comparison)));
4353 comparison = swap_condition (comparison);
4357 /* If OP0 is still a constant, then both X and Y must be constants.
4358 Force X into a register to create canonical RTL. */
4359 if (CONSTANT_P (op0))
4360 op0 = force_reg (mode, op0);
/* Select the unsigned flavor of the condition when requested.  */
4364 comparison = unsigned_condition (comparison);
4366 prepare_cmp_insn (&op0, &op1, &comparison, size, &mode, &unsignedp,
4368 emit_cmp_and_jump_insn_1 (op0, op1, mode, comparison, unsignedp, label);
4371 /* Like emit_cmp_and_jump_insns, but generate only the comparison. */
4374 emit_cmp_insn (rtx x, rtx y, enum rtx_code comparison, rtx size,
4375 enum machine_mode mode, int unsignedp)
/* A null LABEL tells emit_cmp_and_jump_insns to omit the branch.  */
4377 emit_cmp_and_jump_insns (x, y, comparison, size, mode, unsignedp, 0);
4380 /* Emit a library call comparison between floating point X and Y.
4381 COMPARISON is the rtl operator to compare with (EQ, NE, GT, etc.). */
4384 prepare_float_lib_cmp (rtx *px, rtx *py, enum rtx_code *pcomparison,
4385 enum machine_mode *pmode, int *punsignedp)
4387 enum rtx_code comparison = *pcomparison;
4388 enum rtx_code swapped = swap_condition (comparison);
4389 enum rtx_code reversed = reverse_condition_maybe_unordered (comparison);
4392 enum machine_mode orig_mode = GET_MODE (x);
4393 enum machine_mode mode, cmp_mode;
4394 rtx value, target, insns, equiv;
4396 bool reversed_p = false;
4397 cmp_mode = targetm.libgcc_cmp_return_mode ();
/* Search widening modes for a libfunc implementing COMPARISON, its
   swapped form (exchange X and Y), or its reversed form.  */
4399 for (mode = orig_mode;
4401 mode = GET_MODE_WIDER_MODE (mode))
4403 if ((libfunc = optab_libfunc (code_to_optab[comparison], mode)))
4406 if ((libfunc = optab_libfunc (code_to_optab[swapped] , mode)))
4409 tmp = x; x = y; y = tmp;
4410 comparison = swapped;
4414 if ((libfunc = optab_libfunc (code_to_optab[reversed], mode))
4415 && FLOAT_LIB_COMPARE_RETURNS_BOOL (mode, reversed))
4417 comparison = reversed;
4423 gcc_assert (mode != VOIDmode);
4425 if (mode != orig_mode)
4427 x = convert_to_mode (mode, x, 0);
4428 y = convert_to_mode (mode, y, 0);
4431 /* Attach a REG_EQUAL note describing the semantics of the libcall to
4432 the RTL. The allows the RTL optimizers to delete the libcall if the
4433 condition can be determined at compile-time. */
4434 if (comparison == UNORDERED)
/* UNORDERED(x,y) is equivalent to NE(x,x) || NE(y,y) (NaN check).  */
4436 rtx temp = simplify_gen_relational (NE, cmp_mode, mode, x, x);
4437 equiv = simplify_gen_relational (NE, cmp_mode, mode, y, y);
4438 equiv = simplify_gen_ternary (IF_THEN_ELSE, cmp_mode, cmp_mode,
4439 temp, const_true_rtx, equiv);
4443 equiv = simplify_gen_relational (comparison, cmp_mode, mode, x, y);
/* Non-boolean library routines return a three-way value; describe the
   true/false results the comparison code maps to.  */
4444 if (! FLOAT_LIB_COMPARE_RETURNS_BOOL (mode, comparison))
4446 rtx true_rtx, false_rtx;
4451 true_rtx = const0_rtx;
4452 false_rtx = const_true_rtx;
4456 true_rtx = const_true_rtx;
4457 false_rtx = const0_rtx;
4461 true_rtx = const1_rtx;
4462 false_rtx = const0_rtx;
4466 true_rtx = const0_rtx;
4467 false_rtx = constm1_rtx;
4471 true_rtx = constm1_rtx;
4472 false_rtx = const0_rtx;
4476 true_rtx = const0_rtx;
4477 false_rtx = const1_rtx;
4483 equiv = simplify_gen_ternary (IF_THEN_ELSE, cmp_mode, cmp_mode,
4484 equiv, true_rtx, false_rtx);
4489 value = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
4490 cmp_mode, 2, x, mode, y, mode);
4491 insns = get_insns ();
4494 target = gen_reg_rtx (cmp_mode);
4495 emit_libcall_block (insns, target, value, equiv);
/* The caller compares the libcall result against zero; pick EQ/NE
   depending on whether we had to reverse the condition.  */
4497 if (comparison == UNORDERED
4498 || FLOAT_LIB_COMPARE_RETURNS_BOOL (mode, comparison))
4499 comparison = reversed_p ? EQ : NE;
4504 *pcomparison = comparison;
4508 /* Generate code to indirectly jump to a location given in the rtx LOC. */
4511 emit_indirect_jump (rtx loc)
/* Force LOC into a Pmode register if the indirect_jump pattern's
   operand predicate rejects it.  */
4513 if (!insn_data[(int) CODE_FOR_indirect_jump].operand[0].predicate
4515 loc = copy_to_mode_reg (Pmode, loc);
4517 emit_jump_insn (gen_indirect_jump (loc));
4521 #ifdef HAVE_conditional_move
4523 /* Emit a conditional move instruction if the machine supports one for that
4524 condition and machine mode.
4526 OP0 and OP1 are the operands that should be compared using CODE. CMODE is
4527 the mode to use should they be constants. If it is VOIDmode, they cannot
4530 OP2 should be stored in TARGET if the comparison is true, otherwise OP3
4531 should be stored there. MODE is the mode to use should they be constants.
4532 If it is VOIDmode, they cannot both be constants.
4534 The result is either TARGET (perhaps modified) or NULL_RTX if the operation
4535 is not supported. */
4538 emit_conditional_move (rtx target, enum rtx_code code, rtx op0, rtx op1,
4539 enum machine_mode cmode, rtx op2, rtx op3,
4540 enum machine_mode mode, int unsignedp)
4542 rtx tem, subtarget, comparison, insn;
4543 enum insn_code icode;
4544 enum rtx_code reversed;
4546 /* If one operand is constant, make it the second one. Only do this
4547 if the other operand is not constant as well. */
4549 if (swap_commutative_operands_p (op0, op1))
4554 code = swap_condition (code);
4557 /* get_condition will prefer to generate LT and GT even if the old
4558 comparison was against zero, so undo that canonicalization here since
4559 comparisons against zero are cheaper. */
4560 if (code == LT && op1 == const1_rtx)
4561 code = LE, op1 = const0_rtx;
4562 else if (code == GT && op1 == constm1_rtx)
4563 code = GE, op1 = const0_rtx;
4565 if (cmode == VOIDmode)
4566 cmode = GET_MODE (op0);
/* If swapping OP2/OP3 is preferable and the condition can be
   reversed, do so.  */
4568 if (swap_commutative_operands_p (op2, op3)
4569 && ((reversed = reversed_comparison_code_parts (code, op0, op1, NULL))
4578 if (mode == VOIDmode)
4579 mode = GET_MODE (op2);
4581 icode = movcc_gen_code[mode];
4583 if (icode == CODE_FOR_nothing)
4587 target = gen_reg_rtx (mode);
4591 /* If the insn doesn't accept these operands, put them in pseudos. */
4593 if (!insn_data[icode].operand[0].predicate
4594 (subtarget, insn_data[icode].operand[0].mode))
4595 subtarget = gen_reg_rtx (insn_data[icode].operand[0].mode);
4597 if (!insn_data[icode].operand[2].predicate
4598 (op2, insn_data[icode].operand[2].mode))
4599 op2 = copy_to_mode_reg (insn_data[icode].operand[2].mode, op2);
4601 if (!insn_data[icode].operand[3].predicate
4602 (op3, insn_data[icode].operand[3].mode))
4603 op3 = copy_to_mode_reg (insn_data[icode].operand[3].mode, op3);
4605 /* Everything should now be in the suitable form, so emit the compare insn
4606 and then the conditional move. */
4609 = compare_from_rtx (op0, op1, code, unsignedp, cmode, NULL_RTX);
4611 /* ??? Watch for const0_rtx (nop) and const_true_rtx (unconditional)? */
4612 /* We can get const0_rtx or const_true_rtx in some circumstances. Just
4613 return NULL and let the caller figure out how best to deal with this
4615 if (GET_CODE (comparison) != code)
4618 insn = GEN_FCN (icode) (subtarget, comparison, op2, op3);
4620 /* If that failed, then give up. */
4626 if (subtarget != target)
4627 convert_move (target, subtarget, 0);
4632 /* Return nonzero if a conditional move of mode MODE is supported.
4634 This function is for combine so it can tell whether an insn that looks
4635 like a conditional move is actually supported by the hardware. If we
4636 guess wrong we lose a bit on optimization, but that's it. */
4637 /* ??? sparc64 supports conditionally moving integers values based on fp
4638 comparisons, and vice versa. How do we handle them? */
4641 can_conditionally_move_p (enum machine_mode mode)
/* Supported iff a movMODEcc pattern exists for MODE.  */
4643 if (movcc_gen_code[mode] != CODE_FOR_nothing)
4649 #endif /* HAVE_conditional_move */
4651 /* Emit a conditional addition instruction if the machine supports one for that
4652 condition and machine mode.
4654 OP0 and OP1 are the operands that should be compared using CODE. CMODE is
4655 the mode to use should they be constants. If it is VOIDmode, they cannot
4658 OP2 should be stored in TARGET if the comparison is true, otherwise OP2+OP3
4659 should be stored there. MODE is the mode to use should they be constants.
4660 If it is VOIDmode, they cannot both be constants.
4662 The result is either TARGET (perhaps modified) or NULL_RTX if the operation
4663 is not supported. */
/* This mirrors emit_conditional_move above, but uses addcc_optab.  */
4666 emit_conditional_add (rtx target, enum rtx_code code, rtx op0, rtx op1,
4667 enum machine_mode cmode, rtx op2, rtx op3,
4668 enum machine_mode mode, int unsignedp)
4670 rtx tem, subtarget, comparison, insn;
4671 enum insn_code icode;
4672 enum rtx_code reversed;
4674 /* If one operand is constant, make it the second one. Only do this
4675 if the other operand is not constant as well. */
4677 if (swap_commutative_operands_p (op0, op1))
4682 code = swap_condition (code);
4685 /* get_condition will prefer to generate LT and GT even if the old
4686 comparison was against zero, so undo that canonicalization here since
4687 comparisons against zero are cheaper. */
4688 if (code == LT && op1 == const1_rtx)
4689 code = LE, op1 = const0_rtx;
4690 else if (code == GT && op1 == constm1_rtx)
4691 code = GE, op1 = const0_rtx;
4693 if (cmode == VOIDmode)
4694 cmode = GET_MODE (op0);
4696 if (swap_commutative_operands_p (op2, op3)
4697 && ((reversed = reversed_comparison_code_parts (code, op0, op1, NULL))
4706 if (mode == VOIDmode)
4707 mode = GET_MODE (op2);
4709 icode = optab_handler (addcc_optab, mode)->insn_code;
4711 if (icode == CODE_FOR_nothing)
4715 target = gen_reg_rtx (mode);
4717 /* If the insn doesn't accept these operands, put them in pseudos. */
4719 if (!insn_data[icode].operand[0].predicate
4720 (target, insn_data[icode].operand[0].mode))
4721 subtarget = gen_reg_rtx (insn_data[icode].operand[0].mode);
4725 if (!insn_data[icode].operand[2].predicate
4726 (op2, insn_data[icode].operand[2].mode))
4727 op2 = copy_to_mode_reg (insn_data[icode].operand[2].mode, op2);
4729 if (!insn_data[icode].operand[3].predicate
4730 (op3, insn_data[icode].operand[3].mode))
4731 op3 = copy_to_mode_reg (insn_data[icode].operand[3].mode, op3);
4733 /* Everything should now be in the suitable form, so emit the compare insn
4734 and then the conditional move. */
4737 = compare_from_rtx (op0, op1, code, unsignedp, cmode, NULL_RTX);
4739 /* ??? Watch for const0_rtx (nop) and const_true_rtx (unconditional)? */
4740 /* We can get const0_rtx or const_true_rtx in some circumstances. Just
4741 return NULL and let the caller figure out how best to deal with this
4743 if (GET_CODE (comparison) != code)
4746 insn = GEN_FCN (icode) (subtarget, comparison, op2, op3);
4748 /* If that failed, then give up. */
4754 if (subtarget != target)
4755 convert_move (target, subtarget, 0);
4760 /* These functions attempt to generate an insn body, rather than
4761 emitting the insn, but if the gen function already emits them, we
4762 make no attempt to turn them back into naked patterns. */
4764 /* Generate and return an insn body to add Y to X. */
4767 gen_add2_insn (rtx x, rtx y)
4769 int icode = (int) optab_handler (add_optab, GET_MODE (x))->insn_code;
/* Unlike gen_add3_insn, predicate failures here are hard errors: the
   caller guarantees the operands are acceptable.  */
4771 gcc_assert (insn_data[icode].operand[0].predicate
4772 (x, insn_data[icode].operand[0].mode));
4773 gcc_assert (insn_data[icode].operand[1].predicate
4774 (x, insn_data[icode].operand[1].mode));
4775 gcc_assert (insn_data[icode].operand[2].predicate
4776 (y, insn_data[icode].operand[2].mode));
4778 return GEN_FCN (icode) (x, x, y);
4781 /* Generate and return an insn body to add r1 and c,
4782 storing the result in r0. */
4785 gen_add3_insn (rtx r0, rtx r1, rtx c)
4787 int icode = (int) optab_handler (add_optab, GET_MODE (r0))->insn_code;
/* Fail softly (rather than assert) if there is no add pattern or an
   operand is rejected by its predicate.  */
4789 if (icode == CODE_FOR_nothing
4790 || !(insn_data[icode].operand[0].predicate
4791 (r0, insn_data[icode].operand[0].mode))
4792 || !(insn_data[icode].operand[1].predicate
4793 (r1, insn_data[icode].operand[1].mode))
4794 || !(insn_data[icode].operand[2].predicate
4795 (c, insn_data[icode].operand[2].mode)))
4798 return GEN_FCN (icode) (r0, r1, c);
4802 have_add2_insn (rtx x, rtx y)
4806 gcc_assert (GET_MODE (x) != VOIDmode);
4808 icode = (int) optab_handler (add_optab, GET_MODE (x))->insn_code;
4810 if (icode == CODE_FOR_nothing)
4813 if (!(insn_data[icode].operand[0].predicate
4814 (x, insn_data[icode].operand[0].mode))
4815 || !(insn_data[icode].operand[1].predicate
4816 (x, insn_data[icode].operand[1].mode))
4817 || !(insn_data[icode].operand[2].predicate
4818 (y, insn_data[icode].operand[2].mode)))
4824 /* Generate and return an insn body to subtract Y from X. */
4827 gen_sub2_insn (rtx x, rtx y)
4829 int icode = (int) optab_handler (sub_optab, GET_MODE (x))->insn_code;
4831 gcc_assert (insn_data[icode].operand[0].predicate
4832 (x, insn_data[icode].operand[0].mode));
4833 gcc_assert (insn_data[icode].operand[1].predicate
4834 (x, insn_data[icode].operand[1].mode));
4835 gcc_assert (insn_data[icode].operand[2].predicate
4836 (y, insn_data[icode].operand[2].mode));
4838 return GEN_FCN (icode) (x, x, y);
4841 /* Generate and return an insn body to subtract r1 and c,
4842 storing the result in r0. */
4845 gen_sub3_insn (rtx r0, rtx r1, rtx c)
4847 int icode = (int) optab_handler (sub_optab, GET_MODE (r0))->insn_code;
4849 if (icode == CODE_FOR_nothing
4850 || !(insn_data[icode].operand[0].predicate
4851 (r0, insn_data[icode].operand[0].mode))
4852 || !(insn_data[icode].operand[1].predicate
4853 (r1, insn_data[icode].operand[1].mode))
4854 || !(insn_data[icode].operand[2].predicate
4855 (c, insn_data[icode].operand[2].mode)))
4858 return GEN_FCN (icode) (r0, r1, c);
4862 have_sub2_insn (rtx x, rtx y)
4866 gcc_assert (GET_MODE (x) != VOIDmode);
4868 icode = (int) optab_handler (sub_optab, GET_MODE (x))->insn_code;
4870 if (icode == CODE_FOR_nothing)
4873 if (!(insn_data[icode].operand[0].predicate
4874 (x, insn_data[icode].operand[0].mode))
4875 || !(insn_data[icode].operand[1].predicate
4876 (x, insn_data[icode].operand[1].mode))
4877 || !(insn_data[icode].operand[2].predicate
4878 (y, insn_data[icode].operand[2].mode)))
4884 /* Generate the body of an instruction to copy Y into X.
4885 It may be a list of insns, if one insn isn't enough. */
4888 gen_move_insn (rtx x, rtx y)
4893 emit_move_insn_1 (x, y);
4899 /* Return the insn code used to extend FROM_MODE to TO_MODE.
4900 UNSIGNEDP specifies zero-extension instead of sign-extension. If
4901 no such operation exists, CODE_FOR_nothing will be returned. */
4904 can_extend_p (enum machine_mode to_mode, enum machine_mode from_mode,
4908 #ifdef HAVE_ptr_extend
4910 return CODE_FOR_ptr_extend;
4913 tab = unsignedp ? zext_optab : sext_optab;
4914 return convert_optab_handler (tab, to_mode, from_mode)->insn_code;
4917 /* Generate the body of an insn to extend Y (with mode MFROM)
4918 into X (with mode MTO). Do zero-extension if UNSIGNEDP is nonzero. */
4921 gen_extend_insn (rtx x, rtx y, enum machine_mode mto,
4922 enum machine_mode mfrom, int unsignedp)
4924 enum insn_code icode = can_extend_p (mto, mfrom, unsignedp);
4925 return GEN_FCN (icode) (x, y);
4928 /* can_fix_p and can_float_p say whether the target machine
4929 can directly convert a given fixed point type to
4930 a given floating point type, or vice versa.
4931 The returned value is the CODE_FOR_... value to use,
4932 or CODE_FOR_nothing if these modes cannot be directly converted.
4934 *TRUNCP_PTR is set to 1 if it is necessary to output
4935 an explicit FTRUNC insn before the fix insn; otherwise 0. */
4937 static enum insn_code
4938 can_fix_p (enum machine_mode fixmode, enum machine_mode fltmode,
4939 int unsignedp, int *truncp_ptr)
4942 enum insn_code icode;
4944 tab = unsignedp ? ufixtrunc_optab : sfixtrunc_optab;
4945 icode = convert_optab_handler (tab, fixmode, fltmode)->insn_code;
4946 if (icode != CODE_FOR_nothing)
4952 /* FIXME: This requires a port to define both FIX and FTRUNC pattern
4953 for this to work. We need to rework the fix* and ftrunc* patterns
4954 and documentation. */
4955 tab = unsignedp ? ufix_optab : sfix_optab;
4956 icode = convert_optab_handler (tab, fixmode, fltmode)->insn_code;
4957 if (icode != CODE_FOR_nothing
4958 && optab_handler (ftrunc_optab, fltmode)->insn_code != CODE_FOR_nothing)
4965 return CODE_FOR_nothing;
4968 static enum insn_code
4969 can_float_p (enum machine_mode fltmode, enum machine_mode fixmode,
4974 tab = unsignedp ? ufloat_optab : sfloat_optab;
4975 return convert_optab_handler (tab, fltmode, fixmode)->insn_code;
/* NOTE(review): this function is a verbatim fragment -- the extraction that
   produced this file dropped interior lines, and the leading numerals on
   each line are residual original-file line numbers.  Tokens are preserved
   byte-for-byte; only comments are added.  Strategy visible below: (1) scan
   wider fmode/imode pairs for a direct float pattern; (2) if only a signed
   pattern exists for an unsigned source, convert as signed and fix up (shift
   trick or add 2**bitwidth); (3) otherwise fall back to a libcall.  */
4978 /* Generate code to convert FROM to floating point
4979 and store in TO. FROM must be fixed point and not VOIDmode.
4980 UNSIGNEDP nonzero means regard FROM as unsigned.
4981 Normally this is done by correcting the final value
4982 if it is negative. */
4985 expand_float (rtx to, rtx from, int unsignedp)
4987 enum insn_code icode;
4989 enum machine_mode fmode, imode;
4990 bool can_do_signed = false;
4992 /* Crash now, because we won't be able to decide which mode to use. */
4993 gcc_assert (GET_MODE (from) != VOIDmode);
4995 /* Look for an insn to do the conversion. Do it in the specified
4996 modes if possible; otherwise convert either input, output or both to
4997 wider mode. If the integer mode is wider than the mode of FROM,
4998 we can do the conversion signed even if the input is unsigned. */
5000 for (fmode = GET_MODE (to); fmode != VOIDmode;
5001 fmode = GET_MODE_WIDER_MODE (fmode))
5002 for (imode = GET_MODE (from); imode != VOIDmode;
5003 imode = GET_MODE_WIDER_MODE (imode))
5005 int doing_unsigned = unsignedp;
/* Reject a wider float mode that cannot represent the source exactly.  */
5007 if (fmode != GET_MODE (to)
5008 && significand_size (fmode) < GET_MODE_BITSIZE (GET_MODE (from)))
5011 icode = can_float_p (fmode, imode, unsignedp);
5012 if (icode == CODE_FOR_nothing && unsignedp)
5014 enum insn_code scode = can_float_p (fmode, imode, 0);
5015 if (scode != CODE_FOR_nothing)
5016 can_do_signed = true;
/* A signed pattern in a wider integer mode handles unsigned input.  */
5017 if (imode != GET_MODE (from))
5018 icode = scode, doing_unsigned = 0;
5021 if (icode != CODE_FOR_nothing)
5023 if (imode != GET_MODE (from))
5024 from = convert_to_mode (imode, from, unsignedp);
5026 if (fmode != GET_MODE (to))
5027 target = gen_reg_rtx (fmode);
5029 emit_unop_insn (icode, target, from,
5030 doing_unsigned ? UNSIGNED_FLOAT : FLOAT);
5033 convert_move (to, target, 0);
5038 /* Unsigned integer, and no way to convert directly. Convert as signed,
5039 then unconditionally adjust the result. */
5040 if (unsignedp && can_do_signed)
5042 rtx label = gen_label_rtx ();
5044 REAL_VALUE_TYPE offset;
5046 /* Look for a usable floating mode FMODE wider than the source and at
5047 least as wide as the target. Using FMODE will avoid rounding woes
5048 with unsigned values greater than the signed maximum value. */
5050 for (fmode = GET_MODE (to); fmode != VOIDmode;
5051 fmode = GET_MODE_WIDER_MODE (fmode))
5052 if (GET_MODE_BITSIZE (GET_MODE (from)) < GET_MODE_BITSIZE (fmode)
5053 && can_float_p (fmode, GET_MODE (from), 0) != CODE_FOR_nothing)
5056 if (fmode == VOIDmode)
5058 /* There is no such mode. Pretend the target is wide enough. */
5059 fmode = GET_MODE (to);
5061 /* Avoid double-rounding when TO is narrower than FROM. */
5062 if ((significand_size (fmode) + 1)
5063 < GET_MODE_BITSIZE (GET_MODE (from)))
5066 rtx neglabel = gen_label_rtx ();
5068 /* Don't use TARGET if it isn't a register, is a hard register,
5069 or is the wrong mode. */
5071 || REGNO (target) < FIRST_PSEUDO_REGISTER
5072 || GET_MODE (target) != fmode)
5073 target = gen_reg_rtx (fmode);
5075 imode = GET_MODE (from);
5076 do_pending_stack_adjust ();
5078 /* Test whether the sign bit is set. */
5079 emit_cmp_and_jump_insns (from, const0_rtx, LT, NULL_RTX, imode,
5082 /* The sign bit is not set. Convert as signed. */
5083 expand_float (target, from, 0);
5084 emit_jump_insn (gen_jump (label));
5087 /* The sign bit is set.
5088 Convert to a usable (positive signed) value by shifting right
5089 one bit, while remembering if a nonzero bit was shifted
5090 out; i.e., compute (from & 1) | (from >> 1). */
5092 emit_label (neglabel);
5093 temp = expand_binop (imode, and_optab, from, const1_rtx,
5094 NULL_RTX, 1, OPTAB_LIB_WIDEN);
5095 temp1 = expand_shift (RSHIFT_EXPR, imode, from, integer_one_node,
5097 temp = expand_binop (imode, ior_optab, temp, temp1, temp, 1,
5099 expand_float (target, temp, 0);
5101 /* Multiply by 2 to undo the shift above. */
5102 temp = expand_binop (fmode, add_optab, target, target,
5103 target, 0, OPTAB_LIB_WIDEN);
5105 emit_move_insn (target, temp);
5107 do_pending_stack_adjust ();
5113 /* If we are about to do some arithmetic to correct for an
5114 unsigned operand, do it in a pseudo-register. */
5116 if (GET_MODE (to) != fmode
5117 || !REG_P (to) || REGNO (to) < FIRST_PSEUDO_REGISTER)
5118 target = gen_reg_rtx (fmode);
5120 /* Convert as signed integer to floating. */
5121 expand_float (target, from, 0);
5123 /* If FROM is negative (and therefore TO is negative),
5124 correct its value by 2**bitwidth. */
5126 do_pending_stack_adjust ();
5127 emit_cmp_and_jump_insns (from, const0_rtx, GE, NULL_RTX, GET_MODE (from),
5131 real_2expN (&offset, GET_MODE_BITSIZE (GET_MODE (from)), fmode);
5132 temp = expand_binop (fmode, add_optab, target,
5133 CONST_DOUBLE_FROM_REAL_VALUE (offset, fmode),
5134 target, 0, OPTAB_LIB_WIDEN);
5136 emit_move_insn (target, temp);
5138 do_pending_stack_adjust ();
5143 /* No hardware instruction available; call a library routine. */
5148 convert_optab tab = unsignedp ? ufloat_optab : sfloat_optab;
/* Libcalls only exist for SImode and wider sources; widen first.  */
5150 if (GET_MODE_SIZE (GET_MODE (from)) < GET_MODE_SIZE (SImode))
5151 from = convert_to_mode (SImode, from, unsignedp);
5153 libfunc = convert_optab_libfunc (tab, GET_MODE (to), GET_MODE (from));
5154 gcc_assert (libfunc);
5158 value = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
5159 GET_MODE (to), 1, from,
5161 insns = get_insns ();
5164 emit_libcall_block (insns, target, value,
5165 gen_rtx_fmt_e (unsignedp ? UNSIGNED_FLOAT : FLOAT,
5166 GET_MODE (to), from));
5171 /* Copy result to requested destination
5172 if we have been computing in a temp location. */
5176 if (GET_MODE (target) == GET_MODE (to))
5177 emit_move_insn (to, target);
5179 convert_move (to, target, 0);
/* NOTE(review): verbatim fragment -- interior lines were dropped by the
   extraction; leading numerals are residual original-file line numbers.
   Only comments are added/fixed below.  Strategy: (1) scan wider
   fmode/imode pairs for a direct fix pattern (possibly prefixed with
   FTRUNC, per can_fix_p); (2) for unsigned targets, compare against
   2**(N-1) and adjust via subtract/XOR; (3) fall back to a libcall.  */
5183 /* Generate code to convert FROM to fixed point and store in TO. FROM
5184 must be floating point. */
5187 expand_fix (rtx to, rtx from, int unsignedp)
5189 enum insn_code icode;
5191 enum machine_mode fmode, imode;
5194 /* We first try to find a pair of modes, one real and one integer, at
5195 least as wide as FROM and TO, respectively, in which we can open-code
5196 this conversion. If the integer mode is wider than the mode of TO,
5197 we can do the conversion either signed or unsigned. */
5199 for (fmode = GET_MODE (from); fmode != VOIDmode;
5200 fmode = GET_MODE_WIDER_MODE (fmode))
5201 for (imode = GET_MODE (to); imode != VOIDmode;
5202 imode = GET_MODE_WIDER_MODE (imode))
5204 int doing_unsigned = unsignedp;
5206 icode = can_fix_p (imode, fmode, unsignedp, &must_trunc);
5207 if (icode == CODE_FOR_nothing && imode != GET_MODE (to) && unsignedp)
5208 icode = can_fix_p (imode, fmode, 0, &must_trunc), doing_unsigned = 0;
5210 if (icode != CODE_FOR_nothing)
5212 if (fmode != GET_MODE (from))
5213 from = convert_to_mode (fmode, from, 0);
5217 rtx temp = gen_reg_rtx (GET_MODE (from));
5218 from = expand_unop (GET_MODE (from), ftrunc_optab, from,
5222 if (imode != GET_MODE (to))
5223 target = gen_reg_rtx (imode);
5225 emit_unop_insn (icode, target, from,
5226 doing_unsigned ? UNSIGNED_FIX : FIX);
5228 convert_move (to, target, unsignedp);
5233 /* For an unsigned conversion, there is one more way to do it.
5234 If we have a signed conversion, we generate code that compares
5235 the real value to the largest representable positive number. If it
5236 is smaller, the conversion is done normally. Otherwise, subtract
5237 one plus the highest signed number, convert, and add it back.
5239 We only need to check all real modes, since we know we didn't find
5240 anything with a wider integer mode.
5242 This code used to extend FP value into mode wider than the destination.
5243 This is needed for decimal float modes which cannot accurately
5244 represent one plus the highest signed number of the same size, but
5245 not for binary modes. Consider, for instance conversion from SFmode
5248 The hot path through the code is dealing with inputs smaller than 2^63
5249 and doing just the conversion, so there is no bits to lose.
5251 In the other path we know the value is positive in the range 2^63..2^64-1
5252 inclusive. (as for other input overflow happens and result is undefined)
5253 So we know that the most important bit set in mantissa corresponds to
5254 2^63. The subtraction of 2^63 should not generate any rounding as it
5255 simply clears out that bit. The rest is trivial. */
5257 if (unsignedp && GET_MODE_BITSIZE (GET_MODE (to)) <= HOST_BITS_PER_WIDE_INT)
5258 for (fmode = GET_MODE (from); fmode != VOIDmode;
5259 fmode = GET_MODE_WIDER_MODE (fmode))
5260 if (CODE_FOR_nothing != can_fix_p (GET_MODE (to), fmode, 0, &must_trunc)
5261 && (!DECIMAL_FLOAT_MODE_P (fmode)
5262 || GET_MODE_BITSIZE (fmode) > GET_MODE_BITSIZE (GET_MODE (to))))
5265 REAL_VALUE_TYPE offset;
5266 rtx limit, lab1, lab2, insn;
/* limit = 2**(bitsize-1), the first value not representable signed.  */
5268 bitsize = GET_MODE_BITSIZE (GET_MODE (to));
5269 real_2expN (&offset, bitsize - 1, fmode);
5270 limit = CONST_DOUBLE_FROM_REAL_VALUE (offset, fmode);
5271 lab1 = gen_label_rtx ();
5272 lab2 = gen_label_rtx ();
5274 if (fmode != GET_MODE (from))
5275 from = convert_to_mode (fmode, from, 0);
5277 /* See if we need to do the subtraction. */
5278 do_pending_stack_adjust ();
5279 emit_cmp_and_jump_insns (from, limit, GE, NULL_RTX, GET_MODE (from),
5282 /* If not, do the signed "fix" and branch around fixup code. */
5283 expand_fix (to, from, 0);
5284 emit_jump_insn (gen_jump (lab2));
5287 /* Otherwise, subtract 2**(N-1), convert to signed number,
5288 then add 2**(N-1). Do the addition using XOR since this
5289 will often generate better code. */
5291 target = expand_binop (GET_MODE (from), sub_optab, from, limit,
5292 NULL_RTX, 0, OPTAB_LIB_WIDEN);
5293 expand_fix (to, target, 0);
5294 target = expand_binop (GET_MODE (to), xor_optab, to,
5296 ((HOST_WIDE_INT) 1 << (bitsize - 1),
5298 to, 1, OPTAB_LIB_WIDEN);
5301 emit_move_insn (to, target);
5305 if (optab_handler (mov_optab, GET_MODE (to))->insn_code
5306 != CODE_FOR_nothing)
5308 /* Make a place for a REG_NOTE and add it. */
5309 insn = emit_move_insn (to, to);
5310 set_unique_reg_note (insn,
5312 gen_rtx_fmt_e (UNSIGNED_FIX,
5320 /* We can't do it with an insn, so use a library call. But first ensure
5321 that the mode of TO is at least as wide as SImode, since those are the
5322 only library calls we know about. */
5324 if (GET_MODE_SIZE (GET_MODE (to)) < GET_MODE_SIZE (SImode))
5326 target = gen_reg_rtx (SImode);
5328 expand_fix (target, from, unsignedp);
5336 convert_optab tab = unsignedp ? ufix_optab : sfix_optab;
5337 libfunc = convert_optab_libfunc (tab, GET_MODE (to), GET_MODE (from));
5338 gcc_assert (libfunc);
5342 value = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
5343 GET_MODE (to), 1, from,
5345 insns = get_insns ();
5348 emit_libcall_block (insns, target, value,
5349 gen_rtx_fmt_e (unsignedp ? UNSIGNED_FIX : FIX,
5350 GET_MODE (to), from));
5355 if (GET_MODE (to) == GET_MODE (target))
5356 emit_move_insn (to, target);
5358 convert_move (to, target, 0);
/* NOTE(review): verbatim fragment with dropped interior lines; leading
   numerals are residual original-file line numbers.  Selects a
   (sat)fract(uns) conversion optab by UINTP/SATP, uses its insn pattern
   when available, otherwise falls back to the corresponding libcall.  */
5362 /* Generate code to convert FROM or TO a fixed-point.
5363 If UINTP is true, either TO or FROM is an unsigned integer.
5364 If SATP is true, we need to saturate the result. */
5367 expand_fixed_convert (rtx to, rtx from, int uintp, int satp)
5369 enum machine_mode to_mode = GET_MODE (to);
5370 enum machine_mode from_mode = GET_MODE (from);
5372 enum rtx_code this_code;
5373 enum insn_code code;
/* Same-mode conversion degenerates to a plain move.  */
5377 if (to_mode == from_mode)
5379 emit_move_insn (to, from);
5385 tab = satp ? satfractuns_optab : fractuns_optab;
5386 this_code = satp ? UNSIGNED_SAT_FRACT : UNSIGNED_FRACT_CONVERT;
5390 tab = satp ? satfract_optab : fract_optab;
5391 this_code = satp ? SAT_FRACT : FRACT_CONVERT;
5393 code = tab->handlers[to_mode][from_mode].insn_code;
5394 if (code != CODE_FOR_nothing)
5396 emit_unop_insn (code, to, from, this_code);
5400 libfunc = convert_optab_libfunc (tab, to_mode, from_mode);
5401 gcc_assert (libfunc);
5404 value = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST, to_mode,
5405 1, from, from_mode);
5406 insns = get_insns ();
5409 emit_libcall_block (insns, to, value,
5410 gen_rtx_fmt_e (tab->code, to_mode, from));
/* NOTE(review): verbatim fragment with dropped interior lines; leading
   numerals are residual original-file line numbers.  Scans wider
   fmode/imode pairs for a pattern in TAB; presumably returns whether one
   was found -- the return statements fell outside this fragment.  */
5413 /* Generate code to convert FROM to fixed point and store in TO. FROM
5414 must be floating point, TO must be signed. Use the conversion optab
5415 TAB to do the conversion. */
5418 expand_sfix_optab (rtx to, rtx from, convert_optab tab)
5420 enum insn_code icode;
5422 enum machine_mode fmode, imode;
5424 /* We first try to find a pair of modes, one real and one integer, at
5425 least as wide as FROM and TO, respectively, in which we can open-code
5426 this conversion. If the integer mode is wider than the mode of TO,
5427 we can do the conversion either signed or unsigned. */
5429 for (fmode = GET_MODE (from); fmode != VOIDmode;
5430 fmode = GET_MODE_WIDER_MODE (fmode))
5431 for (imode = GET_MODE (to); imode != VOIDmode;
5432 imode = GET_MODE_WIDER_MODE (imode))
5434 icode = convert_optab_handler (tab, imode, fmode)->insn_code;
5435 if (icode != CODE_FOR_nothing)
5437 if (fmode != GET_MODE (from))
5438 from = convert_to_mode (fmode, from, 0);
5440 if (imode != GET_MODE (to))
5441 target = gen_reg_rtx (imode);
5443 emit_unop_insn (icode, target, from, UNKNOWN);
5445 convert_move (to, target, 0);
5453 /* Report whether we have an instruction to perform the operation
5454 specified by CODE on operands of mode MODE. */
5456 have_insn_for (enum rtx_code code, enum machine_mode mode)
5458 return (code_to_optab[(int) code] != 0
5459 && (optab_handler (code_to_optab[(int) code], mode)->insn_code
5460 != CODE_FOR_nothing));
/* NOTE(review): verbatim fragment with dropped interior lines; leading
   numerals are residual original-file line numbers.  Resets every
   handler entry of every optab and convert_optab to CODE_FOR_nothing
   (the runtime counterpart of the designated initializers at top of
   file for compilers without GCC extensions).  */
5463 /* Set all insn_code fields to CODE_FOR_nothing. */
5466 init_insn_codes (void)
5470 for (i = 0; i < (unsigned int) OTI_MAX; i++)
5475 op = &optab_table[i];
5476 for (j = 0; j < NUM_MACHINE_MODES; j++)
5477 optab_handler (op, j)->insn_code = CODE_FOR_nothing;
5479 for (i = 0; i < (unsigned int) COI_MAX; i++)
5484 op = &convert_optab_table[i];
5485 for (j = 0; j < NUM_MACHINE_MODES; j++)
/* convert_optab handlers are indexed by (to-mode, from-mode) pairs.  */
5486 for (k = 0; k < NUM_MACHINE_MODES; k++)
5487 convert_optab_handler (op, j, k)->insn_code = CODE_FOR_nothing;
/* NOTE(review): residual fragments of three tiny initializers
   (init_optab, init_optabv, init_convert_optab); their bodies were
   mostly dropped by the extraction.  Leading numerals are residual
   original-file line numbers; tokens kept verbatim.  */
5491 /* Initialize OP's code to CODE, and write it into the code_to_optab table. */
5493 init_optab (optab op, enum rtx_code code)
5496 code_to_optab[(int) code] = op;
5499 /* Same, but fill in its code as CODE, and do _not_ write it into
5500 the code_to_optab table. */
5502 init_optabv (optab op, enum rtx_code code)
5507 /* Conversion optabs never go in the code_to_optab table. */
5509 init_convert_optab (convert_optab op, enum rtx_code code)
/* NOTE(review): verbatim fragment; interior lines (including the close of
   the header comment below) were dropped by extraction.  Builds the name
   "__<opname><mode-lowercased><suffix>" in a stack buffer and registers
   it via set_optab_libfunc.  Leading numerals are extraction residue.  */
5514 /* Initialize the libfunc fields of an entire group of entries in some
5515 optab. Each entry is set equal to a string consisting of a leading
5516 pair of underscores followed by a generic operation name followed by
5517 a mode name (downshifted to lowercase) followed by a single character
5518 representing the number of operands for the given operation (which is
5519 usually one of the characters '2', '3', or '4').
5521 OPTABLE is the table in which libfunc fields are to be initialized.
5522 OPNAME is the generic (string) name of the operation.
5523 SUFFIX is the character which specifies the number of operands for
5524 the given generic operation.
5525 MODE is the mode to generate for.
5529 gen_libfunc (optab optable, const char *opname, int suffix, enum machine_mode mode)
5531 unsigned opname_len = strlen (opname);
5532 const char *mname = GET_MODE_NAME (mode);
5533 unsigned mname_len = strlen (mname);
5534 char *libfunc_name = alloca (2 + opname_len + mname_len + 1 + 1);
5541 for (q = opname; *q; )
5543 for (q = mname; *q; q++)
5544 *p++ = TOLOWER (*q);
5548 set_optab_libfunc (optable, mode,
5549 ggc_alloc_string (libfunc_name, p - libfunc_name));
/* NOTE(review): verbatim fragment; early-return lines were dropped by
   extraction (leading numerals are residue).  Filters to integer modes
   between word_mode and 2*BITS_PER_WORD (or long long) wide, then
   delegates to gen_libfunc.  */
5552 /* Like gen_libfunc, but verify that integer operation is involved. */
5555 gen_int_libfunc (optab optable, const char *opname, char suffix,
5556 enum machine_mode mode)
5558 int maxsize = 2 * BITS_PER_WORD;
5560 if (GET_MODE_CLASS (mode) != MODE_INT)
5562 if (maxsize < LONG_LONG_TYPE_SIZE)
5563 maxsize = LONG_LONG_TYPE_SIZE;
5564 if (GET_MODE_CLASS (mode) != MODE_INT
5565 || mode < word_mode || GET_MODE_BITSIZE (mode) > maxsize)
5567 gen_libfunc (optable, opname, suffix, mode);
/* NOTE(review): verbatim fragment (leading numerals are extraction
   residue).  Binary FP modes get the plain name; decimal FP modes get
   the name prefixed with DECIMAL_PREFIX ("bid_" or "dpd_").  */
5570 /* Like gen_libfunc, but verify that FP and set decimal prefix if needed. */
5573 gen_fp_libfunc (optab optable, const char *opname, char suffix,
5574 enum machine_mode mode)
5578 if (GET_MODE_CLASS (mode) == MODE_FLOAT)
5579 gen_libfunc (optable, opname, suffix, mode);
5580 if (DECIMAL_FLOAT_MODE_P (mode))
5582 dec_opname = alloca (sizeof (DECIMAL_PREFIX) + strlen (opname));
5583 /* For BID support, change the name to have either a bid_ or dpd_ prefix
5584 depending on the low level floating format used. */
5585 memcpy (dec_opname, DECIMAL_PREFIX, sizeof (DECIMAL_PREFIX) - 1);
5586 strcpy (dec_opname + sizeof (DECIMAL_PREFIX) - 1, opname);
5587 gen_libfunc (optable, dec_opname, suffix, mode);
/* NOTE(review): verbatim fragment (leading numerals are extraction
   residue); the early-return line after the mode check was dropped.  */
5591 /* Like gen_libfunc, but verify that fixed-point operation is involved. */
5594 gen_fixed_libfunc (optab optable, const char *opname, char suffix,
5595 enum machine_mode mode)
5597 if (!ALL_FIXED_POINT_MODE_P (mode))
5599 gen_libfunc (optable, opname, suffix, mode);
/* NOTE(review): verbatim fragment; the close of the comment below and the
   early-return were dropped by extraction (leading numerals are residue).
   Delegates to gen_libfunc only for signed fixed-point modes.  */
5602 /* Like gen_libfunc, but verify that signed fixed-point operation is
5606 gen_signed_fixed_libfunc (optab optable, const char *opname, char suffix,
5607 enum machine_mode mode)
5609 if (!SIGNED_FIXED_POINT_MODE_P (mode))
5611 gen_libfunc (optable, opname, suffix, mode);
/* NOTE(review): verbatim fragment; comment close and early-return dropped
   by extraction (leading numerals are residue).  Delegates to gen_libfunc
   only for unsigned fixed-point modes.  */
5614 /* Like gen_libfunc, but verify that unsigned fixed-point operation is
5618 gen_unsigned_fixed_libfunc (optab optable, const char *opname, char suffix,
5619 enum machine_mode mode)
5621 if (!UNSIGNED_FIXED_POINT_MODE_P (mode))
5623 gen_libfunc (optable, opname, suffix, mode);
/* NOTE(review): verbatim fragment (leading numerals are extraction
   residue).  Dispatches to the FP generator for float/decimal modes and
   the integer generator for integral modes.  */
5626 /* Like gen_libfunc, but verify that FP or INT operation is involved. */
5629 gen_int_fp_libfunc (optab optable, const char *name, char suffix,
5630 enum machine_mode mode)
5632 if (DECIMAL_FLOAT_MODE_P (mode) || GET_MODE_CLASS (mode) == MODE_FLOAT)
5633 gen_fp_libfunc (optable, name, suffix, mode);
5634 if (INTEGRAL_MODE_P (mode))
5635 gen_int_libfunc (optable, name, suffix, mode);
/* NOTE(review): verbatim fragment (leading numerals are extraction
   residue); the line appending 'v' to the copied name was dropped.  */
5638 /* Like gen_libfunc, but verify that FP or INT operation is involved
5639 and add 'v' suffix for integer operation. */
5642 gen_intv_fp_libfunc (optab optable, const char *name, char suffix,
5643 enum machine_mode mode)
5645 if (DECIMAL_FLOAT_MODE_P (mode) || GET_MODE_CLASS (mode) == MODE_FLOAT)
5646 gen_fp_libfunc (optable, name, suffix, mode);
5647 if (GET_MODE_CLASS (mode) == MODE_INT)
5649 int len = strlen (name);
5650 char *v_name = alloca (len + 2);
5651 strcpy (v_name, name);
5653 v_name[len + 1] = 0;
5654 gen_int_libfunc (optable, v_name, suffix, mode);
/* NOTE(review): verbatim fragment; the comment close was dropped by
   extraction (leading numerals are residue).  Dispatches by mode class:
   FP/decimal, integral, and any fixed-point.  */
5658 /* Like gen_libfunc, but verify that FP or INT or FIXED operation is
5662 gen_int_fp_fixed_libfunc (optab optable, const char *name, char suffix,
5663 enum machine_mode mode)
5665 if (DECIMAL_FLOAT_MODE_P (mode) || GET_MODE_CLASS (mode) == MODE_FLOAT)
5666 gen_fp_libfunc (optable, name, suffix, mode);
5667 if (INTEGRAL_MODE_P (mode))
5668 gen_int_libfunc (optable, name, suffix, mode);
5669 if (ALL_FIXED_POINT_MODE_P (mode))
5670 gen_fixed_libfunc (optable, name, suffix, mode);
/* NOTE(review): verbatim fragment; the comment close was dropped by
   extraction (leading numerals are residue).  Dispatches by mode class:
   FP/decimal, integral, and signed fixed-point.  */
5673 /* Like gen_libfunc, but verify that FP or INT or signed FIXED operation is
5677 gen_int_fp_signed_fixed_libfunc (optab optable, const char *name, char suffix,
5678 enum machine_mode mode)
5680 if (DECIMAL_FLOAT_MODE_P (mode) || GET_MODE_CLASS (mode) == MODE_FLOAT)
5681 gen_fp_libfunc (optable, name, suffix, mode);
5682 if (INTEGRAL_MODE_P (mode))
5683 gen_int_libfunc (optable, name, suffix, mode);
5684 if (SIGNED_FIXED_POINT_MODE_P (mode))
5685 gen_signed_fixed_libfunc (optable, name, suffix, mode);
/* NOTE(review): verbatim fragment; the comment close was dropped by
   extraction (leading numerals are residue).  Dispatches for integral
   and any fixed-point modes.  */
5688 /* Like gen_libfunc, but verify that INT or FIXED operation is
5692 gen_int_fixed_libfunc (optab optable, const char *name, char suffix,
5693 enum machine_mode mode)
5695 if (INTEGRAL_MODE_P (mode))
5696 gen_int_libfunc (optable, name, suffix, mode);
5697 if (ALL_FIXED_POINT_MODE_P (mode))
5698 gen_fixed_libfunc (optable, name, suffix, mode);
/* NOTE(review): verbatim fragment; the comment close was dropped by
   extraction (leading numerals are residue).  Dispatches for integral
   and signed fixed-point modes.  */
5701 /* Like gen_libfunc, but verify that INT or signed FIXED operation is
5705 gen_int_signed_fixed_libfunc (optab optable, const char *name, char suffix,
5706 enum machine_mode mode)
5708 if (INTEGRAL_MODE_P (mode))
5709 gen_int_libfunc (optable, name, suffix, mode);
5710 if (SIGNED_FIXED_POINT_MODE_P (mode))
5711 gen_signed_fixed_libfunc (optable, name, suffix, mode);
/* NOTE(review): verbatim fragment; the comment close was dropped by
   extraction (leading numerals are residue).  Dispatches for integral
   and unsigned fixed-point modes.  */
5714 /* Like gen_libfunc, but verify that INT or unsigned FIXED operation is
5718 gen_int_unsigned_fixed_libfunc (optab optable, const char *name, char suffix,
5719 enum machine_mode mode)
5721 if (INTEGRAL_MODE_P (mode))
5722 gen_int_libfunc (optable, name, suffix, mode);
5723 if (UNSIGNED_FIXED_POINT_MODE_P (mode))
5724 gen_unsigned_fixed_libfunc (optable, name, suffix, mode);
/* NOTE(review): verbatim fragment with dropped interior lines (leading
   numerals are extraction residue).  Builds "__<opname><from><to>" --
   with a DECIMAL_PREFIX when either mode is decimal FP -- and registers
   it via set_conv_libfunc.  */
5727 /* Initialize the libfunc fields of an entire group of entries of an
5728 inter-mode-class conversion optab. The string formation rules are
5729 similar to the ones for init_libfuncs, above, but instead of having
5730 a mode name and an operand count these functions have two mode names
5731 and no operand count. */
5734 gen_interclass_conv_libfunc (convert_optab tab,
5736 enum machine_mode tmode,
5737 enum machine_mode fmode)
5739 size_t opname_len = strlen (opname);
5740 size_t mname_len = 0;
5742 const char *fname, *tname;
5744 char *libfunc_name, *suffix;
5745 char *nondec_name, *dec_name, *nondec_suffix, *dec_suffix;
5748 /* If this is a decimal conversion, add the current BID vs. DPD prefix that
5749 depends on which underlying decimal floating point format is used. */
5750 const size_t dec_len = sizeof (DECIMAL_PREFIX) - 1;
5752 mname_len = strlen (GET_MODE_NAME (tmode)) + strlen (GET_MODE_NAME (fmode));
5754 nondec_name = alloca (2 + opname_len + mname_len + 1 + 1);
5755 nondec_name[0] = '_';
5756 nondec_name[1] = '_';
5757 memcpy (&nondec_name[2], opname, opname_len);
5758 nondec_suffix = nondec_name + opname_len + 2;
5760 dec_name = alloca (2 + dec_len + opname_len + mname_len + 1 + 1);
5763 memcpy (&dec_name[2], DECIMAL_PREFIX, dec_len);
5764 memcpy (&dec_name[2+dec_len], opname, opname_len);
5765 dec_suffix = dec_name + dec_len + opname_len + 2;
5767 fname = GET_MODE_NAME (fmode);
5768 tname = GET_MODE_NAME (tmode);
5770 if (DECIMAL_FLOAT_MODE_P(fmode) || DECIMAL_FLOAT_MODE_P(tmode))
5772 libfunc_name = dec_name;
5773 suffix = dec_suffix;
5777 libfunc_name = nondec_name;
5778 suffix = nondec_suffix;
5782 for (q = fname; *q; p++, q++)
5784 for (q = tname; *q; p++, q++)
5789 set_conv_libfunc (tab, tmode, fmode,
5790 ggc_alloc_string (libfunc_name, p - libfunc_name));
/* NOTE(review): verbatim fragment; early-return lines dropped by
   extraction (leading numerals are residue).  Requires FMODE integral and
   TMODE binary or decimal float before delegating.  */
5793 /* Same as gen_interclass_conv_libfunc but verify that we are producing
5794 int->fp conversion. */
5797 gen_int_to_fp_conv_libfunc (convert_optab tab,
5799 enum machine_mode tmode,
5800 enum machine_mode fmode)
5802 if (GET_MODE_CLASS (fmode) != MODE_INT)
5804 if (GET_MODE_CLASS (tmode) != MODE_FLOAT && !DECIMAL_FLOAT_MODE_P (tmode))
5806 gen_interclass_conv_libfunc (tab, opname, tmode, fmode);
/* NOTE(review): verbatim fragment; the comment close was dropped by
   extraction (leading numerals are residue).  Decimal targets use the
   "floatuns" stem, binary FP targets use "floatun".  */
5809 /* ufloat_optab is special by using floatun for FP and floatuns decimal fp
5813 gen_ufloat_conv_libfunc (convert_optab tab,
5814 const char *opname ATTRIBUTE_UNUSED,
5815 enum machine_mode tmode,
5816 enum machine_mode fmode)
5818 if (DECIMAL_FLOAT_MODE_P (tmode))
5819 gen_int_to_fp_conv_libfunc (tab, "floatuns", tmode, fmode);
5821 gen_int_to_fp_conv_libfunc (tab, "floatun", tmode, fmode);
/* NOTE(review): verbatim fragment; early-return lines dropped by
   extraction (leading numerals are residue).  */
5824 /* Same as gen_interclass_conv_libfunc but verify that we are producing
5825 an int->fp conversion with a binary (non-decimal) float target.  */
5828 gen_int_to_fp_nondecimal_conv_libfunc (convert_optab tab,
5830 enum machine_mode tmode,
5831 enum machine_mode fmode)
5833 if (GET_MODE_CLASS (fmode) != MODE_INT)
5835 if (GET_MODE_CLASS (tmode) != MODE_FLOAT)
5837 gen_interclass_conv_libfunc (tab, opname, tmode, fmode);
/* NOTE(review): verbatim fragment; early-return lines dropped by
   extraction (leading numerals are residue).  Requires FMODE to be a
   binary or decimal float and TMODE integral before delegating.  */
5840 /* Same as gen_interclass_conv_libfunc but verify that we are producing
5841 fp->int conversion with no decimal floating point involved. */
5844 gen_fp_to_int_conv_libfunc (convert_optab tab,
5846 enum machine_mode tmode,
5847 enum machine_mode fmode)
5849 if (GET_MODE_CLASS (fmode) != MODE_FLOAT && !DECIMAL_FLOAT_MODE_P (fmode))
5851 if (GET_MODE_CLASS (tmode) != MODE_INT)
5853 gen_interclass_conv_libfunc (tab, opname, tmode, fmode);
/* NOTE(review): verbatim fragment with dropped interior lines (leading
   numerals are extraction residue).  Same name-building scheme as
   gen_interclass_conv_libfunc, for conversions within one mode class.  */
5856 /* Initialize the libfunc fiels of an of an intra-mode-class conversion optab.
5857 The string formation rules are
5858 similar to the ones for init_libfunc, above. */
5861 gen_intraclass_conv_libfunc (convert_optab tab, const char *opname,
5862 enum machine_mode tmode, enum machine_mode fmode)
5864 size_t opname_len = strlen (opname);
5865 size_t mname_len = 0;
5867 const char *fname, *tname;
5869 char *nondec_name, *dec_name, *nondec_suffix, *dec_suffix;
5870 char *libfunc_name, *suffix;
5873 /* If this is a decimal conversion, add the current BID vs. DPD prefix that
5874 depends on which underlying decimal floating point format is used. */
5875 const size_t dec_len = sizeof (DECIMAL_PREFIX) - 1;
5877 mname_len = strlen (GET_MODE_NAME (tmode)) + strlen (GET_MODE_NAME (fmode));
5879 nondec_name = alloca (2 + opname_len + mname_len + 1 + 1);
5880 nondec_name[0] = '_';
5881 nondec_name[1] = '_';
5882 memcpy (&nondec_name[2], opname, opname_len);
5883 nondec_suffix = nondec_name + opname_len + 2;
5885 dec_name = alloca (2 + dec_len + opname_len + mname_len + 1 + 1);
5888 memcpy (&dec_name[2], DECIMAL_PREFIX, dec_len);
5889 memcpy (&dec_name[2 + dec_len], opname, opname_len);
5890 dec_suffix = dec_name + dec_len + opname_len + 2;
5892 fname = GET_MODE_NAME (fmode);
5893 tname = GET_MODE_NAME (tmode);
5895 if (DECIMAL_FLOAT_MODE_P(fmode) || DECIMAL_FLOAT_MODE_P(tmode))
5897 libfunc_name = dec_name;
5898 suffix = dec_suffix;
5902 libfunc_name = nondec_name;
5903 suffix = nondec_suffix;
5907 for (q = fname; *q; p++, q++)
5909 for (q = tname; *q; p++, q++)
5915 set_conv_libfunc (tab, tmode, fmode,
5916 ggc_alloc_string (libfunc_name, p - libfunc_name));
/* NOTE(review): verbatim fragment; early-return lines dropped by
   extraction (leading numerals are residue).  Inter-class naming when
   exactly one side is decimal FP; intra-class when both are binary FP or
   both decimal, and only for genuine narrowing (fmode wider than tmode).  */
5919 /* Pick proper libcall for trunc_optab. We need to chose if we do
5920 truncation or extension and interclass or intraclass. */
5923 gen_trunc_conv_libfunc (convert_optab tab,
5925 enum machine_mode tmode,
5926 enum machine_mode fmode)
5928 if (GET_MODE_CLASS (tmode) != MODE_FLOAT && !DECIMAL_FLOAT_MODE_P (tmode))
5930 if (GET_MODE_CLASS (fmode) != MODE_FLOAT && !DECIMAL_FLOAT_MODE_P (fmode))
5935 if ((GET_MODE_CLASS (tmode) == MODE_FLOAT && DECIMAL_FLOAT_MODE_P (fmode))
5936 || (GET_MODE_CLASS (fmode) == MODE_FLOAT && DECIMAL_FLOAT_MODE_P (tmode)))
5937 gen_interclass_conv_libfunc (tab, opname, tmode, fmode);
5939 if (GET_MODE_PRECISION (fmode) <= GET_MODE_PRECISION (tmode))
5942 if ((GET_MODE_CLASS (tmode) == MODE_FLOAT
5943 && GET_MODE_CLASS (fmode) == MODE_FLOAT)
5944 || (DECIMAL_FLOAT_MODE_P (fmode) && DECIMAL_FLOAT_MODE_P (tmode)))
5945 gen_intraclass_conv_libfunc (tab, opname, tmode, fmode);
/* NOTE(review): verbatim fragment; early-return lines dropped by
   extraction (leading numerals are residue).  Mirror of
   gen_trunc_conv_libfunc for widening: rejects narrowing (fmode wider
   than tmode) instead.  */
5948 /* Pick proper libcall for extend_optab. We need to chose if we do
5949 truncation or extension and interclass or intraclass. */
5952 gen_extend_conv_libfunc (convert_optab tab,
5953 const char *opname ATTRIBUTE_UNUSED,
5954 enum machine_mode tmode,
5955 enum machine_mode fmode)
5957 if (GET_MODE_CLASS (tmode) != MODE_FLOAT && !DECIMAL_FLOAT_MODE_P (tmode))
5959 if (GET_MODE_CLASS (fmode) != MODE_FLOAT && !DECIMAL_FLOAT_MODE_P (fmode))
5964 if ((GET_MODE_CLASS (tmode) == MODE_FLOAT && DECIMAL_FLOAT_MODE_P (fmode))
5965 || (GET_MODE_CLASS (fmode) == MODE_FLOAT && DECIMAL_FLOAT_MODE_P (tmode)))
5966 gen_interclass_conv_libfunc (tab, opname, tmode, fmode);
5968 if (GET_MODE_PRECISION (fmode) > GET_MODE_PRECISION (tmode))
5971 if ((GET_MODE_CLASS (tmode) == MODE_FLOAT
5972 && GET_MODE_CLASS (fmode) == MODE_FLOAT)
5973 || (DECIMAL_FLOAT_MODE_P (fmode) && DECIMAL_FLOAT_MODE_P (tmode)))
5974 gen_intraclass_conv_libfunc (tab, opname, tmode, fmode);
5977 /* Pick proper libcall for fract_optab. We need to chose if we do
5978 interclass or intraclass. */
/* NOTE(review): the "static void" line, OPNAME parameter, braces and
   early "return;" are elided from this listing.  */
5981 gen_fract_conv_libfunc (convert_optab tab,
5983 enum machine_mode tmode,
5984 enum machine_mode fmode)
/* At least one side must be a fixed-point mode (elided "return;"
   assumed otherwise).  */
5988 if (!(ALL_FIXED_POINT_MODE_P (tmode) || ALL_FIXED_POINT_MODE_P (fmode)))
/* Same mode class -> intraclass libcall, otherwise interclass.  */
5991 if (GET_MODE_CLASS (tmode) == GET_MODE_CLASS (fmode))
5992 gen_intraclass_conv_libfunc (tab, opname, tmode, fmode);
5994 gen_interclass_conv_libfunc (tab, opname, tmode, fmode);
5997 /* Pick proper libcall for fractuns_optab. */
/* NOTE(review): "static void", OPNAME parameter, braces and the early
   "return;" are elided from this listing.  */
6000 gen_fractuns_conv_libfunc (convert_optab tab,
6002 enum machine_mode tmode,
6003 enum machine_mode fmode)
6007 /* One mode must be a fixed-point mode, and the other must be an integer
/* (continuation of the original comment is elided here).  */
6009 if (!((ALL_FIXED_POINT_MODE_P (tmode) && GET_MODE_CLASS (fmode) == MODE_INT)
6010 || (ALL_FIXED_POINT_MODE_P (fmode)
6011 && GET_MODE_CLASS (tmode) == MODE_INT)))
/* Fixed-point <-> integer is always an interclass conversion.  */
6014 gen_interclass_conv_libfunc (tab, opname, tmode, fmode);
6017 /* Pick proper libcall for satfract_optab. We need to chose if we do
6018 interclass or intraclass. */
/* NOTE(review): "static void", OPNAME parameter, braces and the early
   "return;" are elided from this listing.  */
6021 gen_satfract_conv_libfunc (convert_optab tab,
6023 enum machine_mode tmode,
6024 enum machine_mode fmode)
6028 /* TMODE must be a fixed-point mode. */
6029 if (!ALL_FIXED_POINT_MODE_P (tmode))
/* Same mode class -> intraclass libcall, otherwise interclass.  */
6032 if (GET_MODE_CLASS (tmode) == GET_MODE_CLASS (fmode))
6033 gen_intraclass_conv_libfunc (tab, opname, tmode, fmode);
6035 gen_interclass_conv_libfunc (tab, opname, tmode, fmode);
6038 /* Pick proper libcall for satfractuns_optab. */
/* NOTE(review): "static void", OPNAME parameter, braces and the early
   "return;" are elided from this listing.  */
6041 gen_satfractuns_conv_libfunc (convert_optab tab,
6043 enum machine_mode tmode,
6044 enum machine_mode fmode)
6048 /* TMODE must be a fixed-point mode, and FMODE must be an integer mode. */
6049 if (!(ALL_FIXED_POINT_MODE_P (tmode) && GET_MODE_CLASS (fmode) == MODE_INT))
/* Saturating fixed-point <- unsigned integer is interclass.  */
6052 gen_interclass_conv_libfunc (tab, opname, tmode, fmode);
/* Build a SYMBOL_REF rtx for the external library function NAME, with
   section-info flags assigned by the target hook.  NOTE(review): the
   return type line, braces, the "rtx symbol;" declaration and the final
   "return symbol;" are elided from this listing.  */
6056 init_one_libfunc (const char *name)
6060 /* Create a FUNCTION_DECL that can be passed to
6061 targetm.encode_section_info. */
6062 /* ??? We don't have any type information except for this is
6063 a function. Pretend this is "int foo()". */
6064 tree decl = build_decl (FUNCTION_DECL, get_identifier (name),
6065 build_function_type (integer_type_node, NULL_TREE));
6066 DECL_ARTIFICIAL (decl) = 1;
6067 DECL_EXTERNAL (decl) = 1;
6068 TREE_PUBLIC (decl) = 1;
/* DECL_RTL of a FUNCTION_DECL is a MEM; operand 0 is the SYMBOL_REF.  */
6070 symbol = XEXP (DECL_RTL (decl), 0);
6072 /* Zap the nonsensical SYMBOL_REF_DECL for this. What we're left with
6073 are the flags assigned by targetm.encode_section_info. */
6074 SET_SYMBOL_REF_DECL (symbol, 0);
6079 /* Call this to reset the function entry for one optab (OPTABLE) in mode
6080 MODE to NAME, which should be either 0 or a string constant. */
/* NOTE(review): the return type line, braces, the "rtx val;" declaration,
   the assignments of e.mode1/e.mode2 before the lookup, and the
   NAME == 0 branch are elided from this listing.  */
6082 set_optab_libfunc (optab optable, enum machine_mode mode, const char *name)
6085 struct libfunc_entry e;
6086 struct libfunc_entry **slot;
/* Key the hash entry by the optab's index in optab_table.  */
6087 e.optab = (size_t) (optable - &optab_table[0]);
6092 val = init_one_libfunc (name);
/* INSERT: create a fresh entry (mode2 is unused for non-conversion
   optabs and stays VOIDmode).  */
6095 slot = (struct libfunc_entry **) htab_find_slot (libfunc_hash, &e, INSERT);
6097 *slot = ggc_alloc (sizeof (struct libfunc_entry));
6098 (*slot)->optab = (size_t) (optable - &optab_table[0]);
6099 (*slot)->mode1 = mode;
6100 (*slot)->mode2 = VOIDmode;
6101 (*slot)->libfunc = val;
6104 /* Call this to reset the function entry for one conversion optab
6105 (OPTABLE) from mode FMODE to mode TMODE to NAME, which should be
6106 either 0 or a string constant. */
/* NOTE(review): the return type line, braces, the "rtx val;" declaration
   and the NAME == 0 branch are elided from this listing.  */
6108 set_conv_libfunc (convert_optab optable, enum machine_mode tmode,
6109 enum machine_mode fmode, const char *name)
6112 struct libfunc_entry e;
6113 struct libfunc_entry **slot;
/* Key the hash entry by the optab's index in convert_optab_table.  */
6114 e.optab = (size_t) (optable - &convert_optab_table[0]);
6119 val = init_one_libfunc (name);
/* INSERT: create a fresh entry keyed on (optab, tmode, fmode).  */
6122 slot = (struct libfunc_entry **) htab_find_slot (libfunc_hash, &e, INSERT);
6124 *slot = ggc_alloc (sizeof (struct libfunc_entry));
6125 (*slot)->optab = (size_t) (optable - &convert_optab_table[0]);
6126 (*slot)->mode1 = tmode;
6127 (*slot)->mode2 = fmode;
6128 (*slot)->libfunc = val;
6131 /* Call this to initialize the contents of the optabs
6132 appropriately for the current target machine. */
/* NOTE(review): the "void init_optabs (void)" header, braces, the loop
   index declaration and several blank/brace lines are elided from this
   listing.  */
6138 enum machine_mode int_mode;
/* Per-compilation libfunc cache, GC-rooted.  */
6141 libfunc_hash = htab_create_ggc (10, hash_libfunc, eq_libfunc, NULL);
6142 /* Start by initializing all tables to contain CODE_FOR_nothing. */
6144 for (i = 0; i < NUM_RTX_CODE; i++)
6145 setcc_gen_code[i] = CODE_FOR_nothing;
6147 #ifdef HAVE_conditional_move
6148 for (i = 0; i < NUM_MACHINE_MODES; i++)
6149 movcc_gen_code[i] = CODE_FOR_nothing;
6152 for (i = 0; i < NUM_MACHINE_MODES; i++)
6154 vcond_gen_code[i] = CODE_FOR_nothing;
6155 vcondu_gen_code[i] = CODE_FOR_nothing;
6158 #if GCC_VERSION >= 4000
6159 /* We statically initialize the insn_codes with CODE_FOR_nothing. */
/* Associate each arithmetic/logical optab with its RTL code (UNKNOWN
   when there is no single corresponding rtx code).  */
6166 init_optab (add_optab, PLUS);
6167 init_optabv (addv_optab, PLUS);
6168 init_optab (sub_optab, MINUS);
6169 init_optabv (subv_optab, MINUS);
6170 init_optab (ssadd_optab, SS_PLUS);
6171 init_optab (usadd_optab, US_PLUS);
6172 init_optab (sssub_optab, SS_MINUS);
6173 init_optab (ussub_optab, US_MINUS);
6174 init_optab (smul_optab, MULT);
6175 init_optab (ssmul_optab, SS_MULT);
6176 init_optab (usmul_optab, US_MULT);
6177 init_optabv (smulv_optab, MULT);
6178 init_optab (smul_highpart_optab, UNKNOWN);
6179 init_optab (umul_highpart_optab, UNKNOWN);
6180 init_optab (smul_widen_optab, UNKNOWN);
6181 init_optab (umul_widen_optab, UNKNOWN);
6182 init_optab (usmul_widen_optab, UNKNOWN);
6183 init_optab (smadd_widen_optab, UNKNOWN);
6184 init_optab (umadd_widen_optab, UNKNOWN);
6185 init_optab (ssmadd_widen_optab, UNKNOWN);
6186 init_optab (usmadd_widen_optab, UNKNOWN);
6187 init_optab (smsub_widen_optab, UNKNOWN);
6188 init_optab (umsub_widen_optab, UNKNOWN);
6189 init_optab (ssmsub_widen_optab, UNKNOWN);
6190 init_optab (usmsub_widen_optab, UNKNOWN);
6191 init_optab (sdiv_optab, DIV);
6192 init_optab (ssdiv_optab, SS_DIV);
6193 init_optab (usdiv_optab, US_DIV);
6194 init_optabv (sdivv_optab, DIV);
6195 init_optab (sdivmod_optab, UNKNOWN);
6196 init_optab (udiv_optab, UDIV);
6197 init_optab (udivmod_optab, UNKNOWN);
6198 init_optab (smod_optab, MOD);
6199 init_optab (umod_optab, UMOD);
6200 init_optab (fmod_optab, UNKNOWN);
6201 init_optab (remainder_optab, UNKNOWN);
6202 init_optab (ftrunc_optab, UNKNOWN);
6203 init_optab (and_optab, AND);
6204 init_optab (ior_optab, IOR);
6205 init_optab (xor_optab, XOR);
6206 init_optab (ashl_optab, ASHIFT);
6207 init_optab (ssashl_optab, SS_ASHIFT);
6208 init_optab (usashl_optab, US_ASHIFT);
6209 init_optab (ashr_optab, ASHIFTRT);
6210 init_optab (lshr_optab, LSHIFTRT);
6211 init_optab (rotl_optab, ROTATE);
6212 init_optab (rotr_optab, ROTATERT);
6213 init_optab (smin_optab, SMIN);
6214 init_optab (smax_optab, SMAX);
6215 init_optab (umin_optab, UMIN);
6216 init_optab (umax_optab, UMAX);
6217 init_optab (pow_optab, UNKNOWN);
6218 init_optab (atan2_optab, UNKNOWN);
6220 /* These three have codes assigned exclusively for the sake of
6222 init_optab (mov_optab, SET);
6223 init_optab (movstrict_optab, STRICT_LOW_PART);
6224 init_optab (cmp_optab, COMPARE);
6226 init_optab (storent_optab, UNKNOWN);
6228 init_optab (ucmp_optab, UNKNOWN);
6229 init_optab (tst_optab, UNKNOWN);
/* Comparison optabs.  */
6231 init_optab (eq_optab, EQ);
6232 init_optab (ne_optab, NE);
6233 init_optab (gt_optab, GT);
6234 init_optab (ge_optab, GE);
6235 init_optab (lt_optab, LT);
6236 init_optab (le_optab, LE);
6237 init_optab (unord_optab, UNORDERED);
/* Unary operations.  */
6239 init_optab (neg_optab, NEG);
6240 init_optab (ssneg_optab, SS_NEG);
6241 init_optab (usneg_optab, US_NEG);
6242 init_optabv (negv_optab, NEG);
6243 init_optab (abs_optab, ABS);
6244 init_optabv (absv_optab, ABS);
6245 init_optab (addcc_optab, UNKNOWN);
6246 init_optab (one_cmpl_optab, NOT);
6247 init_optab (bswap_optab, BSWAP);
6248 init_optab (ffs_optab, FFS);
6249 init_optab (clz_optab, CLZ);
6250 init_optab (ctz_optab, CTZ);
6251 init_optab (popcount_optab, POPCOUNT);
6252 init_optab (parity_optab, PARITY);
6253 init_optab (sqrt_optab, SQRT);
/* Math-function optabs with no dedicated rtx code.  */
6254 init_optab (floor_optab, UNKNOWN);
6255 init_optab (ceil_optab, UNKNOWN);
6256 init_optab (round_optab, UNKNOWN);
6257 init_optab (btrunc_optab, UNKNOWN);
6258 init_optab (nearbyint_optab, UNKNOWN);
6259 init_optab (rint_optab, UNKNOWN);
6260 init_optab (sincos_optab, UNKNOWN);
6261 init_optab (sin_optab, UNKNOWN);
6262 init_optab (asin_optab, UNKNOWN);
6263 init_optab (cos_optab, UNKNOWN);
6264 init_optab (acos_optab, UNKNOWN);
6265 init_optab (exp_optab, UNKNOWN);
6266 init_optab (exp10_optab, UNKNOWN);
6267 init_optab (exp2_optab, UNKNOWN);
6268 init_optab (expm1_optab, UNKNOWN);
6269 init_optab (ldexp_optab, UNKNOWN);
6270 init_optab (scalb_optab, UNKNOWN);
6271 init_optab (logb_optab, UNKNOWN);
6272 init_optab (ilogb_optab, UNKNOWN);
6273 init_optab (log_optab, UNKNOWN);
6274 init_optab (log10_optab, UNKNOWN);
6275 init_optab (log2_optab, UNKNOWN);
6276 init_optab (log1p_optab, UNKNOWN);
6277 init_optab (tan_optab, UNKNOWN);
6278 init_optab (atan_optab, UNKNOWN);
6279 init_optab (copysign_optab, UNKNOWN);
6280 init_optab (signbit_optab, UNKNOWN);
6282 init_optab (isinf_optab, UNKNOWN);
6284 init_optab (strlen_optab, UNKNOWN);
6285 init_optab (cbranch_optab, UNKNOWN);
6286 init_optab (cmov_optab, UNKNOWN);
6287 init_optab (cstore_optab, UNKNOWN);
6288 init_optab (push_optab, UNKNOWN);
/* Vector reduction and widening-sum optabs.  */
6290 init_optab (reduc_smax_optab, UNKNOWN);
6291 init_optab (reduc_umax_optab, UNKNOWN);
6292 init_optab (reduc_smin_optab, UNKNOWN);
6293 init_optab (reduc_umin_optab, UNKNOWN);
6294 init_optab (reduc_splus_optab, UNKNOWN);
6295 init_optab (reduc_uplus_optab, UNKNOWN);
6297 init_optab (ssum_widen_optab, UNKNOWN);
6298 init_optab (usum_widen_optab, UNKNOWN);
6299 init_optab (sdot_prod_optab, UNKNOWN);
6300 init_optab (udot_prod_optab, UNKNOWN);
/* Vector element / permutation optabs.  */
6302 init_optab (vec_extract_optab, UNKNOWN);
6303 init_optab (vec_extract_even_optab, UNKNOWN);
6304 init_optab (vec_extract_odd_optab, UNKNOWN);
6305 init_optab (vec_interleave_high_optab, UNKNOWN);
6306 init_optab (vec_interleave_low_optab, UNKNOWN);
6307 init_optab (vec_set_optab, UNKNOWN);
6308 init_optab (vec_init_optab, UNKNOWN);
6309 init_optab (vec_shl_optab, UNKNOWN);
6310 init_optab (vec_shr_optab, UNKNOWN);
6311 init_optab (vec_realign_load_optab, UNKNOWN);
6312 init_optab (movmisalign_optab, UNKNOWN);
6313 init_optab (vec_widen_umult_hi_optab, UNKNOWN);
6314 init_optab (vec_widen_umult_lo_optab, UNKNOWN);
6315 init_optab (vec_widen_smult_hi_optab, UNKNOWN);
6316 init_optab (vec_widen_smult_lo_optab, UNKNOWN);
6317 init_optab (vec_unpacks_hi_optab, UNKNOWN);
6318 init_optab (vec_unpacks_lo_optab, UNKNOWN);
6319 init_optab (vec_unpacku_hi_optab, UNKNOWN);
6320 init_optab (vec_unpacku_lo_optab, UNKNOWN);
6321 init_optab (vec_unpacks_float_hi_optab, UNKNOWN);
6322 init_optab (vec_unpacks_float_lo_optab, UNKNOWN);
6323 init_optab (vec_unpacku_float_hi_optab, UNKNOWN);
6324 init_optab (vec_unpacku_float_lo_optab, UNKNOWN);
6325 init_optab (vec_pack_trunc_optab, UNKNOWN);
6326 init_optab (vec_pack_usat_optab, UNKNOWN);
6327 init_optab (vec_pack_ssat_optab, UNKNOWN);
6328 init_optab (vec_pack_ufix_trunc_optab, UNKNOWN);
6329 init_optab (vec_pack_sfix_trunc_optab, UNKNOWN);
6331 init_optab (powi_optab, UNKNOWN);
/* Conversion (two-mode) optabs.  */
6334 init_convert_optab (sext_optab, SIGN_EXTEND);
6335 init_convert_optab (zext_optab, ZERO_EXTEND);
6336 init_convert_optab (trunc_optab, TRUNCATE);
6337 init_convert_optab (sfix_optab, FIX);
6338 init_convert_optab (ufix_optab, UNSIGNED_FIX);
6339 init_convert_optab (sfixtrunc_optab, UNKNOWN);
6340 init_convert_optab (ufixtrunc_optab, UNKNOWN);
6341 init_convert_optab (sfloat_optab, FLOAT);
6342 init_convert_optab (ufloat_optab, UNSIGNED_FLOAT);
6343 init_convert_optab (lrint_optab, UNKNOWN);
6344 init_convert_optab (lround_optab, UNKNOWN);
6345 init_convert_optab (lfloor_optab, UNKNOWN);
6346 init_convert_optab (lceil_optab, UNKNOWN);
6348 init_convert_optab (fract_optab, FRACT_CONVERT);
6349 init_convert_optab (fractuns_optab, UNSIGNED_FRACT_CONVERT);
6350 init_convert_optab (satfract_optab, SAT_FRACT);
6351 init_convert_optab (satfractuns_optab, UNSIGNED_SAT_FRACT);
/* Per-mode insn-code arrays: clear block-move/compare/set, sync
   primitives and reload patterns to CODE_FOR_nothing.  */
6353 for (i = 0; i < NUM_MACHINE_MODES; i++)
6355 movmem_optab[i] = CODE_FOR_nothing;
6356 cmpstr_optab[i] = CODE_FOR_nothing;
6357 cmpstrn_optab[i] = CODE_FOR_nothing;
6358 cmpmem_optab[i] = CODE_FOR_nothing;
6359 setmem_optab[i] = CODE_FOR_nothing;
6361 sync_add_optab[i] = CODE_FOR_nothing;
6362 sync_sub_optab[i] = CODE_FOR_nothing;
6363 sync_ior_optab[i] = CODE_FOR_nothing;
6364 sync_and_optab[i] = CODE_FOR_nothing;
6365 sync_xor_optab[i] = CODE_FOR_nothing;
6366 sync_nand_optab[i] = CODE_FOR_nothing;
6367 sync_old_add_optab[i] = CODE_FOR_nothing;
6368 sync_old_sub_optab[i] = CODE_FOR_nothing;
6369 sync_old_ior_optab[i] = CODE_FOR_nothing;
6370 sync_old_and_optab[i] = CODE_FOR_nothing;
6371 sync_old_xor_optab[i] = CODE_FOR_nothing;
6372 sync_old_nand_optab[i] = CODE_FOR_nothing;
6373 sync_new_add_optab[i] = CODE_FOR_nothing;
6374 sync_new_sub_optab[i] = CODE_FOR_nothing;
6375 sync_new_ior_optab[i] = CODE_FOR_nothing;
6376 sync_new_and_optab[i] = CODE_FOR_nothing;
6377 sync_new_xor_optab[i] = CODE_FOR_nothing;
6378 sync_new_nand_optab[i] = CODE_FOR_nothing;
6379 sync_compare_and_swap[i] = CODE_FOR_nothing;
6380 sync_compare_and_swap_cc[i] = CODE_FOR_nothing;
6381 sync_lock_test_and_set[i] = CODE_FOR_nothing;
6382 sync_lock_release[i] = CODE_FOR_nothing;
6384 reload_in_optab[i] = reload_out_optab[i] = CODE_FOR_nothing;
6387 /* Fill in the optabs with the insns we support. */
6390 /* Initialize the optabs with the names of the library functions. */
/* For each optab: the libgcc basename, the operand-count suffix
   character ('2'/'3'/'4'), and the generator that decides which mode
   classes get a libcall.  */
6391 add_optab->libcall_basename = "add";
6392 add_optab->libcall_suffix = '3';
6393 add_optab->libcall_gen = gen_int_fp_fixed_libfunc;
6394 addv_optab->libcall_basename = "add";
6395 addv_optab->libcall_suffix = '3';
6396 addv_optab->libcall_gen = gen_intv_fp_libfunc;
6397 ssadd_optab->libcall_basename = "ssadd";
6398 ssadd_optab->libcall_suffix = '3';
6399 ssadd_optab->libcall_gen = gen_signed_fixed_libfunc;
6400 usadd_optab->libcall_basename = "usadd";
6401 usadd_optab->libcall_suffix = '3';
6402 usadd_optab->libcall_gen = gen_unsigned_fixed_libfunc;
6403 sub_optab->libcall_basename = "sub";
6404 sub_optab->libcall_suffix = '3';
6405 sub_optab->libcall_gen = gen_int_fp_fixed_libfunc;
6406 subv_optab->libcall_basename = "sub";
6407 subv_optab->libcall_suffix = '3';
6408 subv_optab->libcall_gen = gen_intv_fp_libfunc;
6409 sssub_optab->libcall_basename = "sssub";
6410 sssub_optab->libcall_suffix = '3';
6411 sssub_optab->libcall_gen = gen_signed_fixed_libfunc;
6412 ussub_optab->libcall_basename = "ussub";
6413 ussub_optab->libcall_suffix = '3';
6414 ussub_optab->libcall_gen = gen_unsigned_fixed_libfunc;
6415 smul_optab->libcall_basename = "mul";
6416 smul_optab->libcall_suffix = '3';
6417 smul_optab->libcall_gen = gen_int_fp_fixed_libfunc;
6418 smulv_optab->libcall_basename = "mul";
6419 smulv_optab->libcall_suffix = '3';
6420 smulv_optab->libcall_gen = gen_intv_fp_libfunc;
6421 ssmul_optab->libcall_basename = "ssmul";
6422 ssmul_optab->libcall_suffix = '3';
6423 ssmul_optab->libcall_gen = gen_signed_fixed_libfunc;
6424 usmul_optab->libcall_basename = "usmul";
6425 usmul_optab->libcall_suffix = '3';
6426 usmul_optab->libcall_gen = gen_unsigned_fixed_libfunc;
6427 sdiv_optab->libcall_basename = "div";
6428 sdiv_optab->libcall_suffix = '3';
6429 sdiv_optab->libcall_gen = gen_int_fp_signed_fixed_libfunc;
6430 sdivv_optab->libcall_basename = "divv";
6431 sdivv_optab->libcall_suffix = '3';
6432 sdivv_optab->libcall_gen = gen_int_libfunc;
6433 ssdiv_optab->libcall_basename = "ssdiv";
6434 ssdiv_optab->libcall_suffix = '3';
6435 ssdiv_optab->libcall_gen = gen_signed_fixed_libfunc;
6436 udiv_optab->libcall_basename = "udiv";
6437 udiv_optab->libcall_suffix = '3';
6438 udiv_optab->libcall_gen = gen_int_unsigned_fixed_libfunc;
6439 usdiv_optab->libcall_basename = "usdiv";
6440 usdiv_optab->libcall_suffix = '3';
6441 usdiv_optab->libcall_gen = gen_unsigned_fixed_libfunc;
6442 sdivmod_optab->libcall_basename = "divmod";
6443 sdivmod_optab->libcall_suffix = '4';
6444 sdivmod_optab->libcall_gen = gen_int_libfunc;
6445 udivmod_optab->libcall_basename = "udivmod";
6446 udivmod_optab->libcall_suffix = '4';
6447 udivmod_optab->libcall_gen = gen_int_libfunc;
6448 smod_optab->libcall_basename = "mod";
6449 smod_optab->libcall_suffix = '3';
6450 smod_optab->libcall_gen = gen_int_libfunc;
6451 umod_optab->libcall_basename = "umod";
6452 umod_optab->libcall_suffix = '3';
6453 umod_optab->libcall_gen = gen_int_libfunc;
6454 ftrunc_optab->libcall_basename = "ftrunc";
6455 ftrunc_optab->libcall_suffix = '2';
6456 ftrunc_optab->libcall_gen = gen_fp_libfunc;
6457 and_optab->libcall_basename = "and";
6458 and_optab->libcall_suffix = '3';
6459 and_optab->libcall_gen = gen_int_libfunc;
6460 ior_optab->libcall_basename = "ior";
6461 ior_optab->libcall_suffix = '3';
6462 ior_optab->libcall_gen = gen_int_libfunc;
6463 xor_optab->libcall_basename = "xor";
6464 xor_optab->libcall_suffix = '3';
6465 xor_optab->libcall_gen = gen_int_libfunc;
6466 ashl_optab->libcall_basename = "ashl";
6467 ashl_optab->libcall_suffix = '3';
6468 ashl_optab->libcall_gen = gen_int_fixed_libfunc;
6469 ssashl_optab->libcall_basename = "ssashl";
6470 ssashl_optab->libcall_suffix = '3';
6471 ssashl_optab->libcall_gen = gen_signed_fixed_libfunc;
6472 usashl_optab->libcall_basename = "usashl";
6473 usashl_optab->libcall_suffix = '3';
6474 usashl_optab->libcall_gen = gen_unsigned_fixed_libfunc;
6475 ashr_optab->libcall_basename = "ashr";
6476 ashr_optab->libcall_suffix = '3';
6477 ashr_optab->libcall_gen = gen_int_signed_fixed_libfunc;
6478 lshr_optab->libcall_basename = "lshr";
6479 lshr_optab->libcall_suffix = '3';
6480 lshr_optab->libcall_gen = gen_int_unsigned_fixed_libfunc;
6481 smin_optab->libcall_basename = "min";
6482 smin_optab->libcall_suffix = '3';
6483 smin_optab->libcall_gen = gen_int_fp_libfunc;
6484 smax_optab->libcall_basename = "max";
6485 smax_optab->libcall_suffix = '3';
6486 smax_optab->libcall_gen = gen_int_fp_libfunc;
6487 umin_optab->libcall_basename = "umin";
6488 umin_optab->libcall_suffix = '3';
6489 umin_optab->libcall_gen = gen_int_libfunc;
6490 umax_optab->libcall_basename = "umax";
6491 umax_optab->libcall_suffix = '3';
6492 umax_optab->libcall_gen = gen_int_libfunc;
6493 neg_optab->libcall_basename = "neg";
6494 neg_optab->libcall_suffix = '2';
6495 neg_optab->libcall_gen = gen_int_fp_fixed_libfunc;
6496 ssneg_optab->libcall_basename = "ssneg";
6497 ssneg_optab->libcall_suffix = '2';
6498 ssneg_optab->libcall_gen = gen_signed_fixed_libfunc;
6499 usneg_optab->libcall_basename = "usneg";
6500 usneg_optab->libcall_suffix = '2';
6501 usneg_optab->libcall_gen = gen_unsigned_fixed_libfunc;
6502 negv_optab->libcall_basename = "neg";
6503 negv_optab->libcall_suffix = '2';
6504 negv_optab->libcall_gen = gen_intv_fp_libfunc;
6505 one_cmpl_optab->libcall_basename = "one_cmpl";
6506 one_cmpl_optab->libcall_suffix = '2';
6507 one_cmpl_optab->libcall_gen = gen_int_libfunc;
6508 ffs_optab->libcall_basename = "ffs";
6509 ffs_optab->libcall_suffix = '2';
6510 ffs_optab->libcall_gen = gen_int_libfunc;
6511 clz_optab->libcall_basename = "clz";
6512 clz_optab->libcall_suffix = '2';
6513 clz_optab->libcall_gen = gen_int_libfunc;
6514 ctz_optab->libcall_basename = "ctz";
6515 ctz_optab->libcall_suffix = '2';
6516 ctz_optab->libcall_gen = gen_int_libfunc;
6517 popcount_optab->libcall_basename = "popcount";
6518 popcount_optab->libcall_suffix = '2';
6519 popcount_optab->libcall_gen = gen_int_libfunc;
6520 parity_optab->libcall_basename = "parity";
6521 parity_optab->libcall_suffix = '2';
6522 parity_optab->libcall_gen = gen_int_libfunc;
6524 /* Comparison libcalls for integers MUST come in pairs,
6526 cmp_optab->libcall_basename = "cmp";
6527 cmp_optab->libcall_suffix = '2';
6528 cmp_optab->libcall_gen = gen_int_fp_fixed_libfunc;
6529 ucmp_optab->libcall_basename = "ucmp";
6530 ucmp_optab->libcall_suffix = '2';
6531 ucmp_optab->libcall_gen = gen_int_libfunc;
6533 /* EQ etc are floating point only. */
6534 eq_optab->libcall_basename = "eq";
6535 eq_optab->libcall_suffix = '2';
6536 eq_optab->libcall_gen = gen_fp_libfunc;
6537 ne_optab->libcall_basename = "ne";
6538 ne_optab->libcall_suffix = '2';
6539 ne_optab->libcall_gen = gen_fp_libfunc;
6540 gt_optab->libcall_basename = "gt";
6541 gt_optab->libcall_suffix = '2';
6542 gt_optab->libcall_gen = gen_fp_libfunc;
6543 ge_optab->libcall_basename = "ge";
6544 ge_optab->libcall_suffix = '2';
6545 ge_optab->libcall_gen = gen_fp_libfunc;
6546 lt_optab->libcall_basename = "lt";
6547 lt_optab->libcall_suffix = '2';
6548 lt_optab->libcall_gen = gen_fp_libfunc;
6549 le_optab->libcall_basename = "le";
6550 le_optab->libcall_suffix = '2';
6551 le_optab->libcall_gen = gen_fp_libfunc;
6552 unord_optab->libcall_basename = "unord";
6553 unord_optab->libcall_suffix = '2';
6554 unord_optab->libcall_gen = gen_fp_libfunc;
6556 powi_optab->libcall_basename = "powi";
6557 powi_optab->libcall_suffix = '2';
6558 powi_optab->libcall_gen = gen_fp_libfunc;
/* Conversion-optab libcall generators.  */
6561 sfloat_optab->libcall_basename = "float";
6562 sfloat_optab->libcall_gen = gen_int_to_fp_conv_libfunc;
6563 ufloat_optab->libcall_gen = gen_ufloat_conv_libfunc;
6564 sfix_optab->libcall_basename = "fix";
6565 sfix_optab->libcall_gen = gen_fp_to_int_conv_libfunc;
6566 ufix_optab->libcall_basename = "fixuns";
6567 ufix_optab->libcall_gen = gen_fp_to_int_conv_libfunc;
6568 lrint_optab->libcall_basename = "lrint";
6569 lrint_optab->libcall_gen = gen_int_to_fp_nondecimal_conv_libfunc;
6570 lround_optab->libcall_basename = "lround";
6571 lround_optab->libcall_gen = gen_int_to_fp_nondecimal_conv_libfunc;
6572 lfloor_optab->libcall_basename = "lfloor";
6573 lfloor_optab->libcall_gen = gen_int_to_fp_nondecimal_conv_libfunc;
6574 lceil_optab->libcall_basename = "lceil";
6575 lceil_optab->libcall_gen = gen_int_to_fp_nondecimal_conv_libfunc;
6577 /* trunc_optab is also used for FLOAT_EXTEND. */
6578 sext_optab->libcall_basename = "extend";
6579 sext_optab->libcall_gen = gen_extend_conv_libfunc;
6580 trunc_optab->libcall_basename = "trunc";
6581 trunc_optab->libcall_gen = gen_trunc_conv_libfunc;
6583 /* Conversions for fixed-point modes and other modes. */
6584 fract_optab->libcall_basename = "fract";
6585 fract_optab->libcall_gen = gen_fract_conv_libfunc;
6586 satfract_optab->libcall_basename = "satfract";
6587 satfract_optab->libcall_gen = gen_satfract_conv_libfunc;
6588 fractuns_optab->libcall_basename = "fractuns";
6589 fractuns_optab->libcall_gen = gen_fractuns_conv_libfunc;
6590 satfractuns_optab->libcall_basename = "satfractuns";
6591 satfractuns_optab->libcall_gen = gen_satfractuns_conv_libfunc;
6593 /* The ffs function operates on `int'. Fall back on it if we do not
6594 have a libgcc2 function for that width. */
6595 if (INT_TYPE_SIZE < BITS_PER_WORD)
6597 int_mode = mode_for_size (INT_TYPE_SIZE, MODE_INT, 0);
6598 set_optab_libfunc (ffs_optab, mode_for_size (INT_TYPE_SIZE, MODE_INT, 0),
6602 /* Explicitly initialize the bswap libfuncs since we need them to be
6603 valid for things other than word_mode. */
6604 set_optab_libfunc (bswap_optab, SImode, "__bswapsi2");
6605 set_optab_libfunc (bswap_optab, DImode, "__bswapdi2");
6607 /* Use cabs for double complex abs, since systems generally have cabs.
6608 Don't define any libcall for float complex, so that cabs will be used. */
6609 if (complex_double_type_node)
6610 set_optab_libfunc (abs_optab, TYPE_MODE (complex_double_type_node), "cabs");
/* Fixed-name support libfuncs.  */
6612 abort_libfunc = init_one_libfunc ("abort");
6613 memcpy_libfunc = init_one_libfunc ("memcpy");
6614 memmove_libfunc = init_one_libfunc ("memmove");
6615 memcmp_libfunc = init_one_libfunc ("memcmp");
6616 memset_libfunc = init_one_libfunc ("memset");
6617 setbits_libfunc = init_one_libfunc ("__setbits");
6619 #ifndef DONT_USE_BUILTIN_SETJMP
6620 setjmp_libfunc = init_one_libfunc ("__builtin_setjmp");
6621 longjmp_libfunc = init_one_libfunc ("__builtin_longjmp");
6623 setjmp_libfunc = init_one_libfunc ("setjmp");
6624 longjmp_libfunc = init_one_libfunc ("longjmp");
6626 unwind_sjlj_register_libfunc = init_one_libfunc ("_Unwind_SjLj_Register");
6627 unwind_sjlj_unregister_libfunc
6628 = init_one_libfunc ("_Unwind_SjLj_Unregister");
6630 /* For function entry/exit instrumentation. */
6631 profile_function_entry_libfunc
6632 = init_one_libfunc ("__cyg_profile_func_enter");
6633 profile_function_exit_libfunc
6634 = init_one_libfunc ("__cyg_profile_func_exit");
6636 gcov_flush_libfunc = init_one_libfunc ("__gcov_flush");
6638 if (HAVE_conditional_trap)
6639 trap_rtx = gen_rtx_fmt_ee (EQ, VOIDmode, NULL_RTX, NULL_RTX);
6641 /* Allow the target to add more libcalls or rename some, etc. */
6642 targetm.init_libfuncs ();
6647 /* Print information about the current contents of the optabs on
/* (continuation of the original comment, the "void" line, braces,
   local declarations and fprintf argument lines are elided).  */
6651 debug_optab_libfuncs (void)
6657 /* Dump the arithmetic optabs. */
6658 for (i = 0; i != (int) OTI_MAX; i++)
6659 for (j = 0; j < NUM_MACHINE_MODES; ++j)
6664 o = &optab_table[i];
6665 l = optab_libfunc (o, j);
/* Only entries with a registered libfunc are printed; they must be
   SYMBOL_REFs.  */
6668 gcc_assert (GET_CODE (l) == SYMBOL_REF);
6669 fprintf (stderr, "%s\t%s:\t%s\n",
6670 GET_RTX_NAME (o->code),
6676 /* Dump the conversion optabs. */
6677 for (i = 0; i < (int) COI_MAX; ++i)
6678 for (j = 0; j < NUM_MACHINE_MODES; ++j)
6679 for (k = 0; k < NUM_MACHINE_MODES; ++k)
6684 o = &convert_optab_table[i];
6685 l = convert_optab_libfunc (o, j, k);
6688 gcc_assert (GET_CODE (l) == SYMBOL_REF);
6689 fprintf (stderr, "%s\t%s\t%s:\t%s\n",
6690 GET_RTX_NAME (o->code),
6699 /* Generate insns to trap with code TCODE if OP1 and OP2 satisfy condition
6700 CODE. Return 0 on failure. */
/* NOTE(review): the "rtx" return-type line, braces, "rtx insn;",
   start_sequence/end_sequence and the "return 0;"/"return insn;" paths
   are elided from this listing.  */
6703 gen_cond_trap (enum rtx_code code ATTRIBUTE_UNUSED, rtx op1,
6704 rtx op2 ATTRIBUTE_UNUSED, rtx tcode ATTRIBUTE_UNUSED)
6706 enum machine_mode mode = GET_MODE (op1);
6707 enum insn_code icode;
/* Target support and a usable compare in MODE are both required.  */
6710 if (!HAVE_conditional_trap)
6713 if (mode == VOIDmode)
6716 icode = optab_handler (cmp_optab, mode)->insn_code;
6717 if (icode == CODE_FOR_nothing)
/* Legitimize both operands for the compare pattern.  */
6721 op1 = prepare_operand (icode, op1, 0, mode, mode, 0);
6722 op2 = prepare_operand (icode, op2, 1, mode, mode, 0);
/* Emit the compare, then reuse the shared trap_rtx condition with the
   requested comparison CODE.  */
6728 emit_insn (GEN_FCN (icode) (op1, op2));
6730 PUT_CODE (trap_rtx, code);
6731 gcc_assert (HAVE_conditional_trap);
6732 insn = gen_conditional_trap (trap_rtx, tcode);
6736 insn = get_insns ();
6743 /* Return rtx code for TCODE. Use UNSIGNEDP to select signed
6744 or unsigned operation code. */
6746 static enum rtx_code
6747 get_rtx_code (enum tree_code tcode, bool unsignedp)
/* NOTE(review): the switch statement, its "case" labels (LT_EXPR etc.)
   and most other cases are elided; only the relational assignments are
   visible here.  */
6759 code = unsignedp ? LTU : LT;
6762 code = unsignedp ? LEU : LE;
6765 code = unsignedp ? GTU : GT;
6768 code = unsignedp ? GEU : GE;
6771 case UNORDERED_EXPR:
6802 /* Return comparison rtx for COND. Use UNSIGNEDP to select signed or
6803 unsigned operators. Do not generate compare instruction. */
/* NOTE(review): the "static rtx" line, braces, the "tree t_op0, t_op1;"
   declaration and the EXPAND_* modifier arguments are elided.  */
6806 vector_compare_rtx (tree cond, bool unsignedp, enum insn_code icode)
6808 enum rtx_code rcode;
6810 rtx rtx_op0, rtx_op1;
6812 /* This is unlikely. While generating VEC_COND_EXPR, auto vectorizer
6813 ensures that condition is a relational operation. */
6814 gcc_assert (COMPARISON_CLASS_P (cond));
6816 rcode = get_rtx_code (TREE_CODE (cond), unsignedp);
6817 t_op0 = TREE_OPERAND (cond, 0);
6818 t_op1 = TREE_OPERAND (cond, 1);
6820 /* Expand operands. */
6821 rtx_op0 = expand_expr (t_op0, NULL_RTX, TYPE_MODE (TREE_TYPE (t_op0)),
6823 rtx_op1 = expand_expr (t_op1, NULL_RTX, TYPE_MODE (TREE_TYPE (t_op1)),
/* Force each operand into a register if the vcond pattern's operand
   predicates (slots 4 and 5) reject it.  */
6826 if (!insn_data[icode].operand[4].predicate (rtx_op0, GET_MODE (rtx_op0))
6827 && GET_MODE (rtx_op0) != VOIDmode)
6828 rtx_op0 = force_reg (GET_MODE (rtx_op0), rtx_op0);
6830 if (!insn_data[icode].operand[5].predicate (rtx_op1, GET_MODE (rtx_op1))
6831 && GET_MODE (rtx_op1) != VOIDmode)
6832 rtx_op1 = force_reg (GET_MODE (rtx_op1), rtx_op1);
/* Build the bare comparison rtx; no compare insn is emitted here.  */
6834 return gen_rtx_fmt_ee (rcode, VOIDmode, rtx_op0, rtx_op1);
6837 /* Return insn code for VEC_COND_EXPR EXPR. */
6839 static inline enum insn_code
6840 get_vcond_icode (tree expr, enum machine_mode mode)
6842 enum insn_code icode = CODE_FOR_nothing;
/* Choose the unsigned or signed vcond pattern by the expression's
   signedness (else branch and "return icode;" elided here).  */
6844 if (TYPE_UNSIGNED (TREE_TYPE (expr)))
6845 icode = vcondu_gen_code[mode];
6847 icode = vcond_gen_code[mode];
6851 /* Return TRUE iff, appropriate vector insns are available
6852 for vector cond expr expr in VMODE mode. */
/* NOTE(review): the "bool" line, braces and the return statements are
   elided from this listing.  */
6855 expand_vec_cond_expr_p (tree expr, enum machine_mode vmode)
6857 if (get_vcond_icode (expr, vmode) == CODE_FOR_nothing)
6862 /* Generate insns for VEC_COND_EXPR. */
/* NOTE(review): the "rtx" return-type line, braces, the early
   "return 0;" and the final "return target;" are elided.  */
6865 expand_vec_cond_expr (tree vec_cond_expr, rtx target)
6867 enum insn_code icode;
6868 rtx comparison, rtx_op1, rtx_op2, cc_op0, cc_op1;
6869 enum machine_mode mode = TYPE_MODE (TREE_TYPE (vec_cond_expr));
6870 bool unsignedp = TYPE_UNSIGNED (TREE_TYPE (vec_cond_expr));
6872 icode = get_vcond_icode (vec_cond_expr, mode);
6873 if (icode == CODE_FOR_nothing)
/* Make sure TARGET satisfies the output-operand predicate.  */
6876 if (!target || !insn_data[icode].operand[0].predicate (target, mode))
6877 target = gen_reg_rtx (mode);
6879 /* Get comparison rtx. First expand both cond expr operands. */
6880 comparison = vector_compare_rtx (TREE_OPERAND (vec_cond_expr, 0),
6882 cc_op0 = XEXP (comparison, 0);
6883 cc_op1 = XEXP (comparison, 1);
6884 /* Expand both operands and force them in reg, if required. */
6885 rtx_op1 = expand_normal (TREE_OPERAND (vec_cond_expr, 1));
6886 if (!insn_data[icode].operand[1].predicate (rtx_op1, mode)
6887 && mode != VOIDmode)
6888 rtx_op1 = force_reg (mode, rtx_op1);
6890 rtx_op2 = expand_normal (TREE_OPERAND (vec_cond_expr, 2));
6891 if (!insn_data[icode].operand[2].predicate (rtx_op2, mode)
6892 && mode != VOIDmode)
6893 rtx_op2 = force_reg (mode, rtx_op2);
6895 /* Emit instruction! */
6896 emit_insn (GEN_FCN (icode) (target, rtx_op1, rtx_op2,
6897 comparison, cc_op0, cc_op1));
6903 /* This is an internal subroutine of the other compare_and_swap expanders.
6904 MEM, OLD_VAL and NEW_VAL are as you'd expect for a compare-and-swap
6905 operation. TARGET is an optional place to store the value result of
6906 the operation. ICODE is the particular instruction to expand. Return
6907 the result of the operation. */
/* NOTE(review): the "static rtx" line, braces, the "rtx insn;"
   declaration and the "return NULL_RTX;"/"emit_insn; return target;"
   tail are elided from this listing.  */
6910 expand_val_compare_and_swap_1 (rtx mem, rtx old_val, rtx new_val,
6911 rtx target, enum insn_code icode)
6913 enum machine_mode mode = GET_MODE (mem);
6916 if (!target || !insn_data[icode].operand[0].predicate (target, mode))
6917 target = gen_reg_rtx (mode);
/* Widen/narrow OLD_VAL to MEM's mode and legitimize it for operand 2.  */
6919 if (GET_MODE (old_val) != VOIDmode && GET_MODE (old_val) != mode)
6920 old_val = convert_modes (mode, GET_MODE (old_val), old_val, 1);
6921 if (!insn_data[icode].operand[2].predicate (old_val, mode))
6922 old_val = force_reg (mode, old_val);
/* Likewise NEW_VAL for operand 3.  */
6924 if (GET_MODE (new_val) != VOIDmode && GET_MODE (new_val) != mode)
6925 new_val = convert_modes (mode, GET_MODE (new_val), new_val, 1);
6926 if (!insn_data[icode].operand[3].predicate (new_val, mode))
6927 new_val = force_reg (mode, new_val);
/* If the pattern fails to generate, the caller sees a null result.  */
6929 insn = GEN_FCN (icode) (target, mem, old_val, new_val);
6930 if (insn == NULL_RTX)
6937 /* Expand a compare-and-swap operation and return its value. */
/* NOTE(review): the "rtx" return-type line, braces and the
   CODE_FOR_nothing failure return are elided from this listing.  */
6940 expand_val_compare_and_swap (rtx mem, rtx old_val, rtx new_val, rtx target)
6942 enum machine_mode mode = GET_MODE (mem);
6943 enum insn_code icode = sync_compare_and_swap[mode];
6945 if (icode == CODE_FOR_nothing)
6948 return expand_val_compare_and_swap_1 (mem, old_val, new_val, target, icode);
6951 /* Expand a compare-and-swap operation and store true into the result if
6952 the operation was successful and false otherwise. Return the result.
6953 Unlike other routines, TARGET is not optional. */
/* NOTE(review): the "rtx" return-type line, braces, the switch head for
   ICODE, several "return" statements and some emitted-insn lines are
   elided from this listing.  */
6956 expand_bool_compare_and_swap (rtx mem, rtx old_val, rtx new_val, rtx target)
6958 enum machine_mode mode = GET_MODE (mem);
6959 enum insn_code icode;
6960 rtx subtarget, label0, label1;
6962 /* If the target supports a compare-and-swap pattern that simultaneously
6963 sets some flag for success, then use it. Otherwise use the regular
6964 compare-and-swap and follow that immediately with a compare insn. */
6965 icode = sync_compare_and_swap_cc[mode];
6969 subtarget = expand_val_compare_and_swap_1 (mem, old_val, new_val,
6971 if (subtarget != NULL_RTX)
/* Fall back to the plain CAS pattern plus an explicit compare.  */
6975 case CODE_FOR_nothing:
6976 icode = sync_compare_and_swap[mode];
6977 if (icode == CODE_FOR_nothing)
6980 /* Ensure that if old_val == mem, that we're not comparing
6981 against an old value. */
6982 if (MEM_P (old_val))
6983 old_val = force_reg (mode, old_val);
6985 subtarget = expand_val_compare_and_swap_1 (mem, old_val, new_val,
6987 if (subtarget == NULL_RTX)
6990 emit_cmp_insn (subtarget, old_val, EQ, const0_rtx, mode, true);
6993 /* If the target has a sane STORE_FLAG_VALUE, then go ahead and use a
6994 setcc instruction from the beginning. We don't work too hard here,
6995 but it's nice to not be stupid about initial code gen either. */
6996 if (STORE_FLAG_VALUE == 1)
6998 icode = setcc_gen_code[EQ];
6999 if (icode != CODE_FOR_nothing)
7001 enum machine_mode cmode = insn_data[icode].operand[0].mode;
/* Materialize the flag into SUBTARGET, converting to TARGET's mode
   if the setcc pattern produces a different one.  */
7005 if (!insn_data[icode].operand[0].predicate (target, cmode))
7006 subtarget = gen_reg_rtx (cmode);
7008 insn = GEN_FCN (icode) (subtarget);
7012 if (GET_MODE (target) != GET_MODE (subtarget))
7014 convert_move (target, subtarget, 1);
7022 /* Without an appropriate setcc instruction, use a set of branches to
7023 get 1 and 0 stored into target. Presumably if the target has a
7024 STORE_FLAG_VALUE that isn't 1, then this will get cleaned up by ifcvt. */
7026 label0 = gen_label_rtx ();
7027 label1 = gen_label_rtx ();
7029 emit_jump_insn (bcc_gen_fctn[EQ] (label0));
7030 emit_move_insn (target, const0_rtx);
7031 emit_jump_insn (gen_jump (label1));
7033 emit_label (label0);
7034 emit_move_insn (target, const1_rtx);
7035 emit_label (label1);
7040 /* This is a helper function for the other atomic operations. This function
7041 emits a loop that contains SEQ that iterates until a compare-and-swap
7042 operation at the end succeeds. MEM is the memory to be modified. SEQ is
7043 a set of instructions that takes a value from OLD_REG as an input and
7044 produces a value in NEW_REG as an output. Before SEQ, OLD_REG will be
7045 set to the current contents of MEM. After SEQ, a compare-and-swap will
7046 attempt to update MEM with NEW_REG. The function returns true when the
7047 loop was generated successfully. */
7050 expand_compare_and_swap_loop (rtx mem, rtx old_reg, rtx new_reg, rtx seq)
7052 enum machine_mode mode = GET_MODE (mem);
7053 enum insn_code icode;
7054 rtx label, cmp_reg, subtarget;
7056 /* The loop we want to generate looks like
7062 cmp_reg = compare-and-swap(mem, old_reg, new_reg)
7063 if (cmp_reg != old_reg)
7066 Note that we only do the plain load from memory once. Subsequent
7067 iterations use the value loaded by the compare-and-swap pattern. */
7069 label = gen_label_rtx ();
7070 cmp_reg = gen_reg_rtx (mode);
/* Initial plain load of MEM; later iterations reuse the value the CAS
   pattern returns in CMP_REG.  */
7072 emit_move_insn (cmp_reg, mem);
7074 emit_move_insn (old_reg, cmp_reg);
7078 /* If the target supports a compare-and-swap pattern that simultaneously
7079 sets some flag for success, then use it. Otherwise use the regular
7080 compare-and-swap and follow that immediately with a compare insn. */
7081 icode = sync_compare_and_swap_cc[mode];
7085 subtarget = expand_val_compare_and_swap_1 (mem, old_reg, new_reg,
7087 if (subtarget != NULL_RTX)
/* The _cc pattern must have honored the requested result location,
   since the loop back-edge comparison depends on CMP_REG.  */
7089 gcc_assert (subtarget == cmp_reg);
7094 case CODE_FOR_nothing:
/* No _cc pattern; use the plain value-returning CAS and compare.  */
7095 icode = sync_compare_and_swap[mode];
7096 if (icode == CODE_FOR_nothing)
7099 subtarget = expand_val_compare_and_swap_1 (mem, old_reg, new_reg,
7101 if (subtarget == NULL_RTX)
/* The pattern may have placed the result elsewhere; copy it so the
   next iteration sees it in CMP_REG.  */
7103 if (subtarget != cmp_reg)
7104 emit_move_insn (cmp_reg, subtarget);
/* CAS failed if the value seen differs from OLD_REG: loop again.  */
7106 emit_cmp_insn (cmp_reg, old_reg, EQ, const0_rtx, mode, true);
7109 /* ??? Mark this jump predicted not taken? */
7110 emit_jump_insn (bcc_gen_fctn[NE] (label));
7115 /* This function generates the atomic operation MEM CODE= VAL. In this
7116 case, we do not care about any resulting value. Returns NULL if we
7117 cannot generate the operation. */
7120 expand_sync_operation (rtx mem, rtx val, enum rtx_code code)
7122 enum machine_mode mode = GET_MODE (mem);
7123 enum insn_code icode;
7126 /* Look to see if the target supports the operation directly. */
7130 icode = sync_add_optab[mode];
7133 icode = sync_ior_optab[mode];
7136 icode = sync_xor_optab[mode];
7139 icode = sync_and_optab[mode];
7142 icode = sync_nand_optab[mode];
7146 icode = sync_sub_optab[mode];
/* For subtraction, when no direct pattern exists — or when VAL is a
   constant, whose negation costs nothing — try atomic add of -VAL.  */
7147 if (icode == CODE_FOR_nothing || CONST_INT_P (val))
7149 icode = sync_add_optab[mode];
7150 if (icode != CODE_FOR_nothing)
7152 val = expand_simple_unop (mode, NEG, val, NULL_RTX, 1);
7162 /* Generate the direct operation, if present. */
7163 if (icode != CODE_FOR_nothing)
/* Legitimize VAL: bring it to MEM's mode and satisfy the pattern's
   operand predicate before emitting.  */
7165 if (GET_MODE (val) != VOIDmode && GET_MODE (val) != mode)
7166 val = convert_modes (mode, GET_MODE (val), val, 1);
7167 if (!insn_data[icode].operand[1].predicate (val, mode))
7168 val = force_reg (mode, val);
7170 insn = GEN_FCN (icode) (mem, val);
7178 /* Failing that, generate a compare-and-swap loop in which we perform the
7179 operation with normal arithmetic instructions. */
7180 if (sync_compare_and_swap[mode] != CODE_FOR_nothing)
/* T0 holds the value loaded from MEM; T1 the updated value the CAS
   loop will try to store back.  */
7182 rtx t0 = gen_reg_rtx (mode), t1;
/* NOTE(review): this NOT appears to handle the NAND case by
   complementing before the binop below — confirm against the elided
   surrounding lines.  */
7189 t1 = expand_simple_unop (mode, NOT, t1, NULL_RTX, true);
7192 t1 = expand_simple_binop (mode, code, t1, val, NULL_RTX,
7193 true, OPTAB_LIB_WIDEN);
/* Capture the computation sequence so it can be placed inside the
   compare-and-swap retry loop.  */
7195 insn = get_insns ();
7198 if (t1 != NULL && expand_compare_and_swap_loop (mem, t0, t1, insn))
7205 /* This function generates the atomic operation MEM CODE= VAL. In this
7206 case, we do care about the resulting value: if AFTER is true then
7207 return the value MEM holds after the operation, if AFTER is false
7208 then return the value MEM holds before the operation. TARGET is an
7209 optional place for the result value to be stored. */
7212 expand_sync_fetch_operation (rtx mem, rtx val, enum rtx_code code,
7213 bool after, rtx target)
7215 enum machine_mode mode = GET_MODE (mem);
7216 enum insn_code old_code, new_code, icode;
7220 /* Look to see if the target supports the operation directly. */
/* "old" patterns return the pre-operation value of MEM, "new" patterns
   the post-operation value.  */
7224 old_code = sync_old_add_optab[mode];
7225 new_code = sync_new_add_optab[mode];
7228 old_code = sync_old_ior_optab[mode];
7229 new_code = sync_new_ior_optab[mode];
7232 old_code = sync_old_xor_optab[mode];
7233 new_code = sync_new_xor_optab[mode];
7236 old_code = sync_old_and_optab[mode];
7237 new_code = sync_new_and_optab[mode];
7240 old_code = sync_old_nand_optab[mode];
7241 new_code = sync_new_nand_optab[mode];
7245 old_code = sync_old_sub_optab[mode];
7246 new_code = sync_new_sub_optab[mode];
/* As in expand_sync_operation: no subtract patterns, or a constant VAL,
   means fetch-and-add of the negated value may be preferable.  */
7247 if ((old_code == CODE_FOR_nothing && new_code == CODE_FOR_nothing)
7248 || CONST_INT_P (val))
7250 old_code = sync_old_add_optab[mode];
7251 new_code = sync_new_add_optab[mode];
7252 if (old_code != CODE_FOR_nothing || new_code != CODE_FOR_nothing)
7254 val = expand_simple_unop (mode, NEG, val, NULL_RTX, 1);
7264 /* If the target does support the proper new/old operation, great. But
7265 if we only support the opposite old/new operation, check to see if we
7266 can compensate. In the case in which the old value is supported, then
7267 we can always perform the operation again with normal arithmetic. In
7268 the case in which the new value is supported, then we can only handle
7269 this in the case the operation is reversible. */
7274 if (icode == CODE_FOR_nothing)
7277 if (icode != CODE_FOR_nothing)
/* Only reversible operations (PLUS/MINUS/XOR) can recover the old
   value from the new one.  */
7284 if (icode == CODE_FOR_nothing
7285 && (code == PLUS || code == MINUS || code == XOR))
7288 if (icode != CODE_FOR_nothing)
7293 /* If we found something supported, great. */
7294 if (icode != CODE_FOR_nothing)
7296 if (!target || !insn_data[icode].operand[0].predicate (target, mode))
7297 target = gen_reg_rtx (mode);
/* Legitimize VAL for the chosen pattern's operand predicate.  */
7299 if (GET_MODE (val) != VOIDmode && GET_MODE (val) != mode)
7300 val = convert_modes (mode, GET_MODE (val), val, 1);
7301 if (!insn_data[icode].operand[2].predicate (val, mode))
7302 val = force_reg (mode, val);
7304 insn = GEN_FCN (icode) (target, mem, val);
7309 /* If we need to compensate for using an operation with the
7310 wrong return value, do so now. */
7317 else if (code == MINUS)
/* NOTE(review): this NOT+binop appears to be the NAND compensation
   path — confirm against the elided surrounding lines.  */
7322 target = expand_simple_unop (mode, NOT, target, NULL_RTX, true);
7323 target = expand_simple_binop (mode, code, target, val, NULL_RTX,
7324 true, OPTAB_LIB_WIDEN);
7331 /* Failing that, generate a compare-and-swap loop in which we perform the
7332 operation with normal arithmetic instructions. */
7333 if (sync_compare_and_swap[mode] != CODE_FOR_nothing)
7335 rtx t0 = gen_reg_rtx (mode), t1;
7337 if (!target || !register_operand (target, mode))
7338 target = gen_reg_rtx (mode);
/* When the pre-operation value is wanted, copy it out before SEQ
   recomputes T1; otherwise copy the updated value afterwards.  */
7343 emit_move_insn (target, t0);
7347 t1 = expand_simple_unop (mode, NOT, t1, NULL_RTX, true);
7350 t1 = expand_simple_binop (mode, code, t1, val, NULL_RTX,
7351 true, OPTAB_LIB_WIDEN);
7353 emit_move_insn (target, t1);
/* Capture the computation so it can be retried inside the CAS loop.  */
7355 insn = get_insns ();
7358 if (t1 != NULL && expand_compare_and_swap_loop (mem, t0, t1, insn))
7365 /* This function expands a test-and-set operation. Ideally we atomically
7366 store VAL in MEM and return the previous value in MEM. Some targets
7367 may not support this operation and only support VAL with the constant 1;
7368 in this case the return value will be 0/1, but the exact value
7369 stored in MEM is target defined. TARGET is an optional place to stick
7370 the return value. */
7373 expand_sync_lock_test_and_set (rtx mem, rtx val, rtx target)
7375 enum machine_mode mode = GET_MODE (mem);
7376 enum insn_code icode;
7379 /* If the target supports the test-and-set directly, great. */
7380 icode = sync_lock_test_and_set[mode];
7381 if (icode != CODE_FOR_nothing)
7383 if (!target || !insn_data[icode].operand[0].predicate (target, mode))
7384 target = gen_reg_rtx (mode);
/* Legitimize VAL: convert to MEM's mode and satisfy the pattern's
   operand predicate.  */
7386 if (GET_MODE (val) != VOIDmode && GET_MODE (val) != mode)
7387 val = convert_modes (mode, GET_MODE (val), val, 1);
7388 if (!insn_data[icode].operand[2].predicate (val, mode))
7389 val = force_reg (mode, val);
7391 insn = GEN_FCN (icode) (target, mem, val);
7399 /* Otherwise, use a compare-and-swap loop for the exchange. */
/* TARGET doubles as the CAS loop's OLD_REG, so it receives the value
   previously in MEM; no extra SEQ is needed since the new value is
   simply VAL.  */
7400 if (sync_compare_and_swap[mode] != CODE_FOR_nothing)
7402 if (!target || !register_operand (target, mode))
7403 target = gen_reg_rtx (mode);
7404 if (GET_MODE (val) != VOIDmode && GET_MODE (val) != mode)
7405 val = convert_modes (mode, GET_MODE (val), val, 1);
7406 if (expand_compare_and_swap_loop (mem, target, val, NULL_RTX))
7413 #include "gt-optabs.h"