1 /* Expand the basic unary and binary arithmetic operations, for GNU compiler.
2 Copyright (C) 1987, 1988, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
3 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008
4 Free Software Foundation, Inc.
6 This file is part of GCC.
8 GCC is free software; you can redistribute it and/or modify it under
9 the terms of the GNU General Public License as published by the Free
10 Software Foundation; either version 3, or (at your option) any later
13 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
14 WARRANTY; without even the implied warranty of MERCHANTABILITY or
15 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING3. If not see
20 <http://www.gnu.org/licenses/>. */
25 #include "coretypes.h"
29 /* Include insn-config.h before expr.h so that HAVE_conditional_move
30 is properly defined. */
31 #include "insn-config.h"
45 #include "basic-block.h"
48 /* Each optab contains info on how this target machine
49 can perform a particular operation
50 for all sizes and kinds of operands.
52 The operation to be performed is often specified
53 by passing one of these optabs as an argument.
55 See expr.h for documentation of these optabs. */
57 #if GCC_VERSION >= 4000
58 __extension__ struct optab optab_table[OTI_MAX]
59 = { [0 ... OTI_MAX - 1].handlers[0 ... NUM_MACHINE_MODES - 1].insn_code
62 /* init_insn_codes will do runtime initialization otherwise. */
63 struct optab optab_table[OTI_MAX];
66 rtx libfunc_table[LTI_MAX];
68 /* Tables of patterns for converting one mode to another. */
69 #if GCC_VERSION >= 4000
70 __extension__ struct convert_optab convert_optab_table[COI_MAX]
71 = { [0 ... COI_MAX - 1].handlers[0 ... NUM_MACHINE_MODES - 1]
72 [0 ... NUM_MACHINE_MODES - 1].insn_code
75 /* init_convert_optab will do runtime initialization otherwise. */
76 struct convert_optab convert_optab_table[COI_MAX];
79 /* Contains the optab used for each rtx code. */
80 optab code_to_optab[NUM_RTX_CODE + 1];
82 /* Indexed by the rtx-code for a conditional (eg. EQ, LT,...)
83 gives the gen_function to make a branch to test that condition. */
85 rtxfun bcc_gen_fctn[NUM_RTX_CODE];
87 /* Indexed by the rtx-code for a conditional (eg. EQ, LT,...)
88 gives the insn code to make a store-condition insn
89 to test that condition. */
91 enum insn_code setcc_gen_code[NUM_RTX_CODE];
93 #ifdef HAVE_conditional_move
94 /* Indexed by the machine mode, gives the insn code to make a conditional
95 move insn. This is not indexed by the rtx-code like bcc_gen_fctn and
96 setcc_gen_code to cut down on the number of named patterns. Consider a day
97 when a lot more rtx codes are conditional (eg: for the ARM). */
99 enum insn_code movcc_gen_code[NUM_MACHINE_MODES];
102 /* Indexed by the machine mode, gives the insn code for vector conditional
105 enum insn_code vcond_gen_code[NUM_MACHINE_MODES];
106 enum insn_code vcondu_gen_code[NUM_MACHINE_MODES];
108 /* The insn generating function can not take an rtx_code argument.
109 TRAP_RTX is used as an rtx argument. Its code is replaced with
110 the code to be used in the trap insn and all other fields are ignored. */
111 static GTY(()) rtx trap_rtx;
113 static void prepare_float_lib_cmp (rtx *, rtx *, enum rtx_code *,
114 enum machine_mode *, int *);
115 static rtx expand_unop_direct (enum machine_mode, optab, rtx, rtx, int);
117 /* Debug facility for use in GDB. */
118 void debug_optab_libfuncs (void);
120 #ifndef HAVE_conditional_trap
121 #define HAVE_conditional_trap 0
122 #define gen_conditional_trap(a,b) (gcc_unreachable (), NULL_RTX)
125 /* Prefixes for the current version of decimal floating point (BID vs. DPD) */
126 #if ENABLE_DECIMAL_BID_FORMAT
127 #define DECIMAL_PREFIX "bid_"
129 #define DECIMAL_PREFIX "dpd_"
133 /* Info about libfunc. We use same hashtable for normal optabs and conversion
134 optab. In the first case mode2 is unused. */
135 struct libfunc_entry GTY(())
138 enum machine_mode mode1, mode2;
142 /* Hash table used to convert declarations into nodes. */
143 static GTY((param_is (struct libfunc_entry))) htab_t libfunc_hash;
145 /* Used for attribute_hash. */
148 hash_libfunc (const void *p)
150 const struct libfunc_entry *const e = (const struct libfunc_entry *) p;
152 return (((int) e->mode1 + (int) e->mode2 * NUM_MACHINE_MODES)
156 /* Used for optab_hash. */
159 eq_libfunc (const void *p, const void *q)
161 const struct libfunc_entry *const e1 = (const struct libfunc_entry *) p;
162 const struct libfunc_entry *const e2 = (const struct libfunc_entry *) q;
164 return (e1->optab == e2->optab
165 && e1->mode1 == e2->mode1
166 && e1->mode2 == e2->mode2);
169 /* Return libfunc corresponding operation defined by OPTAB converting
170 from MODE2 to MODE1. Trigger lazy initialization if needed, return NULL
171 if no libfunc is available. */
173 convert_optab_libfunc (convert_optab optab, enum machine_mode mode1,
174 enum machine_mode mode2)
176 struct libfunc_entry e;
177 struct libfunc_entry **slot;
179 e.optab = (size_t) (optab - &convert_optab_table[0]);
182 slot = (struct libfunc_entry **) htab_find_slot (libfunc_hash, &e, NO_INSERT);
185 if (optab->libcall_gen)
187 optab->libcall_gen (optab, optab->libcall_basename, mode1, mode2);
188 slot = (struct libfunc_entry **) htab_find_slot (libfunc_hash, &e, NO_INSERT);
190 return (*slot)->libfunc;
196 return (*slot)->libfunc;
199 /* Return libfunc corresponding operation defined by OPTAB in MODE.
200 Trigger lazy initialization if needed, return NULL if no libfunc is
203 optab_libfunc (optab optab, enum machine_mode mode)
205 struct libfunc_entry e;
206 struct libfunc_entry **slot;
208 e.optab = (size_t) (optab - &optab_table[0]);
211 slot = (struct libfunc_entry **) htab_find_slot (libfunc_hash, &e, NO_INSERT);
214 if (optab->libcall_gen)
216 optab->libcall_gen (optab, optab->libcall_basename,
217 optab->libcall_suffix, mode);
218 slot = (struct libfunc_entry **) htab_find_slot (libfunc_hash,
221 return (*slot)->libfunc;
227 return (*slot)->libfunc;
231 /* Add a REG_EQUAL note to the last insn in INSNS. TARGET is being set to
232 the result of operation CODE applied to OP0 (and OP1 if it is a binary
235 If the last insn does not set TARGET, don't do anything, but return 1.
237 If a previous insn sets TARGET and TARGET is one of OP0 or OP1,
238 don't add the REG_EQUAL note but return 0. Our caller can then try
239 again, ensuring that TARGET is not one of the operands. */
242 add_equal_note (rtx insns, rtx target, enum rtx_code code, rtx op0, rtx op1)
244 rtx last_insn, insn, set;
247 gcc_assert (insns && INSN_P (insns) && NEXT_INSN (insns));
249 if (GET_RTX_CLASS (code) != RTX_COMM_ARITH
250 && GET_RTX_CLASS (code) != RTX_BIN_ARITH
251 && GET_RTX_CLASS (code) != RTX_COMM_COMPARE
252 && GET_RTX_CLASS (code) != RTX_COMPARE
253 && GET_RTX_CLASS (code) != RTX_UNARY)
256 if (GET_CODE (target) == ZERO_EXTRACT)
259 for (last_insn = insns;
260 NEXT_INSN (last_insn) != NULL_RTX;
261 last_insn = NEXT_INSN (last_insn))
264 set = single_set (last_insn);
268 if (! rtx_equal_p (SET_DEST (set), target)
269 /* For a STRICT_LOW_PART, the REG_NOTE applies to what is inside it. */
270 && (GET_CODE (SET_DEST (set)) != STRICT_LOW_PART
271 || ! rtx_equal_p (XEXP (SET_DEST (set), 0), target)))
274 /* If TARGET is in OP0 or OP1, check if anything in SEQ sets TARGET
275 besides the last insn. */
276 if (reg_overlap_mentioned_p (target, op0)
277 || (op1 && reg_overlap_mentioned_p (target, op1)))
279 insn = PREV_INSN (last_insn);
280 while (insn != NULL_RTX)
282 if (reg_set_p (target, insn))
285 insn = PREV_INSN (insn);
289 if (GET_RTX_CLASS (code) == RTX_UNARY)
290 note = gen_rtx_fmt_e (code, GET_MODE (target), copy_rtx (op0));
292 note = gen_rtx_fmt_ee (code, GET_MODE (target), copy_rtx (op0), copy_rtx (op1));
294 set_unique_reg_note (last_insn, REG_EQUAL, note);
299 /* Widen OP to MODE and return the rtx for the widened operand. UNSIGNEDP
300 says whether OP is signed or unsigned. NO_EXTEND is nonzero if we need
301 not actually do a sign-extend or zero-extend, but can leave the
302 higher-order bits of the result rtx undefined, for example, in the case
303 of logical operations, but not right shifts. */
306 widen_operand (rtx op, enum machine_mode mode, enum machine_mode oldmode,
307 int unsignedp, int no_extend)
311 /* If we don't have to extend and this is a constant, return it. */
312 if (no_extend && GET_MODE (op) == VOIDmode)
315 /* If we must extend do so. If OP is a SUBREG for a promoted object, also
316 extend since it will be more efficient to do so unless the signedness of
317 a promoted object differs from our extension. */
319 || (GET_CODE (op) == SUBREG && SUBREG_PROMOTED_VAR_P (op)
320 && SUBREG_PROMOTED_UNSIGNED_P (op) == unsignedp))
321 return convert_modes (mode, oldmode, op, unsignedp);
323 /* If MODE is no wider than a single word, we return a paradoxical
325 if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
326 return gen_rtx_SUBREG (mode, force_reg (GET_MODE (op), op), 0);
328 /* Otherwise, get an object of MODE, clobber it, and set the low-order
331 result = gen_reg_rtx (mode);
332 emit_clobber (result);
333 emit_move_insn (gen_lowpart (GET_MODE (op), result), op);
337 /* Return the optab used for computing the operation given by the tree code,
338 CODE and the tree EXP. This function is not always usable (for example, it
339 cannot give complete results for multiplication or division) but probably
340 ought to be relied on more widely throughout the expander. */
342 optab_for_tree_code (enum tree_code code, const_tree type,
343 enum optab_subtype subtype)
355 return one_cmpl_optab;
364 return TYPE_UNSIGNED (type) ? umod_optab : smod_optab;
372 if (TYPE_SATURATING(type))
373 return TYPE_UNSIGNED(type) ? usdiv_optab : ssdiv_optab;
374 return TYPE_UNSIGNED (type) ? udiv_optab : sdiv_optab;
377 if (VECTOR_MODE_P (TYPE_MODE (type)))
379 if (subtype == optab_vector)
380 return TYPE_SATURATING (type) ? NULL : vashl_optab;
382 gcc_assert (subtype == optab_scalar);
384 if (TYPE_SATURATING(type))
385 return TYPE_UNSIGNED(type) ? usashl_optab : ssashl_optab;
389 if (VECTOR_MODE_P (TYPE_MODE (type)))
391 if (subtype == optab_vector)
392 return TYPE_UNSIGNED (type) ? vlshr_optab : vashr_optab;
394 gcc_assert (subtype == optab_scalar);
396 return TYPE_UNSIGNED (type) ? lshr_optab : ashr_optab;
399 if (VECTOR_MODE_P (TYPE_MODE (type)))
401 if (subtype == optab_vector)
404 gcc_assert (subtype == optab_scalar);
409 if (VECTOR_MODE_P (TYPE_MODE (type)))
411 if (subtype == optab_vector)
414 gcc_assert (subtype == optab_scalar);
419 return TYPE_UNSIGNED (type) ? umax_optab : smax_optab;
422 return TYPE_UNSIGNED (type) ? umin_optab : smin_optab;
424 case REALIGN_LOAD_EXPR:
425 return vec_realign_load_optab;
428 return TYPE_UNSIGNED (type) ? usum_widen_optab : ssum_widen_optab;
431 return TYPE_UNSIGNED (type) ? udot_prod_optab : sdot_prod_optab;
434 return TYPE_UNSIGNED (type) ? reduc_umax_optab : reduc_smax_optab;
437 return TYPE_UNSIGNED (type) ? reduc_umin_optab : reduc_smin_optab;
439 case REDUC_PLUS_EXPR:
440 return TYPE_UNSIGNED (type) ? reduc_uplus_optab : reduc_splus_optab;
442 case VEC_LSHIFT_EXPR:
443 return vec_shl_optab;
445 case VEC_RSHIFT_EXPR:
446 return vec_shr_optab;
448 case VEC_WIDEN_MULT_HI_EXPR:
449 return TYPE_UNSIGNED (type) ?
450 vec_widen_umult_hi_optab : vec_widen_smult_hi_optab;
452 case VEC_WIDEN_MULT_LO_EXPR:
453 return TYPE_UNSIGNED (type) ?
454 vec_widen_umult_lo_optab : vec_widen_smult_lo_optab;
456 case VEC_UNPACK_HI_EXPR:
457 return TYPE_UNSIGNED (type) ?
458 vec_unpacku_hi_optab : vec_unpacks_hi_optab;
460 case VEC_UNPACK_LO_EXPR:
461 return TYPE_UNSIGNED (type) ?
462 vec_unpacku_lo_optab : vec_unpacks_lo_optab;
464 case VEC_UNPACK_FLOAT_HI_EXPR:
465 /* The signedness is determined from input operand. */
466 return TYPE_UNSIGNED (type) ?
467 vec_unpacku_float_hi_optab : vec_unpacks_float_hi_optab;
469 case VEC_UNPACK_FLOAT_LO_EXPR:
470 /* The signedness is determined from input operand. */
471 return TYPE_UNSIGNED (type) ?
472 vec_unpacku_float_lo_optab : vec_unpacks_float_lo_optab;
474 case VEC_PACK_TRUNC_EXPR:
475 return vec_pack_trunc_optab;
477 case VEC_PACK_SAT_EXPR:
478 return TYPE_UNSIGNED (type) ? vec_pack_usat_optab : vec_pack_ssat_optab;
480 case VEC_PACK_FIX_TRUNC_EXPR:
481 /* The signedness is determined from output operand. */
482 return TYPE_UNSIGNED (type) ?
483 vec_pack_ufix_trunc_optab : vec_pack_sfix_trunc_optab;
489 trapv = INTEGRAL_TYPE_P (type) && TYPE_OVERFLOW_TRAPS (type);
492 case POINTER_PLUS_EXPR:
494 if (TYPE_SATURATING(type))
495 return TYPE_UNSIGNED(type) ? usadd_optab : ssadd_optab;
496 return trapv ? addv_optab : add_optab;
499 if (TYPE_SATURATING(type))
500 return TYPE_UNSIGNED(type) ? ussub_optab : sssub_optab;
501 return trapv ? subv_optab : sub_optab;
504 if (TYPE_SATURATING(type))
505 return TYPE_UNSIGNED(type) ? usmul_optab : ssmul_optab;
506 return trapv ? smulv_optab : smul_optab;
509 if (TYPE_SATURATING(type))
510 return TYPE_UNSIGNED(type) ? usneg_optab : ssneg_optab;
511 return trapv ? negv_optab : neg_optab;
514 return trapv ? absv_optab : abs_optab;
516 case VEC_EXTRACT_EVEN_EXPR:
517 return vec_extract_even_optab;
519 case VEC_EXTRACT_ODD_EXPR:
520 return vec_extract_odd_optab;
522 case VEC_INTERLEAVE_HIGH_EXPR:
523 return vec_interleave_high_optab;
525 case VEC_INTERLEAVE_LOW_EXPR:
526 return vec_interleave_low_optab;
534 /* Expand vector widening operations.
536 There are two different classes of operations handled here:
537 1) Operations whose result is wider than all the arguments to the operation.
538 Examples: VEC_UNPACK_HI/LO_EXPR, VEC_WIDEN_MULT_HI/LO_EXPR
539 In this case OP0 and optionally OP1 would be initialized,
540 but WIDE_OP wouldn't (not relevant for this case).
541 2) Operations whose result is of the same size as the last argument to the
542 operation, but wider than all the other arguments to the operation.
543 Examples: WIDEN_SUM_EXPR, VEC_DOT_PROD_EXPR.
544 In the case WIDE_OP, OP0 and optionally OP1 would be initialized.
546 E.g, when called to expand the following operations, this is how
547 the arguments will be initialized:
549 widening-sum 2 oprnd0 - oprnd1
550 widening-dot-product 3 oprnd0 oprnd1 oprnd2
551 widening-mult 2 oprnd0 oprnd1 -
552 type-promotion (vec-unpack) 1 oprnd0 - - */
555 expand_widen_pattern_expr (tree exp, rtx op0, rtx op1, rtx wide_op, rtx target,
558 tree oprnd0, oprnd1, oprnd2;
559 enum machine_mode wmode = 0, tmode0, tmode1 = 0;
560 optab widen_pattern_optab;
562 enum machine_mode xmode0, xmode1 = 0, wxmode = 0;
565 rtx xop0, xop1, wxop;
566 int nops = TREE_OPERAND_LENGTH (exp);
568 oprnd0 = TREE_OPERAND (exp, 0);
569 tmode0 = TYPE_MODE (TREE_TYPE (oprnd0));
570 widen_pattern_optab =
571 optab_for_tree_code (TREE_CODE (exp), TREE_TYPE (oprnd0), optab_default);
572 icode = (int) optab_handler (widen_pattern_optab, tmode0)->insn_code;
573 gcc_assert (icode != CODE_FOR_nothing);
574 xmode0 = insn_data[icode].operand[1].mode;
578 oprnd1 = TREE_OPERAND (exp, 1);
579 tmode1 = TYPE_MODE (TREE_TYPE (oprnd1));
580 xmode1 = insn_data[icode].operand[2].mode;
583 /* The last operand is of a wider mode than the rest of the operands. */
591 gcc_assert (tmode1 == tmode0);
593 oprnd2 = TREE_OPERAND (exp, 2);
594 wmode = TYPE_MODE (TREE_TYPE (oprnd2));
595 wxmode = insn_data[icode].operand[3].mode;
599 wmode = wxmode = insn_data[icode].operand[0].mode;
602 || ! (*insn_data[icode].operand[0].predicate) (target, wmode))
603 temp = gen_reg_rtx (wmode);
611 /* In case the insn wants input operands in modes different from
612 those of the actual operands, convert the operands. It would
613 seem that we don't need to convert CONST_INTs, but we do, so
614 that they're properly zero-extended, sign-extended or truncated
617 if (GET_MODE (op0) != xmode0 && xmode0 != VOIDmode)
618 xop0 = convert_modes (xmode0,
619 GET_MODE (op0) != VOIDmode
625 if (GET_MODE (op1) != xmode1 && xmode1 != VOIDmode)
626 xop1 = convert_modes (xmode1,
627 GET_MODE (op1) != VOIDmode
633 if (GET_MODE (wide_op) != wxmode && wxmode != VOIDmode)
634 wxop = convert_modes (wxmode,
635 GET_MODE (wide_op) != VOIDmode
640 /* Now, if insn's predicates don't allow our operands, put them into
643 if (! (*insn_data[icode].operand[1].predicate) (xop0, xmode0)
644 && xmode0 != VOIDmode)
645 xop0 = copy_to_mode_reg (xmode0, xop0);
649 if (! (*insn_data[icode].operand[2].predicate) (xop1, xmode1)
650 && xmode1 != VOIDmode)
651 xop1 = copy_to_mode_reg (xmode1, xop1);
655 if (! (*insn_data[icode].operand[3].predicate) (wxop, wxmode)
656 && wxmode != VOIDmode)
657 wxop = copy_to_mode_reg (wxmode, wxop);
659 pat = GEN_FCN (icode) (temp, xop0, xop1, wxop);
662 pat = GEN_FCN (icode) (temp, xop0, xop1);
668 if (! (*insn_data[icode].operand[2].predicate) (wxop, wxmode)
669 && wxmode != VOIDmode)
670 wxop = copy_to_mode_reg (wxmode, wxop);
672 pat = GEN_FCN (icode) (temp, xop0, wxop);
675 pat = GEN_FCN (icode) (temp, xop0);
682 /* Generate code to perform an operation specified by TERNARY_OPTAB
683 on operands OP0, OP1 and OP2, with result having machine-mode MODE.
685 UNSIGNEDP is for the case where we have to widen the operands
686 to perform the operation. It says to use zero-extension.
688 If TARGET is nonzero, the value
689 is generated there, if it is convenient to do so.
690 In all cases an rtx is returned for the locus of the value;
691 this may or may not be TARGET. */
694 expand_ternary_op (enum machine_mode mode, optab ternary_optab, rtx op0,
695 rtx op1, rtx op2, rtx target, int unsignedp)
697 int icode = (int) optab_handler (ternary_optab, mode)->insn_code;
698 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
699 enum machine_mode mode1 = insn_data[icode].operand[2].mode;
700 enum machine_mode mode2 = insn_data[icode].operand[3].mode;
703 rtx xop0 = op0, xop1 = op1, xop2 = op2;
705 gcc_assert (optab_handler (ternary_optab, mode)->insn_code
706 != CODE_FOR_nothing);
708 if (!target || !insn_data[icode].operand[0].predicate (target, mode))
709 temp = gen_reg_rtx (mode);
713 /* In case the insn wants input operands in modes different from
714 those of the actual operands, convert the operands. It would
715 seem that we don't need to convert CONST_INTs, but we do, so
716 that they're properly zero-extended, sign-extended or truncated
719 if (GET_MODE (op0) != mode0 && mode0 != VOIDmode)
720 xop0 = convert_modes (mode0,
721 GET_MODE (op0) != VOIDmode
726 if (GET_MODE (op1) != mode1 && mode1 != VOIDmode)
727 xop1 = convert_modes (mode1,
728 GET_MODE (op1) != VOIDmode
733 if (GET_MODE (op2) != mode2 && mode2 != VOIDmode)
734 xop2 = convert_modes (mode2,
735 GET_MODE (op2) != VOIDmode
740 /* Now, if insn's predicates don't allow our operands, put them into
743 if (!insn_data[icode].operand[1].predicate (xop0, mode0)
744 && mode0 != VOIDmode)
745 xop0 = copy_to_mode_reg (mode0, xop0);
747 if (!insn_data[icode].operand[2].predicate (xop1, mode1)
748 && mode1 != VOIDmode)
749 xop1 = copy_to_mode_reg (mode1, xop1);
751 if (!insn_data[icode].operand[3].predicate (xop2, mode2)
752 && mode2 != VOIDmode)
753 xop2 = copy_to_mode_reg (mode2, xop2);
755 pat = GEN_FCN (icode) (temp, xop0, xop1, xop2);
762 /* Like expand_binop, but return a constant rtx if the result can be
763 calculated at compile time. The arguments and return value are
764 otherwise the same as for expand_binop. */
767 simplify_expand_binop (enum machine_mode mode, optab binoptab,
768 rtx op0, rtx op1, rtx target, int unsignedp,
769 enum optab_methods methods)
771 if (CONSTANT_P (op0) && CONSTANT_P (op1))
773 rtx x = simplify_binary_operation (binoptab->code, mode, op0, op1);
779 return expand_binop (mode, binoptab, op0, op1, target, unsignedp, methods);
782 /* Like simplify_expand_binop, but always put the result in TARGET.
783 Return true if the expansion succeeded. */
786 force_expand_binop (enum machine_mode mode, optab binoptab,
787 rtx op0, rtx op1, rtx target, int unsignedp,
788 enum optab_methods methods)
790 rtx x = simplify_expand_binop (mode, binoptab, op0, op1,
791 target, unsignedp, methods);
795 emit_move_insn (target, x);
799 /* Generate insns for VEC_LSHIFT_EXPR, VEC_RSHIFT_EXPR. */
802 expand_vec_shift_expr (tree vec_shift_expr, rtx target)
804 enum insn_code icode;
805 rtx rtx_op1, rtx_op2;
806 enum machine_mode mode1;
807 enum machine_mode mode2;
808 enum machine_mode mode = TYPE_MODE (TREE_TYPE (vec_shift_expr));
809 tree vec_oprnd = TREE_OPERAND (vec_shift_expr, 0);
810 tree shift_oprnd = TREE_OPERAND (vec_shift_expr, 1);
814 switch (TREE_CODE (vec_shift_expr))
816 case VEC_RSHIFT_EXPR:
817 shift_optab = vec_shr_optab;
819 case VEC_LSHIFT_EXPR:
820 shift_optab = vec_shl_optab;
826 icode = (int) optab_handler (shift_optab, mode)->insn_code;
827 gcc_assert (icode != CODE_FOR_nothing);
829 mode1 = insn_data[icode].operand[1].mode;
830 mode2 = insn_data[icode].operand[2].mode;
832 rtx_op1 = expand_normal (vec_oprnd);
833 if (!(*insn_data[icode].operand[1].predicate) (rtx_op1, mode1)
834 && mode1 != VOIDmode)
835 rtx_op1 = force_reg (mode1, rtx_op1);
837 rtx_op2 = expand_normal (shift_oprnd);
838 if (!(*insn_data[icode].operand[2].predicate) (rtx_op2, mode2)
839 && mode2 != VOIDmode)
840 rtx_op2 = force_reg (mode2, rtx_op2);
843 || ! (*insn_data[icode].operand[0].predicate) (target, mode))
844 target = gen_reg_rtx (mode);
846 /* Emit instruction */
847 pat = GEN_FCN (icode) (target, rtx_op1, rtx_op2);
854 /* This subroutine of expand_doubleword_shift handles the cases in which
855 the effective shift value is >= BITS_PER_WORD. The arguments and return
856 value are the same as for the parent routine, except that SUPERWORD_OP1
857 is the shift count to use when shifting OUTOF_INPUT into INTO_TARGET.
858 INTO_TARGET may be null if the caller has decided to calculate it. */
861 expand_superword_shift (optab binoptab, rtx outof_input, rtx superword_op1,
862 rtx outof_target, rtx into_target,
863 int unsignedp, enum optab_methods methods)
865 if (into_target != 0)
866 if (!force_expand_binop (word_mode, binoptab, outof_input, superword_op1,
867 into_target, unsignedp, methods))
870 if (outof_target != 0)
872 /* For a signed right shift, we must fill OUTOF_TARGET with copies
873 of the sign bit, otherwise we must fill it with zeros. */
874 if (binoptab != ashr_optab)
875 emit_move_insn (outof_target, CONST0_RTX (word_mode));
877 if (!force_expand_binop (word_mode, binoptab,
878 outof_input, GEN_INT (BITS_PER_WORD - 1),
879 outof_target, unsignedp, methods))
885 /* This subroutine of expand_doubleword_shift handles the cases in which
886 the effective shift value is < BITS_PER_WORD. The arguments and return
887 value are the same as for the parent routine. */
890 expand_subword_shift (enum machine_mode op1_mode, optab binoptab,
891 rtx outof_input, rtx into_input, rtx op1,
892 rtx outof_target, rtx into_target,
893 int unsignedp, enum optab_methods methods,
894 unsigned HOST_WIDE_INT shift_mask)
896 optab reverse_unsigned_shift, unsigned_shift;
899 reverse_unsigned_shift = (binoptab == ashl_optab ? lshr_optab : ashl_optab);
900 unsigned_shift = (binoptab == ashl_optab ? ashl_optab : lshr_optab);
902 /* The low OP1 bits of INTO_TARGET come from the high bits of OUTOF_INPUT.
903 We therefore need to shift OUTOF_INPUT by (BITS_PER_WORD - OP1) bits in
904 the opposite direction to BINOPTAB. */
905 if (CONSTANT_P (op1) || shift_mask >= BITS_PER_WORD)
907 carries = outof_input;
908 tmp = immed_double_const (BITS_PER_WORD, 0, op1_mode);
909 tmp = simplify_expand_binop (op1_mode, sub_optab, tmp, op1,
914 /* We must avoid shifting by BITS_PER_WORD bits since that is either
915 the same as a zero shift (if shift_mask == BITS_PER_WORD - 1) or
916 has unknown behavior. Do a single shift first, then shift by the
917 remainder. It's OK to use ~OP1 as the remainder if shift counts
918 are truncated to the mode size. */
919 carries = expand_binop (word_mode, reverse_unsigned_shift,
920 outof_input, const1_rtx, 0, unsignedp, methods);
921 if (shift_mask == BITS_PER_WORD - 1)
923 tmp = immed_double_const (-1, -1, op1_mode);
924 tmp = simplify_expand_binop (op1_mode, xor_optab, op1, tmp,
929 tmp = immed_double_const (BITS_PER_WORD - 1, 0, op1_mode);
930 tmp = simplify_expand_binop (op1_mode, sub_optab, tmp, op1,
934 if (tmp == 0 || carries == 0)
936 carries = expand_binop (word_mode, reverse_unsigned_shift,
937 carries, tmp, 0, unsignedp, methods);
941 /* Shift INTO_INPUT logically by OP1. This is the last use of INTO_INPUT
942 so the result can go directly into INTO_TARGET if convenient. */
943 tmp = expand_binop (word_mode, unsigned_shift, into_input, op1,
944 into_target, unsignedp, methods);
948 /* Now OR in the bits carried over from OUTOF_INPUT. */
949 if (!force_expand_binop (word_mode, ior_optab, tmp, carries,
950 into_target, unsignedp, methods))
953 /* Use a standard word_mode shift for the out-of half. */
954 if (outof_target != 0)
955 if (!force_expand_binop (word_mode, binoptab, outof_input, op1,
956 outof_target, unsignedp, methods))
963 #ifdef HAVE_conditional_move
964 /* Try implementing expand_doubleword_shift using conditional moves.
965 The shift is by < BITS_PER_WORD if (CMP_CODE CMP1 CMP2) is true,
966 otherwise it is by >= BITS_PER_WORD. SUBWORD_OP1 and SUPERWORD_OP1
967 are the shift counts to use in the former and latter case. All other
968 arguments are the same as the parent routine. */
971 expand_doubleword_shift_condmove (enum machine_mode op1_mode, optab binoptab,
972 enum rtx_code cmp_code, rtx cmp1, rtx cmp2,
973 rtx outof_input, rtx into_input,
974 rtx subword_op1, rtx superword_op1,
975 rtx outof_target, rtx into_target,
976 int unsignedp, enum optab_methods methods,
977 unsigned HOST_WIDE_INT shift_mask)
979 rtx outof_superword, into_superword;
981 /* Put the superword version of the output into OUTOF_SUPERWORD and
983 outof_superword = outof_target != 0 ? gen_reg_rtx (word_mode) : 0;
984 if (outof_target != 0 && subword_op1 == superword_op1)
986 /* The value INTO_TARGET >> SUBWORD_OP1, which we later store in
987 OUTOF_TARGET, is the same as the value of INTO_SUPERWORD. */
988 into_superword = outof_target;
989 if (!expand_superword_shift (binoptab, outof_input, superword_op1,
990 outof_superword, 0, unsignedp, methods))
995 into_superword = gen_reg_rtx (word_mode);
996 if (!expand_superword_shift (binoptab, outof_input, superword_op1,
997 outof_superword, into_superword,
1002 /* Put the subword version directly in OUTOF_TARGET and INTO_TARGET. */
1003 if (!expand_subword_shift (op1_mode, binoptab,
1004 outof_input, into_input, subword_op1,
1005 outof_target, into_target,
1006 unsignedp, methods, shift_mask))
1009 /* Select between them. Do the INTO half first because INTO_SUPERWORD
1010 might be the current value of OUTOF_TARGET. */
1011 if (!emit_conditional_move (into_target, cmp_code, cmp1, cmp2, op1_mode,
1012 into_target, into_superword, word_mode, false))
1015 if (outof_target != 0)
1016 if (!emit_conditional_move (outof_target, cmp_code, cmp1, cmp2, op1_mode,
1017 outof_target, outof_superword,
1025 /* Expand a doubleword shift (ashl, ashr or lshr) using word-mode shifts.
1026 OUTOF_INPUT and INTO_INPUT are the two word-sized halves of the first
1027 input operand; the shift moves bits in the direction OUTOF_INPUT->
1028 INTO_TARGET. OUTOF_TARGET and INTO_TARGET are the equivalent words
1029 of the target. OP1 is the shift count and OP1_MODE is its mode.
1030 If OP1 is constant, it will have been truncated as appropriate
1031 and is known to be nonzero.
1033 If SHIFT_MASK is zero, the result of word shifts is undefined when the
1034 shift count is outside the range [0, BITS_PER_WORD). This routine must
1035 avoid generating such shifts for OP1s in the range [0, BITS_PER_WORD * 2).
1037 If SHIFT_MASK is nonzero, all word-mode shift counts are effectively
1038 masked by it and shifts in the range [BITS_PER_WORD, SHIFT_MASK) will
1039 fill with zeros or sign bits as appropriate.
1041 If SHIFT_MASK is BITS_PER_WORD - 1, this routine will synthesize
1042 a doubleword shift whose equivalent mask is BITS_PER_WORD * 2 - 1.
1043 Doing this preserves semantics required by SHIFT_COUNT_TRUNCATED.
1044 In all other cases, shifts by values outside [0, BITS_PER_UNIT * 2)
1047 BINOPTAB, UNSIGNEDP and METHODS are as for expand_binop. This function
1048 may not use INTO_INPUT after modifying INTO_TARGET, and similarly for
1049 OUTOF_INPUT and OUTOF_TARGET. OUTOF_TARGET can be null if the parent
1050 function wants to calculate it itself.
1052 Return true if the shift could be successfully synthesized. */
1055 expand_doubleword_shift (enum machine_mode op1_mode, optab binoptab,
1056 rtx outof_input, rtx into_input, rtx op1,
1057 rtx outof_target, rtx into_target,
1058 int unsignedp, enum optab_methods methods,
1059 unsigned HOST_WIDE_INT shift_mask)
1061 rtx superword_op1, tmp, cmp1, cmp2;
1062 rtx subword_label, done_label;
1063 enum rtx_code cmp_code;
1065 /* See if word-mode shifts by BITS_PER_WORD...BITS_PER_WORD * 2 - 1 will
1066 fill the result with sign or zero bits as appropriate. If so, the value
1067 of OUTOF_TARGET will always be (SHIFT OUTOF_INPUT OP1). Recursively call
1068 this routine to calculate INTO_TARGET (which depends on both OUTOF_INPUT
1069 and INTO_INPUT), then emit code to set up OUTOF_TARGET.
1071 This isn't worthwhile for constant shifts since the optimizers will
1072 cope better with in-range shift counts. */
1073 if (shift_mask >= BITS_PER_WORD
1074 && outof_target != 0
1075 && !CONSTANT_P (op1))
1077 if (!expand_doubleword_shift (op1_mode, binoptab,
1078 outof_input, into_input, op1,
1080 unsignedp, methods, shift_mask))
1082 if (!force_expand_binop (word_mode, binoptab, outof_input, op1,
1083 outof_target, unsignedp, methods))
1088 /* Set CMP_CODE, CMP1 and CMP2 so that the rtx (CMP_CODE CMP1 CMP2)
1089 is true when the effective shift value is less than BITS_PER_WORD.
1090 Set SUPERWORD_OP1 to the shift count that should be used to shift
1091 OUTOF_INPUT into INTO_TARGET when the condition is false. */
1092 tmp = immed_double_const (BITS_PER_WORD, 0, op1_mode);
1093 if (!CONSTANT_P (op1) && shift_mask == BITS_PER_WORD - 1)
1095 /* Set CMP1 to OP1 & BITS_PER_WORD. The result is zero iff OP1
1096 is a subword shift count. */
1097 cmp1 = simplify_expand_binop (op1_mode, and_optab, op1, tmp,
1099 cmp2 = CONST0_RTX (op1_mode);
1101 superword_op1 = op1;
1105 /* Set CMP1 to OP1 - BITS_PER_WORD. */
1106 cmp1 = simplify_expand_binop (op1_mode, sub_optab, op1, tmp,
1108 cmp2 = CONST0_RTX (op1_mode);
1110 superword_op1 = cmp1;
1115 /* If we can compute the condition at compile time, pick the
1116 appropriate subroutine. */
1117 tmp = simplify_relational_operation (cmp_code, SImode, op1_mode, cmp1, cmp2);
1118 if (tmp != 0 && GET_CODE (tmp) == CONST_INT)
1120 if (tmp == const0_rtx)
1121 return expand_superword_shift (binoptab, outof_input, superword_op1,
1122 outof_target, into_target,
1123 unsignedp, methods);
1125 return expand_subword_shift (op1_mode, binoptab,
1126 outof_input, into_input, op1,
1127 outof_target, into_target,
1128 unsignedp, methods, shift_mask);
1131 #ifdef HAVE_conditional_move
1132 /* Try using conditional moves to generate straight-line code. */
1134 rtx start = get_last_insn ();
1135 if (expand_doubleword_shift_condmove (op1_mode, binoptab,
1136 cmp_code, cmp1, cmp2,
1137 outof_input, into_input,
1139 outof_target, into_target,
1140 unsignedp, methods, shift_mask))
1142 delete_insns_since (start);
1146 /* As a last resort, use branches to select the correct alternative. */
1147 subword_label = gen_label_rtx ();
1148 done_label = gen_label_rtx ();
1151 do_compare_rtx_and_jump (cmp1, cmp2, cmp_code, false, op1_mode,
1152 0, 0, subword_label);
1155 if (!expand_superword_shift (binoptab, outof_input, superword_op1,
1156 outof_target, into_target,
1157 unsignedp, methods))
1160 emit_jump_insn (gen_jump (done_label));
1162 emit_label (subword_label);
1164 if (!expand_subword_shift (op1_mode, binoptab,
1165 outof_input, into_input, op1,
1166 outof_target, into_target,
1167 unsignedp, methods, shift_mask))
1170 emit_label (done_label);
1174 /* Subroutine of expand_binop. Perform a double word multiplication of
1175 operands OP0 and OP1 both of mode MODE, which is exactly twice as wide
1176 as the target's word_mode. This function return NULL_RTX if anything
1177 goes wrong, in which case it may have already emitted instructions
1178 which need to be deleted.
1180 If we want to multiply two two-word values and have normal and widening
1181 multiplies of single-word values, we can do this with three smaller
1184 The multiplication proceeds as follows:
1185 _______________________
1186 [__op0_high_|__op0_low__]
1187 _______________________
1188 * [__op1_high_|__op1_low__]
1189 _______________________________________________
1190 _______________________
1191 (1) [__op0_low__*__op1_low__]
1192 _______________________
1193 (2a) [__op0_low__*__op1_high_]
1194 _______________________
1195 (2b) [__op0_high_*__op1_low__]
1196 _______________________
1197 (3) [__op0_high_*__op1_high_]
1200 This gives a 4-word result. Since we are only interested in the
1201 lower 2 words, partial result (3) and the upper words of (2a) and
1202 (2b) don't need to be calculated. Hence (2a) and (2b) can be
1203 calculated using non-widening multiplication.
1205 (1), however, needs to be calculated with an unsigned widening
1206 multiplication. If this operation is not directly supported we
1207 try using a signed widening multiplication and adjust the result.
1208 This adjustment works as follows:
1210 If both operands are positive then no adjustment is needed.
1212 If the operands have different signs, for example op0_low < 0 and
1213 op1_low >= 0, the instruction treats the most significant bit of
1214 op0_low as a sign bit instead of a bit with significance
1215 2**(BITS_PER_WORD-1), i.e. the instruction multiplies op1_low
1216 with 2**BITS_PER_WORD - op0_low, and two's complements the
1217 result. Conclusion: We need to add op1_low * 2**BITS_PER_WORD to
1220 Similarly, if both operands are negative, we need to add
1221 (op0_low + op1_low) * 2**BITS_PER_WORD.
1223 We use a trick to adjust quickly. We logically shift op0_low right
1224 (op1_low) BITS_PER_WORD-1 steps to get 0 or 1, and add this to
1225 op0_high (op1_high) before it is used to calculate 2b (2a). If no
1226 logical shift exists, we do an arithmetic right shift and subtract
1230 expand_doubleword_mult (enum machine_mode mode, rtx op0, rtx op1, rtx target,
1231 bool umulp, enum optab_methods methods)
1233 int low = (WORDS_BIG_ENDIAN ? 1 : 0);
1234 int high = (WORDS_BIG_ENDIAN ? 0 : 1);
1235 rtx wordm1 = umulp ? NULL_RTX : GEN_INT (BITS_PER_WORD - 1);
1236 rtx product, adjust, product_high, temp;
1238 rtx op0_high = operand_subword_force (op0, high, mode);
1239 rtx op0_low = operand_subword_force (op0, low, mode);
1240 rtx op1_high = operand_subword_force (op1, high, mode);
1241 rtx op1_low = operand_subword_force (op1, low, mode);
1243 /* If we're using an unsigned multiply to directly compute the product
1244 of the low-order words of the operands and perform any required
1245 adjustments of the operands, we begin by trying two more multiplications
1246 and then computing the appropriate sum.
1248 We have checked above that the required addition is provided.
1249 Full-word addition will normally always succeed, especially if
1250 it is provided at all, so we don't worry about its failure. The
1251 multiplication may well fail, however, so we do handle that. */
1255 /* ??? This could be done with emit_store_flag where available. */
1256 temp = expand_binop (word_mode, lshr_optab, op0_low, wordm1,
1257 NULL_RTX, 1, methods);
1259 op0_high = expand_binop (word_mode, add_optab, op0_high, temp,
1260 NULL_RTX, 0, OPTAB_DIRECT);
1263 temp = expand_binop (word_mode, ashr_optab, op0_low, wordm1,
1264 NULL_RTX, 0, methods);
1267 op0_high = expand_binop (word_mode, sub_optab, op0_high, temp,
1268 NULL_RTX, 0, OPTAB_DIRECT);
1275 adjust = expand_binop (word_mode, smul_optab, op0_high, op1_low,
1276 NULL_RTX, 0, OPTAB_DIRECT);
1280 /* OP0_HIGH should now be dead. */
1284 /* ??? This could be done with emit_store_flag where available. */
1285 temp = expand_binop (word_mode, lshr_optab, op1_low, wordm1,
1286 NULL_RTX, 1, methods);
1288 op1_high = expand_binop (word_mode, add_optab, op1_high, temp,
1289 NULL_RTX, 0, OPTAB_DIRECT);
1292 temp = expand_binop (word_mode, ashr_optab, op1_low, wordm1,
1293 NULL_RTX, 0, methods);
1296 op1_high = expand_binop (word_mode, sub_optab, op1_high, temp,
1297 NULL_RTX, 0, OPTAB_DIRECT);
1304 temp = expand_binop (word_mode, smul_optab, op1_high, op0_low,
1305 NULL_RTX, 0, OPTAB_DIRECT);
1309 /* OP1_HIGH should now be dead. */
1311 adjust = expand_binop (word_mode, add_optab, adjust, temp,
1312 adjust, 0, OPTAB_DIRECT);
1314 if (target && !REG_P (target))
1318 product = expand_binop (mode, umul_widen_optab, op0_low, op1_low,
1319 target, 1, OPTAB_DIRECT);
1321 product = expand_binop (mode, smul_widen_optab, op0_low, op1_low,
1322 target, 1, OPTAB_DIRECT);
1327 product_high = operand_subword (product, high, 1, mode);
1328 adjust = expand_binop (word_mode, add_optab, product_high, adjust,
1329 REG_P (product_high) ? product_high : adjust,
1331 emit_move_insn (product_high, adjust);
1335 /* Wrapper around expand_binop which takes an rtx code to specify
1336 the operation to perform, not an optab pointer. All other
1337 arguments are the same. */
1339 expand_simple_binop (enum machine_mode mode, enum rtx_code code, rtx op0,
1340 rtx op1, rtx target, int unsignedp,
1341 enum optab_methods methods)
1343 optab binop = code_to_optab[(int) code];
1346 return expand_binop (mode, binop, op0, op1, target, unsignedp, methods);
1349 /* Return whether OP0 and OP1 should be swapped when expanding a commutative
1350 binop. Order them according to commutative_operand_precedence and, if
1351 possible, try to put TARGET or a pseudo first. */
1353 swap_commutative_operands_with_target (rtx target, rtx op0, rtx op1)
1355 int op0_prec = commutative_operand_precedence (op0);
1356 int op1_prec = commutative_operand_precedence (op1);
1358 if (op0_prec < op1_prec)
1361 if (op0_prec > op1_prec)
1364 /* With equal precedence, both orders are ok, but it is better if the
1365 first operand is TARGET, or if both TARGET and OP0 are pseudos. */
1366 if (target == 0 || REG_P (target))
1367 return (REG_P (op1) && !REG_P (op0)) || target == op1;
1369 return rtx_equal_p (op1, target);
1372 /* Return true if BINOPTAB implements a shift operation. */
1375 shift_optab_p (optab binoptab)
1377 switch (binoptab->code)
1393 /* Return true if BINOPTAB implements a commutative binary operation. */
1396 commutative_optab_p (optab binoptab)
1398 return (GET_RTX_CLASS (binoptab->code) == RTX_COMM_ARITH
1399 || binoptab == smul_widen_optab
1400 || binoptab == umul_widen_optab
1401 || binoptab == smul_highpart_optab
1402 || binoptab == umul_highpart_optab);
1405 /* X is to be used in mode MODE as an operand to BINOPTAB. If we're
1406 optimizing, and if the operand is a constant that costs more than
1407 1 instruction, force the constant into a register and return that
1408 register. Return X otherwise. UNSIGNEDP says whether X is unsigned. */
1411 avoid_expensive_constant (enum machine_mode mode, optab binoptab,
1412 rtx x, bool unsignedp)
1414 if (mode != VOIDmode
1417 && rtx_cost (x, binoptab->code, optimize_insn_for_speed_p ())
1418 > COSTS_N_INSNS (1))
1420 if (GET_CODE (x) == CONST_INT)
1422 HOST_WIDE_INT intval = trunc_int_for_mode (INTVAL (x), mode);
1423 if (intval != INTVAL (x))
1424 x = GEN_INT (intval);
1427 x = convert_modes (mode, VOIDmode, x, unsignedp);
1428 x = force_reg (mode, x);
1433 /* Helper function for expand_binop: handle the case where there
1434 is an insn that directly implements the indicated operation.
1435 Returns null if this is not possible. */
1437 expand_binop_directly (enum machine_mode mode, optab binoptab,
1439 rtx target, int unsignedp, enum optab_methods methods,
1442 int icode = (int) optab_handler (binoptab, mode)->insn_code;
1443 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
1444 enum machine_mode mode1 = insn_data[icode].operand[2].mode;
1445 enum machine_mode tmp_mode;
1448 rtx xop0 = op0, xop1 = op1;
1455 temp = gen_reg_rtx (mode);
1457 /* If it is a commutative operator and the modes would match
1458 if we would swap the operands, we can save the conversions. */
1459 commutative_p = commutative_optab_p (binoptab);
1461 && GET_MODE (xop0) != mode0 && GET_MODE (xop1) != mode1
1462 && GET_MODE (xop0) == mode1 && GET_MODE (xop1) == mode1)
1469 /* If we are optimizing, force expensive constants into a register. */
1470 xop0 = avoid_expensive_constant (mode0, binoptab, xop0, unsignedp);
1471 if (!shift_optab_p (binoptab))
1472 xop1 = avoid_expensive_constant (mode1, binoptab, xop1, unsignedp);
1474 /* In case the insn wants input operands in modes different from
1475 those of the actual operands, convert the operands. It would
1476 seem that we don't need to convert CONST_INTs, but we do, so
1477 that they're properly zero-extended, sign-extended or truncated
1480 if (GET_MODE (xop0) != mode0 && mode0 != VOIDmode)
1481 xop0 = convert_modes (mode0,
1482 GET_MODE (xop0) != VOIDmode
1487 if (GET_MODE (xop1) != mode1 && mode1 != VOIDmode)
1488 xop1 = convert_modes (mode1,
1489 GET_MODE (xop1) != VOIDmode
1494 /* If operation is commutative,
1495 try to make the first operand a register.
1496 Even better, try to make it the same as the target.
1497 Also try to make the last operand a constant. */
1499 && swap_commutative_operands_with_target (target, xop0, xop1))
1506 /* Now, if insn's predicates don't allow our operands, put them into
1509 if (!insn_data[icode].operand[1].predicate (xop0, mode0)
1510 && mode0 != VOIDmode)
1511 xop0 = copy_to_mode_reg (mode0, xop0);
1513 if (!insn_data[icode].operand[2].predicate (xop1, mode1)
1514 && mode1 != VOIDmode)
1515 xop1 = copy_to_mode_reg (mode1, xop1);
1517 if (binoptab == vec_pack_trunc_optab
1518 || binoptab == vec_pack_usat_optab
1519 || binoptab == vec_pack_ssat_optab
1520 || binoptab == vec_pack_ufix_trunc_optab
1521 || binoptab == vec_pack_sfix_trunc_optab)
1523 /* The mode of the result is different then the mode of the
1525 tmp_mode = insn_data[icode].operand[0].mode;
1526 if (GET_MODE_NUNITS (tmp_mode) != 2 * GET_MODE_NUNITS (mode))
1532 if (!insn_data[icode].operand[0].predicate (temp, tmp_mode))
1533 temp = gen_reg_rtx (tmp_mode);
1535 pat = GEN_FCN (icode) (temp, xop0, xop1);
1538 /* If PAT is composed of more than one insn, try to add an appropriate
1539 REG_EQUAL note to it. If we can't because TEMP conflicts with an
1540 operand, call expand_binop again, this time without a target. */
1541 if (INSN_P (pat) && NEXT_INSN (pat) != NULL_RTX
1542 && ! add_equal_note (pat, temp, binoptab->code, xop0, xop1))
1544 delete_insns_since (last);
1545 return expand_binop (mode, binoptab, op0, op1, NULL_RTX,
1546 unsignedp, methods);
1553 delete_insns_since (last);
1557 /* Generate code to perform an operation specified by BINOPTAB
1558 on operands OP0 and OP1, with result having machine-mode MODE.
1560 UNSIGNEDP is for the case where we have to widen the operands
1561 to perform the operation. It says to use zero-extension.
1563 If TARGET is nonzero, the value
1564 is generated there, if it is convenient to do so.
1565 In all cases an rtx is returned for the locus of the value;
1566 this may or may not be TARGET. */
1569 expand_binop (enum machine_mode mode, optab binoptab, rtx op0, rtx op1,
1570 rtx target, int unsignedp, enum optab_methods methods)
1572 enum optab_methods next_methods
1573 = (methods == OPTAB_LIB || methods == OPTAB_LIB_WIDEN
1574 ? OPTAB_WIDEN : methods);
1575 enum mode_class mclass;
1576 enum machine_mode wider_mode;
1579 rtx entry_last = get_last_insn ();
1582 mclass = GET_MODE_CLASS (mode);
1584 /* If subtracting an integer constant, convert this into an addition of
1585 the negated constant. */
1587 if (binoptab == sub_optab && GET_CODE (op1) == CONST_INT)
1589 op1 = negate_rtx (mode, op1);
1590 binoptab = add_optab;
1593 /* Record where to delete back to if we backtrack. */
1594 last = get_last_insn ();
1596 /* If we can do it with a three-operand insn, do so. */
1598 if (methods != OPTAB_MUST_WIDEN
1599 && optab_handler (binoptab, mode)->insn_code != CODE_FOR_nothing)
1601 temp = expand_binop_directly (mode, binoptab, op0, op1, target,
1602 unsignedp, methods, last);
1607 /* If we were trying to rotate, and that didn't work, try rotating
1608 the other direction before falling back to shifts and bitwise-or. */
1609 if (((binoptab == rotl_optab
1610 && optab_handler (rotr_optab, mode)->insn_code != CODE_FOR_nothing)
1611 || (binoptab == rotr_optab
1612 && optab_handler (rotl_optab, mode)->insn_code != CODE_FOR_nothing))
1613 && mclass == MODE_INT)
1615 optab otheroptab = (binoptab == rotl_optab ? rotr_optab : rotl_optab);
1617 unsigned int bits = GET_MODE_BITSIZE (mode);
1619 if (GET_CODE (op1) == CONST_INT)
1620 newop1 = GEN_INT (bits - INTVAL (op1));
1621 else if (targetm.shift_truncation_mask (mode) == bits - 1)
1622 newop1 = negate_rtx (mode, op1);
1624 newop1 = expand_binop (mode, sub_optab,
1625 GEN_INT (bits), op1,
1626 NULL_RTX, unsignedp, OPTAB_DIRECT);
1628 temp = expand_binop_directly (mode, otheroptab, op0, newop1,
1629 target, unsignedp, methods, last);
1634 /* If this is a multiply, see if we can do a widening operation that
1635 takes operands of this mode and makes a wider mode. */
1637 if (binoptab == smul_optab
1638 && GET_MODE_WIDER_MODE (mode) != VOIDmode
1639 && ((optab_handler ((unsignedp ? umul_widen_optab : smul_widen_optab),
1640 GET_MODE_WIDER_MODE (mode))->insn_code)
1641 != CODE_FOR_nothing))
1643 temp = expand_binop (GET_MODE_WIDER_MODE (mode),
1644 unsignedp ? umul_widen_optab : smul_widen_optab,
1645 op0, op1, NULL_RTX, unsignedp, OPTAB_DIRECT);
1649 if (GET_MODE_CLASS (mode) == MODE_INT
1650 && TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode),
1651 GET_MODE_BITSIZE (GET_MODE (temp))))
1652 return gen_lowpart (mode, temp);
1654 return convert_to_mode (mode, temp, unsignedp);
1658 /* Look for a wider mode of the same class for which we think we
1659 can open-code the operation. Check for a widening multiply at the
1660 wider mode as well. */
1662 if (CLASS_HAS_WIDER_MODES_P (mclass)
1663 && methods != OPTAB_DIRECT && methods != OPTAB_LIB)
1664 for (wider_mode = GET_MODE_WIDER_MODE (mode);
1665 wider_mode != VOIDmode;
1666 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
1668 if (optab_handler (binoptab, wider_mode)->insn_code != CODE_FOR_nothing
1669 || (binoptab == smul_optab
1670 && GET_MODE_WIDER_MODE (wider_mode) != VOIDmode
1671 && ((optab_handler ((unsignedp ? umul_widen_optab
1672 : smul_widen_optab),
1673 GET_MODE_WIDER_MODE (wider_mode))->insn_code)
1674 != CODE_FOR_nothing)))
1676 rtx xop0 = op0, xop1 = op1;
1679 /* For certain integer operations, we need not actually extend
1680 the narrow operands, as long as we will truncate
1681 the results to the same narrowness. */
1683 if ((binoptab == ior_optab || binoptab == and_optab
1684 || binoptab == xor_optab
1685 || binoptab == add_optab || binoptab == sub_optab
1686 || binoptab == smul_optab || binoptab == ashl_optab)
1687 && mclass == MODE_INT)
1690 xop0 = avoid_expensive_constant (mode, binoptab,
1692 if (binoptab != ashl_optab)
1693 xop1 = avoid_expensive_constant (mode, binoptab,
1697 xop0 = widen_operand (xop0, wider_mode, mode, unsignedp, no_extend);
1699 /* The second operand of a shift must always be extended. */
1700 xop1 = widen_operand (xop1, wider_mode, mode, unsignedp,
1701 no_extend && binoptab != ashl_optab);
1703 temp = expand_binop (wider_mode, binoptab, xop0, xop1, NULL_RTX,
1704 unsignedp, OPTAB_DIRECT);
1707 if (mclass != MODE_INT
1708 || !TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode),
1709 GET_MODE_BITSIZE (wider_mode)))
1712 target = gen_reg_rtx (mode);
1713 convert_move (target, temp, 0);
1717 return gen_lowpart (mode, temp);
1720 delete_insns_since (last);
1724 /* If operation is commutative,
1725 try to make the first operand a register.
1726 Even better, try to make it the same as the target.
1727 Also try to make the last operand a constant. */
1728 if (commutative_optab_p (binoptab)
1729 && swap_commutative_operands_with_target (target, op0, op1))
1736 /* These can be done a word at a time. */
1737 if ((binoptab == and_optab || binoptab == ior_optab || binoptab == xor_optab)
1738 && mclass == MODE_INT
1739 && GET_MODE_SIZE (mode) > UNITS_PER_WORD
1740 && optab_handler (binoptab, word_mode)->insn_code != CODE_FOR_nothing)
1746 /* If TARGET is the same as one of the operands, the REG_EQUAL note
1747 won't be accurate, so use a new target. */
1748 if (target == 0 || target == op0 || target == op1)
1749 target = gen_reg_rtx (mode);
1753 /* Do the actual arithmetic. */
1754 for (i = 0; i < GET_MODE_BITSIZE (mode) / BITS_PER_WORD; i++)
1756 rtx target_piece = operand_subword (target, i, 1, mode);
1757 rtx x = expand_binop (word_mode, binoptab,
1758 operand_subword_force (op0, i, mode),
1759 operand_subword_force (op1, i, mode),
1760 target_piece, unsignedp, next_methods);
1765 if (target_piece != x)
1766 emit_move_insn (target_piece, x);
1769 insns = get_insns ();
1772 if (i == GET_MODE_BITSIZE (mode) / BITS_PER_WORD)
1774 if (binoptab->code != UNKNOWN)
1776 = gen_rtx_fmt_ee (binoptab->code, mode,
1777 copy_rtx (op0), copy_rtx (op1));
1786 /* Synthesize double word shifts from single word shifts. */
1787 if ((binoptab == lshr_optab || binoptab == ashl_optab
1788 || binoptab == ashr_optab)
1789 && mclass == MODE_INT
1790 && (GET_CODE (op1) == CONST_INT || optimize_insn_for_speed_p ())
1791 && GET_MODE_SIZE (mode) == 2 * UNITS_PER_WORD
1792 && optab_handler (binoptab, word_mode)->insn_code != CODE_FOR_nothing
1793 && optab_handler (ashl_optab, word_mode)->insn_code != CODE_FOR_nothing
1794 && optab_handler (lshr_optab, word_mode)->insn_code != CODE_FOR_nothing)
1796 unsigned HOST_WIDE_INT shift_mask, double_shift_mask;
1797 enum machine_mode op1_mode;
1799 double_shift_mask = targetm.shift_truncation_mask (mode);
1800 shift_mask = targetm.shift_truncation_mask (word_mode);
1801 op1_mode = GET_MODE (op1) != VOIDmode ? GET_MODE (op1) : word_mode;
1803 /* Apply the truncation to constant shifts. */
1804 if (double_shift_mask > 0 && GET_CODE (op1) == CONST_INT)
1805 op1 = GEN_INT (INTVAL (op1) & double_shift_mask);
1807 if (op1 == CONST0_RTX (op1_mode))
1810 /* Make sure that this is a combination that expand_doubleword_shift
1811 can handle. See the comments there for details. */
1812 if (double_shift_mask == 0
1813 || (shift_mask == BITS_PER_WORD - 1
1814 && double_shift_mask == BITS_PER_WORD * 2 - 1))
1817 rtx into_target, outof_target;
1818 rtx into_input, outof_input;
1819 int left_shift, outof_word;
1821 /* If TARGET is the same as one of the operands, the REG_EQUAL note
1822 won't be accurate, so use a new target. */
1823 if (target == 0 || target == op0 || target == op1)
1824 target = gen_reg_rtx (mode);
1828 /* OUTOF_* is the word we are shifting bits away from, and
1829 INTO_* is the word that we are shifting bits towards, thus
1830 they differ depending on the direction of the shift and
1831 WORDS_BIG_ENDIAN. */
1833 left_shift = binoptab == ashl_optab;
1834 outof_word = left_shift ^ ! WORDS_BIG_ENDIAN;
1836 outof_target = operand_subword (target, outof_word, 1, mode);
1837 into_target = operand_subword (target, 1 - outof_word, 1, mode);
1839 outof_input = operand_subword_force (op0, outof_word, mode);
1840 into_input = operand_subword_force (op0, 1 - outof_word, mode);
1842 if (expand_doubleword_shift (op1_mode, binoptab,
1843 outof_input, into_input, op1,
1844 outof_target, into_target,
1845 unsignedp, next_methods, shift_mask))
1847 insns = get_insns ();
1857 /* Synthesize double word rotates from single word shifts. */
1858 if ((binoptab == rotl_optab || binoptab == rotr_optab)
1859 && mclass == MODE_INT
1860 && GET_CODE (op1) == CONST_INT
1861 && GET_MODE_SIZE (mode) == 2 * UNITS_PER_WORD
1862 && optab_handler (ashl_optab, word_mode)->insn_code != CODE_FOR_nothing
1863 && optab_handler (lshr_optab, word_mode)->insn_code != CODE_FOR_nothing)
1866 rtx into_target, outof_target;
1867 rtx into_input, outof_input;
1869 int shift_count, left_shift, outof_word;
1871 /* If TARGET is the same as one of the operands, the REG_EQUAL note
1872 won't be accurate, so use a new target. Do this also if target is not
1873 a REG, first because having a register instead may open optimization
1874 opportunities, and second because if target and op0 happen to be MEMs
1875 designating the same location, we would risk clobbering it too early
1876 in the code sequence we generate below. */
1877 if (target == 0 || target == op0 || target == op1 || ! REG_P (target))
1878 target = gen_reg_rtx (mode);
1882 shift_count = INTVAL (op1);
1884 /* OUTOF_* is the word we are shifting bits away from, and
1885 INTO_* is the word that we are shifting bits towards, thus
1886 they differ depending on the direction of the shift and
1887 WORDS_BIG_ENDIAN. */
1889 left_shift = (binoptab == rotl_optab);
1890 outof_word = left_shift ^ ! WORDS_BIG_ENDIAN;
1892 outof_target = operand_subword (target, outof_word, 1, mode);
1893 into_target = operand_subword (target, 1 - outof_word, 1, mode);
1895 outof_input = operand_subword_force (op0, outof_word, mode);
1896 into_input = operand_subword_force (op0, 1 - outof_word, mode);
1898 if (shift_count == BITS_PER_WORD)
1900 /* This is just a word swap. */
1901 emit_move_insn (outof_target, into_input);
1902 emit_move_insn (into_target, outof_input);
1907 rtx into_temp1, into_temp2, outof_temp1, outof_temp2;
1908 rtx first_shift_count, second_shift_count;
1909 optab reverse_unsigned_shift, unsigned_shift;
1911 reverse_unsigned_shift = (left_shift ^ (shift_count < BITS_PER_WORD)
1912 ? lshr_optab : ashl_optab);
1914 unsigned_shift = (left_shift ^ (shift_count < BITS_PER_WORD)
1915 ? ashl_optab : lshr_optab);
1917 if (shift_count > BITS_PER_WORD)
1919 first_shift_count = GEN_INT (shift_count - BITS_PER_WORD);
1920 second_shift_count = GEN_INT (2 * BITS_PER_WORD - shift_count);
1924 first_shift_count = GEN_INT (BITS_PER_WORD - shift_count);
1925 second_shift_count = GEN_INT (shift_count);
1928 into_temp1 = expand_binop (word_mode, unsigned_shift,
1929 outof_input, first_shift_count,
1930 NULL_RTX, unsignedp, next_methods);
1931 into_temp2 = expand_binop (word_mode, reverse_unsigned_shift,
1932 into_input, second_shift_count,
1933 NULL_RTX, unsignedp, next_methods);
1935 if (into_temp1 != 0 && into_temp2 != 0)
1936 inter = expand_binop (word_mode, ior_optab, into_temp1, into_temp2,
1937 into_target, unsignedp, next_methods);
1941 if (inter != 0 && inter != into_target)
1942 emit_move_insn (into_target, inter);
1944 outof_temp1 = expand_binop (word_mode, unsigned_shift,
1945 into_input, first_shift_count,
1946 NULL_RTX, unsignedp, next_methods);
1947 outof_temp2 = expand_binop (word_mode, reverse_unsigned_shift,
1948 outof_input, second_shift_count,
1949 NULL_RTX, unsignedp, next_methods);
1951 if (inter != 0 && outof_temp1 != 0 && outof_temp2 != 0)
1952 inter = expand_binop (word_mode, ior_optab,
1953 outof_temp1, outof_temp2,
1954 outof_target, unsignedp, next_methods);
1956 if (inter != 0 && inter != outof_target)
1957 emit_move_insn (outof_target, inter);
1960 insns = get_insns ();
1970 /* These can be done a word at a time by propagating carries. */
1971 if ((binoptab == add_optab || binoptab == sub_optab)
1972 && mclass == MODE_INT
1973 && GET_MODE_SIZE (mode) >= 2 * UNITS_PER_WORD
1974 && optab_handler (binoptab, word_mode)->insn_code != CODE_FOR_nothing)
1977 optab otheroptab = binoptab == add_optab ? sub_optab : add_optab;
1978 const unsigned int nwords = GET_MODE_BITSIZE (mode) / BITS_PER_WORD;
1979 rtx carry_in = NULL_RTX, carry_out = NULL_RTX;
1980 rtx xop0, xop1, xtarget;
1982 /* We can handle either a 1 or -1 value for the carry. If STORE_FLAG
1983 value is one of those, use it. Otherwise, use 1 since it is the
1984 one easiest to get. */
1985 #if STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1
1986 int normalizep = STORE_FLAG_VALUE;
1991 /* Prepare the operands. */
1992 xop0 = force_reg (mode, op0);
1993 xop1 = force_reg (mode, op1);
1995 xtarget = gen_reg_rtx (mode);
1997 if (target == 0 || !REG_P (target))
2000 /* Indicate for flow that the entire target reg is being set. */
2002 emit_clobber (xtarget);
2004 /* Do the actual arithmetic. */
2005 for (i = 0; i < nwords; i++)
2007 int index = (WORDS_BIG_ENDIAN ? nwords - i - 1 : i);
2008 rtx target_piece = operand_subword (xtarget, index, 1, mode);
2009 rtx op0_piece = operand_subword_force (xop0, index, mode);
2010 rtx op1_piece = operand_subword_force (xop1, index, mode);
2013 /* Main add/subtract of the input operands. */
2014 x = expand_binop (word_mode, binoptab,
2015 op0_piece, op1_piece,
2016 target_piece, unsignedp, next_methods);
2022 /* Store carry from main add/subtract. */
2023 carry_out = gen_reg_rtx (word_mode);
2024 carry_out = emit_store_flag_force (carry_out,
2025 (binoptab == add_optab
2028 word_mode, 1, normalizep);
2035 /* Add/subtract previous carry to main result. */
2036 newx = expand_binop (word_mode,
2037 normalizep == 1 ? binoptab : otheroptab,
2039 NULL_RTX, 1, next_methods);
2043 /* Get out carry from adding/subtracting carry in. */
2044 rtx carry_tmp = gen_reg_rtx (word_mode);
2045 carry_tmp = emit_store_flag_force (carry_tmp,
2046 (binoptab == add_optab
2049 word_mode, 1, normalizep);
2051 /* Logical-ior the two poss. carry together. */
2052 carry_out = expand_binop (word_mode, ior_optab,
2053 carry_out, carry_tmp,
2054 carry_out, 0, next_methods);
2058 emit_move_insn (target_piece, newx);
2062 if (x != target_piece)
2063 emit_move_insn (target_piece, x);
2066 carry_in = carry_out;
2069 if (i == GET_MODE_BITSIZE (mode) / (unsigned) BITS_PER_WORD)
2071 if (optab_handler (mov_optab, mode)->insn_code != CODE_FOR_nothing
2072 || ! rtx_equal_p (target, xtarget))
2074 rtx temp = emit_move_insn (target, xtarget);
2076 set_unique_reg_note (temp,
2078 gen_rtx_fmt_ee (binoptab->code, mode,
2089 delete_insns_since (last);
2092 /* Attempt to synthesize double word multiplies using a sequence of word
2093 mode multiplications. We first attempt to generate a sequence using a
2094 more efficient unsigned widening multiply, and if that fails we then
2095 try using a signed widening multiply. */
2097 if (binoptab == smul_optab
2098 && mclass == MODE_INT
2099 && GET_MODE_SIZE (mode) == 2 * UNITS_PER_WORD
2100 && optab_handler (smul_optab, word_mode)->insn_code != CODE_FOR_nothing
2101 && optab_handler (add_optab, word_mode)->insn_code != CODE_FOR_nothing)
2103 rtx product = NULL_RTX;
2105 if (optab_handler (umul_widen_optab, mode)->insn_code
2106 != CODE_FOR_nothing)
2108 product = expand_doubleword_mult (mode, op0, op1, target,
2111 delete_insns_since (last);
2114 if (product == NULL_RTX
2115 && optab_handler (smul_widen_optab, mode)->insn_code
2116 != CODE_FOR_nothing)
2118 product = expand_doubleword_mult (mode, op0, op1, target,
2121 delete_insns_since (last);
2124 if (product != NULL_RTX)
2126 if (optab_handler (mov_optab, mode)->insn_code != CODE_FOR_nothing)
2128 temp = emit_move_insn (target ? target : product, product);
2129 set_unique_reg_note (temp,
2131 gen_rtx_fmt_ee (MULT, mode,
2139 /* It can't be open-coded in this mode.
2140 Use a library call if one is available and caller says that's ok. */
2142 libfunc = optab_libfunc (binoptab, mode);
2144 && (methods == OPTAB_LIB || methods == OPTAB_LIB_WIDEN))
2148 enum machine_mode op1_mode = mode;
2153 if (shift_optab_p (binoptab))
2155 op1_mode = targetm.libgcc_shift_count_mode ();
2156 /* Specify unsigned here,
2157 since negative shift counts are meaningless. */
2158 op1x = convert_to_mode (op1_mode, op1, 1);
2161 if (GET_MODE (op0) != VOIDmode
2162 && GET_MODE (op0) != mode)
2163 op0 = convert_to_mode (mode, op0, unsignedp);
2165 /* Pass 1 for NO_QUEUE so we don't lose any increments
2166 if the libcall is cse'd or moved. */
2167 value = emit_library_call_value (libfunc,
2168 NULL_RTX, LCT_CONST, mode, 2,
2169 op0, mode, op1x, op1_mode);
2171 insns = get_insns ();
2174 target = gen_reg_rtx (mode);
2175 emit_libcall_block (insns, target, value,
2176 gen_rtx_fmt_ee (binoptab->code, mode, op0, op1));
2181 delete_insns_since (last);
2183 /* It can't be done in this mode. Can we do it in a wider mode? */
2185 if (! (methods == OPTAB_WIDEN || methods == OPTAB_LIB_WIDEN
2186 || methods == OPTAB_MUST_WIDEN))
2188 /* Caller says, don't even try. */
2189 delete_insns_since (entry_last);
2193 /* Compute the value of METHODS to pass to recursive calls.
2194 Don't allow widening to be tried recursively. */
2196 methods = (methods == OPTAB_LIB_WIDEN ? OPTAB_LIB : OPTAB_DIRECT);
2198 /* Look for a wider mode of the same class for which it appears we can do
2201 if (CLASS_HAS_WIDER_MODES_P (mclass))
2203 for (wider_mode = GET_MODE_WIDER_MODE (mode);
2204 wider_mode != VOIDmode;
2205 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
2207 if ((optab_handler (binoptab, wider_mode)->insn_code
2208 != CODE_FOR_nothing)
2209 || (methods == OPTAB_LIB
2210 && optab_libfunc (binoptab, wider_mode)))
2212 rtx xop0 = op0, xop1 = op1;
2215 /* For certain integer operations, we need not actually extend
2216 the narrow operands, as long as we will truncate
2217 the results to the same narrowness. */
2219 if ((binoptab == ior_optab || binoptab == and_optab
2220 || binoptab == xor_optab
2221 || binoptab == add_optab || binoptab == sub_optab
2222 || binoptab == smul_optab || binoptab == ashl_optab)
2223 && mclass == MODE_INT)
2226 xop0 = widen_operand (xop0, wider_mode, mode,
2227 unsignedp, no_extend);
2229 /* The second operand of a shift must always be extended. */
2230 xop1 = widen_operand (xop1, wider_mode, mode, unsignedp,
2231 no_extend && binoptab != ashl_optab);
2233 temp = expand_binop (wider_mode, binoptab, xop0, xop1, NULL_RTX,
2234 unsignedp, methods);
2237 if (mclass != MODE_INT
2238 || !TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode),
2239 GET_MODE_BITSIZE (wider_mode)))
2242 target = gen_reg_rtx (mode);
2243 convert_move (target, temp, 0);
2247 return gen_lowpart (mode, temp);
2250 delete_insns_since (last);
2255 delete_insns_since (entry_last);
2259 /* Expand a binary operator which has both signed and unsigned forms.
2260 UOPTAB is the optab for unsigned operations, and SOPTAB is for
2263 If we widen unsigned operands, we may use a signed wider operation instead
2264 of an unsigned wider operation, since the result would be the same. */
2267 sign_expand_binop (enum machine_mode mode, optab uoptab, optab soptab,
2268 rtx op0, rtx op1, rtx target, int unsignedp,
2269 enum optab_methods methods)
2272 optab direct_optab = unsignedp ? uoptab : soptab;
2273 struct optab wide_soptab;
2275 /* Do it without widening, if possible. */
2276 temp = expand_binop (mode, direct_optab, op0, op1, target,
2277 unsignedp, OPTAB_DIRECT);
2278 if (temp || methods == OPTAB_DIRECT)
2281 /* Try widening to a signed int. Make a fake signed optab that
2282 hides any signed insn for direct use. */
2283 wide_soptab = *soptab;
2284 optab_handler (&wide_soptab, mode)->insn_code = CODE_FOR_nothing;
2285 /* We don't want to generate new hash table entries from this fake
2287 wide_soptab.libcall_gen = NULL;
2289 temp = expand_binop (mode, &wide_soptab, op0, op1, target,
2290 unsignedp, OPTAB_WIDEN);
2292 /* For unsigned operands, try widening to an unsigned int. */
2293 if (temp == 0 && unsignedp)
2294 temp = expand_binop (mode, uoptab, op0, op1, target,
2295 unsignedp, OPTAB_WIDEN);
2296 if (temp || methods == OPTAB_WIDEN)
2299 /* Use the right width lib call if that exists. */
2300 temp = expand_binop (mode, direct_optab, op0, op1, target, unsignedp, OPTAB_LIB);
2301 if (temp || methods == OPTAB_LIB)
2304 /* Must widen and use a lib call, use either signed or unsigned. */
2305 temp = expand_binop (mode, &wide_soptab, op0, op1, target,
2306 unsignedp, methods);
2310 return expand_binop (mode, uoptab, op0, op1, target,
2311 unsignedp, methods);
2315 /* Generate code to perform an operation specified by UNOPPTAB
2316 on operand OP0, with two results to TARG0 and TARG1.
2317 We assume that the order of the operands for the instruction
2318 is TARG0, TARG1, OP0.
2320 Either TARG0 or TARG1 may be zero, but what that means is that
2321 the result is not actually wanted. We will generate it into
2322 a dummy pseudo-reg and discard it. They may not both be zero.
2324 Returns 1 if this operation can be performed; 0 if not. */
2327 expand_twoval_unop (optab unoptab, rtx op0, rtx targ0, rtx targ1,
2330 enum machine_mode mode = GET_MODE (targ0 ? targ0 : targ1);
2331 enum mode_class mclass;
2332 enum machine_mode wider_mode;
2333 rtx entry_last = get_last_insn ();
2336 mclass = GET_MODE_CLASS (mode);
2339 targ0 = gen_reg_rtx (mode);
2341 targ1 = gen_reg_rtx (mode);
2343 /* Record where to go back to if we fail. */
2344 last = get_last_insn ();
2346 if (optab_handler (unoptab, mode)->insn_code != CODE_FOR_nothing)
2348 int icode = (int) optab_handler (unoptab, mode)->insn_code;
2349 enum machine_mode mode0 = insn_data[icode].operand[2].mode;
2353 if (GET_MODE (xop0) != VOIDmode
2354 && GET_MODE (xop0) != mode0)
2355 xop0 = convert_to_mode (mode0, xop0, unsignedp);
2357 /* Now, if insn doesn't accept these operands, put them into pseudos. */
2358 if (!insn_data[icode].operand[2].predicate (xop0, mode0))
2359 xop0 = copy_to_mode_reg (mode0, xop0);
2361 /* We could handle this, but we should always be called with a pseudo
2362 for our targets and all insns should take them as outputs. */
2363 gcc_assert (insn_data[icode].operand[0].predicate (targ0, mode));
2364 gcc_assert (insn_data[icode].operand[1].predicate (targ1, mode));
2366 pat = GEN_FCN (icode) (targ0, targ1, xop0);
2373 delete_insns_since (last);
2376 /* It can't be done in this mode. Can we do it in a wider mode? */
2378 if (CLASS_HAS_WIDER_MODES_P (mclass))
2380 for (wider_mode = GET_MODE_WIDER_MODE (mode);
2381 wider_mode != VOIDmode;
2382 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
2384 if (optab_handler (unoptab, wider_mode)->insn_code
2385 != CODE_FOR_nothing)
2387 rtx t0 = gen_reg_rtx (wider_mode);
2388 rtx t1 = gen_reg_rtx (wider_mode);
2389 rtx cop0 = convert_modes (wider_mode, mode, op0, unsignedp);
2391 if (expand_twoval_unop (unoptab, cop0, t0, t1, unsignedp))
2393 convert_move (targ0, t0, unsignedp);
2394 convert_move (targ1, t1, unsignedp);
2398 delete_insns_since (last);
2403 delete_insns_since (entry_last);
2407 /* Generate code to perform an operation specified by BINOPTAB
2408 on operands OP0 and OP1, with two results to TARG1 and TARG2.
2409 We assume that the order of the operands for the instruction
2410 is TARG0, OP0, OP1, TARG1, which would fit a pattern like
2411 [(set TARG0 (operate OP0 OP1)) (set TARG1 (operate ...))].
2413 Either TARG0 or TARG1 may be zero, but what that means is that
2414 the result is not actually wanted. We will generate it into
2415 a dummy pseudo-reg and discard it. They may not both be zero.
2417 Returns 1 if this operation can be performed; 0 if not. */
2420 expand_twoval_binop (optab binoptab, rtx op0, rtx op1, rtx targ0, rtx targ1,
2423 enum machine_mode mode = GET_MODE (targ0 ? targ0 : targ1);
2424 enum mode_class mclass;
2425 enum machine_mode wider_mode;
2426 rtx entry_last = get_last_insn ();
2429 mclass = GET_MODE_CLASS (mode);
2432 targ0 = gen_reg_rtx (mode);
2434 targ1 = gen_reg_rtx (mode);
2436 /* Record where to go back to if we fail. */
2437 last = get_last_insn ();
2439 if (optab_handler (binoptab, mode)->insn_code != CODE_FOR_nothing)
2441 int icode = (int) optab_handler (binoptab, mode)->insn_code;
2442 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
2443 enum machine_mode mode1 = insn_data[icode].operand[2].mode;
2445 rtx xop0 = op0, xop1 = op1;
2447 /* If we are optimizing, force expensive constants into a register. */
2448 xop0 = avoid_expensive_constant (mode0, binoptab, xop0, unsignedp);
2449 xop1 = avoid_expensive_constant (mode1, binoptab, xop1, unsignedp);
2451 /* In case the insn wants input operands in modes different from
2452 those of the actual operands, convert the operands. It would
2453 seem that we don't need to convert CONST_INTs, but we do, so
2454 that they're properly zero-extended, sign-extended or truncated
2457 if (GET_MODE (op0) != mode0 && mode0 != VOIDmode)
2458 xop0 = convert_modes (mode0,
2459 GET_MODE (op0) != VOIDmode
2464 if (GET_MODE (op1) != mode1 && mode1 != VOIDmode)
2465 xop1 = convert_modes (mode1,
2466 GET_MODE (op1) != VOIDmode
2471 /* Now, if insn doesn't accept these operands, put them into pseudos. */
2472 if (!insn_data[icode].operand[1].predicate (xop0, mode0))
2473 xop0 = copy_to_mode_reg (mode0, xop0);
2475 if (!insn_data[icode].operand[2].predicate (xop1, mode1))
2476 xop1 = copy_to_mode_reg (mode1, xop1);
2478 /* We could handle this, but we should always be called with a pseudo
2479 for our targets and all insns should take them as outputs. */
2480 gcc_assert (insn_data[icode].operand[0].predicate (targ0, mode));
2481 gcc_assert (insn_data[icode].operand[3].predicate (targ1, mode));
2483 pat = GEN_FCN (icode) (targ0, xop0, xop1, targ1);
2490 delete_insns_since (last);
2493 /* It can't be done in this mode. Can we do it in a wider mode? */
2495 if (CLASS_HAS_WIDER_MODES_P (mclass))
2497 for (wider_mode = GET_MODE_WIDER_MODE (mode);
2498 wider_mode != VOIDmode;
2499 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
2501 if (optab_handler (binoptab, wider_mode)->insn_code
2502 != CODE_FOR_nothing)
2504 rtx t0 = gen_reg_rtx (wider_mode);
2505 rtx t1 = gen_reg_rtx (wider_mode);
2506 rtx cop0 = convert_modes (wider_mode, mode, op0, unsignedp);
2507 rtx cop1 = convert_modes (wider_mode, mode, op1, unsignedp);
2509 if (expand_twoval_binop (binoptab, cop0, cop1,
2512 convert_move (targ0, t0, unsignedp);
2513 convert_move (targ1, t1, unsignedp);
2517 delete_insns_since (last);
2522 delete_insns_since (entry_last);
2526 /* Expand the two-valued library call indicated by BINOPTAB, but
2527 preserve only one of the values. If TARG0 is non-NULL, the first
2528 value is placed into TARG0; otherwise the second value is placed
2529 into TARG1. Exactly one of TARG0 and TARG1 must be non-NULL. The
2530 value stored into TARG0 or TARG1 is equivalent to (CODE OP0 OP1).
2531 This routine assumes that the value returned by the library call is
2532 as if the return value was of an integral mode twice as wide as the
2533 mode of OP0. Returns 1 if the call was successful. */
2536 expand_twoval_binop_libfunc (optab binoptab, rtx op0, rtx op1,
2537 rtx targ0, rtx targ1, enum rtx_code code)
2539 enum machine_mode mode;
2540 enum machine_mode libval_mode;
2545 /* Exactly one of TARG0 or TARG1 should be non-NULL. */
2546 gcc_assert (!targ0 != !targ1);
2548 mode = GET_MODE (op0);
2549 libfunc = optab_libfunc (binoptab, mode);
2553 /* The value returned by the library function will have twice as
2554 many bits as the nominal MODE. */
2555 libval_mode = smallest_mode_for_size (2 * GET_MODE_BITSIZE (mode),
2558 libval = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
2562 /* Get the part of VAL containing the value that we want. */
2563 libval = simplify_gen_subreg (mode, libval, libval_mode,
2564 targ0 ? 0 : GET_MODE_SIZE (mode));
2565 insns = get_insns ();
2567 /* Move the into the desired location. */
2568 emit_libcall_block (insns, targ0 ? targ0 : targ1, libval,
2569 gen_rtx_fmt_ee (code, mode, op0, op1));
2575 /* Wrapper around expand_unop which takes an rtx code to specify
2576 the operation to perform, not an optab pointer. All other
2577 arguments are the same. */
2579 expand_simple_unop (enum machine_mode mode, enum rtx_code code, rtx op0,
2580 rtx target, int unsignedp)
2582 optab unop = code_to_optab[(int) code];
2585 return expand_unop (mode, unop, op0, target, unsignedp);
2591 (clz:wide (zero_extend:wide x)) - ((width wide) - (width narrow)). */
2593 widen_clz (enum machine_mode mode, rtx op0, rtx target)
2595 enum mode_class mclass = GET_MODE_CLASS (mode);
2596 if (CLASS_HAS_WIDER_MODES_P (mclass))
2598 enum machine_mode wider_mode;
2599 for (wider_mode = GET_MODE_WIDER_MODE (mode);
2600 wider_mode != VOIDmode;
2601 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
2603 if (optab_handler (clz_optab, wider_mode)->insn_code
2604 != CODE_FOR_nothing)
2606 rtx xop0, temp, last;
2608 last = get_last_insn ();
2611 target = gen_reg_rtx (mode);
2612 xop0 = widen_operand (op0, wider_mode, mode, true, false);
2613 temp = expand_unop (wider_mode, clz_optab, xop0, NULL_RTX, true);
2615 temp = expand_binop (wider_mode, sub_optab, temp,
2616 GEN_INT (GET_MODE_BITSIZE (wider_mode)
2617 - GET_MODE_BITSIZE (mode)),
2618 target, true, OPTAB_DIRECT);
2620 delete_insns_since (last);
2629 /* Try calculating clz of a double-word quantity as two clz's of word-sized
2630 quantities, choosing which based on whether the high word is nonzero. */
2632 expand_doubleword_clz (enum machine_mode mode, rtx op0, rtx target)
2634 rtx xop0 = force_reg (mode, op0);
2635 rtx subhi = gen_highpart (word_mode, xop0);
2636 rtx sublo = gen_lowpart (word_mode, xop0);
2637 rtx hi0_label = gen_label_rtx ();
2638 rtx after_label = gen_label_rtx ();
2639 rtx seq, temp, result;
2641 /* If we were not given a target, use a word_mode register, not a
2642 'mode' register. The result will fit, and nobody is expecting
2643 anything bigger (the return type of __builtin_clz* is int). */
2645 target = gen_reg_rtx (word_mode);
2647 /* In any case, write to a word_mode scratch in both branches of the
2648 conditional, so we can ensure there is a single move insn setting
2649 'target' to tag a REG_EQUAL note on. */
2650 result = gen_reg_rtx (word_mode);
2654 /* If the high word is not equal to zero,
2655 then clz of the full value is clz of the high word. */
2656 emit_cmp_and_jump_insns (subhi, CONST0_RTX (word_mode), EQ, 0,
2657 word_mode, true, hi0_label);
2659 temp = expand_unop_direct (word_mode, clz_optab, subhi, result, true);
2664 convert_move (result, temp, true);
2666 emit_jump_insn (gen_jump (after_label));
2669 /* Else clz of the full value is clz of the low word plus the number
2670 of bits in the high word. */
2671 emit_label (hi0_label);
2673 temp = expand_unop_direct (word_mode, clz_optab, sublo, 0, true);
2676 temp = expand_binop (word_mode, add_optab, temp,
2677 GEN_INT (GET_MODE_BITSIZE (word_mode)),
2678 result, true, OPTAB_DIRECT);
2682 convert_move (result, temp, true);
2684 emit_label (after_label);
2685 convert_move (target, result, true);
2690 add_equal_note (seq, target, CLZ, xop0, 0);
2702 (lshiftrt:wide (bswap:wide x) ((width wide) - (width narrow))). */
2704 widen_bswap (enum machine_mode mode, rtx op0, rtx target)
2706 enum mode_class mclass = GET_MODE_CLASS (mode);
2707 enum machine_mode wider_mode;
2710 if (!CLASS_HAS_WIDER_MODES_P (mclass))
2713 for (wider_mode = GET_MODE_WIDER_MODE (mode);
2714 wider_mode != VOIDmode;
2715 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
2716 if (optab_handler (bswap_optab, wider_mode)->insn_code != CODE_FOR_nothing)
2721 last = get_last_insn ();
2723 x = widen_operand (op0, wider_mode, mode, true, true);
2724 x = expand_unop (wider_mode, bswap_optab, x, NULL_RTX, true);
2727 x = expand_shift (RSHIFT_EXPR, wider_mode, x,
2728 size_int (GET_MODE_BITSIZE (wider_mode)
2729 - GET_MODE_BITSIZE (mode)),
2735 target = gen_reg_rtx (mode);
2736 emit_move_insn (target, gen_lowpart (mode, x));
2739 delete_insns_since (last);
2744 /* Try calculating bswap as two bswaps of two word-sized operands. */
2747 expand_doubleword_bswap (enum machine_mode mode, rtx op, rtx target)
2751 t1 = expand_unop (word_mode, bswap_optab,
2752 operand_subword_force (op, 0, mode), NULL_RTX, true);
2753 t0 = expand_unop (word_mode, bswap_optab,
2754 operand_subword_force (op, 1, mode), NULL_RTX, true);
2757 target = gen_reg_rtx (mode);
2759 emit_clobber (target);
2760 emit_move_insn (operand_subword (target, 0, 1, mode), t0);
2761 emit_move_insn (operand_subword (target, 1, 1, mode), t1);
2766 /* Try calculating (parity x) as (and (popcount x) 1), where
2767 popcount can also be done in a wider mode. */
2769 expand_parity (enum machine_mode mode, rtx op0, rtx target)
2771 enum mode_class mclass = GET_MODE_CLASS (mode);
2772 if (CLASS_HAS_WIDER_MODES_P (mclass))
2774 enum machine_mode wider_mode;
2775 for (wider_mode = mode; wider_mode != VOIDmode;
2776 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
2778 if (optab_handler (popcount_optab, wider_mode)->insn_code
2779 != CODE_FOR_nothing)
2781 rtx xop0, temp, last;
2783 last = get_last_insn ();
2786 target = gen_reg_rtx (mode);
2787 xop0 = widen_operand (op0, wider_mode, mode, true, false);
2788 temp = expand_unop (wider_mode, popcount_optab, xop0, NULL_RTX,
2791 temp = expand_binop (wider_mode, and_optab, temp, const1_rtx,
2792 target, true, OPTAB_DIRECT);
2794 delete_insns_since (last);
2803 /* Try calculating ctz(x) as K - clz(x & -x) ,
2804 where K is GET_MODE_BITSIZE(mode) - 1.
2806 Both __builtin_ctz and __builtin_clz are undefined at zero, so we
2807 don't have to worry about what the hardware does in that case. (If
2808 the clz instruction produces the usual value at 0, which is K, the
2809 result of this code sequence will be -1; expand_ffs, below, relies
2810 on this. It might be nice to have it be K instead, for consistency
2811 with the (very few) processors that provide a ctz with a defined
2812 value, but that would take one more instruction, and it would be
2813 less convenient for expand_ffs anyway. */
2816 expand_ctz (enum machine_mode mode, rtx op0, rtx target)
2820 if (optab_handler (clz_optab, mode)->insn_code == CODE_FOR_nothing)
2825 temp = expand_unop_direct (mode, neg_optab, op0, NULL_RTX, true);
2827 temp = expand_binop (mode, and_optab, op0, temp, NULL_RTX,
2828 true, OPTAB_DIRECT);
2830 temp = expand_unop_direct (mode, clz_optab, temp, NULL_RTX, true);
2832 temp = expand_binop (mode, sub_optab, GEN_INT (GET_MODE_BITSIZE (mode) - 1),
2834 true, OPTAB_DIRECT);
2844 add_equal_note (seq, temp, CTZ, op0, 0);
2850 /* Try calculating ffs(x) using ctz(x) if we have that instruction, or
2851 else with the sequence used by expand_clz.
2853 The ffs builtin promises to return zero for a zero value and ctz/clz
2854 may have an undefined value in that case. If they do not give us a
2855 convenient value, we have to generate a test and branch. */
2857 expand_ffs (enum machine_mode mode, rtx op0, rtx target)
2859 HOST_WIDE_INT val = 0;
2860 bool defined_at_zero = false;
2863 if (optab_handler (ctz_optab, mode)->insn_code != CODE_FOR_nothing)
2867 temp = expand_unop_direct (mode, ctz_optab, op0, 0, true);
2871 defined_at_zero = (CTZ_DEFINED_VALUE_AT_ZERO (mode, val) == 2);
2873 else if (optab_handler (clz_optab, mode)->insn_code != CODE_FOR_nothing)
2876 temp = expand_ctz (mode, op0, 0);
2880 if (CLZ_DEFINED_VALUE_AT_ZERO (mode, val) == 2)
2882 defined_at_zero = true;
2883 val = (GET_MODE_BITSIZE (mode) - 1) - val;
2889 if (defined_at_zero && val == -1)
2890 /* No correction needed at zero. */;
2893 /* We don't try to do anything clever with the situation found
2894 on some processors (eg Alpha) where ctz(0:mode) ==
2895 bitsize(mode). If someone can think of a way to send N to -1
2896 and leave alone all values in the range 0..N-1 (where N is a
2897 power of two), cheaper than this test-and-branch, please add it.
2899 The test-and-branch is done after the operation itself, in case
2900 the operation sets condition codes that can be recycled for this.
2901 (This is true on i386, for instance.) */
2903 rtx nonzero_label = gen_label_rtx ();
2904 emit_cmp_and_jump_insns (op0, CONST0_RTX (mode), NE, 0,
2905 mode, true, nonzero_label);
2907 convert_move (temp, GEN_INT (-1), false);
2908 emit_label (nonzero_label);
2911 /* temp now has a value in the range -1..bitsize-1. ffs is supposed
2912 to produce a value in the range 0..bitsize. */
2913 temp = expand_binop (mode, add_optab, temp, GEN_INT (1),
2914 target, false, OPTAB_DIRECT);
2921 add_equal_note (seq, temp, FFS, op0, 0);
2930 /* Extract the OMODE lowpart from VAL, which has IMODE. Under certain
2931 conditions, VAL may already be a SUBREG against which we cannot generate
2932 a further SUBREG. In this case, we expect forcing the value into a
2933 register will work around the situation. */
2936 lowpart_subreg_maybe_copy (enum machine_mode omode, rtx val,
2937 enum machine_mode imode)
2940 ret = lowpart_subreg (omode, val, imode);
2943 val = force_reg (imode, val);
2944 ret = lowpart_subreg (omode, val, imode);
2945 gcc_assert (ret != NULL);
2950 /* Expand a floating point absolute value or negation operation via a
2951 logical operation on the sign bit. */
2954 expand_absneg_bit (enum rtx_code code, enum machine_mode mode,
2955 rtx op0, rtx target)
2957 const struct real_format *fmt;
2958 int bitpos, word, nwords, i;
2959 enum machine_mode imode;
2960 HOST_WIDE_INT hi, lo;
2963 /* The format has to have a simple sign bit. */
2964 fmt = REAL_MODE_FORMAT (mode);
2968 bitpos = fmt->signbit_rw;
2972 /* Don't create negative zeros if the format doesn't support them. */
2973 if (code == NEG && !fmt->has_signed_zero)
2976 if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
2978 imode = int_mode_for_mode (mode);
2979 if (imode == BLKmode)
2988 if (FLOAT_WORDS_BIG_ENDIAN)
2989 word = (GET_MODE_BITSIZE (mode) - bitpos) / BITS_PER_WORD;
2991 word = bitpos / BITS_PER_WORD;
2992 bitpos = bitpos % BITS_PER_WORD;
2993 nwords = (GET_MODE_BITSIZE (mode) + BITS_PER_WORD - 1) / BITS_PER_WORD;
2996 if (bitpos < HOST_BITS_PER_WIDE_INT)
2999 lo = (HOST_WIDE_INT) 1 << bitpos;
3003 hi = (HOST_WIDE_INT) 1 << (bitpos - HOST_BITS_PER_WIDE_INT);
3009 if (target == 0 || target == op0)
3010 target = gen_reg_rtx (mode);
3016 for (i = 0; i < nwords; ++i)
3018 rtx targ_piece = operand_subword (target, i, 1, mode);
3019 rtx op0_piece = operand_subword_force (op0, i, mode);
3023 temp = expand_binop (imode, code == ABS ? and_optab : xor_optab,
3025 immed_double_const (lo, hi, imode),
3026 targ_piece, 1, OPTAB_LIB_WIDEN);
3027 if (temp != targ_piece)
3028 emit_move_insn (targ_piece, temp);
3031 emit_move_insn (targ_piece, op0_piece);
3034 insns = get_insns ();
3041 temp = expand_binop (imode, code == ABS ? and_optab : xor_optab,
3042 gen_lowpart (imode, op0),
3043 immed_double_const (lo, hi, imode),
3044 gen_lowpart (imode, target), 1, OPTAB_LIB_WIDEN);
3045 target = lowpart_subreg_maybe_copy (mode, temp, imode);
3047 set_unique_reg_note (get_last_insn (), REG_EQUAL,
3048 gen_rtx_fmt_e (code, mode, copy_rtx (op0)));
3054 /* As expand_unop, but will fail rather than attempt the operation in a
3055 different mode or with a libcall. */
3057 expand_unop_direct (enum machine_mode mode, optab unoptab, rtx op0, rtx target,
3060 if (optab_handler (unoptab, mode)->insn_code != CODE_FOR_nothing)
3062 int icode = (int) optab_handler (unoptab, mode)->insn_code;
3063 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
3065 rtx last = get_last_insn ();
3071 temp = gen_reg_rtx (mode);
3073 if (GET_MODE (xop0) != VOIDmode
3074 && GET_MODE (xop0) != mode0)
3075 xop0 = convert_to_mode (mode0, xop0, unsignedp);
3077 /* Now, if insn doesn't accept our operand, put it into a pseudo. */
3079 if (!insn_data[icode].operand[1].predicate (xop0, mode0))
3080 xop0 = copy_to_mode_reg (mode0, xop0);
3082 if (!insn_data[icode].operand[0].predicate (temp, mode))
3083 temp = gen_reg_rtx (mode);
3085 pat = GEN_FCN (icode) (temp, xop0);
3088 if (INSN_P (pat) && NEXT_INSN (pat) != NULL_RTX
3089 && ! add_equal_note (pat, temp, unoptab->code, xop0, NULL_RTX))
3091 delete_insns_since (last);
3092 return expand_unop (mode, unoptab, op0, NULL_RTX, unsignedp);
3100 delete_insns_since (last);
3105 /* Generate code to perform an operation specified by UNOPTAB
3106 on operand OP0, with result having machine-mode MODE.
3108 UNSIGNEDP is for the case where we have to widen the operands
3109 to perform the operation. It says to use zero-extension.
3111 If TARGET is nonzero, the value
3112 is generated there, if it is convenient to do so.
3113 In all cases an rtx is returned for the locus of the value;
3114 this may or may not be TARGET. */
3117 expand_unop (enum machine_mode mode, optab unoptab, rtx op0, rtx target,
3120 enum mode_class mclass = GET_MODE_CLASS (mode);
3121 enum machine_mode wider_mode;
3125 temp = expand_unop_direct (mode, unoptab, op0, target, unsignedp);
3129 /* It can't be done in this mode. Can we open-code it in a wider mode? */
3131 /* Widening (or narrowing) clz needs special treatment. */
3132 if (unoptab == clz_optab)
3134 temp = widen_clz (mode, op0, target);
3138 if (GET_MODE_SIZE (mode) == 2 * UNITS_PER_WORD
3139 && optab_handler (unoptab, word_mode)->insn_code != CODE_FOR_nothing)
3141 temp = expand_doubleword_clz (mode, op0, target);
3149 /* Widening (or narrowing) bswap needs special treatment. */
3150 if (unoptab == bswap_optab)
3152 temp = widen_bswap (mode, op0, target);
3156 if (GET_MODE_SIZE (mode) == 2 * UNITS_PER_WORD
3157 && optab_handler (unoptab, word_mode)->insn_code != CODE_FOR_nothing)
3159 temp = expand_doubleword_bswap (mode, op0, target);
3167 if (CLASS_HAS_WIDER_MODES_P (mclass))
3168 for (wider_mode = GET_MODE_WIDER_MODE (mode);
3169 wider_mode != VOIDmode;
3170 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
3172 if (optab_handler (unoptab, wider_mode)->insn_code != CODE_FOR_nothing)
3175 rtx last = get_last_insn ();
3177 /* For certain operations, we need not actually extend
3178 the narrow operand, as long as we will truncate the
3179 results to the same narrowness. */
3181 xop0 = widen_operand (xop0, wider_mode, mode, unsignedp,
3182 (unoptab == neg_optab
3183 || unoptab == one_cmpl_optab)
3184 && mclass == MODE_INT);
3186 temp = expand_unop (wider_mode, unoptab, xop0, NULL_RTX,
3191 if (mclass != MODE_INT
3192 || !TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode),
3193 GET_MODE_BITSIZE (wider_mode)))
3196 target = gen_reg_rtx (mode);
3197 convert_move (target, temp, 0);
3201 return gen_lowpart (mode, temp);
3204 delete_insns_since (last);
3208 /* These can be done a word at a time. */
3209 if (unoptab == one_cmpl_optab
3210 && mclass == MODE_INT
3211 && GET_MODE_SIZE (mode) > UNITS_PER_WORD
3212 && optab_handler (unoptab, word_mode)->insn_code != CODE_FOR_nothing)
3217 if (target == 0 || target == op0)
3218 target = gen_reg_rtx (mode);
3222 /* Do the actual arithmetic. */
3223 for (i = 0; i < GET_MODE_BITSIZE (mode) / BITS_PER_WORD; i++)
3225 rtx target_piece = operand_subword (target, i, 1, mode);
3226 rtx x = expand_unop (word_mode, unoptab,
3227 operand_subword_force (op0, i, mode),
3228 target_piece, unsignedp);
3230 if (target_piece != x)
3231 emit_move_insn (target_piece, x);
3234 insns = get_insns ();
3241 if (unoptab->code == NEG)
3243 /* Try negating floating point values by flipping the sign bit. */
3244 if (SCALAR_FLOAT_MODE_P (mode))
3246 temp = expand_absneg_bit (NEG, mode, op0, target);
3251 /* If there is no negation pattern, and we have no negative zero,
3252 try subtracting from zero. */
3253 if (!HONOR_SIGNED_ZEROS (mode))
3255 temp = expand_binop (mode, (unoptab == negv_optab
3256 ? subv_optab : sub_optab),
3257 CONST0_RTX (mode), op0, target,
3258 unsignedp, OPTAB_DIRECT);
3264 /* Try calculating parity (x) as popcount (x) % 2. */
3265 if (unoptab == parity_optab)
3267 temp = expand_parity (mode, op0, target);
3272 /* Try implementing ffs (x) in terms of clz (x). */
3273 if (unoptab == ffs_optab)
3275 temp = expand_ffs (mode, op0, target);
3280 /* Try implementing ctz (x) in terms of clz (x). */
3281 if (unoptab == ctz_optab)
3283 temp = expand_ctz (mode, op0, target);
3289 /* Now try a library call in this mode. */
3290 libfunc = optab_libfunc (unoptab, mode);
3296 enum machine_mode outmode = mode;
3298 /* All of these functions return small values. Thus we choose to
3299 have them return something that isn't a double-word. */
3300 if (unoptab == ffs_optab || unoptab == clz_optab || unoptab == ctz_optab
3301 || unoptab == popcount_optab || unoptab == parity_optab)
3303 = GET_MODE (hard_libcall_value (TYPE_MODE (integer_type_node)));
3307 /* Pass 1 for NO_QUEUE so we don't lose any increments
3308 if the libcall is cse'd or moved. */
3309 value = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST, outmode,
3311 insns = get_insns ();
3314 target = gen_reg_rtx (outmode);
3315 eq_value = gen_rtx_fmt_e (unoptab->code, mode, op0);
3316 if (GET_MODE_SIZE (outmode) < GET_MODE_SIZE (mode))
3317 eq_value = simplify_gen_unary (TRUNCATE, outmode, eq_value, mode);
3318 else if (GET_MODE_SIZE (outmode) > GET_MODE_SIZE (mode))
3319 eq_value = simplify_gen_unary (ZERO_EXTEND, outmode, eq_value, mode);
3320 emit_libcall_block (insns, target, value, eq_value);
3325 /* It can't be done in this mode. Can we do it in a wider mode? */
3327 if (CLASS_HAS_WIDER_MODES_P (mclass))
3329 for (wider_mode = GET_MODE_WIDER_MODE (mode);
3330 wider_mode != VOIDmode;
3331 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
3333 if ((optab_handler (unoptab, wider_mode)->insn_code
3334 != CODE_FOR_nothing)
3335 || optab_libfunc (unoptab, wider_mode))
3338 rtx last = get_last_insn ();
3340 /* For certain operations, we need not actually extend
3341 the narrow operand, as long as we will truncate the
3342 results to the same narrowness. */
3344 xop0 = widen_operand (xop0, wider_mode, mode, unsignedp,
3345 (unoptab == neg_optab
3346 || unoptab == one_cmpl_optab)
3347 && mclass == MODE_INT);
3349 temp = expand_unop (wider_mode, unoptab, xop0, NULL_RTX,
3352 /* If we are generating clz using wider mode, adjust the
3354 if (unoptab == clz_optab && temp != 0)
3355 temp = expand_binop (wider_mode, sub_optab, temp,
3356 GEN_INT (GET_MODE_BITSIZE (wider_mode)
3357 - GET_MODE_BITSIZE (mode)),
3358 target, true, OPTAB_DIRECT);
3362 if (mclass != MODE_INT)
3365 target = gen_reg_rtx (mode);
3366 convert_move (target, temp, 0);
3370 return gen_lowpart (mode, temp);
3373 delete_insns_since (last);
3378 /* One final attempt at implementing negation via subtraction,
3379 this time allowing widening of the operand. */
3380 if (unoptab->code == NEG && !HONOR_SIGNED_ZEROS (mode))
3383 temp = expand_binop (mode,
3384 unoptab == negv_optab ? subv_optab : sub_optab,
3385 CONST0_RTX (mode), op0,
3386 target, unsignedp, OPTAB_LIB_WIDEN);
3394 /* Emit code to compute the absolute value of OP0, with result to
3395 TARGET if convenient. (TARGET may be 0.) The return value says
3396 where the result actually is to be found.
3398 MODE is the mode of the operand; the mode of the result is
3399 different but can be deduced from MODE.
3404 expand_abs_nojump (enum machine_mode mode, rtx op0, rtx target,
3405 int result_unsignedp)
3410 result_unsignedp = 1;
3412 /* First try to do it with a special abs instruction. */
3413 temp = expand_unop (mode, result_unsignedp ? abs_optab : absv_optab,
3418 /* For floating point modes, try clearing the sign bit. */
3419 if (SCALAR_FLOAT_MODE_P (mode))
3421 temp = expand_absneg_bit (ABS, mode, op0, target);
3426 /* If we have a MAX insn, we can do this as MAX (x, -x). */
3427 if (optab_handler (smax_optab, mode)->insn_code != CODE_FOR_nothing
3428 && !HONOR_SIGNED_ZEROS (mode))
3430 rtx last = get_last_insn ();
3432 temp = expand_unop (mode, neg_optab, op0, NULL_RTX, 0);
3434 temp = expand_binop (mode, smax_optab, op0, temp, target, 0,
3440 delete_insns_since (last);
3443 /* If this machine has expensive jumps, we can do integer absolute
3444 value of X as (((signed) x >> (W-1)) ^ x) - ((signed) x >> (W-1)),
3445 where W is the width of MODE. */
3447 if (GET_MODE_CLASS (mode) == MODE_INT
3448 && BRANCH_COST (optimize_insn_for_speed_p (),
3451 rtx extended = expand_shift (RSHIFT_EXPR, mode, op0,
3452 size_int (GET_MODE_BITSIZE (mode) - 1),
3455 temp = expand_binop (mode, xor_optab, extended, op0, target, 0,
3458 temp = expand_binop (mode, result_unsignedp ? sub_optab : subv_optab,
3459 temp, extended, target, 0, OPTAB_LIB_WIDEN);
3469 expand_abs (enum machine_mode mode, rtx op0, rtx target,
3470 int result_unsignedp, int safe)
3475 result_unsignedp = 1;
3477 temp = expand_abs_nojump (mode, op0, target, result_unsignedp);
3481 /* If that does not win, use conditional jump and negate. */
3483 /* It is safe to use the target if it is the same
3484 as the source if this is also a pseudo register */
3485 if (op0 == target && REG_P (op0)
3486 && REGNO (op0) >= FIRST_PSEUDO_REGISTER)
3489 op1 = gen_label_rtx ();
3490 if (target == 0 || ! safe
3491 || GET_MODE (target) != mode
3492 || (MEM_P (target) && MEM_VOLATILE_P (target))
3494 && REGNO (target) < FIRST_PSEUDO_REGISTER))
3495 target = gen_reg_rtx (mode);
3497 emit_move_insn (target, op0);
3500 do_compare_rtx_and_jump (target, CONST0_RTX (mode), GE, 0, mode,
3501 NULL_RTX, NULL_RTX, op1);
3503 op0 = expand_unop (mode, result_unsignedp ? neg_optab : negv_optab,
3506 emit_move_insn (target, op0);
3512 /* A subroutine of expand_copysign, perform the copysign operation using the
3513 abs and neg primitives advertised to exist on the target. The assumption
3514 is that we have a split register file, and leaving op0 in fp registers,
3515 and not playing with subregs so much, will help the register allocator. */
3518 expand_copysign_absneg (enum machine_mode mode, rtx op0, rtx op1, rtx target,
3519 int bitpos, bool op0_is_abs)
3521 enum machine_mode imode;
3528 /* Check if the back end provides an insn that handles signbit for the
3530 icode = (int) signbit_optab->handlers [(int) mode].insn_code;
3531 if (icode != CODE_FOR_nothing)
3533 imode = insn_data[icode].operand[0].mode;
3534 sign = gen_reg_rtx (imode);
3535 emit_unop_insn (icode, sign, op1, UNKNOWN);
3539 HOST_WIDE_INT hi, lo;
3541 if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
3543 imode = int_mode_for_mode (mode);
3544 if (imode == BLKmode)
3546 op1 = gen_lowpart (imode, op1);
3553 if (FLOAT_WORDS_BIG_ENDIAN)
3554 word = (GET_MODE_BITSIZE (mode) - bitpos) / BITS_PER_WORD;
3556 word = bitpos / BITS_PER_WORD;
3557 bitpos = bitpos % BITS_PER_WORD;
3558 op1 = operand_subword_force (op1, word, mode);
3561 if (bitpos < HOST_BITS_PER_WIDE_INT)
3564 lo = (HOST_WIDE_INT) 1 << bitpos;
3568 hi = (HOST_WIDE_INT) 1 << (bitpos - HOST_BITS_PER_WIDE_INT);
3572 sign = gen_reg_rtx (imode);
3573 sign = expand_binop (imode, and_optab, op1,
3574 immed_double_const (lo, hi, imode),
3575 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3580 op0 = expand_unop (mode, abs_optab, op0, target, 0);
3587 if (target == NULL_RTX)
3588 target = copy_to_reg (op0);
3590 emit_move_insn (target, op0);
3593 label = gen_label_rtx ();
3594 emit_cmp_and_jump_insns (sign, const0_rtx, EQ, NULL_RTX, imode, 1, label);
3596 if (GET_CODE (op0) == CONST_DOUBLE)
3597 op0 = simplify_unary_operation (NEG, mode, op0, mode);
3599 op0 = expand_unop (mode, neg_optab, op0, target, 0);
3601 emit_move_insn (target, op0);
3609 /* A subroutine of expand_copysign, perform the entire copysign operation
3610 with integer bitmasks. BITPOS is the position of the sign bit; OP0_IS_ABS
3611 is true if op0 is known to have its sign bit clear. */
3614 expand_copysign_bit (enum machine_mode mode, rtx op0, rtx op1, rtx target,
3615 int bitpos, bool op0_is_abs)
3617 enum machine_mode imode;
3618 HOST_WIDE_INT hi, lo;
3619 int word, nwords, i;
3622 if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
3624 imode = int_mode_for_mode (mode);
3625 if (imode == BLKmode)
3634 if (FLOAT_WORDS_BIG_ENDIAN)
3635 word = (GET_MODE_BITSIZE (mode) - bitpos) / BITS_PER_WORD;
3637 word = bitpos / BITS_PER_WORD;
3638 bitpos = bitpos % BITS_PER_WORD;
3639 nwords = (GET_MODE_BITSIZE (mode) + BITS_PER_WORD - 1) / BITS_PER_WORD;
3642 if (bitpos < HOST_BITS_PER_WIDE_INT)
3645 lo = (HOST_WIDE_INT) 1 << bitpos;
3649 hi = (HOST_WIDE_INT) 1 << (bitpos - HOST_BITS_PER_WIDE_INT);
3653 if (target == 0 || target == op0 || target == op1)
3654 target = gen_reg_rtx (mode);
3660 for (i = 0; i < nwords; ++i)
3662 rtx targ_piece = operand_subword (target, i, 1, mode);
3663 rtx op0_piece = operand_subword_force (op0, i, mode);
3668 op0_piece = expand_binop (imode, and_optab, op0_piece,
3669 immed_double_const (~lo, ~hi, imode),
3670 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3672 op1 = expand_binop (imode, and_optab,
3673 operand_subword_force (op1, i, mode),
3674 immed_double_const (lo, hi, imode),
3675 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3677 temp = expand_binop (imode, ior_optab, op0_piece, op1,
3678 targ_piece, 1, OPTAB_LIB_WIDEN);
3679 if (temp != targ_piece)
3680 emit_move_insn (targ_piece, temp);
3683 emit_move_insn (targ_piece, op0_piece);
3686 insns = get_insns ();
3693 op1 = expand_binop (imode, and_optab, gen_lowpart (imode, op1),
3694 immed_double_const (lo, hi, imode),
3695 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3697 op0 = gen_lowpart (imode, op0);
3699 op0 = expand_binop (imode, and_optab, op0,
3700 immed_double_const (~lo, ~hi, imode),
3701 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3703 temp = expand_binop (imode, ior_optab, op0, op1,
3704 gen_lowpart (imode, target), 1, OPTAB_LIB_WIDEN);
3705 target = lowpart_subreg_maybe_copy (mode, temp, imode);
3711 /* Expand the C99 copysign operation. OP0 and OP1 must be the same
3712 scalar floating point mode. Return NULL if we do not know how to
3713 expand the operation inline. */
3716 expand_copysign (rtx op0, rtx op1, rtx target)
3718 enum machine_mode mode = GET_MODE (op0);
3719 const struct real_format *fmt;
3723 gcc_assert (SCALAR_FLOAT_MODE_P (mode));
3724 gcc_assert (GET_MODE (op1) == mode);
3726 /* First try to do it with a special instruction. */
3727 temp = expand_binop (mode, copysign_optab, op0, op1,
3728 target, 0, OPTAB_DIRECT);
3732 fmt = REAL_MODE_FORMAT (mode);
3733 if (fmt == NULL || !fmt->has_signed_zero)
3737 if (GET_CODE (op0) == CONST_DOUBLE)
3739 if (real_isneg (CONST_DOUBLE_REAL_VALUE (op0)))
3740 op0 = simplify_unary_operation (ABS, mode, op0, mode);
3744 if (fmt->signbit_ro >= 0
3745 && (GET_CODE (op0) == CONST_DOUBLE
3746 || (optab_handler (neg_optab, mode)->insn_code != CODE_FOR_nothing
3747 && optab_handler (abs_optab, mode)->insn_code != CODE_FOR_nothing)))
3749 temp = expand_copysign_absneg (mode, op0, op1, target,
3750 fmt->signbit_ro, op0_is_abs);
3755 if (fmt->signbit_rw < 0)
3757 return expand_copysign_bit (mode, op0, op1, target,
3758 fmt->signbit_rw, op0_is_abs);
3761 /* Generate an instruction whose insn-code is INSN_CODE,
3762 with two operands: an output TARGET and an input OP0.
3763 TARGET *must* be nonzero, and the output is always stored there.
3764 CODE is an rtx code such that (CODE OP0) is an rtx that describes
3765 the value that is stored into TARGET.
3767 Return false if expansion failed. */
3770 maybe_emit_unop_insn (int icode, rtx target, rtx op0, enum rtx_code code)
3773 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
3775 rtx last = get_last_insn ();
3779 /* Now, if insn does not accept our operands, put them into pseudos. */
3781 if (!insn_data[icode].operand[1].predicate (op0, mode0))
3782 op0 = copy_to_mode_reg (mode0, op0);
3784 if (!insn_data[icode].operand[0].predicate (temp, GET_MODE (temp)))
3785 temp = gen_reg_rtx (GET_MODE (temp));
3787 pat = GEN_FCN (icode) (temp, op0);
3790 delete_insns_since (last);
3794 if (INSN_P (pat) && NEXT_INSN (pat) != NULL_RTX && code != UNKNOWN)
3795 add_equal_note (pat, temp, code, op0, NULL_RTX);
3800 emit_move_insn (target, temp);
3803 /* Generate an instruction whose insn-code is INSN_CODE,
3804 with two operands: an output TARGET and an input OP0.
3805 TARGET *must* be nonzero, and the output is always stored there.
3806 CODE is an rtx code such that (CODE OP0) is an rtx that describes
3807 the value that is stored into TARGET. */
3810 emit_unop_insn (int icode, rtx target, rtx op0, enum rtx_code code)
3812 bool ok = maybe_emit_unop_insn (icode, target, op0, code);
3816 struct no_conflict_data
3818 rtx target, first, insn;
3822 /* Called via note_stores by emit_libcall_block. Set P->must_stay if
3823 the currently examined clobber / store has to stay in the list of
3824 insns that constitute the actual libcall block. */
3826 no_conflict_move_test (rtx dest, const_rtx set, void *p0)
3828 struct no_conflict_data *p= (struct no_conflict_data *) p0;
3830 /* If this inns directly contributes to setting the target, it must stay. */
3831 if (reg_overlap_mentioned_p (p->target, dest))
3832 p->must_stay = true;
3833 /* If we haven't committed to keeping any other insns in the list yet,
3834 there is nothing more to check. */
3835 else if (p->insn == p->first)
3837 /* If this insn sets / clobbers a register that feeds one of the insns
3838 already in the list, this insn has to stay too. */
3839 else if (reg_overlap_mentioned_p (dest, PATTERN (p->first))
3840 || (CALL_P (p->first) && (find_reg_fusage (p->first, USE, dest)))
3841 || reg_used_between_p (dest, p->first, p->insn)
3842 /* Likewise if this insn depends on a register set by a previous
3843 insn in the list, or if it sets a result (presumably a hard
3844 register) that is set or clobbered by a previous insn.
3845 N.B. the modified_*_p (SET_DEST...) tests applied to a MEM
3846 SET_DEST perform the former check on the address, and the latter
3847 check on the MEM. */
3848 || (GET_CODE (set) == SET
3849 && (modified_in_p (SET_SRC (set), p->first)
3850 || modified_in_p (SET_DEST (set), p->first)
3851 || modified_between_p (SET_SRC (set), p->first, p->insn)
3852 || modified_between_p (SET_DEST (set), p->first, p->insn))))
3853 p->must_stay = true;
3857 /* Emit code to make a call to a constant function or a library call.
3859 INSNS is a list containing all insns emitted in the call.
3860 These insns leave the result in RESULT. Our block is to copy RESULT
3861 to TARGET, which is logically equivalent to EQUIV.
3863 We first emit any insns that set a pseudo on the assumption that these are
3864 loading constants into registers; doing so allows them to be safely cse'ed
3865 between blocks. Then we emit all the other insns in the block, followed by
3866 an insn to move RESULT to TARGET. This last insn will have a REQ_EQUAL
3867 note with an operand of EQUIV. */
3870 emit_libcall_block (rtx insns, rtx target, rtx result, rtx equiv)
3872 rtx final_dest = target;
3873 rtx prev, next, last, insn;
3875 /* If this is a reg with REG_USERVAR_P set, then it could possibly turn
3876 into a MEM later. Protect the libcall block from this change. */
3877 if (! REG_P (target) || REG_USERVAR_P (target))
3878 target = gen_reg_rtx (GET_MODE (target));
3880 /* If we're using non-call exceptions, a libcall corresponding to an
3881 operation that may trap may also trap. */
3882 if (flag_non_call_exceptions && may_trap_p (equiv))
3884 for (insn = insns; insn; insn = NEXT_INSN (insn))
3887 rtx note = find_reg_note (insn, REG_EH_REGION, NULL_RTX);
3889 if (note != 0 && INTVAL (XEXP (note, 0)) <= 0)
3890 remove_note (insn, note);
3894 /* look for any CALL_INSNs in this sequence, and attach a REG_EH_REGION
3895 reg note to indicate that this call cannot throw or execute a nonlocal
3896 goto (unless there is already a REG_EH_REGION note, in which case
3898 for (insn = insns; insn; insn = NEXT_INSN (insn))
3901 rtx note = find_reg_note (insn, REG_EH_REGION, NULL_RTX);
3904 XEXP (note, 0) = constm1_rtx;
3906 add_reg_note (insn, REG_EH_REGION, constm1_rtx);
3909 /* First emit all insns that set pseudos. Remove them from the list as
3910 we go. Avoid insns that set pseudos which were referenced in previous
3911 insns. These can be generated by move_by_pieces, for example,
3912 to update an address. Similarly, avoid insns that reference things
3913 set in previous insns. */
3915 for (insn = insns; insn; insn = next)
3917 rtx set = single_set (insn);
3919 next = NEXT_INSN (insn);
3921 if (set != 0 && REG_P (SET_DEST (set))
3922 && REGNO (SET_DEST (set)) >= FIRST_PSEUDO_REGISTER)
3924 struct no_conflict_data data;
3926 data.target = const0_rtx;
3930 note_stores (PATTERN (insn), no_conflict_move_test, &data);
3931 if (! data.must_stay)
3933 if (PREV_INSN (insn))
3934 NEXT_INSN (PREV_INSN (insn)) = next;
3939 PREV_INSN (next) = PREV_INSN (insn);
3945 /* Some ports use a loop to copy large arguments onto the stack.
3946 Don't move anything outside such a loop. */
3951 prev = get_last_insn ();
3953 /* Write the remaining insns followed by the final copy. */
3955 for (insn = insns; insn; insn = next)
3957 next = NEXT_INSN (insn);
3962 last = emit_move_insn (target, result);
3963 if (optab_handler (mov_optab, GET_MODE (target))->insn_code
3964 != CODE_FOR_nothing)
3965 set_unique_reg_note (last, REG_EQUAL, copy_rtx (equiv));
3967 if (final_dest != target)
3968 emit_move_insn (final_dest, target);
3971 /* Nonzero if we can perform a comparison of mode MODE straightforwardly.
3972 PURPOSE describes how this comparison will be used. CODE is the rtx
3973 comparison code we will be using.
3975 ??? Actually, CODE is slightly weaker than that. A target is still
3976 required to implement all of the normal bcc operations, but not
3977 required to implement all (or any) of the unordered bcc operations. */
3980 can_compare_p (enum rtx_code code, enum machine_mode mode,
3981 enum can_compare_purpose purpose)
3984 test = gen_rtx_fmt_ee (code, mode, const0_rtx, const0_rtx);
3989 if (optab_handler (cmp_optab, mode)->insn_code != CODE_FOR_nothing)
3991 if (purpose == ccp_jump)
3992 return bcc_gen_fctn[(int) code] != NULL;
3993 else if (purpose == ccp_store_flag)
3994 return setcc_gen_code[(int) code] != CODE_FOR_nothing;
3996 /* There's only one cmov entry point, and it's allowed to fail. */
3999 if (purpose == ccp_jump
4000 && (icode = optab_handler (cbranch_optab, mode)->insn_code) != CODE_FOR_nothing
4001 && insn_data[icode].operand[0].predicate (test, mode))
4003 if (purpose == ccp_store_flag
4004 && (icode = optab_handler (cstore_optab, mode)->insn_code) != CODE_FOR_nothing
4005 && insn_data[icode].operand[1].predicate (test, mode))
4007 if (purpose == ccp_cmov
4008 && optab_handler (cmov_optab, mode)->insn_code != CODE_FOR_nothing)
4011 mode = GET_MODE_WIDER_MODE (mode);
4012 PUT_MODE (test, mode);
4014 while (mode != VOIDmode);
4019 /* This function is called when we are going to emit a compare instruction that
4020 compares the values found in *PX and *PY, using the rtl operator COMPARISON.
4022 *PMODE is the mode of the inputs (in case they are const_int).
4023 *PUNSIGNEDP nonzero says that the operands are unsigned;
4024 this matters if they need to be widened.
4026 If they have mode BLKmode, then SIZE specifies the size of both operands.
4028 This function performs all the setup necessary so that the caller only has
4029 to emit a single comparison insn. This setup can involve doing a BLKmode
4030 comparison or emitting a library call to perform the comparison if no insn
4031 is available to handle it.
4032 The values which are passed in through pointers can be modified; the caller
4033 should perform the comparison on the modified values. Constant
4034 comparisons must have already been folded. */
4037 prepare_cmp_insn (rtx *px, rtx *py, enum rtx_code *pcomparison, rtx size,
4038 enum machine_mode *pmode, int *punsignedp,
4039 enum can_compare_purpose purpose)
4041 enum machine_mode mode = *pmode;
4042 rtx x = *px, y = *py;
4043 int unsignedp = *punsignedp;
4046 /* If we are inside an appropriately-short loop and we are optimizing,
4047 force expensive constants into a register. */
4048 if (CONSTANT_P (x) && optimize
4049 && (rtx_cost (x, COMPARE, optimize_insn_for_speed_p ())
4050 > COSTS_N_INSNS (1)))
4051 x = force_reg (mode, x);
4053 if (CONSTANT_P (y) && optimize
4054 && (rtx_cost (y, COMPARE, optimize_insn_for_speed_p ())
4055 > COSTS_N_INSNS (1)))
4056 y = force_reg (mode, y);
4059 /* Make sure if we have a canonical comparison. The RTL
4060 documentation states that canonical comparisons are required only
4061 for targets which have cc0. */
4062 gcc_assert (!CONSTANT_P (x) || CONSTANT_P (y));
4065 /* Don't let both operands fail to indicate the mode. */
4066 if (GET_MODE (x) == VOIDmode && GET_MODE (y) == VOIDmode)
4067 x = force_reg (mode, x);
4069 /* Handle all BLKmode compares. */
4071 if (mode == BLKmode)
4073 enum machine_mode cmp_mode, result_mode;
4074 enum insn_code cmp_code;
4079 = GEN_INT (MIN (MEM_ALIGN (x), MEM_ALIGN (y)) / BITS_PER_UNIT);
4083 /* Try to use a memory block compare insn - either cmpstr
4084 or cmpmem will do. */
4085 for (cmp_mode = GET_CLASS_NARROWEST_MODE (MODE_INT);
4086 cmp_mode != VOIDmode;
4087 cmp_mode = GET_MODE_WIDER_MODE (cmp_mode))
4089 cmp_code = cmpmem_optab[cmp_mode];
4090 if (cmp_code == CODE_FOR_nothing)
4091 cmp_code = cmpstr_optab[cmp_mode];
4092 if (cmp_code == CODE_FOR_nothing)
4093 cmp_code = cmpstrn_optab[cmp_mode];
4094 if (cmp_code == CODE_FOR_nothing)
4097 /* Must make sure the size fits the insn's mode. */
4098 if ((GET_CODE (size) == CONST_INT
4099 && INTVAL (size) >= (1 << GET_MODE_BITSIZE (cmp_mode)))
4100 || (GET_MODE_BITSIZE (GET_MODE (size))
4101 > GET_MODE_BITSIZE (cmp_mode)))
4104 result_mode = insn_data[cmp_code].operand[0].mode;
4105 result = gen_reg_rtx (result_mode);
4106 size = convert_to_mode (cmp_mode, size, 1);
4107 emit_insn (GEN_FCN (cmp_code) (result, x, y, size, opalign));
4111 *pmode = result_mode;
4115 /* Otherwise call a library function, memcmp. */
4116 libfunc = memcmp_libfunc;
4117 length_type = sizetype;
4118 result_mode = TYPE_MODE (integer_type_node);
4119 cmp_mode = TYPE_MODE (length_type);
4120 size = convert_to_mode (TYPE_MODE (length_type), size,
4121 TYPE_UNSIGNED (length_type));
4123 result = emit_library_call_value (libfunc, 0, LCT_PURE,
4130 *pmode = result_mode;
4134 /* Don't allow operands to the compare to trap, as that can put the
4135 compare and branch in different basic blocks. */
4136 if (flag_non_call_exceptions)
4139 x = force_reg (mode, x);
4141 y = force_reg (mode, y);
4146 if (can_compare_p (*pcomparison, mode, purpose))
4149 /* Handle a lib call just for the mode we are using. */
4151 libfunc = optab_libfunc (cmp_optab, mode);
4152 if (libfunc && !SCALAR_FLOAT_MODE_P (mode))
4156 /* If we want unsigned, and this mode has a distinct unsigned
4157 comparison routine, use that. */
4160 rtx ulibfunc = optab_libfunc (ucmp_optab, mode);
4165 result = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
4166 targetm.libgcc_cmp_return_mode (),
4167 2, x, mode, y, mode);
4169 /* There are two kinds of comparison routines. Biased routines
4170 return 0/1/2, and unbiased routines return -1/0/1. Other parts
4171 of gcc expect that the comparison operation is equivalent
4172 to the modified comparison. For signed comparisons compare the
4173 result against 1 in the biased case, and zero in the unbiased
4174 case. For unsigned comparisons always compare against 1 after
4175 biasing the unbiased result by adding 1. This gives us a way to
4181 if (!TARGET_LIB_INT_CMP_BIASED)
4184 *px = plus_constant (result, 1);
4191 gcc_assert (SCALAR_FLOAT_MODE_P (mode));
4192 prepare_float_lib_cmp (px, py, pcomparison, pmode, punsignedp);
4195 /* Before emitting an insn with code ICODE, make sure that X, which is going
4196 to be used for operand OPNUM of the insn, is converted from mode MODE to
4197 WIDER_MODE (UNSIGNEDP determines whether it is an unsigned conversion), and
4198 that it is accepted by the operand predicate. Return the new value. */
4201 prepare_operand (int icode, rtx x, int opnum, enum machine_mode mode,
4202 enum machine_mode wider_mode, int unsignedp)
4204 if (mode != wider_mode)
4205 x = convert_modes (wider_mode, mode, x, unsignedp);
4207 if (!insn_data[icode].operand[opnum].predicate
4208 (x, insn_data[icode].operand[opnum].mode))
4210 if (reload_completed)
4212 x = copy_to_mode_reg (insn_data[icode].operand[opnum].mode, x);
4218 /* Subroutine of emit_cmp_and_jump_insns; this function is called when we know
4219 we can do the comparison.
4220 The arguments are the same as for emit_cmp_and_jump_insns; but LABEL may
4221 be NULL_RTX which indicates that only a comparison is to be generated. */
4224 emit_cmp_and_jump_insn_1 (rtx x, rtx y, enum machine_mode mode,
4225 enum rtx_code comparison, int unsignedp, rtx label)
4227 rtx test = gen_rtx_fmt_ee (comparison, mode, x, y);
4228 enum mode_class mclass = GET_MODE_CLASS (mode);
4229 enum machine_mode wider_mode = mode;
4231 /* Try combined insns first. */
4234 enum insn_code icode;
4235 PUT_MODE (test, wider_mode);
4239 icode = optab_handler (cbranch_optab, wider_mode)->insn_code;
4241 if (icode != CODE_FOR_nothing
4242 && insn_data[icode].operand[0].predicate (test, wider_mode))
4244 x = prepare_operand (icode, x, 1, mode, wider_mode, unsignedp);
4245 y = prepare_operand (icode, y, 2, mode, wider_mode, unsignedp);
4246 emit_jump_insn (GEN_FCN (icode) (test, x, y, label));
4251 /* Handle some compares against zero. */
4252 icode = (int) optab_handler (tst_optab, wider_mode)->insn_code;
4253 if (y == CONST0_RTX (mode) && icode != CODE_FOR_nothing)
4255 x = prepare_operand (icode, x, 0, mode, wider_mode, unsignedp);
4256 emit_insn (GEN_FCN (icode) (x));
4258 emit_jump_insn (bcc_gen_fctn[(int) comparison] (label));
4262 /* Handle compares for which there is a directly suitable insn. */
4264 icode = (int) optab_handler (cmp_optab, wider_mode)->insn_code;
4265 if (icode != CODE_FOR_nothing)
4267 x = prepare_operand (icode, x, 0, mode, wider_mode, unsignedp);
4268 y = prepare_operand (icode, y, 1, mode, wider_mode, unsignedp);
4269 emit_insn (GEN_FCN (icode) (x, y));
4271 emit_jump_insn (bcc_gen_fctn[(int) comparison] (label));
4275 if (!CLASS_HAS_WIDER_MODES_P (mclass))
4278 wider_mode = GET_MODE_WIDER_MODE (wider_mode);
4280 while (wider_mode != VOIDmode);
4285 /* Generate code to compare X with Y so that the condition codes are
4286 set and to jump to LABEL if the condition is true. If X is a
4287 constant and Y is not a constant, then the comparison is swapped to
4288 ensure that the comparison RTL has the canonical form.
4290 UNSIGNEDP nonzero says that X and Y are unsigned; this matters if they
4291 need to be widened by emit_cmp_insn. UNSIGNEDP is also used to select
4292 the proper branch condition code.
4294 If X and Y have mode BLKmode, then SIZE specifies the size of both X and Y.
4296 MODE is the mode of the inputs (in case they are const_int).
4298 COMPARISON is the rtl operator to compare with (EQ, NE, GT, etc.). It will
4299 be passed unchanged to emit_cmp_insn, then potentially converted into an
4300 unsigned variant based on UNSIGNEDP to select a proper jump instruction. */
4303 emit_cmp_and_jump_insns (rtx x, rtx y, enum rtx_code comparison, rtx size,
4304 enum machine_mode mode, int unsignedp, rtx label)
4306 rtx op0 = x, op1 = y;
4308 /* Swap operands and condition to ensure canonical RTL. */
4309 if (swap_commutative_operands_p (x, y))
4311 /* If we're not emitting a branch, callers are required to pass
4312 operands in an order conforming to canonical RTL. We relax this
4313 for commutative comparisons so callers using EQ don't need to do
4314 swapping by hand. */
4315 gcc_assert (label || (comparison == swap_condition (comparison)));
4318 comparison = swap_condition (comparison);
4322 /* If OP0 is still a constant, then both X and Y must be constants.
4323 Force X into a register to create canonical RTL. */
4324 if (CONSTANT_P (op0))
4325 op0 = force_reg (mode, op0);
4329 comparison = unsigned_condition (comparison);
4331 prepare_cmp_insn (&op0, &op1, &comparison, size, &mode, &unsignedp,
4333 emit_cmp_and_jump_insn_1 (op0, op1, mode, comparison, unsignedp, label);
4336 /* Like emit_cmp_and_jump_insns, but generate only the comparison. */
4339 emit_cmp_insn (rtx x, rtx y, enum rtx_code comparison, rtx size,
4340 enum machine_mode mode, int unsignedp)
4342 emit_cmp_and_jump_insns (x, y, comparison, size, mode, unsignedp, 0);
4345 /* Emit a library call comparison between floating point X and Y.
4346 COMPARISON is the rtl operator to compare with (EQ, NE, GT, etc.). */
4349 prepare_float_lib_cmp (rtx *px, rtx *py, enum rtx_code *pcomparison,
4350 enum machine_mode *pmode, int *punsignedp)
4352 enum rtx_code comparison = *pcomparison;
4353 enum rtx_code swapped = swap_condition (comparison);
4354 enum rtx_code reversed = reverse_condition_maybe_unordered (comparison);
4357 enum machine_mode orig_mode = GET_MODE (x);
4358 enum machine_mode mode, cmp_mode;
4359 rtx value, target, insns, equiv;
4361 bool reversed_p = false;
4362 cmp_mode = targetm.libgcc_cmp_return_mode ();
4364 for (mode = orig_mode;
4366 mode = GET_MODE_WIDER_MODE (mode))
4368 if ((libfunc = optab_libfunc (code_to_optab[comparison], mode)))
4371 if ((libfunc = optab_libfunc (code_to_optab[swapped] , mode)))
4374 tmp = x; x = y; y = tmp;
4375 comparison = swapped;
4379 if ((libfunc = optab_libfunc (code_to_optab[reversed], mode))
4380 && FLOAT_LIB_COMPARE_RETURNS_BOOL (mode, reversed))
4382 comparison = reversed;
4388 gcc_assert (mode != VOIDmode);
4390 if (mode != orig_mode)
4392 x = convert_to_mode (mode, x, 0);
4393 y = convert_to_mode (mode, y, 0);
4396 /* Attach a REG_EQUAL note describing the semantics of the libcall to
4397 the RTL. The allows the RTL optimizers to delete the libcall if the
4398 condition can be determined at compile-time. */
4399 if (comparison == UNORDERED)
4401 rtx temp = simplify_gen_relational (NE, cmp_mode, mode, x, x);
4402 equiv = simplify_gen_relational (NE, cmp_mode, mode, y, y);
4403 equiv = simplify_gen_ternary (IF_THEN_ELSE, cmp_mode, cmp_mode,
4404 temp, const_true_rtx, equiv);
4408 equiv = simplify_gen_relational (comparison, cmp_mode, mode, x, y);
4409 if (! FLOAT_LIB_COMPARE_RETURNS_BOOL (mode, comparison))
4411 rtx true_rtx, false_rtx;
4416 true_rtx = const0_rtx;
4417 false_rtx = const_true_rtx;
4421 true_rtx = const_true_rtx;
4422 false_rtx = const0_rtx;
4426 true_rtx = const1_rtx;
4427 false_rtx = const0_rtx;
4431 true_rtx = const0_rtx;
4432 false_rtx = constm1_rtx;
4436 true_rtx = constm1_rtx;
4437 false_rtx = const0_rtx;
4441 true_rtx = const0_rtx;
4442 false_rtx = const1_rtx;
4448 equiv = simplify_gen_ternary (IF_THEN_ELSE, cmp_mode, cmp_mode,
4449 equiv, true_rtx, false_rtx);
4454 value = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
4455 cmp_mode, 2, x, mode, y, mode);
4456 insns = get_insns ();
4459 target = gen_reg_rtx (cmp_mode);
4460 emit_libcall_block (insns, target, value, equiv);
4462 if (comparison == UNORDERED
4463 || FLOAT_LIB_COMPARE_RETURNS_BOOL (mode, comparison))
4464 comparison = reversed_p ? EQ : NE;
4469 *pcomparison = comparison;
4473 /* Generate code to indirectly jump to a location given in the rtx LOC. */
4476 emit_indirect_jump (rtx loc)
4478 if (!insn_data[(int) CODE_FOR_indirect_jump].operand[0].predicate
4480 loc = copy_to_mode_reg (Pmode, loc);
4482 emit_jump_insn (gen_indirect_jump (loc));
4486 #ifdef HAVE_conditional_move
4488 /* Emit a conditional move instruction if the machine supports one for that
4489 condition and machine mode.
4491 OP0 and OP1 are the operands that should be compared using CODE. CMODE is
4492 the mode to use should they be constants. If it is VOIDmode, they cannot
4495 OP2 should be stored in TARGET if the comparison is true, otherwise OP3
4496 should be stored there. MODE is the mode to use should they be constants.
4497 If it is VOIDmode, they cannot both be constants.
4499 The result is either TARGET (perhaps modified) or NULL_RTX if the operation
4500 is not supported. */
4503 emit_conditional_move (rtx target, enum rtx_code code, rtx op0, rtx op1,
4504 enum machine_mode cmode, rtx op2, rtx op3,
4505 enum machine_mode mode, int unsignedp)
4507 rtx tem, subtarget, comparison, insn;
4508 enum insn_code icode;
4509 enum rtx_code reversed;
4511 /* If one operand is constant, make it the second one. Only do this
4512 if the other operand is not constant as well. */
4514 if (swap_commutative_operands_p (op0, op1))
4519 code = swap_condition (code);
4522 /* get_condition will prefer to generate LT and GT even if the old
4523 comparison was against zero, so undo that canonicalization here since
4524 comparisons against zero are cheaper. */
4525 if (code == LT && op1 == const1_rtx)
4526 code = LE, op1 = const0_rtx;
4527 else if (code == GT && op1 == constm1_rtx)
4528 code = GE, op1 = const0_rtx;
4530 if (cmode == VOIDmode)
4531 cmode = GET_MODE (op0);
4533 if (swap_commutative_operands_p (op2, op3)
4534 && ((reversed = reversed_comparison_code_parts (code, op0, op1, NULL))
4543 if (mode == VOIDmode)
4544 mode = GET_MODE (op2);
4546 icode = movcc_gen_code[mode];
4548 if (icode == CODE_FOR_nothing)
4552 target = gen_reg_rtx (mode);
4556 /* If the insn doesn't accept these operands, put them in pseudos. */
4558 if (!insn_data[icode].operand[0].predicate
4559 (subtarget, insn_data[icode].operand[0].mode))
4560 subtarget = gen_reg_rtx (insn_data[icode].operand[0].mode);
4562 if (!insn_data[icode].operand[2].predicate
4563 (op2, insn_data[icode].operand[2].mode))
4564 op2 = copy_to_mode_reg (insn_data[icode].operand[2].mode, op2);
4566 if (!insn_data[icode].operand[3].predicate
4567 (op3, insn_data[icode].operand[3].mode))
4568 op3 = copy_to_mode_reg (insn_data[icode].operand[3].mode, op3);
4570 /* Everything should now be in the suitable form, so emit the compare insn
4571 and then the conditional move. */
4574 = compare_from_rtx (op0, op1, code, unsignedp, cmode, NULL_RTX);
4576 /* ??? Watch for const0_rtx (nop) and const_true_rtx (unconditional)? */
4577 /* We can get const0_rtx or const_true_rtx in some circumstances. Just
4578 return NULL and let the caller figure out how best to deal with this
4580 if (GET_CODE (comparison) != code)
4583 insn = GEN_FCN (icode) (subtarget, comparison, op2, op3);
4585 /* If that failed, then give up. */
4591 if (subtarget != target)
4592 convert_move (target, subtarget, 0);
4597 /* Return nonzero if a conditional move of mode MODE is supported.
4599 This function is for combine so it can tell whether an insn that looks
4600 like a conditional move is actually supported by the hardware. If we
4601 guess wrong we lose a bit on optimization, but that's it. */
4602 /* ??? sparc64 supports conditionally moving integers values based on fp
4603 comparisons, and vice versa. How do we handle them? */
4606 can_conditionally_move_p (enum machine_mode mode)
4608 if (movcc_gen_code[mode] != CODE_FOR_nothing)
4614 #endif /* HAVE_conditional_move */
4616 /* Emit a conditional addition instruction if the machine supports one for that
4617 condition and machine mode.
4619 OP0 and OP1 are the operands that should be compared using CODE. CMODE is
4620 the mode to use should they be constants. If it is VOIDmode, they cannot
4623 OP2 should be stored in TARGET if the comparison is true, otherwise OP2+OP3
4624 should be stored there. MODE is the mode to use should they be constants.
4625 If it is VOIDmode, they cannot both be constants.
4627 The result is either TARGET (perhaps modified) or NULL_RTX if the operation
4628 is not supported. */
4631 emit_conditional_add (rtx target, enum rtx_code code, rtx op0, rtx op1,
4632 enum machine_mode cmode, rtx op2, rtx op3,
4633 enum machine_mode mode, int unsignedp)
4635 rtx tem, subtarget, comparison, insn;
4636 enum insn_code icode;
4637 enum rtx_code reversed;
4639 /* If one operand is constant, make it the second one. Only do this
4640 if the other operand is not constant as well. */
4642 if (swap_commutative_operands_p (op0, op1))
4647 code = swap_condition (code);
4650 /* get_condition will prefer to generate LT and GT even if the old
4651 comparison was against zero, so undo that canonicalization here since
4652 comparisons against zero are cheaper. */
4653 if (code == LT && op1 == const1_rtx)
4654 code = LE, op1 = const0_rtx;
4655 else if (code == GT && op1 == constm1_rtx)
4656 code = GE, op1 = const0_rtx;
4658 if (cmode == VOIDmode)
4659 cmode = GET_MODE (op0);
4661 if (swap_commutative_operands_p (op2, op3)
4662 && ((reversed = reversed_comparison_code_parts (code, op0, op1, NULL))
4671 if (mode == VOIDmode)
4672 mode = GET_MODE (op2);
4674 icode = optab_handler (addcc_optab, mode)->insn_code;
4676 if (icode == CODE_FOR_nothing)
4680 target = gen_reg_rtx (mode);
4682 /* If the insn doesn't accept these operands, put them in pseudos. */
4684 if (!insn_data[icode].operand[0].predicate
4685 (target, insn_data[icode].operand[0].mode))
4686 subtarget = gen_reg_rtx (insn_data[icode].operand[0].mode);
4690 if (!insn_data[icode].operand[2].predicate
4691 (op2, insn_data[icode].operand[2].mode))
4692 op2 = copy_to_mode_reg (insn_data[icode].operand[2].mode, op2);
4694 if (!insn_data[icode].operand[3].predicate
4695 (op3, insn_data[icode].operand[3].mode))
4696 op3 = copy_to_mode_reg (insn_data[icode].operand[3].mode, op3);
4698 /* Everything should now be in the suitable form, so emit the compare insn
4699 and then the conditional move. */
4702 = compare_from_rtx (op0, op1, code, unsignedp, cmode, NULL_RTX);
4704 /* ??? Watch for const0_rtx (nop) and const_true_rtx (unconditional)? */
4705 /* We can get const0_rtx or const_true_rtx in some circumstances. Just
4706 return NULL and let the caller figure out how best to deal with this
4708 if (GET_CODE (comparison) != code)
4711 insn = GEN_FCN (icode) (subtarget, comparison, op2, op3);
4713 /* If that failed, then give up. */
4719 if (subtarget != target)
4720 convert_move (target, subtarget, 0);
4725 /* These functions attempt to generate an insn body, rather than
4726 emitting the insn, but if the gen function already emits them, we
4727 make no attempt to turn them back into naked patterns. */
4729 /* Generate and return an insn body to add Y to X. */
4732 gen_add2_insn (rtx x, rtx y)
4734 int icode = (int) optab_handler (add_optab, GET_MODE (x))->insn_code;
4736 gcc_assert (insn_data[icode].operand[0].predicate
4737 (x, insn_data[icode].operand[0].mode));
4738 gcc_assert (insn_data[icode].operand[1].predicate
4739 (x, insn_data[icode].operand[1].mode));
4740 gcc_assert (insn_data[icode].operand[2].predicate
4741 (y, insn_data[icode].operand[2].mode));
4743 return GEN_FCN (icode) (x, x, y);
4746 /* Generate and return an insn body to add r1 and c,
4747 storing the result in r0. */
4750 gen_add3_insn (rtx r0, rtx r1, rtx c)
4752 int icode = (int) optab_handler (add_optab, GET_MODE (r0))->insn_code;
4754 if (icode == CODE_FOR_nothing
4755 || !(insn_data[icode].operand[0].predicate
4756 (r0, insn_data[icode].operand[0].mode))
4757 || !(insn_data[icode].operand[1].predicate
4758 (r1, insn_data[icode].operand[1].mode))
4759 || !(insn_data[icode].operand[2].predicate
4760 (c, insn_data[icode].operand[2].mode)))
4763 return GEN_FCN (icode) (r0, r1, c);
4767 have_add2_insn (rtx x, rtx y)
4771 gcc_assert (GET_MODE (x) != VOIDmode);
4773 icode = (int) optab_handler (add_optab, GET_MODE (x))->insn_code;
4775 if (icode == CODE_FOR_nothing)
4778 if (!(insn_data[icode].operand[0].predicate
4779 (x, insn_data[icode].operand[0].mode))
4780 || !(insn_data[icode].operand[1].predicate
4781 (x, insn_data[icode].operand[1].mode))
4782 || !(insn_data[icode].operand[2].predicate
4783 (y, insn_data[icode].operand[2].mode)))
4789 /* Generate and return an insn body to subtract Y from X. */
4792 gen_sub2_insn (rtx x, rtx y)
4794 int icode = (int) optab_handler (sub_optab, GET_MODE (x))->insn_code;
4796 gcc_assert (insn_data[icode].operand[0].predicate
4797 (x, insn_data[icode].operand[0].mode));
4798 gcc_assert (insn_data[icode].operand[1].predicate
4799 (x, insn_data[icode].operand[1].mode));
4800 gcc_assert (insn_data[icode].operand[2].predicate
4801 (y, insn_data[icode].operand[2].mode));
4803 return GEN_FCN (icode) (x, x, y);
4806 /* Generate and return an insn body to subtract r1 and c,
4807 storing the result in r0. */
4810 gen_sub3_insn (rtx r0, rtx r1, rtx c)
4812 int icode = (int) optab_handler (sub_optab, GET_MODE (r0))->insn_code;
4814 if (icode == CODE_FOR_nothing
4815 || !(insn_data[icode].operand[0].predicate
4816 (r0, insn_data[icode].operand[0].mode))
4817 || !(insn_data[icode].operand[1].predicate
4818 (r1, insn_data[icode].operand[1].mode))
4819 || !(insn_data[icode].operand[2].predicate
4820 (c, insn_data[icode].operand[2].mode)))
4823 return GEN_FCN (icode) (r0, r1, c);
4827 have_sub2_insn (rtx x, rtx y)
4831 gcc_assert (GET_MODE (x) != VOIDmode);
4833 icode = (int) optab_handler (sub_optab, GET_MODE (x))->insn_code;
4835 if (icode == CODE_FOR_nothing)
4838 if (!(insn_data[icode].operand[0].predicate
4839 (x, insn_data[icode].operand[0].mode))
4840 || !(insn_data[icode].operand[1].predicate
4841 (x, insn_data[icode].operand[1].mode))
4842 || !(insn_data[icode].operand[2].predicate
4843 (y, insn_data[icode].operand[2].mode)))
4849 /* Generate the body of an instruction to copy Y into X.
4850 It may be a list of insns, if one insn isn't enough. */
4853 gen_move_insn (rtx x, rtx y)
4858 emit_move_insn_1 (x, y);
4864 /* Return the insn code used to extend FROM_MODE to TO_MODE.
4865 UNSIGNEDP specifies zero-extension instead of sign-extension. If
4866 no such operation exists, CODE_FOR_nothing will be returned. */
4869 can_extend_p (enum machine_mode to_mode, enum machine_mode from_mode,
4873 #ifdef HAVE_ptr_extend
4875 return CODE_FOR_ptr_extend;
4878 tab = unsignedp ? zext_optab : sext_optab;
4879 return convert_optab_handler (tab, to_mode, from_mode)->insn_code;
4882 /* Generate the body of an insn to extend Y (with mode MFROM)
4883 into X (with mode MTO). Do zero-extension if UNSIGNEDP is nonzero. */
4886 gen_extend_insn (rtx x, rtx y, enum machine_mode mto,
4887 enum machine_mode mfrom, int unsignedp)
4889 enum insn_code icode = can_extend_p (mto, mfrom, unsignedp);
4890 return GEN_FCN (icode) (x, y);
4893 /* can_fix_p and can_float_p say whether the target machine
4894 can directly convert a given fixed point type to
4895 a given floating point type, or vice versa.
4896 The returned value is the CODE_FOR_... value to use,
4897 or CODE_FOR_nothing if these modes cannot be directly converted.
4899 *TRUNCP_PTR is set to 1 if it is necessary to output
4900 an explicit FTRUNC insn before the fix insn; otherwise 0. */
4902 static enum insn_code
4903 can_fix_p (enum machine_mode fixmode, enum machine_mode fltmode,
4904 int unsignedp, int *truncp_ptr)
4907 enum insn_code icode;
4909 tab = unsignedp ? ufixtrunc_optab : sfixtrunc_optab;
4910 icode = convert_optab_handler (tab, fixmode, fltmode)->insn_code;
4911 if (icode != CODE_FOR_nothing)
4917 /* FIXME: This requires a port to define both FIX and FTRUNC pattern
4918 for this to work. We need to rework the fix* and ftrunc* patterns
4919 and documentation. */
4920 tab = unsignedp ? ufix_optab : sfix_optab;
4921 icode = convert_optab_handler (tab, fixmode, fltmode)->insn_code;
4922 if (icode != CODE_FOR_nothing
4923 && optab_handler (ftrunc_optab, fltmode)->insn_code != CODE_FOR_nothing)
4930 return CODE_FOR_nothing;
4933 static enum insn_code
4934 can_float_p (enum machine_mode fltmode, enum machine_mode fixmode,
4939 tab = unsignedp ? ufloat_optab : sfloat_optab;
4940 return convert_optab_handler (tab, fltmode, fixmode)->insn_code;
4943 /* Generate code to convert FROM to floating point
4944 and store in TO. FROM must be fixed point and not VOIDmode.
4945 UNSIGNEDP nonzero means regard FROM as unsigned.
4946 Normally this is done by correcting the final value
4947 if it is negative. */
4950 expand_float (rtx to, rtx from, int unsignedp)
4952 enum insn_code icode;
4954 enum machine_mode fmode, imode;
4955 bool can_do_signed = false;
4957 /* Crash now, because we won't be able to decide which mode to use. */
4958 gcc_assert (GET_MODE (from) != VOIDmode);
4960 /* Look for an insn to do the conversion. Do it in the specified
4961 modes if possible; otherwise convert either input, output or both to
4962 wider mode. If the integer mode is wider than the mode of FROM,
4963 we can do the conversion signed even if the input is unsigned. */
4965 for (fmode = GET_MODE (to); fmode != VOIDmode;
4966 fmode = GET_MODE_WIDER_MODE (fmode))
4967 for (imode = GET_MODE (from); imode != VOIDmode;
4968 imode = GET_MODE_WIDER_MODE (imode))
4970 int doing_unsigned = unsignedp;
4972 if (fmode != GET_MODE (to)
4973 && significand_size (fmode) < GET_MODE_BITSIZE (GET_MODE (from)))
4976 icode = can_float_p (fmode, imode, unsignedp);
4977 if (icode == CODE_FOR_nothing && unsignedp)
4979 enum insn_code scode = can_float_p (fmode, imode, 0);
4980 if (scode != CODE_FOR_nothing)
4981 can_do_signed = true;
4982 if (imode != GET_MODE (from))
4983 icode = scode, doing_unsigned = 0;
4986 if (icode != CODE_FOR_nothing)
4988 if (imode != GET_MODE (from))
4989 from = convert_to_mode (imode, from, unsignedp);
4991 if (fmode != GET_MODE (to))
4992 target = gen_reg_rtx (fmode);
4994 emit_unop_insn (icode, target, from,
4995 doing_unsigned ? UNSIGNED_FLOAT : FLOAT);
4998 convert_move (to, target, 0);
5003 /* Unsigned integer, and no way to convert directly. Convert as signed,
5004 then unconditionally adjust the result. */
5005 if (unsignedp && can_do_signed)
5007 rtx label = gen_label_rtx ();
5009 REAL_VALUE_TYPE offset;
5011 /* Look for a usable floating mode FMODE wider than the source and at
5012 least as wide as the target. Using FMODE will avoid rounding woes
5013 with unsigned values greater than the signed maximum value. */
5015 for (fmode = GET_MODE (to); fmode != VOIDmode;
5016 fmode = GET_MODE_WIDER_MODE (fmode))
5017 if (GET_MODE_BITSIZE (GET_MODE (from)) < GET_MODE_BITSIZE (fmode)
5018 && can_float_p (fmode, GET_MODE (from), 0) != CODE_FOR_nothing)
5021 if (fmode == VOIDmode)
5023 /* There is no such mode. Pretend the target is wide enough. */
5024 fmode = GET_MODE (to);
5026 /* Avoid double-rounding when TO is narrower than FROM. */
5027 if ((significand_size (fmode) + 1)
5028 < GET_MODE_BITSIZE (GET_MODE (from)))
5031 rtx neglabel = gen_label_rtx ();
5033 /* Don't use TARGET if it isn't a register, is a hard register,
5034 or is the wrong mode. */
5036 || REGNO (target) < FIRST_PSEUDO_REGISTER
5037 || GET_MODE (target) != fmode)
5038 target = gen_reg_rtx (fmode);
5040 imode = GET_MODE (from);
5041 do_pending_stack_adjust ();
5043 /* Test whether the sign bit is set. */
5044 emit_cmp_and_jump_insns (from, const0_rtx, LT, NULL_RTX, imode,
5047 /* The sign bit is not set. Convert as signed. */
5048 expand_float (target, from, 0);
5049 emit_jump_insn (gen_jump (label));
5052 /* The sign bit is set.
5053 Convert to a usable (positive signed) value by shifting right
5054 one bit, while remembering if a nonzero bit was shifted
5055 out; i.e., compute (from & 1) | (from >> 1). */
5057 emit_label (neglabel);
5058 temp = expand_binop (imode, and_optab, from, const1_rtx,
5059 NULL_RTX, 1, OPTAB_LIB_WIDEN);
5060 temp1 = expand_shift (RSHIFT_EXPR, imode, from, integer_one_node,
5062 temp = expand_binop (imode, ior_optab, temp, temp1, temp, 1,
5064 expand_float (target, temp, 0);
5066 /* Multiply by 2 to undo the shift above. */
5067 temp = expand_binop (fmode, add_optab, target, target,
5068 target, 0, OPTAB_LIB_WIDEN);
5070 emit_move_insn (target, temp);
5072 do_pending_stack_adjust ();
5078 /* If we are about to do some arithmetic to correct for an
5079 unsigned operand, do it in a pseudo-register. */
5081 if (GET_MODE (to) != fmode
5082 || !REG_P (to) || REGNO (to) < FIRST_PSEUDO_REGISTER)
5083 target = gen_reg_rtx (fmode);
5085 /* Convert as signed integer to floating. */
5086 expand_float (target, from, 0);
5088 /* If FROM is negative (and therefore TO is negative),
5089 correct its value by 2**bitwidth. */
5091 do_pending_stack_adjust ();
5092 emit_cmp_and_jump_insns (from, const0_rtx, GE, NULL_RTX, GET_MODE (from),
5096 real_2expN (&offset, GET_MODE_BITSIZE (GET_MODE (from)), fmode);
5097 temp = expand_binop (fmode, add_optab, target,
5098 CONST_DOUBLE_FROM_REAL_VALUE (offset, fmode),
5099 target, 0, OPTAB_LIB_WIDEN);
5101 emit_move_insn (target, temp);
5103 do_pending_stack_adjust ();
5108 /* No hardware instruction available; call a library routine. */
5113 convert_optab tab = unsignedp ? ufloat_optab : sfloat_optab;
5115 if (GET_MODE_SIZE (GET_MODE (from)) < GET_MODE_SIZE (SImode))
5116 from = convert_to_mode (SImode, from, unsignedp);
5118 libfunc = convert_optab_libfunc (tab, GET_MODE (to), GET_MODE (from));
5119 gcc_assert (libfunc);
5123 value = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
5124 GET_MODE (to), 1, from,
5126 insns = get_insns ();
5129 emit_libcall_block (insns, target, value,
5130 gen_rtx_fmt_e (unsignedp ? UNSIGNED_FLOAT : FLOAT,
5131 GET_MODE (to), from));
5136 /* Copy result to requested destination
5137 if we have been computing in a temp location. */
5141 if (GET_MODE (target) == GET_MODE (to))
5142 emit_move_insn (to, target);
5144 convert_move (to, target, 0);
5148 /* Generate code to convert FROM to fixed point and store in TO. FROM
5149 must be floating point. */
5152 expand_fix (rtx to, rtx from, int unsignedp)
5154 enum insn_code icode;
5156 enum machine_mode fmode, imode;
5159 /* We first try to find a pair of modes, one real and one integer, at
5160 least as wide as FROM and TO, respectively, in which we can open-code
5161 this conversion. If the integer mode is wider than the mode of TO,
5162 we can do the conversion either signed or unsigned. */
5164 for (fmode = GET_MODE (from); fmode != VOIDmode;
5165 fmode = GET_MODE_WIDER_MODE (fmode))
5166 for (imode = GET_MODE (to); imode != VOIDmode;
5167 imode = GET_MODE_WIDER_MODE (imode))
5169 int doing_unsigned = unsignedp;
5171 icode = can_fix_p (imode, fmode, unsignedp, &must_trunc);
5172 if (icode == CODE_FOR_nothing && imode != GET_MODE (to) && unsignedp)
5173 icode = can_fix_p (imode, fmode, 0, &must_trunc), doing_unsigned = 0;
5175 if (icode != CODE_FOR_nothing)
5177 rtx last = get_last_insn ();
5178 if (fmode != GET_MODE (from))
5179 from = convert_to_mode (fmode, from, 0);
5183 rtx temp = gen_reg_rtx (GET_MODE (from));
5184 from = expand_unop (GET_MODE (from), ftrunc_optab, from,
5188 if (imode != GET_MODE (to))
5189 target = gen_reg_rtx (imode);
5191 if (maybe_emit_unop_insn (icode, target, from,
5192 doing_unsigned ? UNSIGNED_FIX : FIX))
5195 convert_move (to, target, unsignedp);
5198 delete_insns_since (last);
5202 /* For an unsigned conversion, there is one more way to do it.
5203 If we have a signed conversion, we generate code that compares
5204 the real value to the largest representable positive number. If if
5205 is smaller, the conversion is done normally. Otherwise, subtract
5206 one plus the highest signed number, convert, and add it back.
5208 We only need to check all real modes, since we know we didn't find
5209 anything with a wider integer mode.
5211 This code used to extend FP value into mode wider than the destination.
5212 This is needed for decimal float modes which cannot accurately
5213 represent one plus the highest signed number of the same size, but
5214 not for binary modes. Consider, for instance conversion from SFmode
5217 The hot path through the code is dealing with inputs smaller than 2^63
5218 and doing just the conversion, so there is no bits to lose.
5220 In the other path we know the value is positive in the range 2^63..2^64-1
5221 inclusive. (as for other input overflow happens and result is undefined)
5222 So we know that the most important bit set in mantissa corresponds to
5223 2^63. The subtraction of 2^63 should not generate any rounding as it
5224 simply clears out that bit. The rest is trivial. */
5226 if (unsignedp && GET_MODE_BITSIZE (GET_MODE (to)) <= HOST_BITS_PER_WIDE_INT)
5227 for (fmode = GET_MODE (from); fmode != VOIDmode;
5228 fmode = GET_MODE_WIDER_MODE (fmode))
5229 if (CODE_FOR_nothing != can_fix_p (GET_MODE (to), fmode, 0, &must_trunc)
5230 && (!DECIMAL_FLOAT_MODE_P (fmode)
5231 || GET_MODE_BITSIZE (fmode) > GET_MODE_BITSIZE (GET_MODE (to))))
5234 REAL_VALUE_TYPE offset;
5235 rtx limit, lab1, lab2, insn;
5237 bitsize = GET_MODE_BITSIZE (GET_MODE (to));
5238 real_2expN (&offset, bitsize - 1, fmode);
5239 limit = CONST_DOUBLE_FROM_REAL_VALUE (offset, fmode);
5240 lab1 = gen_label_rtx ();
5241 lab2 = gen_label_rtx ();
5243 if (fmode != GET_MODE (from))
5244 from = convert_to_mode (fmode, from, 0);
5246 /* See if we need to do the subtraction. */
5247 do_pending_stack_adjust ();
5248 emit_cmp_and_jump_insns (from, limit, GE, NULL_RTX, GET_MODE (from),
5251 /* If not, do the signed "fix" and branch around fixup code. */
5252 expand_fix (to, from, 0);
5253 emit_jump_insn (gen_jump (lab2));
5256 /* Otherwise, subtract 2**(N-1), convert to signed number,
5257 then add 2**(N-1). Do the addition using XOR since this
5258 will often generate better code. */
5260 target = expand_binop (GET_MODE (from), sub_optab, from, limit,
5261 NULL_RTX, 0, OPTAB_LIB_WIDEN);
5262 expand_fix (to, target, 0);
5263 target = expand_binop (GET_MODE (to), xor_optab, to,
5265 ((HOST_WIDE_INT) 1 << (bitsize - 1),
5267 to, 1, OPTAB_LIB_WIDEN);
5270 emit_move_insn (to, target);
5274 if (optab_handler (mov_optab, GET_MODE (to))->insn_code
5275 != CODE_FOR_nothing)
5277 /* Make a place for a REG_NOTE and add it. */
5278 insn = emit_move_insn (to, to);
5279 set_unique_reg_note (insn,
5281 gen_rtx_fmt_e (UNSIGNED_FIX,
5289 /* We can't do it with an insn, so use a library call. But first ensure
5290 that the mode of TO is at least as wide as SImode, since those are the
5291 only library calls we know about. */
5293 if (GET_MODE_SIZE (GET_MODE (to)) < GET_MODE_SIZE (SImode))
5295 target = gen_reg_rtx (SImode);
5297 expand_fix (target, from, unsignedp);
5305 convert_optab tab = unsignedp ? ufix_optab : sfix_optab;
5306 libfunc = convert_optab_libfunc (tab, GET_MODE (to), GET_MODE (from));
5307 gcc_assert (libfunc);
5311 value = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
5312 GET_MODE (to), 1, from,
5314 insns = get_insns ();
5317 emit_libcall_block (insns, target, value,
5318 gen_rtx_fmt_e (unsignedp ? UNSIGNED_FIX : FIX,
5319 GET_MODE (to), from));
5324 if (GET_MODE (to) == GET_MODE (target))
5325 emit_move_insn (to, target);
5327 convert_move (to, target, 0);
5331 /* Generate code to convert FROM or TO a fixed-point.
5332 If UINTP is true, either TO or FROM is an unsigned integer.
5333 If SATP is true, we need to saturate the result. */
5336 expand_fixed_convert (rtx to, rtx from, int uintp, int satp)
5338 enum machine_mode to_mode = GET_MODE (to);
5339 enum machine_mode from_mode = GET_MODE (from);
5341 enum rtx_code this_code;
5342 enum insn_code code;
5346 if (to_mode == from_mode)
5348 emit_move_insn (to, from);
5354 tab = satp ? satfractuns_optab : fractuns_optab;
5355 this_code = satp ? UNSIGNED_SAT_FRACT : UNSIGNED_FRACT_CONVERT;
5359 tab = satp ? satfract_optab : fract_optab;
5360 this_code = satp ? SAT_FRACT : FRACT_CONVERT;
5362 code = tab->handlers[to_mode][from_mode].insn_code;
5363 if (code != CODE_FOR_nothing)
5365 emit_unop_insn (code, to, from, this_code);
5369 libfunc = convert_optab_libfunc (tab, to_mode, from_mode);
5370 gcc_assert (libfunc);
5373 value = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST, to_mode,
5374 1, from, from_mode);
5375 insns = get_insns ();
5378 emit_libcall_block (insns, to, value,
5379 gen_rtx_fmt_e (tab->code, to_mode, from));
5382 /* Generate code to convert FROM to fixed point and store in TO. FROM
5383 must be floating point, TO must be signed. Use the conversion optab
5384 TAB to do the conversion. */
5387 expand_sfix_optab (rtx to, rtx from, convert_optab tab)
5389 enum insn_code icode;
5391 enum machine_mode fmode, imode;
5393 /* We first try to find a pair of modes, one real and one integer, at
5394 least as wide as FROM and TO, respectively, in which we can open-code
5395 this conversion. If the integer mode is wider than the mode of TO,
5396 we can do the conversion either signed or unsigned. */
5398 for (fmode = GET_MODE (from); fmode != VOIDmode;
5399 fmode = GET_MODE_WIDER_MODE (fmode))
5400 for (imode = GET_MODE (to); imode != VOIDmode;
5401 imode = GET_MODE_WIDER_MODE (imode))
5403 icode = convert_optab_handler (tab, imode, fmode)->insn_code;
5404 if (icode != CODE_FOR_nothing)
5406 rtx last = get_last_insn ();
5407 if (fmode != GET_MODE (from))
5408 from = convert_to_mode (fmode, from, 0);
5410 if (imode != GET_MODE (to))
5411 target = gen_reg_rtx (imode);
5413 if (!maybe_emit_unop_insn (icode, target, from, UNKNOWN))
5415 delete_insns_since (last);
5419 convert_move (to, target, 0);
5427 /* Report whether we have an instruction to perform the operation
5428 specified by CODE on operands of mode MODE. */
5430 have_insn_for (enum rtx_code code, enum machine_mode mode)
5432 return (code_to_optab[(int) code] != 0
5433 && (optab_handler (code_to_optab[(int) code], mode)->insn_code
5434 != CODE_FOR_nothing));
5437 /* Set all insn_code fields to CODE_FOR_nothing. */
5440 init_insn_codes (void)
5444 for (i = 0; i < (unsigned int) OTI_MAX; i++)
5449 op = &optab_table[i];
5450 for (j = 0; j < NUM_MACHINE_MODES; j++)
5451 optab_handler (op, j)->insn_code = CODE_FOR_nothing;
5453 for (i = 0; i < (unsigned int) COI_MAX; i++)
5458 op = &convert_optab_table[i];
5459 for (j = 0; j < NUM_MACHINE_MODES; j++)
5460 for (k = 0; k < NUM_MACHINE_MODES; k++)
5461 convert_optab_handler (op, j, k)->insn_code = CODE_FOR_nothing;
5465 /* Initialize OP's code to CODE, and write it into the code_to_optab table. */
5467 init_optab (optab op, enum rtx_code code)
5470 code_to_optab[(int) code] = op;
5473 /* Same, but fill in its code as CODE, and do _not_ write it into
5474 the code_to_optab table. */
5476 init_optabv (optab op, enum rtx_code code)
5481 /* Conversion optabs never go in the code_to_optab table. */
5483 init_convert_optab (convert_optab op, enum rtx_code code)
5488 /* Initialize the libfunc fields of an entire group of entries in some
5489 optab. Each entry is set equal to a string consisting of a leading
5490 pair of underscores followed by a generic operation name followed by
5491 a mode name (downshifted to lowercase) followed by a single character
5492 representing the number of operands for the given operation (which is
5493 usually one of the characters '2', '3', or '4').
5495 OPTABLE is the table in which libfunc fields are to be initialized.
5496 OPNAME is the generic (string) name of the operation.
5497 SUFFIX is the character which specifies the number of operands for
5498 the given generic operation.
5499 MODE is the mode to generate for.
5503 gen_libfunc (optab optable, const char *opname, int suffix, enum machine_mode mode)
5505 unsigned opname_len = strlen (opname);
5506 const char *mname = GET_MODE_NAME (mode);
5507 unsigned mname_len = strlen (mname);
5508 char *libfunc_name = XALLOCAVEC (char, 2 + opname_len + mname_len + 1 + 1);
5515 for (q = opname; *q; )
5517 for (q = mname; *q; q++)
5518 *p++ = TOLOWER (*q);
5522 set_optab_libfunc (optable, mode,
5523 ggc_alloc_string (libfunc_name, p - libfunc_name));
5526 /* Like gen_libfunc, but verify that integer operation is involved. */
5529 gen_int_libfunc (optab optable, const char *opname, char suffix,
5530 enum machine_mode mode)
5532 int maxsize = 2 * BITS_PER_WORD;
5534 if (GET_MODE_CLASS (mode) != MODE_INT)
5536 if (maxsize < LONG_LONG_TYPE_SIZE)
5537 maxsize = LONG_LONG_TYPE_SIZE;
5538 if (GET_MODE_CLASS (mode) != MODE_INT
5539 || mode < word_mode || GET_MODE_BITSIZE (mode) > maxsize)
5541 gen_libfunc (optable, opname, suffix, mode);
5544 /* Like gen_libfunc, but verify that FP and set decimal prefix if needed. */
5547 gen_fp_libfunc (optab optable, const char *opname, char suffix,
5548 enum machine_mode mode)
5552 if (GET_MODE_CLASS (mode) == MODE_FLOAT)
5553 gen_libfunc (optable, opname, suffix, mode);
5554 if (DECIMAL_FLOAT_MODE_P (mode))
5556 dec_opname = XALLOCAVEC (char, sizeof (DECIMAL_PREFIX) + strlen (opname));
5557 /* For BID support, change the name to have either a bid_ or dpd_ prefix
5558 depending on the low level floating format used. */
5559 memcpy (dec_opname, DECIMAL_PREFIX, sizeof (DECIMAL_PREFIX) - 1);
5560 strcpy (dec_opname + sizeof (DECIMAL_PREFIX) - 1, opname);
5561 gen_libfunc (optable, dec_opname, suffix, mode);
5565 /* Like gen_libfunc, but verify that fixed-point operation is involved. */
5568 gen_fixed_libfunc (optab optable, const char *opname, char suffix,
5569 enum machine_mode mode)
5571 if (!ALL_FIXED_POINT_MODE_P (mode))
5573 gen_libfunc (optable, opname, suffix, mode);
5576 /* Like gen_libfunc, but verify that signed fixed-point operation is
5580 gen_signed_fixed_libfunc (optab optable, const char *opname, char suffix,
5581 enum machine_mode mode)
5583 if (!SIGNED_FIXED_POINT_MODE_P (mode))
5585 gen_libfunc (optable, opname, suffix, mode);
5588 /* Like gen_libfunc, but verify that unsigned fixed-point operation is
5592 gen_unsigned_fixed_libfunc (optab optable, const char *opname, char suffix,
5593 enum machine_mode mode)
5595 if (!UNSIGNED_FIXED_POINT_MODE_P (mode))
5597 gen_libfunc (optable, opname, suffix, mode);
5600 /* Like gen_libfunc, but verify that FP or INT operation is involved. */
5603 gen_int_fp_libfunc (optab optable, const char *name, char suffix,
5604 enum machine_mode mode)
5606 if (DECIMAL_FLOAT_MODE_P (mode) || GET_MODE_CLASS (mode) == MODE_FLOAT)
5607 gen_fp_libfunc (optable, name, suffix, mode);
5608 if (INTEGRAL_MODE_P (mode))
5609 gen_int_libfunc (optable, name, suffix, mode);
5612 /* Like gen_libfunc, but verify that FP or INT operation is involved
5613 and add 'v' suffix for integer operation. */
5616 gen_intv_fp_libfunc (optab optable, const char *name, char suffix,
5617 enum machine_mode mode)
5619 if (DECIMAL_FLOAT_MODE_P (mode) || GET_MODE_CLASS (mode) == MODE_FLOAT)
5620 gen_fp_libfunc (optable, name, suffix, mode);
5621 if (GET_MODE_CLASS (mode) == MODE_INT)
5623 int len = strlen (name);
5624 char *v_name = XALLOCAVEC (char, len + 2);
5625 strcpy (v_name, name);
5627 v_name[len + 1] = 0;
5628 gen_int_libfunc (optable, v_name, suffix, mode);
5632 /* Like gen_libfunc, but verify that FP or INT or FIXED operation is
5636 gen_int_fp_fixed_libfunc (optab optable, const char *name, char suffix,
5637 enum machine_mode mode)
5639 if (DECIMAL_FLOAT_MODE_P (mode) || GET_MODE_CLASS (mode) == MODE_FLOAT)
5640 gen_fp_libfunc (optable, name, suffix, mode);
5641 if (INTEGRAL_MODE_P (mode))
5642 gen_int_libfunc (optable, name, suffix, mode);
5643 if (ALL_FIXED_POINT_MODE_P (mode))
5644 gen_fixed_libfunc (optable, name, suffix, mode);
5647 /* Like gen_libfunc, but verify that FP or INT or signed FIXED operation is
5651 gen_int_fp_signed_fixed_libfunc (optab optable, const char *name, char suffix,
5652 enum machine_mode mode)
5654 if (DECIMAL_FLOAT_MODE_P (mode) || GET_MODE_CLASS (mode) == MODE_FLOAT)
5655 gen_fp_libfunc (optable, name, suffix, mode);
5656 if (INTEGRAL_MODE_P (mode))
5657 gen_int_libfunc (optable, name, suffix, mode);
5658 if (SIGNED_FIXED_POINT_MODE_P (mode))
5659 gen_signed_fixed_libfunc (optable, name, suffix, mode);
5662 /* Like gen_libfunc, but verify that INT or FIXED operation is
5666 gen_int_fixed_libfunc (optab optable, const char *name, char suffix,
5667 enum machine_mode mode)
5669 if (INTEGRAL_MODE_P (mode))
5670 gen_int_libfunc (optable, name, suffix, mode);
5671 if (ALL_FIXED_POINT_MODE_P (mode))
5672 gen_fixed_libfunc (optable, name, suffix, mode);
5675 /* Like gen_libfunc, but verify that INT or signed FIXED operation is
5679 gen_int_signed_fixed_libfunc (optab optable, const char *name, char suffix,
5680 enum machine_mode mode)
5682 if (INTEGRAL_MODE_P (mode))
5683 gen_int_libfunc (optable, name, suffix, mode);
5684 if (SIGNED_FIXED_POINT_MODE_P (mode))
5685 gen_signed_fixed_libfunc (optable, name, suffix, mode);
5688 /* Like gen_libfunc, but verify that INT or unsigned FIXED operation is
5692 gen_int_unsigned_fixed_libfunc (optab optable, const char *name, char suffix,
5693 enum machine_mode mode)
5695 if (INTEGRAL_MODE_P (mode))
5696 gen_int_libfunc (optable, name, suffix, mode);
5697 if (UNSIGNED_FIXED_POINT_MODE_P (mode))
5698 gen_unsigned_fixed_libfunc (optable, name, suffix, mode);
5701 /* Initialize the libfunc fields of an entire group of entries of an
5702 inter-mode-class conversion optab. The string formation rules are
5703 similar to the ones for init_libfuncs, above, but instead of having
5704 a mode name and an operand count these functions have two mode names
5705 and no operand count. */
5708 gen_interclass_conv_libfunc (convert_optab tab,
5710 enum machine_mode tmode,
5711 enum machine_mode fmode)
5713 size_t opname_len = strlen (opname);
5714 size_t mname_len = 0;
5716 const char *fname, *tname;
5718 char *libfunc_name, *suffix;
5719 char *nondec_name, *dec_name, *nondec_suffix, *dec_suffix;
5722 /* If this is a decimal conversion, add the current BID vs. DPD prefix that
5723 depends on which underlying decimal floating point format is used. */
5724 const size_t dec_len = sizeof (DECIMAL_PREFIX) - 1;
5726 mname_len = strlen (GET_MODE_NAME (tmode)) + strlen (GET_MODE_NAME (fmode));
5728 nondec_name = XALLOCAVEC (char, 2 + opname_len + mname_len + 1 + 1);
5729 nondec_name[0] = '_';
5730 nondec_name[1] = '_';
5731 memcpy (&nondec_name[2], opname, opname_len);
5732 nondec_suffix = nondec_name + opname_len + 2;
5734 dec_name = XALLOCAVEC (char, 2 + dec_len + opname_len + mname_len + 1 + 1);
5737 memcpy (&dec_name[2], DECIMAL_PREFIX, dec_len);
5738 memcpy (&dec_name[2+dec_len], opname, opname_len);
5739 dec_suffix = dec_name + dec_len + opname_len + 2;
5741 fname = GET_MODE_NAME (fmode);
5742 tname = GET_MODE_NAME (tmode);
5744 if (DECIMAL_FLOAT_MODE_P(fmode) || DECIMAL_FLOAT_MODE_P(tmode))
5746 libfunc_name = dec_name;
5747 suffix = dec_suffix;
5751 libfunc_name = nondec_name;
5752 suffix = nondec_suffix;
5756 for (q = fname; *q; p++, q++)
5758 for (q = tname; *q; p++, q++)
5763 set_conv_libfunc (tab, tmode, fmode,
5764 ggc_alloc_string (libfunc_name, p - libfunc_name));
5767 /* Same as gen_interclass_conv_libfunc but verify that we are producing
5768 int->fp conversion. */
5771 gen_int_to_fp_conv_libfunc (convert_optab tab,
5773 enum machine_mode tmode,
5774 enum machine_mode fmode)
5776 if (GET_MODE_CLASS (fmode) != MODE_INT)
5778 if (GET_MODE_CLASS (tmode) != MODE_FLOAT && !DECIMAL_FLOAT_MODE_P (tmode))
5780 gen_interclass_conv_libfunc (tab, opname, tmode, fmode);
5783 /* ufloat_optab is special by using floatun for FP and floatuns decimal fp
5787 gen_ufloat_conv_libfunc (convert_optab tab,
5788 const char *opname ATTRIBUTE_UNUSED,
5789 enum machine_mode tmode,
5790 enum machine_mode fmode)
5792 if (DECIMAL_FLOAT_MODE_P (tmode))
5793 gen_int_to_fp_conv_libfunc (tab, "floatuns", tmode, fmode);
5795 gen_int_to_fp_conv_libfunc (tab, "floatun", tmode, fmode);
5798 /* Same as gen_interclass_conv_libfunc but verify that we are producing
5799 fp->int conversion. */
5802 gen_int_to_fp_nondecimal_conv_libfunc (convert_optab tab,
5804 enum machine_mode tmode,
5805 enum machine_mode fmode)
5807 if (GET_MODE_CLASS (fmode) != MODE_INT)
5809 if (GET_MODE_CLASS (tmode) != MODE_FLOAT)
5811 gen_interclass_conv_libfunc (tab, opname, tmode, fmode);
5814 /* Same as gen_interclass_conv_libfunc but verify that we are producing
5815 fp->int conversion with no decimal floating point involved. */
5818 gen_fp_to_int_conv_libfunc (convert_optab tab,
5820 enum machine_mode tmode,
5821 enum machine_mode fmode)
5823 if (GET_MODE_CLASS (fmode) != MODE_FLOAT && !DECIMAL_FLOAT_MODE_P (fmode))
5825 if (GET_MODE_CLASS (tmode) != MODE_INT)
5827 gen_interclass_conv_libfunc (tab, opname, tmode, fmode);
5830 /* Initialize the libfunc fields of an of an intra-mode-class conversion optab.
5831 The string formation rules are
5832 similar to the ones for init_libfunc, above. */
5835 gen_intraclass_conv_libfunc (convert_optab tab, const char *opname,
5836 enum machine_mode tmode, enum machine_mode fmode)
5838 size_t opname_len = strlen (opname);
5839 size_t mname_len = 0;
5841 const char *fname, *tname;
5843 char *nondec_name, *dec_name, *nondec_suffix, *dec_suffix;
5844 char *libfunc_name, *suffix;
5847 /* If this is a decimal conversion, add the current BID vs. DPD prefix that
5848 depends on which underlying decimal floating point format is used. */
5849 const size_t dec_len = sizeof (DECIMAL_PREFIX) - 1;
5851 mname_len = strlen (GET_MODE_NAME (tmode)) + strlen (GET_MODE_NAME (fmode));
5853 nondec_name = XALLOCAVEC (char, 2 + opname_len + mname_len + 1 + 1);
5854 nondec_name[0] = '_';
5855 nondec_name[1] = '_';
5856 memcpy (&nondec_name[2], opname, opname_len);
5857 nondec_suffix = nondec_name + opname_len + 2;
5859 dec_name = XALLOCAVEC (char, 2 + dec_len + opname_len + mname_len + 1 + 1);
5862 memcpy (&dec_name[2], DECIMAL_PREFIX, dec_len);
5863 memcpy (&dec_name[2 + dec_len], opname, opname_len);
5864 dec_suffix = dec_name + dec_len + opname_len + 2;
5866 fname = GET_MODE_NAME (fmode);
5867 tname = GET_MODE_NAME (tmode);
5869 if (DECIMAL_FLOAT_MODE_P(fmode) || DECIMAL_FLOAT_MODE_P(tmode))
5871 libfunc_name = dec_name;
5872 suffix = dec_suffix;
5876 libfunc_name = nondec_name;
5877 suffix = nondec_suffix;
5881 for (q = fname; *q; p++, q++)
5883 for (q = tname; *q; p++, q++)
5889 set_conv_libfunc (tab, tmode, fmode,
5890 ggc_alloc_string (libfunc_name, p - libfunc_name));
5893 /* Pick proper libcall for trunc_optab. We need to chose if we do
5894 truncation or extension and interclass or intraclass. */
5897 gen_trunc_conv_libfunc (convert_optab tab,
5899 enum machine_mode tmode,
5900 enum machine_mode fmode)
5902 if (GET_MODE_CLASS (tmode) != MODE_FLOAT && !DECIMAL_FLOAT_MODE_P (tmode))
5904 if (GET_MODE_CLASS (fmode) != MODE_FLOAT && !DECIMAL_FLOAT_MODE_P (fmode))
5909 if ((GET_MODE_CLASS (tmode) == MODE_FLOAT && DECIMAL_FLOAT_MODE_P (fmode))
5910 || (GET_MODE_CLASS (fmode) == MODE_FLOAT && DECIMAL_FLOAT_MODE_P (tmode)))
5911 gen_interclass_conv_libfunc (tab, opname, tmode, fmode);
5913 if (GET_MODE_PRECISION (fmode) <= GET_MODE_PRECISION (tmode))
5916 if ((GET_MODE_CLASS (tmode) == MODE_FLOAT
5917 && GET_MODE_CLASS (fmode) == MODE_FLOAT)
5918 || (DECIMAL_FLOAT_MODE_P (fmode) && DECIMAL_FLOAT_MODE_P (tmode)))
5919 gen_intraclass_conv_libfunc (tab, opname, tmode, fmode);
5922 /* Pick proper libcall for extend_optab. We need to chose if we do
5923 truncation or extension and interclass or intraclass. */
5926 gen_extend_conv_libfunc (convert_optab tab,
5927 const char *opname ATTRIBUTE_UNUSED,
5928 enum machine_mode tmode,
5929 enum machine_mode fmode)
5931 if (GET_MODE_CLASS (tmode) != MODE_FLOAT && !DECIMAL_FLOAT_MODE_P (tmode))
5933 if (GET_MODE_CLASS (fmode) != MODE_FLOAT && !DECIMAL_FLOAT_MODE_P (fmode))
5938 if ((GET_MODE_CLASS (tmode) == MODE_FLOAT && DECIMAL_FLOAT_MODE_P (fmode))
5939 || (GET_MODE_CLASS (fmode) == MODE_FLOAT && DECIMAL_FLOAT_MODE_P (tmode)))
5940 gen_interclass_conv_libfunc (tab, opname, tmode, fmode);
5942 if (GET_MODE_PRECISION (fmode) > GET_MODE_PRECISION (tmode))
5945 if ((GET_MODE_CLASS (tmode) == MODE_FLOAT
5946 && GET_MODE_CLASS (fmode) == MODE_FLOAT)
5947 || (DECIMAL_FLOAT_MODE_P (fmode) && DECIMAL_FLOAT_MODE_P (tmode)))
5948 gen_intraclass_conv_libfunc (tab, opname, tmode, fmode);
5951 /* Pick proper libcall for fract_optab. We need to chose if we do
5952 interclass or intraclass. */
5955 gen_fract_conv_libfunc (convert_optab tab,
5957 enum machine_mode tmode,
5958 enum machine_mode fmode)
5962 if (!(ALL_FIXED_POINT_MODE_P (tmode) || ALL_FIXED_POINT_MODE_P (fmode)))
5965 if (GET_MODE_CLASS (tmode) == GET_MODE_CLASS (fmode))
5966 gen_intraclass_conv_libfunc (tab, opname, tmode, fmode);
5968 gen_interclass_conv_libfunc (tab, opname, tmode, fmode);
5971 /* Pick proper libcall for fractuns_optab. */
5974 gen_fractuns_conv_libfunc (convert_optab tab,
5976 enum machine_mode tmode,
5977 enum machine_mode fmode)
5981 /* One mode must be a fixed-point mode, and the other must be an integer
5983 if (!((ALL_FIXED_POINT_MODE_P (tmode) && GET_MODE_CLASS (fmode) == MODE_INT)
5984 || (ALL_FIXED_POINT_MODE_P (fmode)
5985 && GET_MODE_CLASS (tmode) == MODE_INT)))
5988 gen_interclass_conv_libfunc (tab, opname, tmode, fmode);
5991 /* Pick proper libcall for satfract_optab. We need to chose if we do
5992 interclass or intraclass. */
5995 gen_satfract_conv_libfunc (convert_optab tab,
5997 enum machine_mode tmode,
5998 enum machine_mode fmode)
6002 /* TMODE must be a fixed-point mode. */
6003 if (!ALL_FIXED_POINT_MODE_P (tmode))
6006 if (GET_MODE_CLASS (tmode) == GET_MODE_CLASS (fmode))
6007 gen_intraclass_conv_libfunc (tab, opname, tmode, fmode);
6009 gen_interclass_conv_libfunc (tab, opname, tmode, fmode);
6012 /* Pick proper libcall for satfractuns_optab. */
6015 gen_satfractuns_conv_libfunc (convert_optab tab,
6017 enum machine_mode tmode,
6018 enum machine_mode fmode)
6022 /* TMODE must be a fixed-point mode, and FMODE must be an integer mode. */
6023 if (!(ALL_FIXED_POINT_MODE_P (tmode) && GET_MODE_CLASS (fmode) == MODE_INT))
6026 gen_interclass_conv_libfunc (tab, opname, tmode, fmode);
6029 /* A table of previously-created libfuncs, hashed by name. */
6030 static GTY ((param_is (union tree_node))) htab_t libfunc_decls;
6032 /* Hashtable callbacks for libfunc_decls. */
6035 libfunc_decl_hash (const void *entry)
6037 return htab_hash_string (IDENTIFIER_POINTER (DECL_NAME ((const_tree) entry)));
6041 libfunc_decl_eq (const void *entry1, const void *entry2)
6043 return DECL_NAME ((const_tree) entry1) == (const_tree) entry2;
6047 init_one_libfunc (const char *name)
6053 if (libfunc_decls == NULL)
6054 libfunc_decls = htab_create_ggc (37, libfunc_decl_hash,
6055 libfunc_decl_eq, NULL);
6057 /* See if we have already created a libfunc decl for this function. */
6058 id = get_identifier (name);
6059 hash = htab_hash_string (name);
6060 slot = htab_find_slot_with_hash (libfunc_decls, id, hash, INSERT);
6061 decl = (tree) *slot;
6064 /* Create a new decl, so that it can be passed to
6065 targetm.encode_section_info. */
6066 /* ??? We don't have any type information except for this is
6067 a function. Pretend this is "int foo()". */
6068 decl = build_decl (FUNCTION_DECL, get_identifier (name),
6069 build_function_type (integer_type_node, NULL_TREE));
6070 DECL_ARTIFICIAL (decl) = 1;
6071 DECL_EXTERNAL (decl) = 1;
6072 TREE_PUBLIC (decl) = 1;
6074 /* Zap the nonsensical SYMBOL_REF_DECL for this. What we're left with
6075 are the flags assigned by targetm.encode_section_info. */
6076 SET_SYMBOL_REF_DECL (XEXP (DECL_RTL (decl), 0), NULL);
6080 return XEXP (DECL_RTL (decl), 0);
6083 /* Adjust the assembler name of libfunc NAME to ASMSPEC. */
6086 set_user_assembler_libfunc (const char *name, const char *asmspec)
6092 id = get_identifier (name);
6093 hash = htab_hash_string (name);
6094 slot = htab_find_slot_with_hash (libfunc_decls, id, hash, NO_INSERT);
6096 decl = (tree) *slot;
6097 set_user_assembler_name (decl, asmspec);
6098 return XEXP (DECL_RTL (decl), 0);
6101 /* Call this to reset the function entry for one optab (OPTABLE) in mode
6102 MODE to NAME, which should be either 0 or a string constant. */
6104 set_optab_libfunc (optab optable, enum machine_mode mode, const char *name)
6107 struct libfunc_entry e;
6108 struct libfunc_entry **slot;
6109 e.optab = (size_t) (optable - &optab_table[0]);
6114 val = init_one_libfunc (name);
6117 slot = (struct libfunc_entry **) htab_find_slot (libfunc_hash, &e, INSERT);
6119 *slot = GGC_NEW (struct libfunc_entry);
6120 (*slot)->optab = (size_t) (optable - &optab_table[0]);
6121 (*slot)->mode1 = mode;
6122 (*slot)->mode2 = VOIDmode;
6123 (*slot)->libfunc = val;
6126 /* Call this to reset the function entry for one conversion optab
6127 (OPTABLE) from mode FMODE to mode TMODE to NAME, which should be
6128 either 0 or a string constant. */
6130 set_conv_libfunc (convert_optab optable, enum machine_mode tmode,
6131 enum machine_mode fmode, const char *name)
6134 struct libfunc_entry e;
6135 struct libfunc_entry **slot;
6136 e.optab = (size_t) (optable - &convert_optab_table[0]);
6141 val = init_one_libfunc (name);
6144 slot = (struct libfunc_entry **) htab_find_slot (libfunc_hash, &e, INSERT);
6146 *slot = GGC_NEW (struct libfunc_entry);
6147 (*slot)->optab = (size_t) (optable - &convert_optab_table[0]);
6148 (*slot)->mode1 = tmode;
6149 (*slot)->mode2 = fmode;
6150 (*slot)->libfunc = val;
6153 /* Call this to initialize the contents of the optabs
6154 appropriately for the current target machine. */
6160 enum machine_mode int_mode;
6163 libfunc_hash = htab_create_ggc (10, hash_libfunc, eq_libfunc, NULL);
6164 /* Start by initializing all tables to contain CODE_FOR_nothing. */
6166 for (i = 0; i < NUM_RTX_CODE; i++)
6167 setcc_gen_code[i] = CODE_FOR_nothing;
6169 #ifdef HAVE_conditional_move
6170 for (i = 0; i < NUM_MACHINE_MODES; i++)
6171 movcc_gen_code[i] = CODE_FOR_nothing;
6174 for (i = 0; i < NUM_MACHINE_MODES; i++)
6176 vcond_gen_code[i] = CODE_FOR_nothing;
6177 vcondu_gen_code[i] = CODE_FOR_nothing;
6180 #if GCC_VERSION >= 4000
6181 /* We statically initialize the insn_codes with CODE_FOR_nothing. */
6188 init_optab (add_optab, PLUS);
6189 init_optabv (addv_optab, PLUS);
6190 init_optab (sub_optab, MINUS);
6191 init_optabv (subv_optab, MINUS);
6192 init_optab (ssadd_optab, SS_PLUS);
6193 init_optab (usadd_optab, US_PLUS);
6194 init_optab (sssub_optab, SS_MINUS);
6195 init_optab (ussub_optab, US_MINUS);
6196 init_optab (smul_optab, MULT);
6197 init_optab (ssmul_optab, SS_MULT);
6198 init_optab (usmul_optab, US_MULT);
6199 init_optabv (smulv_optab, MULT);
6200 init_optab (smul_highpart_optab, UNKNOWN);
6201 init_optab (umul_highpart_optab, UNKNOWN);
6202 init_optab (smul_widen_optab, UNKNOWN);
6203 init_optab (umul_widen_optab, UNKNOWN);
6204 init_optab (usmul_widen_optab, UNKNOWN);
6205 init_optab (smadd_widen_optab, UNKNOWN);
6206 init_optab (umadd_widen_optab, UNKNOWN);
6207 init_optab (ssmadd_widen_optab, UNKNOWN);
6208 init_optab (usmadd_widen_optab, UNKNOWN);
6209 init_optab (smsub_widen_optab, UNKNOWN);
6210 init_optab (umsub_widen_optab, UNKNOWN);
6211 init_optab (ssmsub_widen_optab, UNKNOWN);
6212 init_optab (usmsub_widen_optab, UNKNOWN);
6213 init_optab (sdiv_optab, DIV);
6214 init_optab (ssdiv_optab, SS_DIV);
6215 init_optab (usdiv_optab, US_DIV);
6216 init_optabv (sdivv_optab, DIV);
6217 init_optab (sdivmod_optab, UNKNOWN);
6218 init_optab (udiv_optab, UDIV);
6219 init_optab (udivmod_optab, UNKNOWN);
6220 init_optab (smod_optab, MOD);
6221 init_optab (umod_optab, UMOD);
6222 init_optab (fmod_optab, UNKNOWN);
6223 init_optab (remainder_optab, UNKNOWN);
6224 init_optab (ftrunc_optab, UNKNOWN);
6225 init_optab (and_optab, AND);
6226 init_optab (ior_optab, IOR);
6227 init_optab (xor_optab, XOR);
6228 init_optab (ashl_optab, ASHIFT);
6229 init_optab (ssashl_optab, SS_ASHIFT);
6230 init_optab (usashl_optab, US_ASHIFT);
6231 init_optab (ashr_optab, ASHIFTRT);
6232 init_optab (lshr_optab, LSHIFTRT);
6233 init_optab (rotl_optab, ROTATE);
6234 init_optab (rotr_optab, ROTATERT);
6235 init_optab (smin_optab, SMIN);
6236 init_optab (smax_optab, SMAX);
6237 init_optab (umin_optab, UMIN);
6238 init_optab (umax_optab, UMAX);
6239 init_optab (pow_optab, UNKNOWN);
6240 init_optab (atan2_optab, UNKNOWN);
6242 /* These three have codes assigned exclusively for the sake of
6244 init_optab (mov_optab, SET);
6245 init_optab (movstrict_optab, STRICT_LOW_PART);
6246 init_optab (cmp_optab, COMPARE);
6248 init_optab (storent_optab, UNKNOWN);
6250 init_optab (ucmp_optab, UNKNOWN);
6251 init_optab (tst_optab, UNKNOWN);
6253 init_optab (eq_optab, EQ);
6254 init_optab (ne_optab, NE);
6255 init_optab (gt_optab, GT);
6256 init_optab (ge_optab, GE);
6257 init_optab (lt_optab, LT);
6258 init_optab (le_optab, LE);
6259 init_optab (unord_optab, UNORDERED);
6261 init_optab (neg_optab, NEG);
6262 init_optab (ssneg_optab, SS_NEG);
6263 init_optab (usneg_optab, US_NEG);
6264 init_optabv (negv_optab, NEG);
6265 init_optab (abs_optab, ABS);
6266 init_optabv (absv_optab, ABS);
6267 init_optab (addcc_optab, UNKNOWN);
6268 init_optab (one_cmpl_optab, NOT);
6269 init_optab (bswap_optab, BSWAP);
6270 init_optab (ffs_optab, FFS);
6271 init_optab (clz_optab, CLZ);
6272 init_optab (ctz_optab, CTZ);
6273 init_optab (popcount_optab, POPCOUNT);
6274 init_optab (parity_optab, PARITY);
6275 init_optab (sqrt_optab, SQRT);
6276 init_optab (floor_optab, UNKNOWN);
6277 init_optab (ceil_optab, UNKNOWN);
6278 init_optab (round_optab, UNKNOWN);
6279 init_optab (btrunc_optab, UNKNOWN);
6280 init_optab (nearbyint_optab, UNKNOWN);
6281 init_optab (rint_optab, UNKNOWN);
6282 init_optab (sincos_optab, UNKNOWN);
6283 init_optab (sin_optab, UNKNOWN);
6284 init_optab (asin_optab, UNKNOWN);
6285 init_optab (cos_optab, UNKNOWN);
6286 init_optab (acos_optab, UNKNOWN);
6287 init_optab (exp_optab, UNKNOWN);
6288 init_optab (exp10_optab, UNKNOWN);
6289 init_optab (exp2_optab, UNKNOWN);
6290 init_optab (expm1_optab, UNKNOWN);
6291 init_optab (ldexp_optab, UNKNOWN);
6292 init_optab (scalb_optab, UNKNOWN);
6293 init_optab (logb_optab, UNKNOWN);
6294 init_optab (ilogb_optab, UNKNOWN);
6295 init_optab (log_optab, UNKNOWN);
6296 init_optab (log10_optab, UNKNOWN);
6297 init_optab (log2_optab, UNKNOWN);
6298 init_optab (log1p_optab, UNKNOWN);
6299 init_optab (tan_optab, UNKNOWN);
6300 init_optab (atan_optab, UNKNOWN);
6301 init_optab (copysign_optab, UNKNOWN);
6302 init_optab (signbit_optab, UNKNOWN);
6304 init_optab (isinf_optab, UNKNOWN);
6306 init_optab (strlen_optab, UNKNOWN);
6307 init_optab (cbranch_optab, UNKNOWN);
6308 init_optab (cmov_optab, UNKNOWN);
6309 init_optab (cstore_optab, UNKNOWN);
6310 init_optab (push_optab, UNKNOWN);
6312 init_optab (reduc_smax_optab, UNKNOWN);
6313 init_optab (reduc_umax_optab, UNKNOWN);
6314 init_optab (reduc_smin_optab, UNKNOWN);
6315 init_optab (reduc_umin_optab, UNKNOWN);
6316 init_optab (reduc_splus_optab, UNKNOWN);
6317 init_optab (reduc_uplus_optab, UNKNOWN);
6319 init_optab (ssum_widen_optab, UNKNOWN);
6320 init_optab (usum_widen_optab, UNKNOWN);
6321 init_optab (sdot_prod_optab, UNKNOWN);
6322 init_optab (udot_prod_optab, UNKNOWN);
6324 init_optab (vec_extract_optab, UNKNOWN);
6325 init_optab (vec_extract_even_optab, UNKNOWN);
6326 init_optab (vec_extract_odd_optab, UNKNOWN);
6327 init_optab (vec_interleave_high_optab, UNKNOWN);
6328 init_optab (vec_interleave_low_optab, UNKNOWN);
6329 init_optab (vec_set_optab, UNKNOWN);
6330 init_optab (vec_init_optab, UNKNOWN);
6331 init_optab (vec_shl_optab, UNKNOWN);
6332 init_optab (vec_shr_optab, UNKNOWN);
6333 init_optab (vec_realign_load_optab, UNKNOWN);
6334 init_optab (movmisalign_optab, UNKNOWN);
6335 init_optab (vec_widen_umult_hi_optab, UNKNOWN);
6336 init_optab (vec_widen_umult_lo_optab, UNKNOWN);
6337 init_optab (vec_widen_smult_hi_optab, UNKNOWN);
6338 init_optab (vec_widen_smult_lo_optab, UNKNOWN);
6339 init_optab (vec_unpacks_hi_optab, UNKNOWN);
6340 init_optab (vec_unpacks_lo_optab, UNKNOWN);
6341 init_optab (vec_unpacku_hi_optab, UNKNOWN);
6342 init_optab (vec_unpacku_lo_optab, UNKNOWN);
6343 init_optab (vec_unpacks_float_hi_optab, UNKNOWN);
6344 init_optab (vec_unpacks_float_lo_optab, UNKNOWN);
6345 init_optab (vec_unpacku_float_hi_optab, UNKNOWN);
6346 init_optab (vec_unpacku_float_lo_optab, UNKNOWN);
6347 init_optab (vec_pack_trunc_optab, UNKNOWN);
6348 init_optab (vec_pack_usat_optab, UNKNOWN);
6349 init_optab (vec_pack_ssat_optab, UNKNOWN);
6350 init_optab (vec_pack_ufix_trunc_optab, UNKNOWN);
6351 init_optab (vec_pack_sfix_trunc_optab, UNKNOWN);
6353 init_optab (powi_optab, UNKNOWN);
6356 init_convert_optab (sext_optab, SIGN_EXTEND);
6357 init_convert_optab (zext_optab, ZERO_EXTEND);
6358 init_convert_optab (trunc_optab, TRUNCATE);
6359 init_convert_optab (sfix_optab, FIX);
6360 init_convert_optab (ufix_optab, UNSIGNED_FIX);
6361 init_convert_optab (sfixtrunc_optab, UNKNOWN);
6362 init_convert_optab (ufixtrunc_optab, UNKNOWN);
6363 init_convert_optab (sfloat_optab, FLOAT);
6364 init_convert_optab (ufloat_optab, UNSIGNED_FLOAT);
6365 init_convert_optab (lrint_optab, UNKNOWN);
6366 init_convert_optab (lround_optab, UNKNOWN);
6367 init_convert_optab (lfloor_optab, UNKNOWN);
6368 init_convert_optab (lceil_optab, UNKNOWN);
6370 init_convert_optab (fract_optab, FRACT_CONVERT);
6371 init_convert_optab (fractuns_optab, UNSIGNED_FRACT_CONVERT);
6372 init_convert_optab (satfract_optab, SAT_FRACT);
6373 init_convert_optab (satfractuns_optab, UNSIGNED_SAT_FRACT);
6375 for (i = 0; i < NUM_MACHINE_MODES; i++)
6377 movmem_optab[i] = CODE_FOR_nothing;
6378 cmpstr_optab[i] = CODE_FOR_nothing;
6379 cmpstrn_optab[i] = CODE_FOR_nothing;
6380 cmpmem_optab[i] = CODE_FOR_nothing;
6381 setmem_optab[i] = CODE_FOR_nothing;
6383 sync_add_optab[i] = CODE_FOR_nothing;
6384 sync_sub_optab[i] = CODE_FOR_nothing;
6385 sync_ior_optab[i] = CODE_FOR_nothing;
6386 sync_and_optab[i] = CODE_FOR_nothing;
6387 sync_xor_optab[i] = CODE_FOR_nothing;
6388 sync_nand_optab[i] = CODE_FOR_nothing;
6389 sync_old_add_optab[i] = CODE_FOR_nothing;
6390 sync_old_sub_optab[i] = CODE_FOR_nothing;
6391 sync_old_ior_optab[i] = CODE_FOR_nothing;
6392 sync_old_and_optab[i] = CODE_FOR_nothing;
6393 sync_old_xor_optab[i] = CODE_FOR_nothing;
6394 sync_old_nand_optab[i] = CODE_FOR_nothing;
6395 sync_new_add_optab[i] = CODE_FOR_nothing;
6396 sync_new_sub_optab[i] = CODE_FOR_nothing;
6397 sync_new_ior_optab[i] = CODE_FOR_nothing;
6398 sync_new_and_optab[i] = CODE_FOR_nothing;
6399 sync_new_xor_optab[i] = CODE_FOR_nothing;
6400 sync_new_nand_optab[i] = CODE_FOR_nothing;
6401 sync_compare_and_swap[i] = CODE_FOR_nothing;
6402 sync_compare_and_swap_cc[i] = CODE_FOR_nothing;
6403 sync_lock_test_and_set[i] = CODE_FOR_nothing;
6404 sync_lock_release[i] = CODE_FOR_nothing;
6406 reload_in_optab[i] = reload_out_optab[i] = CODE_FOR_nothing;
6409 /* Fill in the optabs with the insns we support. */
6412 /* Initialize the optabs with the names of the library functions. */
6413 add_optab->libcall_basename = "add";
6414 add_optab->libcall_suffix = '3';
6415 add_optab->libcall_gen = gen_int_fp_fixed_libfunc;
6416 addv_optab->libcall_basename = "add";
6417 addv_optab->libcall_suffix = '3';
6418 addv_optab->libcall_gen = gen_intv_fp_libfunc;
6419 ssadd_optab->libcall_basename = "ssadd";
6420 ssadd_optab->libcall_suffix = '3';
6421 ssadd_optab->libcall_gen = gen_signed_fixed_libfunc;
6422 usadd_optab->libcall_basename = "usadd";
6423 usadd_optab->libcall_suffix = '3';
6424 usadd_optab->libcall_gen = gen_unsigned_fixed_libfunc;
6425 sub_optab->libcall_basename = "sub";
6426 sub_optab->libcall_suffix = '3';
6427 sub_optab->libcall_gen = gen_int_fp_fixed_libfunc;
6428 subv_optab->libcall_basename = "sub";
6429 subv_optab->libcall_suffix = '3';
6430 subv_optab->libcall_gen = gen_intv_fp_libfunc;
6431 sssub_optab->libcall_basename = "sssub";
6432 sssub_optab->libcall_suffix = '3';
6433 sssub_optab->libcall_gen = gen_signed_fixed_libfunc;
6434 ussub_optab->libcall_basename = "ussub";
6435 ussub_optab->libcall_suffix = '3';
6436 ussub_optab->libcall_gen = gen_unsigned_fixed_libfunc;
6437 smul_optab->libcall_basename = "mul";
6438 smul_optab->libcall_suffix = '3';
6439 smul_optab->libcall_gen = gen_int_fp_fixed_libfunc;
6440 smulv_optab->libcall_basename = "mul";
6441 smulv_optab->libcall_suffix = '3';
6442 smulv_optab->libcall_gen = gen_intv_fp_libfunc;
6443 ssmul_optab->libcall_basename = "ssmul";
6444 ssmul_optab->libcall_suffix = '3';
6445 ssmul_optab->libcall_gen = gen_signed_fixed_libfunc;
6446 usmul_optab->libcall_basename = "usmul";
6447 usmul_optab->libcall_suffix = '3';
6448 usmul_optab->libcall_gen = gen_unsigned_fixed_libfunc;
6449 sdiv_optab->libcall_basename = "div";
6450 sdiv_optab->libcall_suffix = '3';
6451 sdiv_optab->libcall_gen = gen_int_fp_signed_fixed_libfunc;
6452 sdivv_optab->libcall_basename = "divv";
6453 sdivv_optab->libcall_suffix = '3';
6454 sdivv_optab->libcall_gen = gen_int_libfunc;
6455 ssdiv_optab->libcall_basename = "ssdiv";
6456 ssdiv_optab->libcall_suffix = '3';
6457 ssdiv_optab->libcall_gen = gen_signed_fixed_libfunc;
6458 udiv_optab->libcall_basename = "udiv";
6459 udiv_optab->libcall_suffix = '3';
6460 udiv_optab->libcall_gen = gen_int_unsigned_fixed_libfunc;
6461 usdiv_optab->libcall_basename = "usdiv";
6462 usdiv_optab->libcall_suffix = '3';
6463 usdiv_optab->libcall_gen = gen_unsigned_fixed_libfunc;
6464 sdivmod_optab->libcall_basename = "divmod";
6465 sdivmod_optab->libcall_suffix = '4';
6466 sdivmod_optab->libcall_gen = gen_int_libfunc;
6467 udivmod_optab->libcall_basename = "udivmod";
6468 udivmod_optab->libcall_suffix = '4';
6469 udivmod_optab->libcall_gen = gen_int_libfunc;
6470 smod_optab->libcall_basename = "mod";
6471 smod_optab->libcall_suffix = '3';
6472 smod_optab->libcall_gen = gen_int_libfunc;
6473 umod_optab->libcall_basename = "umod";
6474 umod_optab->libcall_suffix = '3';
6475 umod_optab->libcall_gen = gen_int_libfunc;
6476 ftrunc_optab->libcall_basename = "ftrunc";
6477 ftrunc_optab->libcall_suffix = '2';
6478 ftrunc_optab->libcall_gen = gen_fp_libfunc;
6479 and_optab->libcall_basename = "and";
6480 and_optab->libcall_suffix = '3';
6481 and_optab->libcall_gen = gen_int_libfunc;
6482 ior_optab->libcall_basename = "ior";
6483 ior_optab->libcall_suffix = '3';
6484 ior_optab->libcall_gen = gen_int_libfunc;
6485 xor_optab->libcall_basename = "xor";
6486 xor_optab->libcall_suffix = '3';
6487 xor_optab->libcall_gen = gen_int_libfunc;
6488 ashl_optab->libcall_basename = "ashl";
6489 ashl_optab->libcall_suffix = '3';
6490 ashl_optab->libcall_gen = gen_int_fixed_libfunc;
6491 ssashl_optab->libcall_basename = "ssashl";
6492 ssashl_optab->libcall_suffix = '3';
6493 ssashl_optab->libcall_gen = gen_signed_fixed_libfunc;
6494 usashl_optab->libcall_basename = "usashl";
6495 usashl_optab->libcall_suffix = '3';
6496 usashl_optab->libcall_gen = gen_unsigned_fixed_libfunc;
6497 ashr_optab->libcall_basename = "ashr";
6498 ashr_optab->libcall_suffix = '3';
6499 ashr_optab->libcall_gen = gen_int_signed_fixed_libfunc;
6500 lshr_optab->libcall_basename = "lshr";
6501 lshr_optab->libcall_suffix = '3';
6502 lshr_optab->libcall_gen = gen_int_unsigned_fixed_libfunc;
6503 smin_optab->libcall_basename = "min";
6504 smin_optab->libcall_suffix = '3';
6505 smin_optab->libcall_gen = gen_int_fp_libfunc;
6506 smax_optab->libcall_basename = "max";
6507 smax_optab->libcall_suffix = '3';
6508 smax_optab->libcall_gen = gen_int_fp_libfunc;
6509 umin_optab->libcall_basename = "umin";
6510 umin_optab->libcall_suffix = '3';
6511 umin_optab->libcall_gen = gen_int_libfunc;
6512 umax_optab->libcall_basename = "umax";
6513 umax_optab->libcall_suffix = '3';
6514 umax_optab->libcall_gen = gen_int_libfunc;
6515 neg_optab->libcall_basename = "neg";
6516 neg_optab->libcall_suffix = '2';
6517 neg_optab->libcall_gen = gen_int_fp_fixed_libfunc;
6518 ssneg_optab->libcall_basename = "ssneg";
6519 ssneg_optab->libcall_suffix = '2';
6520 ssneg_optab->libcall_gen = gen_signed_fixed_libfunc;
6521 usneg_optab->libcall_basename = "usneg";
6522 usneg_optab->libcall_suffix = '2';
6523 usneg_optab->libcall_gen = gen_unsigned_fixed_libfunc;
6524 negv_optab->libcall_basename = "neg";
6525 negv_optab->libcall_suffix = '2';
6526 negv_optab->libcall_gen = gen_intv_fp_libfunc;
6527 one_cmpl_optab->libcall_basename = "one_cmpl";
6528 one_cmpl_optab->libcall_suffix = '2';
6529 one_cmpl_optab->libcall_gen = gen_int_libfunc;
6530 ffs_optab->libcall_basename = "ffs";
6531 ffs_optab->libcall_suffix = '2';
6532 ffs_optab->libcall_gen = gen_int_libfunc;
6533 clz_optab->libcall_basename = "clz";
6534 clz_optab->libcall_suffix = '2';
6535 clz_optab->libcall_gen = gen_int_libfunc;
6536 ctz_optab->libcall_basename = "ctz";
6537 ctz_optab->libcall_suffix = '2';
6538 ctz_optab->libcall_gen = gen_int_libfunc;
6539 popcount_optab->libcall_basename = "popcount";
6540 popcount_optab->libcall_suffix = '2';
6541 popcount_optab->libcall_gen = gen_int_libfunc;
6542 parity_optab->libcall_basename = "parity";
6543 parity_optab->libcall_suffix = '2';
6544 parity_optab->libcall_gen = gen_int_libfunc;
6546 /* Comparison libcalls for integers MUST come in pairs,
6548 cmp_optab->libcall_basename = "cmp";
6549 cmp_optab->libcall_suffix = '2';
6550 cmp_optab->libcall_gen = gen_int_fp_fixed_libfunc;
6551 ucmp_optab->libcall_basename = "ucmp";
6552 ucmp_optab->libcall_suffix = '2';
6553 ucmp_optab->libcall_gen = gen_int_libfunc;
6555 /* EQ etc are floating point only. */
6556 eq_optab->libcall_basename = "eq";
6557 eq_optab->libcall_suffix = '2';
6558 eq_optab->libcall_gen = gen_fp_libfunc;
6559 ne_optab->libcall_basename = "ne";
6560 ne_optab->libcall_suffix = '2';
6561 ne_optab->libcall_gen = gen_fp_libfunc;
6562 gt_optab->libcall_basename = "gt";
6563 gt_optab->libcall_suffix = '2';
6564 gt_optab->libcall_gen = gen_fp_libfunc;
6565 ge_optab->libcall_basename = "ge";
6566 ge_optab->libcall_suffix = '2';
6567 ge_optab->libcall_gen = gen_fp_libfunc;
6568 lt_optab->libcall_basename = "lt";
6569 lt_optab->libcall_suffix = '2';
6570 lt_optab->libcall_gen = gen_fp_libfunc;
6571 le_optab->libcall_basename = "le";
6572 le_optab->libcall_suffix = '2';
6573 le_optab->libcall_gen = gen_fp_libfunc;
6574 unord_optab->libcall_basename = "unord";
6575 unord_optab->libcall_suffix = '2';
6576 unord_optab->libcall_gen = gen_fp_libfunc;
6578 powi_optab->libcall_basename = "powi";
6579 powi_optab->libcall_suffix = '2';
6580 powi_optab->libcall_gen = gen_fp_libfunc;
6583 sfloat_optab->libcall_basename = "float";
6584 sfloat_optab->libcall_gen = gen_int_to_fp_conv_libfunc;
6585 ufloat_optab->libcall_gen = gen_ufloat_conv_libfunc;
6586 sfix_optab->libcall_basename = "fix";
6587 sfix_optab->libcall_gen = gen_fp_to_int_conv_libfunc;
6588 ufix_optab->libcall_basename = "fixuns";
6589 ufix_optab->libcall_gen = gen_fp_to_int_conv_libfunc;
6590 lrint_optab->libcall_basename = "lrint";
6591 lrint_optab->libcall_gen = gen_int_to_fp_nondecimal_conv_libfunc;
6592 lround_optab->libcall_basename = "lround";
6593 lround_optab->libcall_gen = gen_int_to_fp_nondecimal_conv_libfunc;
6594 lfloor_optab->libcall_basename = "lfloor";
6595 lfloor_optab->libcall_gen = gen_int_to_fp_nondecimal_conv_libfunc;
6596 lceil_optab->libcall_basename = "lceil";
6597 lceil_optab->libcall_gen = gen_int_to_fp_nondecimal_conv_libfunc;
6599 /* trunc_optab is also used for FLOAT_EXTEND. */
6600 sext_optab->libcall_basename = "extend";
6601 sext_optab->libcall_gen = gen_extend_conv_libfunc;
6602 trunc_optab->libcall_basename = "trunc";
6603 trunc_optab->libcall_gen = gen_trunc_conv_libfunc;
6605 /* Conversions for fixed-point modes and other modes. */
6606 fract_optab->libcall_basename = "fract";
6607 fract_optab->libcall_gen = gen_fract_conv_libfunc;
6608 satfract_optab->libcall_basename = "satfract";
6609 satfract_optab->libcall_gen = gen_satfract_conv_libfunc;
6610 fractuns_optab->libcall_basename = "fractuns";
6611 fractuns_optab->libcall_gen = gen_fractuns_conv_libfunc;
6612 satfractuns_optab->libcall_basename = "satfractuns";
6613 satfractuns_optab->libcall_gen = gen_satfractuns_conv_libfunc;
6615 /* The ffs function operates on `int'. Fall back on it if we do not
6616 have a libgcc2 function for that width. */
6617 if (INT_TYPE_SIZE < BITS_PER_WORD)
6619 int_mode = mode_for_size (INT_TYPE_SIZE, MODE_INT, 0);
6620 set_optab_libfunc (ffs_optab, mode_for_size (INT_TYPE_SIZE, MODE_INT, 0),
6624 /* Explicitly initialize the bswap libfuncs since we need them to be
6625 valid for things other than word_mode. */
6626 set_optab_libfunc (bswap_optab, SImode, "__bswapsi2");
6627 set_optab_libfunc (bswap_optab, DImode, "__bswapdi2");
6629 /* Use cabs for double complex abs, since systems generally have cabs.
6630 Don't define any libcall for float complex, so that cabs will be used. */
6631 if (complex_double_type_node)
6632 set_optab_libfunc (abs_optab, TYPE_MODE (complex_double_type_node), "cabs");
6634 abort_libfunc = init_one_libfunc ("abort");
6635 memcpy_libfunc = init_one_libfunc ("memcpy");
6636 memmove_libfunc = init_one_libfunc ("memmove");
6637 memcmp_libfunc = init_one_libfunc ("memcmp");
6638 memset_libfunc = init_one_libfunc ("memset");
6639 setbits_libfunc = init_one_libfunc ("__setbits");
6641 #ifndef DONT_USE_BUILTIN_SETJMP
6642 setjmp_libfunc = init_one_libfunc ("__builtin_setjmp");
6643 longjmp_libfunc = init_one_libfunc ("__builtin_longjmp");
6645 setjmp_libfunc = init_one_libfunc ("setjmp");
6646 longjmp_libfunc = init_one_libfunc ("longjmp");
6648 unwind_sjlj_register_libfunc = init_one_libfunc ("_Unwind_SjLj_Register");
6649 unwind_sjlj_unregister_libfunc
6650 = init_one_libfunc ("_Unwind_SjLj_Unregister");
6652 /* For function entry/exit instrumentation. */
6653 profile_function_entry_libfunc
6654 = init_one_libfunc ("__cyg_profile_func_enter");
6655 profile_function_exit_libfunc
6656 = init_one_libfunc ("__cyg_profile_func_exit");
6658 gcov_flush_libfunc = init_one_libfunc ("__gcov_flush");
6660 if (HAVE_conditional_trap)
6661 trap_rtx = gen_rtx_fmt_ee (EQ, VOIDmode, NULL_RTX, NULL_RTX);
6663 /* Allow the target to add more libcalls or rename some, etc. */
6664 targetm.init_libfuncs ();
6669 /* Print information about the current contents of the optabs on
6673 debug_optab_libfuncs (void)
6679 /* Dump the arithmetic optabs. */
6680 for (i = 0; i != (int) OTI_MAX; i++)
6681 for (j = 0; j < NUM_MACHINE_MODES; ++j)
6686 o = &optab_table[i];
6687 l = optab_libfunc (o, j);
6690 gcc_assert (GET_CODE (l) == SYMBOL_REF);
6691 fprintf (stderr, "%s\t%s:\t%s\n",
6692 GET_RTX_NAME (o->code),
6698 /* Dump the conversion optabs. */
6699 for (i = 0; i < (int) COI_MAX; ++i)
6700 for (j = 0; j < NUM_MACHINE_MODES; ++j)
6701 for (k = 0; k < NUM_MACHINE_MODES; ++k)
6706 o = &convert_optab_table[i];
6707 l = convert_optab_libfunc (o, j, k);
6710 gcc_assert (GET_CODE (l) == SYMBOL_REF);
6711 fprintf (stderr, "%s\t%s\t%s:\t%s\n",
6712 GET_RTX_NAME (o->code),
6721 /* Generate insns to trap with code TCODE if OP1 and OP2 satisfy condition
6722 CODE. Return 0 on failure. */
6725 gen_cond_trap (enum rtx_code code ATTRIBUTE_UNUSED, rtx op1,
6726 rtx op2 ATTRIBUTE_UNUSED, rtx tcode ATTRIBUTE_UNUSED)
6728 enum machine_mode mode = GET_MODE (op1);
6729 enum insn_code icode;
6732 if (!HAVE_conditional_trap)
6735 if (mode == VOIDmode)
6738 icode = optab_handler (cmp_optab, mode)->insn_code;
6739 if (icode == CODE_FOR_nothing)
6743 op1 = prepare_operand (icode, op1, 0, mode, mode, 0);
6744 op2 = prepare_operand (icode, op2, 1, mode, mode, 0);
6750 emit_insn (GEN_FCN (icode) (op1, op2));
6752 PUT_CODE (trap_rtx, code);
6753 gcc_assert (HAVE_conditional_trap);
6754 insn = gen_conditional_trap (trap_rtx, tcode);
6758 insn = get_insns ();
6765 /* Return rtx code for TCODE. Use UNSIGNEDP to select signed
6766 or unsigned operation code. */
6768 static enum rtx_code
6769 get_rtx_code (enum tree_code tcode, bool unsignedp)
6781 code = unsignedp ? LTU : LT;
6784 code = unsignedp ? LEU : LE;
6787 code = unsignedp ? GTU : GT;
6790 code = unsignedp ? GEU : GE;
6793 case UNORDERED_EXPR:
6824 /* Return comparison rtx for COND. Use UNSIGNEDP to select signed or
6825 unsigned operators. Do not generate compare instruction. */
6828 vector_compare_rtx (tree cond, bool unsignedp, enum insn_code icode)
6830 enum rtx_code rcode;
6832 rtx rtx_op0, rtx_op1;
6834 /* This is unlikely. While generating VEC_COND_EXPR, auto vectorizer
6835 ensures that condition is a relational operation. */
6836 gcc_assert (COMPARISON_CLASS_P (cond));
6838 rcode = get_rtx_code (TREE_CODE (cond), unsignedp);
6839 t_op0 = TREE_OPERAND (cond, 0);
6840 t_op1 = TREE_OPERAND (cond, 1);
6842 /* Expand operands. */
6843 rtx_op0 = expand_expr (t_op0, NULL_RTX, TYPE_MODE (TREE_TYPE (t_op0)),
6845 rtx_op1 = expand_expr (t_op1, NULL_RTX, TYPE_MODE (TREE_TYPE (t_op1)),
6848 if (!insn_data[icode].operand[4].predicate (rtx_op0, GET_MODE (rtx_op0))
6849 && GET_MODE (rtx_op0) != VOIDmode)
6850 rtx_op0 = force_reg (GET_MODE (rtx_op0), rtx_op0);
6852 if (!insn_data[icode].operand[5].predicate (rtx_op1, GET_MODE (rtx_op1))
6853 && GET_MODE (rtx_op1) != VOIDmode)
6854 rtx_op1 = force_reg (GET_MODE (rtx_op1), rtx_op1);
6856 return gen_rtx_fmt_ee (rcode, VOIDmode, rtx_op0, rtx_op1);
6859 /* Return insn code for VEC_COND_EXPR EXPR. */
6861 static inline enum insn_code
6862 get_vcond_icode (tree expr, enum machine_mode mode)
6864 enum insn_code icode = CODE_FOR_nothing;
6866 if (TYPE_UNSIGNED (TREE_TYPE (expr)))
6867 icode = vcondu_gen_code[mode];
6869 icode = vcond_gen_code[mode];
6873 /* Return TRUE iff, appropriate vector insns are available
6874 for vector cond expr expr in VMODE mode. */
6877 expand_vec_cond_expr_p (tree expr, enum machine_mode vmode)
6879 if (get_vcond_icode (expr, vmode) == CODE_FOR_nothing)
6884 /* Generate insns for VEC_COND_EXPR. */
6887 expand_vec_cond_expr (tree vec_cond_expr, rtx target)
6889 enum insn_code icode;
6890 rtx comparison, rtx_op1, rtx_op2, cc_op0, cc_op1;
6891 enum machine_mode mode = TYPE_MODE (TREE_TYPE (vec_cond_expr));
6892 bool unsignedp = TYPE_UNSIGNED (TREE_TYPE (vec_cond_expr));
6894 icode = get_vcond_icode (vec_cond_expr, mode);
6895 if (icode == CODE_FOR_nothing)
6898 if (!target || !insn_data[icode].operand[0].predicate (target, mode))
6899 target = gen_reg_rtx (mode);
6901 /* Get comparison rtx. First expand both cond expr operands. */
6902 comparison = vector_compare_rtx (TREE_OPERAND (vec_cond_expr, 0),
6904 cc_op0 = XEXP (comparison, 0);
6905 cc_op1 = XEXP (comparison, 1);
6906 /* Expand both operands and force them in reg, if required. */
6907 rtx_op1 = expand_normal (TREE_OPERAND (vec_cond_expr, 1));
6908 if (!insn_data[icode].operand[1].predicate (rtx_op1, mode)
6909 && mode != VOIDmode)
6910 rtx_op1 = force_reg (mode, rtx_op1);
6912 rtx_op2 = expand_normal (TREE_OPERAND (vec_cond_expr, 2));
6913 if (!insn_data[icode].operand[2].predicate (rtx_op2, mode)
6914 && mode != VOIDmode)
6915 rtx_op2 = force_reg (mode, rtx_op2);
6917 /* Emit instruction! */
6918 emit_insn (GEN_FCN (icode) (target, rtx_op1, rtx_op2,
6919 comparison, cc_op0, cc_op1));
6925 /* This is an internal subroutine of the other compare_and_swap expanders.
6926 MEM, OLD_VAL and NEW_VAL are as you'd expect for a compare-and-swap
6927 operation. TARGET is an optional place to store the value result of
6928 the operation. ICODE is the particular instruction to expand. Return
6929 the result of the operation. */
6932 expand_val_compare_and_swap_1 (rtx mem, rtx old_val, rtx new_val,
6933 rtx target, enum insn_code icode)
6935 enum machine_mode mode = GET_MODE (mem);
6938 if (!target || !insn_data[icode].operand[0].predicate (target, mode))
6939 target = gen_reg_rtx (mode);
6941 if (GET_MODE (old_val) != VOIDmode && GET_MODE (old_val) != mode)
6942 old_val = convert_modes (mode, GET_MODE (old_val), old_val, 1);
6943 if (!insn_data[icode].operand[2].predicate (old_val, mode))
6944 old_val = force_reg (mode, old_val);
6946 if (GET_MODE (new_val) != VOIDmode && GET_MODE (new_val) != mode)
6947 new_val = convert_modes (mode, GET_MODE (new_val), new_val, 1);
6948 if (!insn_data[icode].operand[3].predicate (new_val, mode))
6949 new_val = force_reg (mode, new_val);
6951 insn = GEN_FCN (icode) (target, mem, old_val, new_val);
6952 if (insn == NULL_RTX)
6959 /* Expand a compare-and-swap operation and return its value. */
6962 expand_val_compare_and_swap (rtx mem, rtx old_val, rtx new_val, rtx target)
6964 enum machine_mode mode = GET_MODE (mem);
6965 enum insn_code icode = sync_compare_and_swap[mode];
6967 if (icode == CODE_FOR_nothing)
6970 return expand_val_compare_and_swap_1 (mem, old_val, new_val, target, icode);
6973 /* Expand a compare-and-swap operation and store true into the result if
6974 the operation was successful and false otherwise. Return the result.
6975 Unlike other routines, TARGET is not optional. */
6978 expand_bool_compare_and_swap (rtx mem, rtx old_val, rtx new_val, rtx target)
6980 enum machine_mode mode = GET_MODE (mem);
6981 enum insn_code icode;
6982 rtx subtarget, label0, label1;
6984 /* If the target supports a compare-and-swap pattern that simultaneously
6985 sets some flag for success, then use it. Otherwise use the regular
6986 compare-and-swap and follow that immediately with a compare insn. */
6987 icode = sync_compare_and_swap_cc[mode];
6991 subtarget = expand_val_compare_and_swap_1 (mem, old_val, new_val,
6993 if (subtarget != NULL_RTX)
6997 case CODE_FOR_nothing:
6998 icode = sync_compare_and_swap[mode];
6999 if (icode == CODE_FOR_nothing)
7002 /* Ensure that if old_val == mem, that we're not comparing
7003 against an old value. */
7004 if (MEM_P (old_val))
7005 old_val = force_reg (mode, old_val);
7007 subtarget = expand_val_compare_and_swap_1 (mem, old_val, new_val,
7009 if (subtarget == NULL_RTX)
7012 emit_cmp_insn (subtarget, old_val, EQ, const0_rtx, mode, true);
7015 /* If the target has a sane STORE_FLAG_VALUE, then go ahead and use a
7016 setcc instruction from the beginning. We don't work too hard here,
7017 but it's nice to not be stupid about initial code gen either. */
7018 if (STORE_FLAG_VALUE == 1)
7020 icode = setcc_gen_code[EQ];
7021 if (icode != CODE_FOR_nothing)
7023 enum machine_mode cmode = insn_data[icode].operand[0].mode;
7027 if (!insn_data[icode].operand[0].predicate (target, cmode))
7028 subtarget = gen_reg_rtx (cmode);
7030 insn = GEN_FCN (icode) (subtarget);
7034 if (GET_MODE (target) != GET_MODE (subtarget))
7036 convert_move (target, subtarget, 1);
7044 /* Without an appropriate setcc instruction, use a set of branches to
7045 get 1 and 0 stored into target. Presumably if the target has a
7046 STORE_FLAG_VALUE that isn't 1, then this will get cleaned up by ifcvt. */
7048 label0 = gen_label_rtx ();
7049 label1 = gen_label_rtx ();
7051 emit_jump_insn (bcc_gen_fctn[EQ] (label0));
7052 emit_move_insn (target, const0_rtx);
7053 emit_jump_insn (gen_jump (label1));
7055 emit_label (label0);
7056 emit_move_insn (target, const1_rtx);
7057 emit_label (label1);
7062 /* This is a helper function for the other atomic operations. This function
7063 emits a loop that contains SEQ that iterates until a compare-and-swap
7064 operation at the end succeeds. MEM is the memory to be modified. SEQ is
7065 a set of instructions that takes a value from OLD_REG as an input and
7066 produces a value in NEW_REG as an output. Before SEQ, OLD_REG will be
7067 set to the current contents of MEM. After SEQ, a compare-and-swap will
7068 attempt to update MEM with NEW_REG. The function returns true when the
7069 loop was generated successfully. */
7072 expand_compare_and_swap_loop (rtx mem, rtx old_reg, rtx new_reg, rtx seq)
7074 enum machine_mode mode = GET_MODE (mem);
7075 enum insn_code icode;
7076 rtx label, cmp_reg, subtarget;
7078 /* The loop we want to generate looks like
7084 cmp_reg = compare-and-swap(mem, old_reg, new_reg)
7085 if (cmp_reg != old_reg)
7088 Note that we only do the plain load from memory once. Subsequent
7089 iterations use the value loaded by the compare-and-swap pattern. */
7091 label = gen_label_rtx ();
7092 cmp_reg = gen_reg_rtx (mode);
7094 emit_move_insn (cmp_reg, mem);
7096 emit_move_insn (old_reg, cmp_reg);
7100 /* If the target supports a compare-and-swap pattern that simultaneously
7101 sets some flag for success, then use it. Otherwise use the regular
7102 compare-and-swap and follow that immediately with a compare insn. */
7103 icode = sync_compare_and_swap_cc[mode];
7107 subtarget = expand_val_compare_and_swap_1 (mem, old_reg, new_reg,
7109 if (subtarget != NULL_RTX)
7111 gcc_assert (subtarget == cmp_reg);
7116 case CODE_FOR_nothing:
7117 icode = sync_compare_and_swap[mode];
7118 if (icode == CODE_FOR_nothing)
7121 subtarget = expand_val_compare_and_swap_1 (mem, old_reg, new_reg,
7123 if (subtarget == NULL_RTX)
7125 if (subtarget != cmp_reg)
7126 emit_move_insn (cmp_reg, subtarget);
7128 emit_cmp_insn (cmp_reg, old_reg, EQ, const0_rtx, mode, true);
7131 /* ??? Mark this jump predicted not taken? */
7132 emit_jump_insn (bcc_gen_fctn[NE] (label));
7137 /* This function generates the atomic operation MEM CODE= VAL. In this
7138 case, we do not care about any resulting value. Returns NULL if we
7139 cannot generate the operation. */
7142 expand_sync_operation (rtx mem, rtx val, enum rtx_code code)
7144 enum machine_mode mode = GET_MODE (mem);
7145 enum insn_code icode;
7148 /* Look to see if the target supports the operation directly. */
7152 icode = sync_add_optab[mode];
7155 icode = sync_ior_optab[mode];
7158 icode = sync_xor_optab[mode];
7161 icode = sync_and_optab[mode];
7164 icode = sync_nand_optab[mode];
7168 icode = sync_sub_optab[mode];
7169 if (icode == CODE_FOR_nothing || CONST_INT_P (val))
7171 icode = sync_add_optab[mode];
7172 if (icode != CODE_FOR_nothing)
7174 val = expand_simple_unop (mode, NEG, val, NULL_RTX, 1);
7184 /* Generate the direct operation, if present. */
7185 if (icode != CODE_FOR_nothing)
7187 if (GET_MODE (val) != VOIDmode && GET_MODE (val) != mode)
7188 val = convert_modes (mode, GET_MODE (val), val, 1);
7189 if (!insn_data[icode].operand[1].predicate (val, mode))
7190 val = force_reg (mode, val);
7192 insn = GEN_FCN (icode) (mem, val);
7200 /* Failing that, generate a compare-and-swap loop in which we perform the
7201 operation with normal arithmetic instructions. */
7202 if (sync_compare_and_swap[mode] != CODE_FOR_nothing)
7204 rtx t0 = gen_reg_rtx (mode), t1;
7211 t1 = expand_simple_binop (mode, AND, t1, val, NULL_RTX,
7212 true, OPTAB_LIB_WIDEN);
7213 t1 = expand_simple_unop (mode, code, t1, NULL_RTX, true);
7216 t1 = expand_simple_binop (mode, code, t1, val, NULL_RTX,
7217 true, OPTAB_LIB_WIDEN);
7218 insn = get_insns ();
7221 if (t1 != NULL && expand_compare_and_swap_loop (mem, t0, t1, insn))
7228 /* This function generates the atomic operation MEM CODE= VAL. In this
7229 case, we do care about the resulting value: if AFTER is true then
7230 return the value MEM holds after the operation, if AFTER is false
7231 then return the value MEM holds before the operation. TARGET is an
7232 optional place for the result value to be stored. */
7235 expand_sync_fetch_operation (rtx mem, rtx val, enum rtx_code code,
7236 bool after, rtx target)
7238 enum machine_mode mode = GET_MODE (mem);
7239 enum insn_code old_code, new_code, icode;
7243 /* Look to see if the target supports the operation directly. */
7247 old_code = sync_old_add_optab[mode];
7248 new_code = sync_new_add_optab[mode];
7251 old_code = sync_old_ior_optab[mode];
7252 new_code = sync_new_ior_optab[mode];
7255 old_code = sync_old_xor_optab[mode];
7256 new_code = sync_new_xor_optab[mode];
7259 old_code = sync_old_and_optab[mode];
7260 new_code = sync_new_and_optab[mode];
7263 old_code = sync_old_nand_optab[mode];
7264 new_code = sync_new_nand_optab[mode];
7268 old_code = sync_old_sub_optab[mode];
7269 new_code = sync_new_sub_optab[mode];
7270 if ((old_code == CODE_FOR_nothing && new_code == CODE_FOR_nothing)
7271 || CONST_INT_P (val))
7273 old_code = sync_old_add_optab[mode];
7274 new_code = sync_new_add_optab[mode];
7275 if (old_code != CODE_FOR_nothing || new_code != CODE_FOR_nothing)
7277 val = expand_simple_unop (mode, NEG, val, NULL_RTX, 1);
7287 /* If the target does supports the proper new/old operation, great. But
7288 if we only support the opposite old/new operation, check to see if we
7289 can compensate. In the case in which the old value is supported, then
7290 we can always perform the operation again with normal arithmetic. In
7291 the case in which the new value is supported, then we can only handle
7292 this in the case the operation is reversible. */
7297 if (icode == CODE_FOR_nothing)
7300 if (icode != CODE_FOR_nothing)
7307 if (icode == CODE_FOR_nothing
7308 && (code == PLUS || code == MINUS || code == XOR))
7311 if (icode != CODE_FOR_nothing)
7316 /* If we found something supported, great. */
7317 if (icode != CODE_FOR_nothing)
7319 if (!target || !insn_data[icode].operand[0].predicate (target, mode))
7320 target = gen_reg_rtx (mode);
7322 if (GET_MODE (val) != VOIDmode && GET_MODE (val) != mode)
7323 val = convert_modes (mode, GET_MODE (val), val, 1);
7324 if (!insn_data[icode].operand[2].predicate (val, mode))
7325 val = force_reg (mode, val);
7327 insn = GEN_FCN (icode) (target, mem, val);
7332 /* If we need to compensate for using an operation with the
7333 wrong return value, do so now. */
7340 else if (code == MINUS)
7346 target = expand_simple_binop (mode, AND, target, val,
7349 target = expand_simple_unop (mode, code, target,
7353 target = expand_simple_binop (mode, code, target, val,
7362 /* Failing that, generate a compare-and-swap loop in which we perform the
7363 operation with normal arithmetic instructions. */
7364 if (sync_compare_and_swap[mode] != CODE_FOR_nothing)
7366 rtx t0 = gen_reg_rtx (mode), t1;
7368 if (!target || !register_operand (target, mode))
7369 target = gen_reg_rtx (mode);
7374 emit_move_insn (target, t0);
7378 t1 = expand_simple_binop (mode, AND, t1, val, NULL_RTX,
7379 true, OPTAB_LIB_WIDEN);
7380 t1 = expand_simple_unop (mode, code, t1, NULL_RTX, true);
7383 t1 = expand_simple_binop (mode, code, t1, val, NULL_RTX,
7384 true, OPTAB_LIB_WIDEN);
7386 emit_move_insn (target, t1);
7388 insn = get_insns ();
7391 if (t1 != NULL && expand_compare_and_swap_loop (mem, t0, t1, insn))
7398 /* This function expands a test-and-set operation. Ideally we atomically
7399 store VAL in MEM and return the previous value in MEM. Some targets
7400 may not support this operation and only support VAL with the constant 1;
7401 in this case while the return value will be 0/1, but the exact value
7402 stored in MEM is target defined. TARGET is an option place to stick
7403 the return value. */
7406 expand_sync_lock_test_and_set (rtx mem, rtx val, rtx target)
7408 enum machine_mode mode = GET_MODE (mem);
7409 enum insn_code icode;
7412 /* If the target supports the test-and-set directly, great. */
7413 icode = sync_lock_test_and_set[mode];
7414 if (icode != CODE_FOR_nothing)
7416 if (!target || !insn_data[icode].operand[0].predicate (target, mode))
7417 target = gen_reg_rtx (mode);
7419 if (GET_MODE (val) != VOIDmode && GET_MODE (val) != mode)
7420 val = convert_modes (mode, GET_MODE (val), val, 1);
7421 if (!insn_data[icode].operand[2].predicate (val, mode))
7422 val = force_reg (mode, val);
7424 insn = GEN_FCN (icode) (target, mem, val);
7432 /* Otherwise, use a compare-and-swap loop for the exchange. */
7433 if (sync_compare_and_swap[mode] != CODE_FOR_nothing)
7435 if (!target || !register_operand (target, mode))
7436 target = gen_reg_rtx (mode);
7437 if (GET_MODE (val) != VOIDmode && GET_MODE (val) != mode)
7438 val = convert_modes (mode, GET_MODE (val), val, 1);
7439 if (expand_compare_and_swap_loop (mem, target, val, NULL_RTX))
7446 #include "gt-optabs.h"