1 /* Expand the basic unary and binary arithmetic operations, for GNU compiler.
2 Copyright (C) 1987, 1988, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
3 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007
4 Free Software Foundation, Inc.
6 This file is part of GCC.
8 GCC is free software; you can redistribute it and/or modify it under
9 the terms of the GNU General Public License as published by the Free
10 Software Foundation; either version 3, or (at your option) any later
13 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
14 WARRANTY; without even the implied warranty of MERCHANTABILITY or
15 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING3. If not see
20 <http://www.gnu.org/licenses/>. */
25 #include "coretypes.h"
29 /* Include insn-config.h before expr.h so that HAVE_conditional_move
30 is properly defined. */
31 #include "insn-config.h"
45 #include "basic-block.h"
48 /* Each optab contains info on how this target machine
49 can perform a particular operation
50 for all sizes and kinds of operands.
52 The operation to be performed is often specified
53 by passing one of these optabs as an argument.
55 See expr.h for documentation of these optabs. */
57 optab optab_table[OTI_MAX];
/* Table of libcall rtxes, indexed by LTI_* code -- presumably the libcall
   symbol used when no insn pattern exists; TODO confirm against expr.h. */
59 rtx libfunc_table[LTI_MAX];
61 /* Tables of patterns for converting one mode to another. */
62 convert_optab convert_optab_table[COI_MAX];
64 /* Contains the optab used for each rtx code. */
65 optab code_to_optab[NUM_RTX_CODE + 1];
67 /* Indexed by the rtx-code for a conditional (eg. EQ, LT,...)
68 gives the gen_function to make a branch to test that condition. */
70 rtxfun bcc_gen_fctn[NUM_RTX_CODE];
72 /* Indexed by the rtx-code for a conditional (eg. EQ, LT,...)
73 gives the insn code to make a store-condition insn
74 to test that condition. */
76 enum insn_code setcc_gen_code[NUM_RTX_CODE];
78 #ifdef HAVE_conditional_move
79 /* Indexed by the machine mode, gives the insn code to make a conditional
80 move insn. This is not indexed by the rtx-code like bcc_gen_fctn and
81 setcc_gen_code to cut down on the number of named patterns. Consider a day
82 when a lot more rtx codes are conditional (eg: for the ARM). */
84 enum insn_code movcc_gen_code[NUM_MACHINE_MODES];
/* NOTE(review): the matching #endif for HAVE_conditional_move is not visible
   in this extract -- presumably dropped; confirm against the original file. */
87 /* Indexed by the machine mode, gives the insn code for vector conditional
90 enum insn_code vcond_gen_code[NUM_MACHINE_MODES];
91 enum insn_code vcondu_gen_code[NUM_MACHINE_MODES];
93 /* The insn generating function can not take an rtx_code argument.
94 TRAP_RTX is used as an rtx argument. Its code is replaced with
95 the code to be used in the trap insn and all other fields are ignored. */
96 static GTY(()) rtx trap_rtx;
98 static void prepare_float_lib_cmp (rtx *, rtx *, enum rtx_code *,
99 enum machine_mode *, int *);
100 static rtx expand_unop_direct (enum machine_mode, optab, rtx, rtx, int);
102 /* Current libcall id. It doesn't matter what these are, as long
103 as they are unique to each libcall that is emitted. */
104 static HOST_WIDE_INT libcall_id = 0;
106 /* Debug facility for use in GDB. */
107 void debug_optab_libfuncs (void);
/* Fallback stubs when the target provides no conditional-trap pattern;
   gen_conditional_trap must then never actually be reached. */
109 #ifndef HAVE_conditional_trap
110 #define HAVE_conditional_trap 0
111 #define gen_conditional_trap(a,b) (gcc_unreachable (), NULL_RTX)
/* NOTE(review): the #endif closing this #ifndef is not visible here. */
114 /* Prefixes for the current version of decimal floating point (BID vs. DPD) */
115 #if ENABLE_DECIMAL_BID_FORMAT
116 #define DECIMAL_PREFIX "bid_"
/* NOTE(review): an #else presumably separated these two definitions in the
   original; it is missing from this extract, as is the closing #endif. */
118 #define DECIMAL_PREFIX "dpd_"
122 /* Info about libfunc. We use same hashtable for normal optabs and conversion
123 optab. In the first case mode2 is unused. */
124 struct libfunc_entry GTY(())
/* NOTE(review): the struct's opening brace and its optab/libfunc members are
   not visible in this extract; only the mode fields appear below. */
127 enum machine_mode mode1, mode2;
131 /* Hash table used to convert declarations into nodes. */
132 static GTY((param_is (struct libfunc_entry))) htab_t libfunc_hash;
134 /* Used for attribute_hash. */
/* Hash function for libfunc_hash: folds the two machine modes of a
   libfunc_entry into one value (the full expression is cut off below --
   presumably the optab index is mixed in as well; confirm upstream). */
137 hash_libfunc (const void *p)
139 const struct libfunc_entry *const e = (const struct libfunc_entry *) p;
141 return (((int) e->mode1 + (int) e->mode2 * NUM_MACHINE_MODES)
145 /* Used for optab_hash. */
/* Equality callback for libfunc_hash: two entries describe the same
   libfunc slot iff they agree on the optab index and both modes. */
148 eq_libfunc (const void *p, const void *q)
150 const struct libfunc_entry *const e1 = (const struct libfunc_entry *) p;
151 const struct libfunc_entry *const e2 = (const struct libfunc_entry *) q;
153 return (e1->optab == e2->optab
154 && e1->mode1 == e2->mode1
155 && e1->mode2 == e2->mode2);
158 /* Return libfunc corresponding operation defined by OPTAB converting
159 from MODE2 to MODE1. Trigger lazy initialization if needed, return NULL
160 if no libfunc is available. */
162 convert_optab_libfunc (convert_optab optab, enum machine_mode mode1,
163 enum machine_mode mode2)
165 struct libfunc_entry e;
166 struct libfunc_entry **slot;
/* Key the hash lookup on this optab's index within convert_optab_table
   (mode1/mode2 assignments are not visible in this extract). */
168 e.optab = (size_t) (convert_optab_table[0] - optab);
171 slot = (struct libfunc_entry **) htab_find_slot (libfunc_hash, &e, NO_INSERT);
/* Miss: lazily generate the libfunc via the optab's generator hook,
   then repeat the lookup. */
174 if (optab->libcall_gen)
176 optab->libcall_gen (optab, optab->libcall_basename, mode1, mode2);
177 slot = (struct libfunc_entry **) htab_find_slot (libfunc_hash, &e, NO_INSERT);
179 return (*slot)->libfunc;
/* Hit path (the intervening NULL-return branches are missing from
   this extract). */
185 return (*slot)->libfunc;
188 /* Return libfunc corresponding operation defined by OPTAB in MODE.
189 Trigger lazy initialization if needed, return NULL if no libfunc is
192 optab_libfunc (optab optab, enum machine_mode mode)
194 struct libfunc_entry e;
195 struct libfunc_entry **slot;
/* Key the lookup on this optab's index within optab_table (the mode1/mode2
   assignments are not visible in this extract). */
197 e.optab = (size_t) (optab_table[0] - optab);
200 slot = (struct libfunc_entry **) htab_find_slot (libfunc_hash, &e, NO_INSERT);
/* Miss: lazily generate the libfunc (note the non-convert variant passes a
   libcall_suffix and a single mode), then repeat the lookup. */
203 if (optab->libcall_gen)
205 optab->libcall_gen (optab, optab->libcall_basename,
206 optab->libcall_suffix, mode);
207 slot = (struct libfunc_entry **) htab_find_slot (libfunc_hash,
210 return (*slot)->libfunc;
/* Hit path (intervening lines are missing from this extract). */
216 return (*slot)->libfunc;
220 /* Add a REG_EQUAL note to the last insn in INSNS. TARGET is being set to
221 the result of operation CODE applied to OP0 (and OP1 if it is a binary
224 If the last insn does not set TARGET, don't do anything, but return 1.
226 If a previous insn sets TARGET and TARGET is one of OP0 or OP1,
227 don't add the REG_EQUAL note but return 0. Our caller can then try
228 again, ensuring that TARGET is not one of the operands. */
231 add_equal_note (rtx insns, rtx target, enum rtx_code code, rtx op0, rtx op1)
233 rtx last_insn, insn, set;
236 gcc_assert (insns && INSN_P (insns) && NEXT_INSN (insns));
/* Only arithmetic/compare/unary codes get a note (the early-return for
   other classes is not visible in this extract). */
238 if (GET_RTX_CLASS (code) != RTX_COMM_ARITH
239 && GET_RTX_CLASS (code) != RTX_BIN_ARITH
240 && GET_RTX_CLASS (code) != RTX_COMM_COMPARE
241 && GET_RTX_CLASS (code) != RTX_COMPARE
242 && GET_RTX_CLASS (code) != RTX_UNARY)
245 if (GET_CODE (target) == ZERO_EXTRACT)
/* Walk to the last insn of the sequence. */
248 for (last_insn = insns;
249 NEXT_INSN (last_insn) != NULL_RTX;
250 last_insn = NEXT_INSN (last_insn))
253 set = single_set (last_insn);
257 if (! rtx_equal_p (SET_DEST (set), target)
258 /* For a STRICT_LOW_PART, the REG_NOTE applies to what is inside it. */
259 && (GET_CODE (SET_DEST (set)) != STRICT_LOW_PART
260 || ! rtx_equal_p (XEXP (SET_DEST (set), 0), target)))
263 /* If TARGET is in OP0 or OP1, check if anything in SEQ sets TARGET
264 besides the last insn. */
265 if (reg_overlap_mentioned_p (target, op0)
266 || (op1 && reg_overlap_mentioned_p (target, op1)))
268 insn = PREV_INSN (last_insn);
269 while (insn != NULL_RTX)
271 if (reg_set_p (target, insn))
/* NOTE(review): the return-0 body of this branch is missing from the
   extract. */
274 insn = PREV_INSN (insn);
/* Build the (CODE op0 [op1]) expression to record as the REG_EQUAL note. */
278 if (GET_RTX_CLASS (code) == RTX_UNARY)
279 note = gen_rtx_fmt_e (code, GET_MODE (target), copy_rtx (op0));
281 note = gen_rtx_fmt_ee (code, GET_MODE (target), copy_rtx (op0), copy_rtx (op1));
283 set_unique_reg_note (last_insn, REG_EQUAL, note);
288 /* Widen OP to MODE and return the rtx for the widened operand. UNSIGNEDP
289 says whether OP is signed or unsigned. NO_EXTEND is nonzero if we need
290 not actually do a sign-extend or zero-extend, but can leave the
291 higher-order bits of the result rtx undefined, for example, in the case
292 of logical operations, but not right shifts. */
295 widen_operand (rtx op, enum machine_mode mode, enum machine_mode oldmode,
296 int unsignedp, int no_extend)
300 /* If we don't have to extend and this is a constant, return it. */
301 if (no_extend && GET_MODE (op) == VOIDmode)
304 /* If we must extend do so. If OP is a SUBREG for a promoted object, also
305 extend since it will be more efficient to do so unless the signedness of
306 a promoted object differs from our extension. */
/* NOTE(review): the leading "if (! no_extend" condition line is missing
   from this extract; only its continuation is visible below. */
308 || (GET_CODE (op) == SUBREG && SUBREG_PROMOTED_VAR_P (op)
309 && SUBREG_PROMOTED_UNSIGNED_P (op) == unsignedp))
310 return convert_modes (mode, oldmode, op, unsignedp);
311 
312 /* If MODE is no wider than a single word, we return a paradoxical
314 if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
315 return gen_rtx_SUBREG (mode, force_reg (GET_MODE (op), op), 0);
317 /* Otherwise, get an object of MODE, clobber it, and set the low-order
/* The CLOBBER tells the RTL passes the upper bits are intentionally
   undefined before the low-order part is written. */
320 result = gen_reg_rtx (mode);
321 emit_insn (gen_rtx_CLOBBER (VOIDmode, result));
322 emit_move_insn (gen_lowpart (GET_MODE (op), result), op);
326 /* Return the optab used for computing the operation given by
327 the tree code, CODE. This function is not always usable (for
328 example, it cannot give complete results for multiplication
329 or division) but probably ought to be relied on more widely
330 throughout the expander. */
332 optab_for_tree_code (enum tree_code code, const_tree type)
/* NOTE(review): the opening switch statement and many of the `case` labels
   are missing from this extract; each return below corresponds to one or
   more tree codes in the original.  TYPE's signedness selects between the
   signed and unsigned optab variants throughout. */
344 return one_cmpl_optab;
353 return TYPE_UNSIGNED (type) ? umod_optab : smod_optab;
361 return TYPE_UNSIGNED (type) ? udiv_optab : sdiv_optab;
367 return TYPE_UNSIGNED (type) ? lshr_optab : ashr_optab;
376 return TYPE_UNSIGNED (type) ? umax_optab : smax_optab;
379 return TYPE_UNSIGNED (type) ? umin_optab : smin_optab;
381 case REALIGN_LOAD_EXPR:
382 return vec_realign_load_optab;
385 return TYPE_UNSIGNED (type) ? usum_widen_optab : ssum_widen_optab;
388 return TYPE_UNSIGNED (type) ? udot_prod_optab : sdot_prod_optab;
391 return TYPE_UNSIGNED (type) ? reduc_umax_optab : reduc_smax_optab;
394 return TYPE_UNSIGNED (type) ? reduc_umin_optab : reduc_smin_optab;
396 case REDUC_PLUS_EXPR:
397 return TYPE_UNSIGNED (type) ? reduc_uplus_optab : reduc_splus_optab;
399 case VEC_LSHIFT_EXPR:
400 return vec_shl_optab;
402 case VEC_RSHIFT_EXPR:
403 return vec_shr_optab;
405 case VEC_WIDEN_MULT_HI_EXPR:
406 return TYPE_UNSIGNED (type) ?
407 vec_widen_umult_hi_optab : vec_widen_smult_hi_optab;
409 case VEC_WIDEN_MULT_LO_EXPR:
410 return TYPE_UNSIGNED (type) ?
411 vec_widen_umult_lo_optab : vec_widen_smult_lo_optab;
413 case VEC_UNPACK_HI_EXPR:
414 return TYPE_UNSIGNED (type) ?
415 vec_unpacku_hi_optab : vec_unpacks_hi_optab;
417 case VEC_UNPACK_LO_EXPR:
418 return TYPE_UNSIGNED (type) ?
419 vec_unpacku_lo_optab : vec_unpacks_lo_optab;
421 case VEC_UNPACK_FLOAT_HI_EXPR:
422 /* The signedness is determined from input operand. */
423 return TYPE_UNSIGNED (type) ?
424 vec_unpacku_float_hi_optab : vec_unpacks_float_hi_optab;
426 case VEC_UNPACK_FLOAT_LO_EXPR:
427 /* The signedness is determined from input operand. */
428 return TYPE_UNSIGNED (type) ?
429 vec_unpacku_float_lo_optab : vec_unpacks_float_lo_optab;
431 case VEC_PACK_TRUNC_EXPR:
432 return vec_pack_trunc_optab;
434 case VEC_PACK_SAT_EXPR:
435 return TYPE_UNSIGNED (type) ? vec_pack_usat_optab : vec_pack_ssat_optab;
437 case VEC_PACK_FIX_TRUNC_EXPR:
438 /* The signedness is determined from output operand. */
439 return TYPE_UNSIGNED (type) ?
440 vec_pack_ufix_trunc_optab : vec_pack_sfix_trunc_optab;
/* For the basic arithmetic codes below, integral types with trapping
   overflow select the -v (overflow-trapping) optab variants. */
446 trapv = INTEGRAL_TYPE_P (type) && TYPE_OVERFLOW_TRAPS (type);
449 case POINTER_PLUS_EXPR:
451 return trapv ? addv_optab : add_optab;
454 return trapv ? subv_optab : sub_optab;
457 return trapv ? smulv_optab : smul_optab;
460 return trapv ? negv_optab : neg_optab;
463 return trapv ? absv_optab : abs_optab;
465 case VEC_EXTRACT_EVEN_EXPR:
466 return vec_extract_even_optab;
468 case VEC_EXTRACT_ODD_EXPR:
469 return vec_extract_odd_optab;
471 case VEC_INTERLEAVE_HIGH_EXPR:
472 return vec_interleave_high_optab;
474 case VEC_INTERLEAVE_LOW_EXPR:
475 return vec_interleave_low_optab;
483 /* Expand vector widening operations.
485 There are two different classes of operations handled here:
486 1) Operations whose result is wider than all the arguments to the operation.
487 Examples: VEC_UNPACK_HI/LO_EXPR, VEC_WIDEN_MULT_HI/LO_EXPR
488 In this case OP0 and optionally OP1 would be initialized,
489 but WIDE_OP wouldn't (not relevant for this case).
490 2) Operations whose result is of the same size as the last argument to the
491 operation, but wider than all the other arguments to the operation.
492 Examples: WIDEN_SUM_EXPR, VEC_DOT_PROD_EXPR.
493 In the case WIDE_OP, OP0 and optionally OP1 would be initialized.
495 E.g, when called to expand the following operations, this is how
496 the arguments will be initialized:
498 widening-sum 2 oprnd0 - oprnd1
499 widening-dot-product 3 oprnd0 oprnd1 oprnd2
500 widening-mult 2 oprnd0 oprnd1 -
501 type-promotion (vec-unpack) 1 oprnd0 - - */
504 expand_widen_pattern_expr (tree exp, rtx op0, rtx op1, rtx wide_op, rtx target,
507 tree oprnd0, oprnd1, oprnd2;
508 enum machine_mode wmode = 0, tmode0, tmode1 = 0;
509 optab widen_pattern_optab;
511 enum machine_mode xmode0, xmode1 = 0, wxmode = 0;
514 rtx xop0, xop1, wxop;
515 int nops = TREE_OPERAND_LENGTH (exp);
/* Pick the insn pattern from the optab for this tree code, keyed on the
   mode of the first (narrow) operand. */
517 oprnd0 = TREE_OPERAND (exp, 0);
518 tmode0 = TYPE_MODE (TREE_TYPE (oprnd0));
519 widen_pattern_optab =
520 optab_for_tree_code (TREE_CODE (exp), TREE_TYPE (oprnd0));
521 icode = (int) optab_handler (widen_pattern_optab, tmode0)->insn_code;
522 gcc_assert (icode != CODE_FOR_nothing);
523 xmode0 = insn_data[icode].operand[1].mode;
/* NOTE(review): the `if (nops >= 2)` guard presumably preceding this is
   missing from the extract. */
527 oprnd1 = TREE_OPERAND (exp, 1);
528 tmode1 = TYPE_MODE (TREE_TYPE (oprnd1));
529 xmode1 = insn_data[icode].operand[2].mode;
532 /* The last operand is of a wider mode than the rest of the operands. */
540 gcc_assert (tmode1 == tmode0);
542 oprnd2 = TREE_OPERAND (exp, 2);
543 wmode = TYPE_MODE (TREE_TYPE (oprnd2));
544 wxmode = insn_data[icode].operand[3].mode;
/* No wide input operand: the wide mode comes from the output operand. */
548 wmode = wxmode = insn_data[icode].operand[0].mode;
/* If TARGET is absent or rejected by the output predicate, compute into a
   fresh pseudo instead. */
551 || ! (*insn_data[icode].operand[0].predicate) (target, wmode))
552 temp = gen_reg_rtx (wmode);
560 /* In case the insn wants input operands in modes different from
561 those of the actual operands, convert the operands. It would
562 seem that we don't need to convert CONST_INTs, but we do, so
563 that they're properly zero-extended, sign-extended or truncated
566 if (GET_MODE (op0) != xmode0 && xmode0 != VOIDmode)
567 xop0 = convert_modes (xmode0,
568 GET_MODE (op0) != VOIDmode
574 if (GET_MODE (op1) != xmode1 && xmode1 != VOIDmode)
575 xop1 = convert_modes (xmode1,
576 GET_MODE (op1) != VOIDmode
582 if (GET_MODE (wide_op) != wxmode && wxmode != VOIDmode)
583 wxop = convert_modes (wxmode,
584 GET_MODE (wide_op) != VOIDmode
589 /* Now, if insn's predicates don't allow our operands, put them into
592 if (! (*insn_data[icode].operand[1].predicate) (xop0, xmode0)
593 && xmode0 != VOIDmode)
594 xop0 = copy_to_mode_reg (xmode0, xop0);
598 if (! (*insn_data[icode].operand[2].predicate) (xop1, xmode1)
599 && xmode1 != VOIDmode)
600 xop1 = copy_to_mode_reg (xmode1, xop1);
/* Binary pattern with a wide operand: wide operand is insn operand 3. */
604 if (! (*insn_data[icode].operand[3].predicate) (wxop, wxmode)
605 && wxmode != VOIDmode)
606 wxop = copy_to_mode_reg (wxmode, wxop);
608 pat = GEN_FCN (icode) (temp, xop0, xop1, wxop);
611 pat = GEN_FCN (icode) (temp, xop0, xop1);
/* Unary pattern with a wide operand: wide operand is insn operand 2. */
617 if (! (*insn_data[icode].operand[2].predicate) (wxop, wxmode)
618 && wxmode != VOIDmode)
619 wxop = copy_to_mode_reg (wxmode, wxop);
621 pat = GEN_FCN (icode) (temp, xop0, wxop);
624 pat = GEN_FCN (icode) (temp, xop0);
631 /* Generate code to perform an operation specified by TERNARY_OPTAB
632 on operands OP0, OP1 and OP2, with result having machine-mode MODE.
634 UNSIGNEDP is for the case where we have to widen the operands
635 to perform the operation. It says to use zero-extension.
637 If TARGET is nonzero, the value
638 is generated there, if it is convenient to do so.
639 In all cases an rtx is returned for the locus of the value;
640 this may or may not be TARGET. */
643 expand_ternary_op (enum machine_mode mode, optab ternary_optab, rtx op0,
644 rtx op1, rtx op2, rtx target, int unsignedp)
646 int icode = (int) optab_handler (ternary_optab, mode)->insn_code;
647 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
648 enum machine_mode mode1 = insn_data[icode].operand[2].mode;
649 enum machine_mode mode2 = insn_data[icode].operand[3].mode;
652 rtx xop0 = op0, xop1 = op1, xop2 = op2;
/* Callers must only use this when the target supports the operation. */
654 gcc_assert (optab_handler (ternary_optab, mode)->insn_code
655 != CODE_FOR_nothing);
/* Use TARGET if the output predicate accepts it, else a fresh pseudo. */
657 if (!target || !insn_data[icode].operand[0].predicate (target, mode))
658 temp = gen_reg_rtx (mode);
662 /* In case the insn wants input operands in modes different from
663 those of the actual operands, convert the operands. It would
664 seem that we don't need to convert CONST_INTs, but we do, so
665 that they're properly zero-extended, sign-extended or truncated
668 if (GET_MODE (op0) != mode0 && mode0 != VOIDmode)
669 xop0 = convert_modes (mode0,
670 GET_MODE (op0) != VOIDmode
675 if (GET_MODE (op1) != mode1 && mode1 != VOIDmode)
676 xop1 = convert_modes (mode1,
677 GET_MODE (op1) != VOIDmode
682 if (GET_MODE (op2) != mode2 && mode2 != VOIDmode)
683 xop2 = convert_modes (mode2,
684 GET_MODE (op2) != VOIDmode
689 /* Now, if insn's predicates don't allow our operands, put them into
692 if (!insn_data[icode].operand[1].predicate (xop0, mode0)
693 && mode0 != VOIDmode)
694 xop0 = copy_to_mode_reg (mode0, xop0);
696 if (!insn_data[icode].operand[2].predicate (xop1, mode1)
697 && mode1 != VOIDmode)
698 xop1 = copy_to_mode_reg (mode1, xop1);
700 if (!insn_data[icode].operand[3].predicate (xop2, mode2)
701 && mode2 != VOIDmode)
702 xop2 = copy_to_mode_reg (mode2, xop2);
704 pat = GEN_FCN (icode) (temp, xop0, xop1, xop2);
711 /* Like expand_binop, but return a constant rtx if the result can be
712 calculated at compile time. The arguments and return value are
713 otherwise the same as for expand_binop. */
716 simplify_expand_binop (enum machine_mode mode, optab binoptab,
717 rtx op0, rtx op1, rtx target, int unsignedp,
718 enum optab_methods methods)
/* Constant-fold first; only fall back to emitting insns when folding
   fails (the return-x branch is not visible in this extract). */
720 if (CONSTANT_P (op0) && CONSTANT_P (op1))
722 rtx x = simplify_binary_operation (binoptab->code, mode, op0, op1);
728 return expand_binop (mode, binoptab, op0, op1, target, unsignedp, methods);
731 /* Like simplify_expand_binop, but always put the result in TARGET.
732 Return true if the expansion succeeded. */
735 force_expand_binop (enum machine_mode mode, optab binoptab,
736 rtx op0, rtx op1, rtx target, int unsignedp,
737 enum optab_methods methods)
739 rtx x = simplify_expand_binop (mode, binoptab, op0, op1,
740 target, unsignedp, methods);
/* If the result landed somewhere other than TARGET, copy it there
   (the failure/early-return lines are not visible in this extract). */
744 emit_move_insn (target, x);
748 /* Generate insns for VEC_LSHIFT_EXPR, VEC_RSHIFT_EXPR. */
751 expand_vec_shift_expr (tree vec_shift_expr, rtx target)
753 enum insn_code icode;
754 rtx rtx_op1, rtx_op2;
755 enum machine_mode mode1;
756 enum machine_mode mode2;
757 enum machine_mode mode = TYPE_MODE (TREE_TYPE (vec_shift_expr));
758 tree vec_oprnd = TREE_OPERAND (vec_shift_expr, 0);
759 tree shift_oprnd = TREE_OPERAND (vec_shift_expr, 1);
/* Select the whole-vector shift optab by direction (the break statements
   and default case are not visible in this extract). */
763 switch (TREE_CODE (vec_shift_expr))
765 case VEC_RSHIFT_EXPR:
766 shift_optab = vec_shr_optab;
768 case VEC_LSHIFT_EXPR:
769 shift_optab = vec_shl_optab;
775 icode = (int) optab_handler (shift_optab, mode)->insn_code;
776 gcc_assert (icode != CODE_FOR_nothing);
778 mode1 = insn_data[icode].operand[1].mode;
779 mode2 = insn_data[icode].operand[2].mode;
/* Expand operands and coerce them into forms the insn predicates accept. */
781 rtx_op1 = expand_normal (vec_oprnd);
782 if (!(*insn_data[icode].operand[1].predicate) (rtx_op1, mode1)
783 && mode1 != VOIDmode)
784 rtx_op1 = force_reg (mode1, rtx_op1);
786 rtx_op2 = expand_normal (shift_oprnd);
787 if (!(*insn_data[icode].operand[2].predicate) (rtx_op2, mode2)
788 && mode2 != VOIDmode)
789 rtx_op2 = force_reg (mode2, rtx_op2);
792 || ! (*insn_data[icode].operand[0].predicate) (target, mode))
793 target = gen_reg_rtx (mode);
795 /* Emit instruction */
796 pat = GEN_FCN (icode) (target, rtx_op1, rtx_op2);
803 /* This subroutine of expand_doubleword_shift handles the cases in which
804 the effective shift value is >= BITS_PER_WORD. The arguments and return
805 value are the same as for the parent routine, except that SUPERWORD_OP1
806 is the shift count to use when shifting OUTOF_INPUT into INTO_TARGET.
807 INTO_TARGET may be null if the caller has decided to calculate it. */
810 expand_superword_shift (optab binoptab, rtx outof_input, rtx superword_op1,
811 rtx outof_target, rtx into_target,
812 int unsignedp, enum optab_methods methods)
/* The into-word gets OUTOF_INPUT shifted by the superword count. */
814 if (into_target != 0)
815 if (!force_expand_binop (word_mode, binoptab, outof_input, superword_op1,
816 into_target, unsignedp, methods))
819 if (outof_target != 0)
821 /* For a signed right shift, we must fill OUTOF_TARGET with copies
822 of the sign bit, otherwise we must fill it with zeros. */
823 if (binoptab != ashr_optab)
824 emit_move_insn (outof_target, CONST0_RTX (word_mode))
/* NOTE(review): an `else` presumably preceded this sign-fill shift; it is
   missing from the extract. */;
826 if (!force_expand_binop (word_mode, binoptab,
827 outof_input, GEN_INT (BITS_PER_WORD - 1),
828 outof_target, unsignedp, methods))
834 /* This subroutine of expand_doubleword_shift handles the cases in which
835 the effective shift value is < BITS_PER_WORD. The arguments and return
836 value are the same as for the parent routine. */
839 expand_subword_shift (enum machine_mode op1_mode, optab binoptab,
840 rtx outof_input, rtx into_input, rtx op1,
841 rtx outof_target, rtx into_target,
842 int unsignedp, enum optab_methods methods,
843 unsigned HOST_WIDE_INT shift_mask)
845 optab reverse_unsigned_shift, unsigned_shift;
/* The bits that cross the word boundary always move with a logical shift
   in the direction opposite to BINOPTAB; the into-word itself is shifted
   logically in the same direction. */
848 reverse_unsigned_shift = (binoptab == ashl_optab ? lshr_optab : ashl_optab);
849 unsigned_shift = (binoptab == ashl_optab ? ashl_optab : lshr_optab);
851 /* The low OP1 bits of INTO_TARGET come from the high bits of OUTOF_INPUT.
852 We therefore need to shift OUTOF_INPUT by (BITS_PER_WORD - OP1) bits in
853 the opposite direction to BINOPTAB. */
854 if (CONSTANT_P (op1) || shift_mask >= BITS_PER_WORD)
856 carries = outof_input;
857 tmp = immed_double_const (BITS_PER_WORD, 0, op1_mode);
858 tmp = simplify_expand_binop (op1_mode, sub_optab, tmp, op1,
863 /* We must avoid shifting by BITS_PER_WORD bits since that is either
864 the same as a zero shift (if shift_mask == BITS_PER_WORD - 1) or
865 has unknown behavior. Do a single shift first, then shift by the
866 remainder. It's OK to use ~OP1 as the remainder if shift counts
867 are truncated to the mode size. */
868 carries = expand_binop (word_mode, reverse_unsigned_shift,
869 outof_input, const1_rtx, 0, unsignedp, methods);
870 if (shift_mask == BITS_PER_WORD - 1)
872 tmp = immed_double_const (-1, -1, op1_mode);
873 tmp = simplify_expand_binop (op1_mode, xor_optab, op1, tmp,
878 tmp = immed_double_const (BITS_PER_WORD - 1, 0, op1_mode);
879 tmp = simplify_expand_binop (op1_mode, sub_optab, tmp, op1,
/* If either intermediate failed, the whole expansion fails (the return
   value on this path is not visible in this extract). */
883 if (tmp == 0 || carries == 0)
885 carries = expand_binop (word_mode, reverse_unsigned_shift,
886 carries, tmp, 0, unsignedp, methods);
890 /* Shift INTO_INPUT logically by OP1. This is the last use of INTO_INPUT
891 so the result can go directly into INTO_TARGET if convenient. */
892 tmp = expand_binop (word_mode, unsigned_shift, into_input, op1,
893 into_target, unsignedp, methods);
897 /* Now OR in the bits carried over from OUTOF_INPUT. */
898 if (!force_expand_binop (word_mode, ior_optab, tmp, carries,
899 into_target, unsignedp, methods))
902 /* Use a standard word_mode shift for the out-of half. */
903 if (outof_target != 0)
904 if (!force_expand_binop (word_mode, binoptab, outof_input, op1,
905 outof_target, unsignedp, methods))
912 #ifdef HAVE_conditional_move
913 /* Try implementing expand_doubleword_shift using conditional moves.
914 The shift is by < BITS_PER_WORD if (CMP_CODE CMP1 CMP2) is true,
915 otherwise it is by >= BITS_PER_WORD. SUBWORD_OP1 and SUPERWORD_OP1
916 are the shift counts to use in the former and latter case. All other
917 arguments are the same as the parent routine. */
920 expand_doubleword_shift_condmove (enum machine_mode op1_mode, optab binoptab,
921 enum rtx_code cmp_code, rtx cmp1, rtx cmp2,
922 rtx outof_input, rtx into_input,
923 rtx subword_op1, rtx superword_op1,
924 rtx outof_target, rtx into_target,
925 int unsignedp, enum optab_methods methods,
926 unsigned HOST_WIDE_INT shift_mask)
928 rtx outof_superword, into_superword;
930 /* Put the superword version of the output into OUTOF_SUPERWORD and
932 outof_superword = outof_target != 0 ? gen_reg_rtx (word_mode) : 0;
933 if (outof_target != 0 && subword_op1 == superword_op1)
935 /* The value INTO_TARGET >> SUBWORD_OP1, which we later store in
936 OUTOF_TARGET, is the same as the value of INTO_SUPERWORD. */
937 into_superword = outof_target;
938 if (!expand_superword_shift (binoptab, outof_input, superword_op1,
939 outof_superword, 0, unsignedp, methods))
/* General case: compute the superword into-half into its own pseudo. */
944 into_superword = gen_reg_rtx (word_mode);
945 if (!expand_superword_shift (binoptab, outof_input, superword_op1,
946 outof_superword, into_superword,
951 /* Put the subword version directly in OUTOF_TARGET and INTO_TARGET. */
952 if (!expand_subword_shift (op1_mode, binoptab,
953 outof_input, into_input, subword_op1,
954 outof_target, into_target,
955 unsignedp, methods, shift_mask))
958 /* Select between them. Do the INTO half first because INTO_SUPERWORD
959 might be the current value of OUTOF_TARGET. */
960 if (!emit_conditional_move (into_target, cmp_code, cmp1, cmp2, op1_mode,
961 into_target, into_superword, word_mode, false))
964 if (outof_target != 0)
965 if (!emit_conditional_move (outof_target, cmp_code, cmp1, cmp2, op1_mode,
966 outof_target, outof_superword,
974 /* Expand a doubleword shift (ashl, ashr or lshr) using word-mode shifts.
975 OUTOF_INPUT and INTO_INPUT are the two word-sized halves of the first
976 input operand; the shift moves bits in the direction OUTOF_INPUT->
977 INTO_TARGET. OUTOF_TARGET and INTO_TARGET are the equivalent words
978 of the target. OP1 is the shift count and OP1_MODE is its mode.
979 If OP1 is constant, it will have been truncated as appropriate
980 and is known to be nonzero.
982 If SHIFT_MASK is zero, the result of word shifts is undefined when the
983 shift count is outside the range [0, BITS_PER_WORD). This routine must
984 avoid generating such shifts for OP1s in the range [0, BITS_PER_WORD * 2).
986 If SHIFT_MASK is nonzero, all word-mode shift counts are effectively
987 masked by it and shifts in the range [BITS_PER_WORD, SHIFT_MASK) will
988 fill with zeros or sign bits as appropriate.
990 If SHIFT_MASK is BITS_PER_WORD - 1, this routine will synthesize
991 a doubleword shift whose equivalent mask is BITS_PER_WORD * 2 - 1.
992 Doing this preserves semantics required by SHIFT_COUNT_TRUNCATED.
993 In all other cases, shifts by values outside [0, BITS_PER_UNIT * 2)
996 BINOPTAB, UNSIGNEDP and METHODS are as for expand_binop. This function
997 may not use INTO_INPUT after modifying INTO_TARGET, and similarly for
998 OUTOF_INPUT and OUTOF_TARGET. OUTOF_TARGET can be null if the parent
999 function wants to calculate it itself.
1001 Return true if the shift could be successfully synthesized. */
1004 expand_doubleword_shift (enum machine_mode op1_mode, optab binoptab,
1005 rtx outof_input, rtx into_input, rtx op1,
1006 rtx outof_target, rtx into_target,
1007 int unsignedp, enum optab_methods methods,
1008 unsigned HOST_WIDE_INT shift_mask)
1010 rtx superword_op1, tmp, cmp1, cmp2;
1011 rtx subword_label, done_label;
1012 enum rtx_code cmp_code;
1014 /* See if word-mode shifts by BITS_PER_WORD...BITS_PER_WORD * 2 - 1 will
1015 fill the result with sign or zero bits as appropriate. If so, the value
1016 of OUTOF_TARGET will always be (SHIFT OUTOF_INPUT OP1). Recursively call
1017 this routine to calculate INTO_TARGET (which depends on both OUTOF_INPUT
1018 and INTO_INPUT), then emit code to set up OUTOF_TARGET.
1020 This isn't worthwhile for constant shifts since the optimizers will
1021 cope better with in-range shift counts. */
1022 if (shift_mask >= BITS_PER_WORD
1023 && outof_target != 0
1024 && !CONSTANT_P (op1))
1026 if (!expand_doubleword_shift (op1_mode, binoptab,
1027 outof_input, into_input, op1,
1029 unsignedp, methods, shift_mask))
1031 if (!force_expand_binop (word_mode, binoptab, outof_input, op1,
1032 outof_target, unsignedp, methods))
1037 /* Set CMP_CODE, CMP1 and CMP2 so that the rtx (CMP_CODE CMP1 CMP2)
1038 is true when the effective shift value is less than BITS_PER_WORD.
1039 Set SUPERWORD_OP1 to the shift count that should be used to shift
1040 OUTOF_INPUT into INTO_TARGET when the condition is false. */
1041 tmp = immed_double_const (BITS_PER_WORD, 0, op1_mode);
1042 if (!CONSTANT_P (op1) && shift_mask == BITS_PER_WORD - 1)
1044 /* Set CMP1 to OP1 & BITS_PER_WORD. The result is zero iff OP1
1045 is a subword shift count. */
1046 cmp1 = simplify_expand_binop (op1_mode, and_optab, op1, tmp,
1048 cmp2 = CONST0_RTX (op1_mode);
1050 superword_op1 = op1;
/* NOTE(review): the else-branch introduction and the cmp_code assignments
   are missing from this extract. */
1054 /* Set CMP1 to OP1 - BITS_PER_WORD. */
1055 cmp1 = simplify_expand_binop (op1_mode, sub_optab, op1, tmp,
1057 cmp2 = CONST0_RTX (op1_mode);
1059 superword_op1 = cmp1;
1064 /* If we can compute the condition at compile time, pick the
1065 appropriate subroutine. */
1066 tmp = simplify_relational_operation (cmp_code, SImode, op1_mode, cmp1, cmp2);
1067 if (tmp != 0 && GET_CODE (tmp) == CONST_INT)
1069 if (tmp == const0_rtx)
1070 return expand_superword_shift (binoptab, outof_input, superword_op1,
1071 outof_target, into_target,
1072 unsignedp, methods);
1074 return expand_subword_shift (op1_mode, binoptab,
1075 outof_input, into_input, op1,
1076 outof_target, into_target,
1077 unsignedp, methods, shift_mask);
1080 #ifdef HAVE_conditional_move
1081 /* Try using conditional moves to generate straight-line code. */
1083 rtx start = get_last_insn ();
1084 if (expand_doubleword_shift_condmove (op1_mode, binoptab,
1085 cmp_code, cmp1, cmp2,
1086 outof_input, into_input,
1088 outof_target, into_target,
1089 unsignedp, methods, shift_mask))
/* Condmove attempt failed: discard the partially emitted sequence
   (the success return is not visible in this extract). */
1091 delete_insns_since (start);
1095 /* As a last resort, use branches to select the correct alternative. */
1096 subword_label = gen_label_rtx ();
1097 done_label = gen_label_rtx ();
1100 do_compare_rtx_and_jump (cmp1, cmp2, cmp_code, false, op1_mode,
1101 0, 0, subword_label);
1104 if (!expand_superword_shift (binoptab, outof_input, superword_op1,
1105 outof_target, into_target,
1106 unsignedp, methods))
1109 emit_jump_insn (gen_jump (done_label));
1111 emit_label (subword_label);
1113 if (!expand_subword_shift (op1_mode, binoptab,
1114 outof_input, into_input, op1,
1115 outof_target, into_target,
1116 unsignedp, methods, shift_mask))
1119 emit_label (done_label);
1123 /* Subroutine of expand_binop. Perform a double word multiplication of
1124 operands OP0 and OP1 both of mode MODE, which is exactly twice as wide
1125 as the target's word_mode. This function returns NULL_RTX if anything
1126 goes wrong, in which case it may have already emitted instructions
1127 which need to be deleted.
1129 If we want to multiply two two-word values and have normal and widening
1130 multiplies of single-word values, we can do this with three smaller
1131 multiplications. Note that we do not make a REG_NO_CONFLICT block here
1132 because we are not operating on one word at a time.
1134 The multiplication proceeds as follows:
1135 _______________________
1136 [__op0_high_|__op0_low__]
1137 _______________________
1138 * [__op1_high_|__op1_low__]
1139 _______________________________________________
1140 _______________________
1141 (1) [__op0_low__*__op1_low__]
1142 _______________________
1143 (2a) [__op0_low__*__op1_high_]
1144 _______________________
1145 (2b) [__op0_high_*__op1_low__]
1146 _______________________
1147 (3) [__op0_high_*__op1_high_]
1150 This gives a 4-word result. Since we are only interested in the
1151 lower 2 words, partial result (3) and the upper words of (2a) and
1152 (2b) don't need to be calculated. Hence (2a) and (2b) can be
1153 calculated using non-widening multiplication.
1155 (1), however, needs to be calculated with an unsigned widening
1156 multiplication. If this operation is not directly supported we
1157 try using a signed widening multiplication and adjust the result.
1158 This adjustment works as follows:
1160 If both operands are positive then no adjustment is needed.
1162 If the operands have different signs, for example op0_low < 0 and
1163 op1_low >= 0, the instruction treats the most significant bit of
1164 op0_low as a sign bit instead of a bit with significance
1165 2**(BITS_PER_WORD-1), i.e. the instruction multiplies op1_low
1166 with 2**BITS_PER_WORD - op0_low, and two's complements the
1167 result. Conclusion: We need to add op1_low * 2**BITS_PER_WORD to
1170 Similarly, if both operands are negative, we need to add
1171 (op0_low + op1_low) * 2**BITS_PER_WORD.
1173 We use a trick to adjust quickly. We logically shift op0_low right
1174 (op1_low) BITS_PER_WORD-1 steps to get 0 or 1, and add this to
1175 op0_high (op1_high) before it is used to calculate 2b (2a). If no
1176 logical shift exists, we do an arithmetic right shift and subtract
1180 expand_doubleword_mult (enum machine_mode mode, rtx op0, rtx op1, rtx target,
1181 bool umulp, enum optab_methods methods)
/* Word indices of the low/high halves of a double-word value, honoring
   word endianness.  */
1183 int low = (WORDS_BIG_ENDIAN ? 1 : 0);
1184 int high = (WORDS_BIG_ENDIAN ? 0 : 1);
/* Shift count BITS_PER_WORD-1, needed only for the signed-multiply
   adjustment path (UMULP false); unused when widening unsigned.  */
1185 rtx wordm1 = umulp ? NULL_RTX : GEN_INT (BITS_PER_WORD - 1);
1186 rtx product, adjust, product_high, temp;
/* Split both operands into their word-sized halves.  */
1188 rtx op0_high = operand_subword_force (op0, high, mode);
1189 rtx op0_low = operand_subword_force (op0, low, mode);
1190 rtx op1_high = operand_subword_force (op1, high, mode);
1191 rtx op1_low = operand_subword_force (op1, low, mode);
1193 /* If we're using an unsigned multiply to directly compute the product
1194 of the low-order words of the operands and perform any required
1195 adjustments of the operands, we begin by trying two more multiplications
1196 and then computing the appropriate sum.
1198 We have checked above that the required addition is provided.
1199 Full-word addition will normally always succeed, especially if
1200 it is provided at all, so we don't worry about its failure. The
1201 multiplication may well fail, however, so we do handle that. */
/* Signed case: fold the sign bit of op0_low into op0_high so the
   partial products come out as if the low words were unsigned (see
   the adjustment description in the header comment).  */
1205 /* ??? This could be done with emit_store_flag where available. */
1206 temp = expand_binop (word_mode, lshr_optab, op0_low, wordm1,
1207 NULL_RTX, 1, methods);
1209 op0_high = expand_binop (word_mode, add_optab, op0_high, temp,
1210 NULL_RTX, 0, OPTAB_DIRECT);
/* No logical right shift available: use an arithmetic shift (yields
   0 or -1) and subtract instead of add.  */
1213 temp = expand_binop (word_mode, ashr_optab, op0_low, wordm1,
1214 NULL_RTX, 0, methods);
1217 op0_high = expand_binop (word_mode, sub_optab, op0_high, temp,
1218 NULL_RTX, 0, OPTAB_DIRECT);
/* Partial product (2b): non-widening multiply of the adjusted high
   word of op0 with the low word of op1.  */
1225 adjust = expand_binop (word_mode, smul_optab, op0_high, op1_low,
1226 NULL_RTX, 0, OPTAB_DIRECT);
1230 /* OP0_HIGH should now be dead. */
/* Same sign-bit adjustment for op1, mirroring the op0 case above.  */
1234 /* ??? This could be done with emit_store_flag where available. */
1235 temp = expand_binop (word_mode, lshr_optab, op1_low, wordm1,
1236 NULL_RTX, 1, methods);
1238 op1_high = expand_binop (word_mode, add_optab, op1_high, temp,
1239 NULL_RTX, 0, OPTAB_DIRECT);
1242 temp = expand_binop (word_mode, ashr_optab, op1_low, wordm1,
1243 NULL_RTX, 0, methods);
1246 op1_high = expand_binop (word_mode, sub_optab, op1_high, temp,
1247 NULL_RTX, 0, OPTAB_DIRECT);
/* Partial product (2a).  */
1254 temp = expand_binop (word_mode, smul_optab, op1_high, op0_low,
1255 NULL_RTX, 0, OPTAB_DIRECT);
1259 /* OP1_HIGH should now be dead. */
/* Combine (2a) and (2b) into the high-word adjustment.  */
1261 adjust = expand_binop (word_mode, add_optab, adjust, temp,
1262 adjust, 0, OPTAB_DIRECT);
/* The widening multiply wants a register target (or none at all).  */
1264 if (target && !REG_P (target))
/* Partial product (1): widening multiply of the two low words,
   unsigned or signed depending on what the target provides.  */
1268 product = expand_binop (mode, umul_widen_optab, op0_low, op1_low,
1269 target, 1, OPTAB_DIRECT);
1271 product = expand_binop (mode, smul_widen_optab, op0_low, op1_low,
1272 target, 1, OPTAB_DIRECT);
/* Fold the (2a)+(2b) adjustment into the high word of the 2-word
   product.  */
1277 product_high = operand_subword (product, high, 1, mode);
1278 adjust = expand_binop (word_mode, add_optab, product_high, adjust,
1279 REG_P (product_high) ? product_high : adjust,
1281 emit_move_insn (product_high, adjust);
1285 /* Wrapper around expand_binop which takes an rtx code to specify
1286 the operation to perform, not an optab pointer. All other
1287 arguments are the same. */
1289 expand_simple_binop (enum machine_mode mode, enum rtx_code code, rtx op0,
1290 rtx op1, rtx target, int unsignedp,
1291 enum optab_methods methods)
/* Map the rtx code to its optab via the global table, then delegate.  */
1293 optab binop = code_to_optab[(int) code];
1296 return expand_binop (mode, binop, op0, op1, target, unsignedp, methods);
1299 /* Return whether OP0 and OP1 should be swapped when expanding a commutative
1300 binop. Order them according to commutative_operand_precedence and, if
1301 possible, try to put TARGET or a pseudo first. */
1303 swap_commutative_operands_with_target (rtx target, rtx op0, rtx op1)
1305 int op0_prec = commutative_operand_precedence (op0);
1306 int op1_prec = commutative_operand_precedence (op1);
/* A strict precedence difference decides the order outright.  */
1308 if (op0_prec < op1_prec)
1311 if (op0_prec > op1_prec)
1314 /* With equal precedence, both orders are ok, but it is better if the
1315 first operand is TARGET, or if both TARGET and OP0 are pseudos. */
1316 if (target == 0 || REG_P (target))
1317 return (REG_P (op1) && !REG_P (op0)) || target == op1;
/* TARGET is a non-register (e.g. a MEM): swap only when OP1 is
   identical to TARGET, so the result lands in place.  */
1319 return rtx_equal_p (op1, target);
1322 /* Return true if BINOPTAB implements a shift operation. */
1325 shift_optab_p (optab binoptab)
/* Dispatch on the optab's underlying rtx code.  NOTE(review): the case
   labels are presumably the shift/rotate rtx codes — confirm against the
   full case list in the source.  */
1327 switch (binoptab->code)
1341 /* Return true if BINOPTAB implements a commutative binary operation. */
1344 commutative_optab_p (optab binoptab)
/* Commutative either by rtx-code class, or because it is one of the
   widening/highpart multiply optabs, which are commutative even though
   their rtx codes do not say so.  */
1346 return (GET_RTX_CLASS (binoptab->code) == RTX_COMM_ARITH
1347 || binoptab == smul_widen_optab
1348 || binoptab == umul_widen_optab
1349 || binoptab == smul_highpart_optab
1350 || binoptab == umul_highpart_optab);
1353 /* X is to be used in mode MODE as an operand to BINOPTAB. If we're
1354 optimizing, and if the operand is a constant that costs more than
1355 1 instruction, force the constant into a register and return that
1356 register. Return X otherwise. UNSIGNEDP says whether X is unsigned. */
1359 avoid_expensive_constant (enum machine_mode mode, optab binoptab,
1360 rtx x, bool unsignedp)
1362 if (mode != VOIDmode
/* rtx_cost is queried in the context of BINOPTAB's rtx code, since
   some constants are cheap only as operands of particular codes.  */
1365 && rtx_cost (x, binoptab->code) > COSTS_N_INSNS (1))
1367 if (GET_CODE (x) == CONST_INT)
/* Canonicalize the constant to MODE's precision before loading it
   into a register.  */
1369 HOST_WIDE_INT intval = trunc_int_for_mode (INTVAL (x), mode);
1370 if (intval != INTVAL (x))
1371 x = GEN_INT (intval);
/* Materialize the (expensive) constant in a fresh register.  */
1374 x = convert_modes (mode, VOIDmode, x, unsignedp);
1375 x = force_reg (mode, x);
1380 /* Helper function for expand_binop: handle the case where there
1381 is an insn that directly implements the indicated operation.
1382 Returns null if this is not possible. */
1384 expand_binop_directly (enum machine_mode mode, optab binoptab,
1386 rtx target, int unsignedp, enum optab_methods methods,
/* Look up the insn pattern implementing BINOPTAB in MODE and the
   operand modes it expects.  */
1389 int icode = (int) optab_handler (binoptab, mode)->insn_code;
1390 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
1391 enum machine_mode mode1 = insn_data[icode].operand[2].mode;
1392 enum machine_mode tmp_mode;
1395 rtx xop0 = op0, xop1 = op1;
1402 temp = gen_reg_rtx (mode);
1404 /* If it is a commutative operator and the modes would match
1405 if we would swap the operands, we can save the conversions. */
1406 commutative_p = commutative_optab_p (binoptab);
1408 && GET_MODE (xop0) != mode0 && GET_MODE (xop1) != mode1
1409 && GET_MODE (xop0) == mode1 && GET_MODE (xop1) == mode1
1416 /* If we are optimizing, force expensive constants into a register. */
1417 xop0 = avoid_expensive_constant (mode0, binoptab, xop0, unsignedp);
/* Shift counts are cheap as immediates; leave them alone.  */
1418 if (!shift_optab_p (binoptab))
1419 xop1 = avoid_expensive_constant (mode1, binoptab, xop1, unsignedp);
1421 /* In case the insn wants input operands in modes different from
1422 those of the actual operands, convert the operands. It would
1423 seem that we don't need to convert CONST_INTs, but we do, so
1424 that they're properly zero-extended, sign-extended or truncated
1427 if (GET_MODE (xop0) != mode0 && mode0 != VOIDmode)
1428 xop0 = convert_modes (mode0,
1429 GET_MODE (xop0) != VOIDmode
1434 if (GET_MODE (xop1) != mode1 && mode1 != VOIDmode)
1435 xop1 = convert_modes (mode1,
1436 GET_MODE (xop1) != VOIDmode
1441 /* If operation is commutative,
1442 try to make the first operand a register.
1443 Even better, try to make it the same as the target.
1444 Also try to make the last operand a constant. */
1446 && swap_commutative_operands_with_target (target, xop0, xop1))
1453 /* Now, if insn's predicates don't allow our operands, put them into
1456 if (!insn_data[icode].operand[1].predicate (xop0, mode0)
1457 && mode0 != VOIDmode)
1458 xop0 = copy_to_mode_reg (mode0, xop0);
1460 if (!insn_data[icode].operand[2].predicate (xop1, mode1)
1461 && mode1 != VOIDmode)
1462 xop1 = copy_to_mode_reg (mode1, xop1);
/* The vec_pack_* patterns produce a result with twice the element
   count of the input mode, so the result mode differs from MODE.  */
1464 if (binoptab == vec_pack_trunc_optab
1465 || binoptab == vec_pack_usat_optab
1466 || binoptab == vec_pack_ssat_optab
1467 || binoptab == vec_pack_ufix_trunc_optab
1468 || binoptab == vec_pack_sfix_trunc_optab)
1470 /* The mode of the result is different from the mode of the
1472 tmp_mode = insn_data[icode].operand[0].mode;
1473 if (GET_MODE_NUNITS (tmp_mode) != 2 * GET_MODE_NUNITS (mode))
1479 if (!insn_data[icode].operand[0].predicate (temp, tmp_mode))
1480 temp = gen_reg_rtx (tmp_mode);
/* Emit the pattern itself.  */
1482 pat = GEN_FCN (icode) (temp, xop0, xop1);
1485 /* If PAT is composed of more than one insn, try to add an appropriate
1486 REG_EQUAL note to it. If we can't because TEMP conflicts with an
1487 operand, call expand_binop again, this time without a target. */
1488 if (INSN_P (pat) && NEXT_INSN (pat) != NULL_RTX
1489 && ! add_equal_note (pat, temp, binoptab->code, xop0, xop1))
1491 delete_insns_since (last);
1492 return expand_binop (mode, binoptab, op0, op1, NULL_RTX,
1493 unsignedp, methods);
/* Pattern generation failed: roll back any insns emitted since LAST.  */
1500 delete_insns_since (last);
1504 /* Generate code to perform an operation specified by BINOPTAB
1505 on operands OP0 and OP1, with result having machine-mode MODE.
1507 UNSIGNEDP is for the case where we have to widen the operands
1508 to perform the operation. It says to use zero-extension.
1510 If TARGET is nonzero, the value
1511 is generated there, if it is convenient to do so.
1512 In all cases an rtx is returned for the locus of the value;
1513 this may or may not be TARGET. */
1516 expand_binop (enum machine_mode mode, optab binoptab, rtx op0, rtx op1,
1517 rtx target, int unsignedp, enum optab_methods methods)
/* Methods for word-at-a-time sub-expansions below: the LIB variants
   are mapped to OPTAB_WIDEN so the pieces never issue library calls.  */
1519 enum optab_methods next_methods
1520 = (methods == OPTAB_LIB || methods == OPTAB_LIB_WIDEN
1521 ? OPTAB_WIDEN : methods);
1522 enum mode_class class;
1523 enum machine_mode wider_mode;
/* Mark the insn stream so a total failure can be undone back to here.  */
1526 rtx entry_last = get_last_insn ();
1529 class = GET_MODE_CLASS (mode);
1531 /* If subtracting an integer constant, convert this into an addition of
1532 the negated constant. */
1534 if (binoptab == sub_optab && GET_CODE (op1) == CONST_INT)
1536 op1 = negate_rtx (mode, op1);
1537 binoptab = add_optab;
1540 /* Record where to delete back to if we backtrack. */
1541 last = get_last_insn ();
1543 /* If we can do it with a three-operand insn, do so. */
1545 if (methods != OPTAB_MUST_WIDEN
1546 && optab_handler (binoptab, mode)->insn_code != CODE_FOR_nothing)
1548 temp = expand_binop_directly (mode, binoptab, op0, op1, target,
1549 unsignedp, methods, last);
1554 /* If we were trying to rotate, and that didn't work, try rotating
1555 the other direction before falling back to shifts and bitwise-or. */
1556 if (((binoptab == rotl_optab
1557 && optab_handler (rotr_optab, mode)->insn_code != CODE_FOR_nothing)
1558 || (binoptab == rotr_optab
1559 && optab_handler (rotl_optab, mode)->insn_code != CODE_FOR_nothing))
1560 && class == MODE_INT)
1562 optab otheroptab = (binoptab == rotl_optab ? rotr_optab : rotl_optab);
1564 unsigned int bits = GET_MODE_BITSIZE (mode);
/* The opposite rotation needs count BITS - OP1; compute it at
   compile time, by negation when the target truncates counts, or
   with an explicit subtraction otherwise.  */
1566 if (GET_CODE (op1) == CONST_INT)
1567 newop1 = GEN_INT (bits - INTVAL (op1));
1568 else if (targetm.shift_truncation_mask (mode) == bits - 1)
1569 newop1 = negate_rtx (mode, op1);
1571 newop1 = expand_binop (mode, sub_optab,
1572 GEN_INT (bits), op1,
1573 NULL_RTX, unsignedp, OPTAB_DIRECT);
1575 temp = expand_binop_directly (mode, otheroptab, op0, newop1,
1576 target, unsignedp, methods, last);
1581 /* If this is a multiply, see if we can do a widening operation that
1582 takes operands of this mode and makes a wider mode. */
1584 if (binoptab == smul_optab
1585 && GET_MODE_WIDER_MODE (mode) != VOIDmode
1586 && ((optab_handler ((unsignedp ? umul_widen_optab : smul_widen_optab),
1587 GET_MODE_WIDER_MODE (mode))->insn_code)
1588 != CODE_FOR_nothing))
1590 temp = expand_binop (GET_MODE_WIDER_MODE (mode),
1591 unsignedp ? umul_widen_optab : smul_widen_optab,
1592 op0, op1, NULL_RTX, unsignedp, OPTAB_DIRECT);
/* Narrow the double-width product back down to MODE.  */
1596 if (GET_MODE_CLASS (mode) == MODE_INT
1597 && TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode),
1598 GET_MODE_BITSIZE (GET_MODE (temp))))
1599 return gen_lowpart (mode, temp);
1601 return convert_to_mode (mode, temp, unsignedp);
1605 /* Look for a wider mode of the same class for which we think we
1606 can open-code the operation. Check for a widening multiply at the
1607 wider mode as well. */
1609 if (CLASS_HAS_WIDER_MODES_P (class)
1610 && methods != OPTAB_DIRECT && methods != OPTAB_LIB)
1611 for (wider_mode = GET_MODE_WIDER_MODE (mode);
1612 wider_mode != VOIDmode;
1613 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
1615 if (optab_handler (binoptab, wider_mode)->insn_code != CODE_FOR_nothing
1616 || (binoptab == smul_optab
1617 && GET_MODE_WIDER_MODE (wider_mode) != VOIDmode
1618 && ((optab_handler ((unsignedp ? umul_widen_optab
1619 : smul_widen_optab),
1620 GET_MODE_WIDER_MODE (wider_mode))->insn_code)
1621 != CODE_FOR_nothing)))
1623 rtx xop0 = op0, xop1 = op1;
1626 /* For certain integer operations, we need not actually extend
1627 the narrow operands, as long as we will truncate
1628 the results to the same narrowness. */
1630 if ((binoptab == ior_optab || binoptab == and_optab
1631 || binoptab == xor_optab
1632 || binoptab == add_optab || binoptab == sub_optab
1633 || binoptab == smul_optab || binoptab == ashl_optab)
1634 && class == MODE_INT)
1637 xop0 = avoid_expensive_constant (mode, binoptab,
1639 if (binoptab != ashl_optab)
1640 xop1 = avoid_expensive_constant (mode, binoptab,
1644 xop0 = widen_operand (xop0, wider_mode, mode, unsignedp, no_extend);
1646 /* The second operand of a shift must always be extended. */
1647 xop1 = widen_operand (xop1, wider_mode, mode, unsignedp,
1648 no_extend && binoptab != ashl_optab);
1650 temp = expand_binop (wider_mode, binoptab, xop0, xop1, NULL_RTX,
1651 unsignedp, OPTAB_DIRECT);
/* Success in the wider mode: truncate the result back to MODE.  */
1654 if (class != MODE_INT
1655 || !TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode),
1656 GET_MODE_BITSIZE (wider_mode)))
1659 target = gen_reg_rtx (mode);
1660 convert_move (target, temp, 0);
1664 return gen_lowpart (mode, temp);
1667 delete_insns_since (last);
1671 /* If operation is commutative,
1672 try to make the first operand a register.
1673 Even better, try to make it the same as the target.
1674 Also try to make the last operand a constant. */
1675 if (commutative_optab_p (binoptab)
1676 && swap_commutative_operands_with_target (target, op0, op1))
1683 /* These can be done a word at a time. */
1684 if ((binoptab == and_optab || binoptab == ior_optab || binoptab == xor_optab)
1685 && class == MODE_INT
1686 && GET_MODE_SIZE (mode) > UNITS_PER_WORD
1687 && optab_handler (binoptab, word_mode)->insn_code != CODE_FOR_nothing)
1693 /* If TARGET is the same as one of the operands, the REG_EQUAL note
1694 won't be accurate, so use a new target. */
1695 if (target == 0 || target == op0 || target == op1)
1696 target = gen_reg_rtx (mode);
1700 /* Do the actual arithmetic. */
1701 for (i = 0; i < GET_MODE_BITSIZE (mode) / BITS_PER_WORD; i++)
1703 rtx target_piece = operand_subword (target, i, 1, mode);
1704 rtx x = expand_binop (word_mode, binoptab,
1705 operand_subword_force (op0, i, mode),
1706 operand_subword_force (op1, i, mode),
1707 target_piece, unsignedp, next_methods);
1712 if (target_piece != x)
1713 emit_move_insn (target_piece, x);
1716 insns = get_insns ();
/* Only emit the block if every word piece succeeded.  */
1719 if (i == GET_MODE_BITSIZE (mode) / BITS_PER_WORD)
1721 if (binoptab->code != UNKNOWN)
1723 = gen_rtx_fmt_ee (binoptab->code, mode,
1724 copy_rtx (op0), copy_rtx (op1));
1728 emit_no_conflict_block (insns, target, op0, op1, equiv_value);
1733 /* Synthesize double word shifts from single word shifts. */
1734 if ((binoptab == lshr_optab || binoptab == ashl_optab
1735 || binoptab == ashr_optab)
1736 && class == MODE_INT
1737 && (GET_CODE (op1) == CONST_INT || !optimize_size)
1738 && GET_MODE_SIZE (mode) == 2 * UNITS_PER_WORD
1739 && optab_handler (binoptab, word_mode)->insn_code != CODE_FOR_nothing
1740 && optab_handler (ashl_optab, word_mode)->insn_code != CODE_FOR_nothing
1741 && optab_handler (lshr_optab, word_mode)->insn_code != CODE_FOR_nothing)
1743 unsigned HOST_WIDE_INT shift_mask, double_shift_mask;
1744 enum machine_mode op1_mode;
1746 double_shift_mask = targetm.shift_truncation_mask (mode);
1747 shift_mask = targetm.shift_truncation_mask (word_mode);
1748 op1_mode = GET_MODE (op1) != VOIDmode ? GET_MODE (op1) : word_mode;
1750 /* Apply the truncation to constant shifts. */
1751 if (double_shift_mask > 0 && GET_CODE (op1) == CONST_INT)
1752 op1 = GEN_INT (INTVAL (op1) & double_shift_mask);
1754 if (op1 == CONST0_RTX (op1_mode))
1757 /* Make sure that this is a combination that expand_doubleword_shift
1758 can handle. See the comments there for details. */
1759 if (double_shift_mask == 0
1760 || (shift_mask == BITS_PER_WORD - 1
1761 && double_shift_mask == BITS_PER_WORD * 2 - 1))
1763 rtx insns, equiv_value;
1764 rtx into_target, outof_target;
1765 rtx into_input, outof_input;
1766 int left_shift, outof_word;
1768 /* If TARGET is the same as one of the operands, the REG_EQUAL note
1769 won't be accurate, so use a new target. */
1770 if (target == 0 || target == op0 || target == op1)
1771 target = gen_reg_rtx (mode);
1775 /* OUTOF_* is the word we are shifting bits away from, and
1776 INTO_* is the word that we are shifting bits towards, thus
1777 they differ depending on the direction of the shift and
1778 WORDS_BIG_ENDIAN. */
1780 left_shift = binoptab == ashl_optab;
1781 outof_word = left_shift ^ ! WORDS_BIG_ENDIAN;
1783 outof_target = operand_subword (target, outof_word, 1, mode);
1784 into_target = operand_subword (target, 1 - outof_word, 1, mode);
1786 outof_input = operand_subword_force (op0, outof_word, mode);
1787 into_input = operand_subword_force (op0, 1 - outof_word, mode);
1789 if (expand_doubleword_shift (op1_mode, binoptab,
1790 outof_input, into_input, op1,
1791 outof_target, into_target,
1792 unsignedp, next_methods, shift_mask))
1794 insns = get_insns ();
1797 equiv_value = gen_rtx_fmt_ee (binoptab->code, mode, op0, op1);
1798 emit_no_conflict_block (insns, target, op0, op1, equiv_value);
1805 /* Synthesize double word rotates from single word shifts. */
1806 if ((binoptab == rotl_optab || binoptab == rotr_optab)
1807 && class == MODE_INT
1808 && GET_CODE (op1) == CONST_INT
1809 && GET_MODE_SIZE (mode) == 2 * UNITS_PER_WORD
1810 && optab_handler (ashl_optab, word_mode)->insn_code != CODE_FOR_nothing
1811 && optab_handler (lshr_optab, word_mode)->insn_code != CODE_FOR_nothing)
1814 rtx into_target, outof_target;
1815 rtx into_input, outof_input;
1817 int shift_count, left_shift, outof_word;
1819 /* If TARGET is the same as one of the operands, the REG_EQUAL note
1820 won't be accurate, so use a new target. Do this also if target is not
1821 a REG, first because having a register instead may open optimization
1822 opportunities, and second because if target and op0 happen to be MEMs
1823 designating the same location, we would risk clobbering it too early
1824 in the code sequence we generate below. */
1825 if (target == 0 || target == op0 || target == op1 || ! REG_P (target))
1826 target = gen_reg_rtx (mode);
1830 shift_count = INTVAL (op1);
1832 /* OUTOF_* is the word we are shifting bits away from, and
1833 INTO_* is the word that we are shifting bits towards, thus
1834 they differ depending on the direction of the shift and
1835 WORDS_BIG_ENDIAN. */
1837 left_shift = (binoptab == rotl_optab);
1838 outof_word = left_shift ^ ! WORDS_BIG_ENDIAN;
1840 outof_target = operand_subword (target, outof_word, 1, mode);
1841 into_target = operand_subword (target, 1 - outof_word, 1, mode);
1843 outof_input = operand_subword_force (op0, outof_word, mode);
1844 into_input = operand_subword_force (op0, 1 - outof_word, mode);
1846 if (shift_count == BITS_PER_WORD)
1848 /* This is just a word swap. */
1849 emit_move_insn (outof_target, into_input);
1850 emit_move_insn (into_target, outof_input);
/* General case: each output word is the IOR of two opposing
   single-word shifts of the input words.  */
1855 rtx into_temp1, into_temp2, outof_temp1, outof_temp2;
1856 rtx first_shift_count, second_shift_count;
1857 optab reverse_unsigned_shift, unsigned_shift;
1859 reverse_unsigned_shift = (left_shift ^ (shift_count < BITS_PER_WORD)
1860 ? lshr_optab : ashl_optab);
1862 unsigned_shift = (left_shift ^ (shift_count < BITS_PER_WORD)
1863 ? ashl_optab : lshr_optab);
1865 if (shift_count > BITS_PER_WORD)
1867 first_shift_count = GEN_INT (shift_count - BITS_PER_WORD);
1868 second_shift_count = GEN_INT (2 * BITS_PER_WORD - shift_count);
1872 first_shift_count = GEN_INT (BITS_PER_WORD - shift_count);
1873 second_shift_count = GEN_INT (shift_count);
1876 into_temp1 = expand_binop (word_mode, unsigned_shift,
1877 outof_input, first_shift_count,
1878 NULL_RTX, unsignedp, next_methods);
1879 into_temp2 = expand_binop (word_mode, reverse_unsigned_shift,
1880 into_input, second_shift_count,
1881 NULL_RTX, unsignedp, next_methods);
1883 if (into_temp1 != 0 && into_temp2 != 0)
1884 inter = expand_binop (word_mode, ior_optab, into_temp1, into_temp2,
1885 into_target, unsignedp, next_methods);
1889 if (inter != 0 && inter != into_target)
1890 emit_move_insn (into_target, inter);
1892 outof_temp1 = expand_binop (word_mode, unsigned_shift,
1893 into_input, first_shift_count,
1894 NULL_RTX, unsignedp, next_methods);
1895 outof_temp2 = expand_binop (word_mode, reverse_unsigned_shift,
1896 outof_input, second_shift_count,
1897 NULL_RTX, unsignedp, next_methods);
1899 if (inter != 0 && outof_temp1 != 0 && outof_temp2 != 0)
1900 inter = expand_binop (word_mode, ior_optab,
1901 outof_temp1, outof_temp2,
1902 outof_target, unsignedp, next_methods);
1904 if (inter != 0 && inter != outof_target)
1905 emit_move_insn (outof_target, inter);
1908 insns = get_insns ();
1913 /* One may be tempted to wrap the insns in a REG_NO_CONFLICT
1914 block to help the register allocator a bit. But a multi-word
1915 rotate will need all the input bits when setting the output
1916 bits, so there clearly is a conflict between the input and
1917 output registers. So we can't use a no-conflict block here. */
1923 /* These can be done a word at a time by propagating carries. */
1924 if ((binoptab == add_optab || binoptab == sub_optab)
1925 && class == MODE_INT
1926 && GET_MODE_SIZE (mode) >= 2 * UNITS_PER_WORD
1927 && optab_handler (binoptab, word_mode)->insn_code != CODE_FOR_nothing)
1930 optab otheroptab = binoptab == add_optab ? sub_optab : add_optab;
1931 const unsigned int nwords = GET_MODE_BITSIZE (mode) / BITS_PER_WORD;
1932 rtx carry_in = NULL_RTX, carry_out = NULL_RTX;
1933 rtx xop0, xop1, xtarget;
1935 /* We can handle either a 1 or -1 value for the carry. If STORE_FLAG
1936 value is one of those, use it. Otherwise, use 1 since it is the
1937 one easiest to get. */
1938 #if STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1
1939 int normalizep = STORE_FLAG_VALUE;
1944 /* Prepare the operands. */
1945 xop0 = force_reg (mode, op0);
1946 xop1 = force_reg (mode, op1);
1948 xtarget = gen_reg_rtx (mode);
1950 if (target == 0 || !REG_P (target))
1953 /* Indicate for flow that the entire target reg is being set. */
1955 emit_insn (gen_rtx_CLOBBER (VOIDmode, xtarget));
1957 /* Do the actual arithmetic. */
1958 for (i = 0; i < nwords; i++)
/* Process least-significant word first so carries propagate up.  */
1960 int index = (WORDS_BIG_ENDIAN ? nwords - i - 1 : i);
1961 rtx target_piece = operand_subword (xtarget, index, 1, mode);
1962 rtx op0_piece = operand_subword_force (xop0, index, mode);
1963 rtx op1_piece = operand_subword_force (xop1, index, mode);
1966 /* Main add/subtract of the input operands. */
1967 x = expand_binop (word_mode, binoptab,
1968 op0_piece, op1_piece,
1969 target_piece, unsignedp, next_methods);
1975 /* Store carry from main add/subtract. */
1976 carry_out = gen_reg_rtx (word_mode);
1977 carry_out = emit_store_flag_force (carry_out,
1978 (binoptab == add_optab
1981 word_mode, 1, normalizep);
1988 /* Add/subtract previous carry to main result. */
1989 newx = expand_binop (word_mode,
1990 normalizep == 1 ? binoptab : otheroptab,
1992 NULL_RTX, 1, next_methods);
1996 /* Get out carry from adding/subtracting carry in. */
1997 rtx carry_tmp = gen_reg_rtx (word_mode);
1998 carry_tmp = emit_store_flag_force (carry_tmp,
1999 (binoptab == add_optab
2002 word_mode, 1, normalizep);
2004 /* Logical-ior the two poss. carry together. */
2005 carry_out = expand_binop (word_mode, ior_optab,
2006 carry_out, carry_tmp,
2007 carry_out, 0, next_methods);
2011 emit_move_insn (target_piece, newx);
2015 if (x != target_piece)
2016 emit_move_insn (target_piece, x);
2019 carry_in = carry_out;
/* All words were expanded successfully; copy XTARGET to the real
   TARGET and attach an equivalence note.  */
2022 if (i == GET_MODE_BITSIZE (mode) / (unsigned) BITS_PER_WORD)
2024 if (optab_handler (mov_optab, mode)->insn_code != CODE_FOR_nothing
2025 || ! rtx_equal_p (target, xtarget))
2027 rtx temp = emit_move_insn (target, xtarget);
2029 set_unique_reg_note (temp,
2031 gen_rtx_fmt_ee (binoptab->code, mode,
2042 delete_insns_since (last);
2045 /* Attempt to synthesize double word multiplies using a sequence of word
2046 mode multiplications. We first attempt to generate a sequence using a
2047 more efficient unsigned widening multiply, and if that fails we then
2048 try using a signed widening multiply. */
2050 if (binoptab == smul_optab
2051 && class == MODE_INT
2052 && GET_MODE_SIZE (mode) == 2 * UNITS_PER_WORD
2053 && optab_handler (smul_optab, word_mode)->insn_code != CODE_FOR_nothing
2054 && optab_handler (add_optab, word_mode)->insn_code != CODE_FOR_nothing)
2056 rtx product = NULL_RTX;
2058 if (optab_handler (umul_widen_optab, mode)->insn_code
2059 != CODE_FOR_nothing)
2061 product = expand_doubleword_mult (mode, op0, op1, target,
2064 delete_insns_since (last);
2067 if (product == NULL_RTX
2068 && optab_handler (smul_widen_optab, mode)->insn_code
2069 != CODE_FOR_nothing)
2071 product = expand_doubleword_mult (mode, op0, op1, target,
2074 delete_insns_since (last);
2077 if (product != NULL_RTX)
2079 if (optab_handler (mov_optab, mode)->insn_code != CODE_FOR_nothing)
2081 temp = emit_move_insn (target ? target : product, product);
2082 set_unique_reg_note (temp,
2084 gen_rtx_fmt_ee (MULT, mode,
2092 /* It can't be open-coded in this mode.
2093 Use a library call if one is available and caller says that's ok. */
2095 libfunc = optab_libfunc (binoptab, mode);
2097 && (methods == OPTAB_LIB || methods == OPTAB_LIB_WIDEN))
2101 enum machine_mode op1_mode = mode;
/* Shift libcalls take their count in the target's designated shift
   count mode, not in MODE.  */
2106 if (shift_optab_p (binoptab))
2108 op1_mode = targetm.libgcc_shift_count_mode ();
2109 /* Specify unsigned here,
2110 since negative shift counts are meaningless. */
2111 op1x = convert_to_mode (op1_mode, op1, 1);
2114 if (GET_MODE (op0) != VOIDmode
2115 && GET_MODE (op0) != mode)
2116 op0 = convert_to_mode (mode, op0, unsignedp);
2118 /* Pass 1 for NO_QUEUE so we don't lose any increments
2119 if the libcall is cse'd or moved. */
2120 value = emit_library_call_value (libfunc,
2121 NULL_RTX, LCT_CONST, mode, 2,
2122 op0, mode, op1x, op1_mode);
2124 insns = get_insns ();
2127 target = gen_reg_rtx (mode);
2128 emit_libcall_block (insns, target, value,
2129 gen_rtx_fmt_ee (binoptab->code, mode, op0, op1));
2134 delete_insns_since (last);
2136 /* It can't be done in this mode. Can we do it in a wider mode? */
2138 if (! (methods == OPTAB_WIDEN || methods == OPTAB_LIB_WIDEN
2139 || methods == OPTAB_MUST_WIDEN))
2141 /* Caller says, don't even try. */
2142 delete_insns_since (entry_last);
2146 /* Compute the value of METHODS to pass to recursive calls.
2147 Don't allow widening to be tried recursively. */
2149 methods = (methods == OPTAB_LIB_WIDEN ? OPTAB_LIB : OPTAB_DIRECT);
2151 /* Look for a wider mode of the same class for which it appears we can do
2154 if (CLASS_HAS_WIDER_MODES_P (class))
2156 for (wider_mode = GET_MODE_WIDER_MODE (mode);
2157 wider_mode != VOIDmode;
2158 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
2160 if ((optab_handler (binoptab, wider_mode)->insn_code
2161 != CODE_FOR_nothing)
2162 || (methods == OPTAB_LIB
2163 && optab_libfunc (binoptab, wider_mode)))
2165 rtx xop0 = op0, xop1 = op1;
2168 /* For certain integer operations, we need not actually extend
2169 the narrow operands, as long as we will truncate
2170 the results to the same narrowness. */
2172 if ((binoptab == ior_optab || binoptab == and_optab
2173 || binoptab == xor_optab
2174 || binoptab == add_optab || binoptab == sub_optab
2175 || binoptab == smul_optab || binoptab == ashl_optab)
2176 && class == MODE_INT)
2179 xop0 = widen_operand (xop0, wider_mode, mode,
2180 unsignedp, no_extend);
2182 /* The second operand of a shift must always be extended. */
2183 xop1 = widen_operand (xop1, wider_mode, mode, unsignedp,
2184 no_extend && binoptab != ashl_optab);
2186 temp = expand_binop (wider_mode, binoptab, xop0, xop1, NULL_RTX,
2187 unsignedp, methods);
/* Narrow the wider-mode result back to MODE.  */
2190 if (class != MODE_INT
2191 || !TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode),
2192 GET_MODE_BITSIZE (wider_mode)))
2195 target = gen_reg_rtx (mode);
2196 convert_move (target, temp, 0);
2200 return gen_lowpart (mode, temp);
2203 delete_insns_since (last);
/* Complete failure: discard everything emitted since entry.  */
2208 delete_insns_since (entry_last);
2212 /* Expand a binary operator which has both signed and unsigned forms.
2213 UOPTAB is the optab for unsigned operations, and SOPTAB is for
2216 If we widen unsigned operands, we may use a signed wider operation instead
2217 of an unsigned wider operation, since the result would be the same. */
2220 sign_expand_binop (enum machine_mode mode, optab uoptab, optab soptab,
2221 rtx op0, rtx op1, rtx target, int unsignedp,
2222 enum optab_methods methods)
2225 optab direct_optab = unsignedp ? uoptab : soptab;
2226 struct optab wide_soptab;
2228 /* Do it without widening, if possible. */
2229 temp = expand_binop (mode, direct_optab, op0, op1, target,
2230 unsignedp, OPTAB_DIRECT);
2231 if (temp || methods == OPTAB_DIRECT)
2234 /* Try widening to a signed int. Make a fake signed optab that
2235 hides any signed insn for direct use. */
/* Copy SOPTAB by value and disable its direct insn in MODE, so the
   recursive call can only succeed by actually widening.  */
2236 wide_soptab = *soptab;
2237 optab_handler (&wide_soptab, mode)->insn_code = CODE_FOR_nothing;
2239 temp = expand_binop (mode, &wide_soptab, op0, op1, target,
2240 unsignedp, OPTAB_WIDEN);
2242 /* For unsigned operands, try widening to an unsigned int. */
2243 if (temp == 0 && unsignedp)
2244 temp = expand_binop (mode, uoptab, op0, op1, target,
2245 unsignedp, OPTAB_WIDEN);
2246 if (temp || methods == OPTAB_WIDEN)
2249 /* Use the right width lib call if that exists. */
2250 temp = expand_binop (mode, direct_optab, op0, op1, target, unsignedp, OPTAB_LIB);
2251 if (temp || methods == OPTAB_LIB)
2254 /* Must widen and use a lib call, use either signed or unsigned. */
2255 temp = expand_binop (mode, &wide_soptab, op0, op1, target,
2256 unsignedp, methods);
/* Last resort: the unsigned optab with the caller's methods.  */
2260 return expand_binop (mode, uoptab, op0, op1, target,
2261 unsignedp, methods);
2265 /* Generate code to perform an operation specified by UNOPPTAB
2266 on operand OP0, with two results to TARG0 and TARG1.
2267 We assume that the order of the operands for the instruction
2268 is TARG0, TARG1, OP0.
2270 Either TARG0 or TARG1 may be zero, but what that means is that
2271 the result is not actually wanted. We will generate it into
2272 a dummy pseudo-reg and discard it. They may not both be zero.
2274 Returns 1 if this operation can be performed; 0 if not. */
2277 expand_twoval_unop (optab unoptab, rtx op0, rtx targ0, rtx targ1,
/* MODE is taken from whichever result the caller actually wants.  */
2280 enum machine_mode mode = GET_MODE (targ0 ? targ0 : targ1);
2281 enum mode_class class;
2282 enum machine_mode wider_mode;
2283 rtx entry_last = get_last_insn ();
2286 class = GET_MODE_CLASS (mode);
/* Supply dummy pseudos for any result the caller did not ask for
   (see the header comment).  */
2289 targ0 = gen_reg_rtx (mode);
2291 targ1 = gen_reg_rtx (mode);
2293 /* Record where to go back to if we fail. */
2294 last = get_last_insn ();
2296 if (optab_handler (unoptab, mode)->insn_code != CODE_FOR_nothing)
2298 int icode = (int) optab_handler (unoptab, mode)->insn_code;
/* The sole input is operand 2 of the pattern (after the two outputs).  */
2299 enum machine_mode mode0 = insn_data[icode].operand[2].mode;
2303 if (GET_MODE (xop0) != VOIDmode
2304 && GET_MODE (xop0) != mode0)
2305 xop0 = convert_to_mode (mode0, xop0, unsignedp);
2307 /* Now, if insn doesn't accept these operands, put them into pseudos. */
2308 if (!insn_data[icode].operand[2].predicate (xop0, mode0))
2309 xop0 = copy_to_mode_reg (mode0, xop0);
2311 /* We could handle this, but we should always be called with a pseudo
2312 for our targets and all insns should take them as outputs. */
2313 gcc_assert (insn_data[icode].operand[0].predicate (targ0, mode));
2314 gcc_assert (insn_data[icode].operand[1].predicate (targ1, mode));
2316 pat = GEN_FCN (icode) (targ0, targ1, xop0);
2323 delete_insns_since (last);
2326 /* It can't be done in this mode. Can we do it in a wider mode? */
2328 if (CLASS_HAS_WIDER_MODES_P (class))
2330 for (wider_mode = GET_MODE_WIDER_MODE (mode);
2331 wider_mode != VOIDmode;
2332 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
2334 if (optab_handler (unoptab, wider_mode)->insn_code
2335 != CODE_FOR_nothing)
/* Recurse in WIDER_MODE, then narrow both results back.  */
2337 rtx t0 = gen_reg_rtx (wider_mode);
2338 rtx t1 = gen_reg_rtx (wider_mode);
2339 rtx cop0 = convert_modes (wider_mode, mode, op0, unsignedp);
2341 if (expand_twoval_unop (unoptab, cop0, t0, t1, unsignedp))
2343 convert_move (targ0, t0, unsignedp);
2344 convert_move (targ1, t1, unsignedp);
2348 delete_insns_since (last);
/* Total failure: discard everything emitted since entry.  */
2353 delete_insns_since (entry_last);
2357 /* Generate code to perform an operation specified by BINOPTAB
2358 on operands OP0 and OP1, with two results to TARG1 and TARG2.
2359 We assume that the order of the operands for the instruction
2360 is TARG0, OP0, OP1, TARG1, which would fit a pattern like
2361 [(set TARG0 (operate OP0 OP1)) (set TARG1 (operate ...))].
2363 Either TARG0 or TARG1 may be zero, but what that means is that
2364 the result is not actually wanted. We will generate it into
2365 a dummy pseudo-reg and discard it. They may not both be zero.
2367 Returns 1 if this operation can be performed; 0 if not. */
/* NOTE(review): embedded line numbers jump (2370 -> 2373 etc.); braces and
   some declarations (xop0/xop1/pat/last, the CONST_INT sign arguments to
   convert_modes) are missing from this extract.  Comments only added.  */
2370 expand_twoval_binop (optab binoptab, rtx op0, rtx op1, rtx targ0, rtx targ1,
2373 enum machine_mode mode = GET_MODE (targ0 ? targ0 : targ1);
2374 enum mode_class class;
2375 enum machine_mode wider_mode;
2376 rtx entry_last = get_last_insn ();
2379 class = GET_MODE_CLASS (mode);
/* Unwanted results (zero TARG0/TARG1) go into scratch pseudos.  */
2382 targ0 = gen_reg_rtx (mode);
2384 targ1 = gen_reg_rtx (mode);
2386 /* Record where to go back to if we fail. */
2387 last = get_last_insn ();
2389 if (optab_handler (binoptab, mode)->insn_code != CODE_FOR_nothing)
2391 int icode = (int) optab_handler (binoptab, mode)->insn_code;
/* Pattern operand order is TARG0, OP0, OP1, TARG1 (see header comment),
   so operands 1 and 2 are the inputs.  */
2392 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
2393 enum machine_mode mode1 = insn_data[icode].operand[2].mode;
2395 rtx xop0 = op0, xop1 = op1;
2397 /* If we are optimizing, force expensive constants into a register. */
2398 xop0 = avoid_expensive_constant (mode0, binoptab, xop0, unsignedp);
2399 xop1 = avoid_expensive_constant (mode1, binoptab, xop1, unsignedp);
2401 /* In case the insn wants input operands in modes different from
2402 those of the actual operands, convert the operands. It would
2403 seem that we don't need to convert CONST_INTs, but we do, so
2404 that they're properly zero-extended, sign-extended or truncated
2407 if (GET_MODE (op0) != mode0 && mode0 != VOIDmode)
2408 xop0 = convert_modes (mode0,
2409 GET_MODE (op0) != VOIDmode
2414 if (GET_MODE (op1) != mode1 && mode1 != VOIDmode)
2415 xop1 = convert_modes (mode1,
2416 GET_MODE (op1) != VOIDmode
2421 /* Now, if insn doesn't accept these operands, put them into pseudos. */
2422 if (!insn_data[icode].operand[1].predicate (xop0, mode0))
2423 xop0 = copy_to_mode_reg (mode0, xop0)
2425 if (!insn_data[icode].operand[2].predicate (xop1, mode1))
2426 xop1 = copy_to_mode_reg (mode1, xop1);
2428 /* We could handle this, but we should always be called with a pseudo
2429 for our targets and all insns should take them as outputs. */
2430 gcc_assert (insn_data[icode].operand[0].predicate (targ0, mode));
2431 gcc_assert (insn_data[icode].operand[3].predicate (targ1, mode));
2433 pat = GEN_FCN (icode) (targ0, xop0, xop1, targ1);
/* Pattern generation failed; back out the partial sequence.  */
2440 delete_insns_since (last);
2443 /* It can't be done in this mode. Can we do it in a wider mode? */
2445 if (CLASS_HAS_WIDER_MODES_P (class))
2447 for (wider_mode = GET_MODE_WIDER_MODE (mode);
2448 wider_mode != VOIDmode;
2449 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
2451 if (optab_handler (binoptab, wider_mode)->insn_code
2452 != CODE_FOR_nothing)
2454 rtx t0 = gen_reg_rtx (wider_mode);
2455 rtx t1 = gen_reg_rtx (wider_mode);
2456 rtx cop0 = convert_modes (wider_mode, mode, op0, unsignedp);
2457 rtx cop1 = convert_modes (wider_mode, mode, op1, unsignedp);
/* Recurse in the wider mode; on success, narrow both results.  */
2459 if (expand_twoval_binop (binoptab, cop0, cop1,
2462 convert_move (targ0, t0, unsignedp);
2463 convert_move (targ1, t1, unsignedp);
2467 delete_insns_since (last);
/* Total failure: remove everything emitted since function entry.  */
2472 delete_insns_since (entry_last);
2476 /* Expand the two-valued library call indicated by BINOPTAB, but
2477 preserve only one of the values. If TARG0 is non-NULL, the first
2478 value is placed into TARG0; otherwise the second value is placed
2479 into TARG1. Exactly one of TARG0 and TARG1 must be non-NULL. The
2480 value stored into TARG0 or TARG1 is equivalent to (CODE OP0 OP1).
2481 This routine assumes that the value returned by the library call is
2482 as if the return value was of an integral mode twice as wide as the
2483 mode of OP0. Returns 1 if the call was successful. */
/* NOTE(review): this extract has gaps (line numbers jump); declarations of
   libfunc, libval and insns, and some call arguments, are missing.  */
2486 expand_twoval_binop_libfunc (optab binoptab, rtx op0, rtx op1,
2487 rtx targ0, rtx targ1, enum rtx_code code)
2489 enum machine_mode mode;
2490 enum machine_mode libval_mode;
2495 /* Exactly one of TARG0 or TARG1 should be non-NULL. */
2496 gcc_assert (!targ0 != !targ1);
2498 mode = GET_MODE (op0);
2499 libfunc = optab_libfunc (binoptab, mode);
2503 /* The value returned by the library function will have twice as
2504 many bits as the nominal MODE. */
2505 libval_mode = smallest_mode_for_size (2 * GET_MODE_BITSIZE (mode),
2508 libval = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
/* The low half of the double-wide return value holds the TARG0 result;
   the second half (offset GET_MODE_SIZE (mode)) holds the TARG1 result.  */
2512 /* Get the part of VAL containing the value that we want. */
2513 libval = simplify_gen_subreg (mode, libval, libval_mode,
2514 targ0 ? 0 : GET_MODE_SIZE (mode));
2515 insns = get_insns ();
2517 /* Move the result into the desired location. */
2518 emit_libcall_block (insns, targ0 ? targ0 : targ1, libval,
2519 gen_rtx_fmt_ee (code, mode, op0, op1));
2525 /* Wrapper around expand_unop which takes an rtx code to specify
2526 the operation to perform, not an optab pointer. All other
2527 arguments are the same. */
/* Thin wrapper: map the rtx CODE to its optab via code_to_optab and
   delegate to expand_unop.  NOTE(review): line 2533-2534 of the original
   are missing here — presumably a gcc_assert on UNOP; confirm upstream.  */
2529 expand_simple_unop (enum machine_mode mode, enum rtx_code code, rtx op0,
2530 rtx target, int unsignedp)
2532 optab unop = code_to_optab[(int) code];
2535 return expand_unop (mode, unop, op0, target, unsignedp);
2541 (clz:wide (zero_extend:wide x)) - ((width wide) - (width narrow)). */
/* Compute clz in the narrowest wider mode that has a clz pattern:
   (clz:wide (zero_extend:wide x)) minus the width difference (see the
   formula in the comment above).  NOTE(review): braces and the final
   return statements are missing from this extract.  */
2543 widen_clz (enum machine_mode mode, rtx op0, rtx target)
2545 enum mode_class class = GET_MODE_CLASS (mode);
2546 if (CLASS_HAS_WIDER_MODES_P (class))
2548 enum machine_mode wider_mode;
2549 for (wider_mode = GET_MODE_WIDER_MODE (mode);
2550 wider_mode != VOIDmode;
2551 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
2553 if (optab_handler (clz_optab, wider_mode)->insn_code
2554 != CODE_FOR_nothing)
2556 rtx xop0, temp, last;
2558 last = get_last_insn ();
2561 target = gen_reg_rtx (mode);
/* Zero-extend the operand (unsignedp = true) so the wide clz counts
   the extra leading zeros we then subtract off.  */
2562 xop0 = widen_operand (op0, wider_mode, mode, true, false);
2563 temp = expand_unop (wider_mode, clz_optab, xop0, NULL_RTX, true);
2565 temp = expand_binop (wider_mode, sub_optab, temp,
2566 GEN_INT (GET_MODE_BITSIZE (wider_mode)
2567 - GET_MODE_BITSIZE (mode)),
2568 target, true, OPTAB_DIRECT);
/* On failure, discard the partial sequence before trying elsewhere.  */
2570 delete_insns_since (last);
2579 /* Try calculating clz of a double-word quantity as two clz's of word-sized
2580 quantities, choosing which based on whether the high word is nonzero. */
/* clz of a double-word value via two word-sized clz's:
   clz(x) = hi != 0 ? clz(hi) : clz(lo) + BITS_PER_WORD.
   NOTE(review): braces, start_sequence/end_sequence and the failure
   paths are missing from this extract (line numbers jump).  */
2582 expand_doubleword_clz (enum machine_mode mode, rtx op0, rtx target)
2584 rtx xop0 = force_reg (mode, op0);
2585 rtx subhi = gen_highpart (word_mode, xop0);
2586 rtx sublo = gen_lowpart (word_mode, xop0);
2587 rtx hi0_label = gen_label_rtx ();
2588 rtx after_label = gen_label_rtx ();
2589 rtx seq, temp, result;
2591 /* If we were not given a target, use a word_mode register, not a
2592 'mode' register. The result will fit, and nobody is expecting
2593 anything bigger (the return type of __builtin_clz* is int). */
2595 target = gen_reg_rtx (word_mode);
2597 /* In any case, write to a word_mode scratch in both branches of the
2598 conditional, so we can ensure there is a single move insn setting
2599 'target' to tag a REG_EQUAL note on. */
2600 result = gen_reg_rtx (word_mode);
2604 /* If the high word is not equal to zero,
2605 then clz of the full value is clz of the high word. */
2606 emit_cmp_and_jump_insns (subhi, CONST0_RTX (word_mode), EQ, 0,
2607 word_mode, true, hi0_label);
2609 temp = expand_unop_direct (word_mode, clz_optab, subhi, result, true);
2614 convert_move (result, temp, true);
2616 emit_jump_insn (gen_jump (after_label));
2619 /* Else clz of the full value is clz of the low word plus the number
2620 of bits in the high word. */
2621 emit_label (hi0_label);
2623 temp = expand_unop_direct (word_mode, clz_optab, sublo, 0, true);
2626 temp = expand_binop (word_mode, add_optab, temp,
2627 GEN_INT (GET_MODE_BITSIZE (word_mode)),
2628 result, true, OPTAB_DIRECT);
2632 convert_move (result, temp, true);
/* Join point: copy the scratch into TARGET with a single move so the
   REG_EQUAL note below attaches cleanly.  */
2634 emit_label (after_label);
2635 convert_move (target, result, true);
2640 add_equal_note (seq, target, CLZ, xop0, 0);
2652 (lshiftrt:wide (bswap:wide x) ((width wide) - (width narrow))). */
/* Compute bswap in a wider mode that has a bswap pattern, then shift the
   result right by the width difference (formula in the comment above).
   NOTE(review): declarations of x and last, the found/not-found control
   flow and return statements are missing from this extract.  */
2654 widen_bswap (enum machine_mode mode, rtx op0, rtx target)
2656 enum mode_class class = GET_MODE_CLASS (mode);
2657 enum machine_mode wider_mode;
2660 if (!CLASS_HAS_WIDER_MODES_P (class))
/* Scan for the first wider mode providing a bswap pattern.  */
2663 for (wider_mode = GET_MODE_WIDER_MODE (mode);
2664 wider_mode != VOIDmode;
2665 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
2666 if (optab_handler (bswap_optab, wider_mode)->insn_code != CODE_FOR_nothing)
2671 last = get_last_insn ();
2673 x = widen_operand (op0, wider_mode, mode, true, true);
2674 x = expand_unop (wider_mode, bswap_optab, x, NULL_RTX, true);
/* The swapped narrow value lands in the high bytes of the wide mode;
   shift it down so it can be read back via gen_lowpart.  */
2677 x = expand_shift (RSHIFT_EXPR, wider_mode, x,
2678 size_int (GET_MODE_BITSIZE (wider_mode)
2679 - GET_MODE_BITSIZE (mode)),
2685 target = gen_reg_rtx (mode);
2686 emit_move_insn (target, gen_lowpart (mode, x));
2689 delete_insns_since (last);
2694 /* Try calculating bswap as two bswaps of two word-sized operands. */
/* Byte-swap a double-word value: bswap each word individually, then store
   them in swapped positions (word 0's swap goes to word 1 and vice versa).
   NOTE(review): the return statement is missing from this extract.  */
2697 expand_doubleword_bswap (enum machine_mode mode, rtx op, rtx target)
2701 t1 = expand_unop (word_mode, bswap_optab,
2702 operand_subword_force (op, 0, mode), NULL_RTX, true);
2703 t0 = expand_unop (word_mode, bswap_optab,
2704 operand_subword_force (op, 1, mode), NULL_RTX, true);
2707 target = gen_reg_rtx (mode);
/* Clobber first so the two subword stores are seen as a full-register
   definition rather than partial updates of an undefined value.  */
2709 emit_insn (gen_rtx_CLOBBER (VOIDmode, target));
2710 emit_move_insn (operand_subword (target, 0, 1, mode), t0);
2711 emit_move_insn (operand_subword (target, 1, 1, mode), t1);
2716 /* Try calculating (parity x) as (and (popcount x) 1), where
2717 popcount can also be done in a wider mode. */
/* Compute parity(x) as popcount(x) & 1, using popcount in MODE itself or
   any wider mode (note the loop starts at MODE, not the next wider one).
   NOTE(review): braces and return statements are missing from this
   extract (line numbers jump).  */
2719 expand_parity (enum machine_mode mode, rtx op0, rtx target)
2721 enum mode_class class = GET_MODE_CLASS (mode);
2722 if (CLASS_HAS_WIDER_MODES_P (class))
2724 enum machine_mode wider_mode;
2725 for (wider_mode = mode; wider_mode != VOIDmode;
2726 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
2728 if (optab_handler (popcount_optab, wider_mode)->insn_code
2729 != CODE_FOR_nothing)
2731 rtx xop0, temp, last;
2733 last = get_last_insn ();
2736 target = gen_reg_rtx (mode);
/* Zero-extend so the widened value has the same population count.  */
2737 xop0 = widen_operand (op0, wider_mode, mode, true, false);
2738 temp = expand_unop (wider_mode, popcount_optab, xop0, NULL_RTX,
2741 temp = expand_binop (wider_mode, and_optab, temp, const1_rtx,
2742 target, true, OPTAB_DIRECT);
2744 delete_insns_since (last);
2753 /* Try calculating ctz(x) as K - clz(x & -x) ,
2754 where K is GET_MODE_BITSIZE(mode) - 1.
2756 Both __builtin_ctz and __builtin_clz are undefined at zero, so we
2757 don't have to worry about what the hardware does in that case. (If
2758 the clz instruction produces the usual value at 0, which is K, the
2759 result of this code sequence will be -1; expand_ffs, below, relies
2760 on this. It might be nice to have it be K instead, for consistency
2761 with the (very few) processors that provide a ctz with a defined
2762 value, but that would take one more instruction, and it would be
2763 less convenient for expand_ffs anyway. */
/* ctz(x) = (bitsize - 1) - clz(x & -x); see the long comment above for
   why behavior at zero does not matter here.  NOTE(review): the
   start_sequence/end_sequence bracketing, null checks after each expand
   and the return are missing from this extract.  */
2766 expand_ctz (enum machine_mode mode, rtx op0, rtx target)
2770 if (optab_handler (clz_optab, mode)->insn_code == CODE_FOR_nothing)
/* x & -x isolates the lowest set bit, so clz of it locates that bit.  */
2775 temp = expand_unop_direct (mode, neg_optab, op0, NULL_RTX, true);
2777 temp = expand_binop (mode, and_optab, op0, temp, NULL_RTX,
2778 true, OPTAB_DIRECT);
2780 temp = expand_unop_direct (mode, clz_optab, temp, NULL_RTX, true);
2782 temp = expand_binop (mode, sub_optab, GEN_INT (GET_MODE_BITSIZE (mode) - 1),
2784 true, OPTAB_DIRECT);
2794 add_equal_note (seq, temp, CTZ, op0, 0);
2800 /* Try calculating ffs(x) using ctz(x) if we have that instruction, or
2801 else with the sequence used by expand_clz.
2803 The ffs builtin promises to return zero for a zero value and ctz/clz
2804 may have an undefined value in that case. If they do not give us a
2805 convenient value, we have to generate a test and branch. */
/* ffs(x) = ctz(x) + 1, with a test-and-branch correction at x == 0 unless
   the hardware's ctz/clz already yields the convenient value -1 there.
   NOTE(review): braces, sequence bracketing and some failure paths are
   missing from this extract (line numbers jump).  */
2807 expand_ffs (enum machine_mode mode, rtx op0, rtx target)
2809 HOST_WIDE_INT val = 0;
2810 bool defined_at_zero = false;
2813 if (optab_handler (ctz_optab, mode)->insn_code != CODE_FOR_nothing)
2817 temp = expand_unop_direct (mode, ctz_optab, op0, 0, true);
/* == 2 means the target documents a specific ctz(0) value in VAL.  */
2821 defined_at_zero = (CTZ_DEFINED_VALUE_AT_ZERO (mode, val) == 2);
2823 else if (optab_handler (clz_optab, mode)->insn_code != CODE_FOR_nothing)
2826 temp = expand_ctz (mode, op0, 0);
2830 if (CLZ_DEFINED_VALUE_AT_ZERO (mode, val) == 2)
2832 defined_at_zero = true;
/* expand_ctz computed (bitsize-1) - clz, so translate clz's defined
   zero-value through the same formula.  */
2833 val = (GET_MODE_BITSIZE (mode) - 1) - val;
2839 if (defined_at_zero && val == -1)
2840 /* No correction needed at zero. */;
2843 /* We don't try to do anything clever with the situation found
2844 on some processors (eg Alpha) where ctz(0:mode) ==
2845 bitsize(mode). If someone can think of a way to send N to -1
2846 and leave alone all values in the range 0..N-1 (where N is a
2847 power of two), cheaper than this test-and-branch, please add it.
2849 The test-and-branch is done after the operation itself, in case
2850 the operation sets condition codes that can be recycled for this.
2851 (This is true on i386, for instance.) */
2853 rtx nonzero_label = gen_label_rtx ();
2854 emit_cmp_and_jump_insns (op0, CONST0_RTX (mode), NE, 0,
2855 mode, true, nonzero_label);
2857 convert_move (temp, GEN_INT (-1), false);
2858 emit_label (nonzero_label);
2861 /* temp now has a value in the range -1..bitsize-1. ffs is supposed
2862 to produce a value in the range 0..bitsize. */
2863 temp = expand_binop (mode, add_optab, temp, GEN_INT (1),
2864 target, false, OPTAB_DIRECT);
2871 add_equal_note (seq, temp, FFS, op0, 0);
2880 /* Extract the OMODE lowpart from VAL, which has IMODE. Under certain
2881 conditions, VAL may already be a SUBREG against which we cannot generate
2882 a further SUBREG. In this case, we expect forcing the value into a
2883 register will work around the situation. */
/* Take the OMODE lowpart of VAL (mode IMODE); if the direct SUBREG cannot
   be formed (VAL may itself be a SUBREG — see comment above), force VAL
   into a register and retry, asserting that this must succeed.
   NOTE(review): the early-return on a non-NULL first attempt and the
   final return are missing from this extract.  */
2886 lowpart_subreg_maybe_copy (enum machine_mode omode, rtx val,
2887 enum machine_mode imode)
2890 ret = lowpart_subreg (omode, val, imode);
2893 val = force_reg (imode, val);
2894 ret = lowpart_subreg (omode, val, imode);
2895 gcc_assert (ret != NULL);
2900 /* Expand a floating point absolute value or negation operation via a
2901 logical operation on the sign bit. */
/* Implement float ABS/NEG by masking (AND ~signbit) or flipping (XOR
   signbit) the sign bit in an integer view of the value, either in a
   single integer mode or word-by-word for multiword floats.
   NOTE(review): braces, several early returns (e.g. after the BLKmode
   check) and the sequence bracketing are missing from this extract.  */
2904 expand_absneg_bit (enum rtx_code code, enum machine_mode mode,
2905 rtx op0, rtx target)
2907 const struct real_format *fmt;
2908 int bitpos, word, nwords, i;
2909 enum machine_mode imode;
2910 HOST_WIDE_INT hi, lo;
2913 /* The format has to have a simple sign bit. */
2914 fmt = REAL_MODE_FORMAT (mode);
2918 bitpos = fmt->signbit_rw;
2922 /* Don't create negative zeros if the format doesn't support them. */
2923 if (code == NEG && !fmt->has_signed_zero)
/* Single-word case: operate on the whole value in one integer mode.  */
2926 if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
2928 imode = int_mode_for_mode (mode);
2929 if (imode == BLKmode)
/* Multiword case: locate which word holds the sign bit.  */
2938 if (FLOAT_WORDS_BIG_ENDIAN)
2939 word = (GET_MODE_BITSIZE (mode) - bitpos) / BITS_PER_WORD;
2941 word = bitpos / BITS_PER_WORD;
2942 bitpos = bitpos % BITS_PER_WORD;
2943 nwords = (GET_MODE_BITSIZE (mode) + BITS_PER_WORD - 1) / BITS_PER_WORD;
/* Build the sign-bit mask as a (lo, hi) HOST_WIDE_INT pair.  */
2946 if (bitpos < HOST_BITS_PER_WIDE_INT)
2949 lo = (HOST_WIDE_INT) 1 << bitpos;
2953 hi = (HOST_WIDE_INT) 1 << (bitpos - HOST_BITS_PER_WIDE_INT);
2959 if (target == 0 || target == op0)
2960 target = gen_reg_rtx (mode);
2966 for (i = 0; i < nwords; ++i)
2968 rtx targ_piece = operand_subword (target, i, 1, mode);
2969 rtx op0_piece = operand_subword_force (op0, i, mode);
/* Only the sign-carrying word is AND'ed/XOR'ed; others copy through.  */
2973 temp = expand_binop (imode, code == ABS ? and_optab : xor_optab,
2975 immed_double_const (lo, hi, imode),
2976 targ_piece, 1, OPTAB_LIB_WIDEN);
2977 if (temp != targ_piece)
2978 emit_move_insn (targ_piece, temp);
2981 emit_move_insn (targ_piece, op0_piece);
2984 insns = get_insns ();
2987 temp = gen_rtx_fmt_e (code, mode, copy_rtx (op0));
2988 emit_no_conflict_block (insns, target, op0, NULL_RTX, temp);
/* Single-word path: one logical op on the integer view of OP0.  */
2992 temp = expand_binop (imode, code == ABS ? and_optab : xor_optab,
2993 gen_lowpart (imode, op0),
2994 immed_double_const (lo, hi, imode),
2995 gen_lowpart (imode, target), 1, OPTAB_LIB_WIDEN);
2996 target = lowpart_subreg_maybe_copy (mode, temp, imode);
2998 set_unique_reg_note (get_last_insn (), REG_EQUAL,
2999 gen_rtx_fmt_e (code, mode, copy_rtx (op0)));
3005 /* As expand_unop, but will fail rather than attempt the operation in a
3006 different mode or with a libcall. */
/* Like expand_unop, but only via a direct insn pattern in MODE — no mode
   widening and no libcall fallback; returns 0 (on a missing line) when
   the pattern is absent or fails.  NOTE(review): braces, the declarations
   of temp/xop0/pat and the emit_insn/return statements are missing.  */
3008 expand_unop_direct (enum machine_mode mode, optab unoptab, rtx op0, rtx target,
3011 if (optab_handler (unoptab, mode)->insn_code != CODE_FOR_nothing)
3013 int icode = (int) optab_handler (unoptab, mode)->insn_code;
3014 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
3016 rtx last = get_last_insn ();
3022 temp = gen_reg_rtx (mode);
3024 if (GET_MODE (xop0) != VOIDmode
3025 && GET_MODE (xop0) != mode0)
3026 xop0 = convert_to_mode (mode0, xop0, unsignedp);
3028 /* Now, if insn doesn't accept our operand, put it into a pseudo. */
3030 if (!insn_data[icode].operand[1].predicate (xop0, mode0))
3031 xop0 = copy_to_mode_reg (mode0, xop0);
3033 if (!insn_data[icode].operand[0].predicate (temp, mode))
3034 temp = gen_reg_rtx (mode);
3036 pat = GEN_FCN (icode) (temp, xop0);
/* Multi-insn expansion where the REG_EQUAL note cannot be attached:
   throw it away and retry from scratch via the generic path.  */
3039 if (INSN_P (pat) && NEXT_INSN (pat) != NULL_RTX
3040 && ! add_equal_note (pat, temp, unoptab->code, xop0, NULL_RTX))
3042 delete_insns_since (last);
3043 return expand_unop (mode, unoptab, op0, NULL_RTX, unsignedp);
3051 delete_insns_since (last);
3056 /* Generate code to perform an operation specified by UNOPTAB
3057 on operand OP0, with result having machine-mode MODE.
3059 UNSIGNEDP is for the case where we have to widen the operands
3060 to perform the operation. It says to use zero-extension.
3062 If TARGET is nonzero, the value
3063 is generated there, if it is convenient to do so.
3064 In all cases an rtx is returned for the locus of the value;
3065 this may or may not be TARGET. */
/* Main unary-operation expander.  Strategy, in order: direct pattern
   (expand_unop_direct), special open-coded widenings for clz/bswap,
   generic open-coded widening, word-at-a-time one_cmpl, bit-twiddled
   float NEG, synthesized parity/ffs/ctz, a same-mode libcall, a
   wider-mode pattern-or-libcall, and finally NEG as 0 - x.
   NOTE(review): braces, several declarations (temp, libfunc, xop0, value,
   insns) and various early returns are missing from this extract.  */
3068 expand_unop (enum machine_mode mode, optab unoptab, rtx op0, rtx target,
3071 enum mode_class class = GET_MODE_CLASS (mode);
3072 enum machine_mode wider_mode;
3076 temp = expand_unop_direct (mode, unoptab, op0, target, unsignedp);
3080 /* It can't be done in this mode. Can we open-code it in a wider mode? */
3082 /* Widening (or narrowing) clz needs special treatment. */
3083 if (unoptab == clz_optab)
3085 temp = widen_clz (mode, op0, target);
/* Double-word clz falls back to two word-mode clz's with a branch.  */
3089 if (GET_MODE_SIZE (mode) == 2 * UNITS_PER_WORD
3090 && optab_handler (unoptab, word_mode)->insn_code != CODE_FOR_nothing)
3092 temp = expand_doubleword_clz (mode, op0, target);
3100 /* Widening (or narrowing) bswap needs special treatment. */
3101 if (unoptab == bswap_optab)
3103 temp = widen_bswap (mode, op0, target);
3107 if (GET_MODE_SIZE (mode) == 2 * UNITS_PER_WORD
3108 && optab_handler (unoptab, word_mode)->insn_code != CODE_FOR_nothing)
3110 temp = expand_doubleword_bswap (mode, op0, target);
/* Generic widening: recurse in each wider mode with a direct pattern.  */
3118 if (CLASS_HAS_WIDER_MODES_P (class))
3119 for (wider_mode = GET_MODE_WIDER_MODE (mode);
3120 wider_mode != VOIDmode;
3121 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
3123 if (optab_handler (unoptab, wider_mode)->insn_code != CODE_FOR_nothing)
3126 rtx last = get_last_insn ();
3128 /* For certain operations, we need not actually extend
3129 the narrow operand, as long as we will truncate the
3130 results to the same narrowness. */
3132 xop0 = widen_operand (xop0, wider_mode, mode, unsignedp,
3133 (unoptab == neg_optab
3134 || unoptab == one_cmpl_optab)
3135 && class == MODE_INT);
3137 temp = expand_unop (wider_mode, unoptab, xop0, NULL_RTX,
/* Narrow the wide result: explicit convert_move when truncation is
   not a no-op (or for non-integer classes), else a cheap lowpart.  */
3142 if (class != MODE_INT
3143 || !TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode),
3144 GET_MODE_BITSIZE (wider_mode)))
3147 target = gen_reg_rtx (mode);
3148 convert_move (target, temp, 0);
3152 return gen_lowpart (mode, temp);
3155 delete_insns_since (last);
3159 /* These can be done a word at a time. */
3160 if (unoptab == one_cmpl_optab
3161 && class == MODE_INT
3162 && GET_MODE_SIZE (mode) > UNITS_PER_WORD
3163 && optab_handler (unoptab, word_mode)->insn_code != CODE_FOR_nothing)
3168 if (target == 0 || target == op0)
3169 target = gen_reg_rtx (mode);
3173 /* Do the actual arithmetic. */
3174 for (i = 0; i < GET_MODE_BITSIZE (mode) / BITS_PER_WORD; i++)
3176 rtx target_piece = operand_subword (target, i, 1, mode);
3177 rtx x = expand_unop (word_mode, unoptab,
3178 operand_subword_force (op0, i, mode),
3179 target_piece, unsignedp);
3181 if (target_piece != x)
3182 emit_move_insn (target_piece, x);
3185 insns = get_insns ();
3188 emit_no_conflict_block (insns, target, op0, NULL_RTX,
3189 gen_rtx_fmt_e (unoptab->code, mode,
3194 if (unoptab->code == NEG)
3196 /* Try negating floating point values by flipping the sign bit. */
3197 if (SCALAR_FLOAT_MODE_P (mode))
3199 temp = expand_absneg_bit (NEG, mode, op0, target);
3204 /* If there is no negation pattern, and we have no negative zero,
3205 try subtracting from zero. */
3206 if (!HONOR_SIGNED_ZEROS (mode))
3208 temp = expand_binop (mode, (unoptab == negv_optab
3209 ? subv_optab : sub_optab),
3210 CONST0_RTX (mode), op0, target,
3211 unsignedp, OPTAB_DIRECT);
3217 /* Try calculating parity (x) as popcount (x) % 2. */
3218 if (unoptab == parity_optab)
3220 temp = expand_parity (mode, op0, target);
3225 /* Try implementing ffs (x) in terms of clz (x). */
3226 if (unoptab == ffs_optab)
3228 temp = expand_ffs (mode, op0, target);
3233 /* Try implementing ctz (x) in terms of clz (x). */
3234 if (unoptab == ctz_optab)
3236 temp = expand_ctz (mode, op0, target);
3242 /* Now try a library call in this mode. */
3243 libfunc = optab_libfunc (unoptab, mode);
3248 enum machine_mode outmode = mode;
3250 /* All of these functions return small values. Thus we choose to
3251 have them return something that isn't a double-word. */
3252 if (unoptab == ffs_optab || unoptab == clz_optab || unoptab == ctz_optab
3253 || unoptab == popcount_optab || unoptab == parity_optab)
3255 = GET_MODE (hard_libcall_value (TYPE_MODE (integer_type_node)));
3259 /* Pass 1 for NO_QUEUE so we don't lose any increments
3260 if the libcall is cse'd or moved. */
3261 value = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST, outmode,
3263 insns = get_insns ();
3266 target = gen_reg_rtx (outmode);
3267 emit_libcall_block (insns, target, value,
3268 gen_rtx_fmt_e (unoptab->code, outmode, op0));
3273 /* It can't be done in this mode. Can we do it in a wider mode? */
/* Second widening pass: unlike the first, this also accepts modes that
   only provide a libcall.  */
3275 if (CLASS_HAS_WIDER_MODES_P (class))
3277 for (wider_mode = GET_MODE_WIDER_MODE (mode);
3278 wider_mode != VOIDmode;
3279 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
3281 if ((optab_handler (unoptab, wider_mode)->insn_code
3282 != CODE_FOR_nothing)
3283 || optab_libfunc (unoptab, wider_mode))
3286 rtx last = get_last_insn ();
3288 /* For certain operations, we need not actually extend
3289 the narrow operand, as long as we will truncate the
3290 results to the same narrowness. */
3292 xop0 = widen_operand (xop0, wider_mode, mode, unsignedp,
3293 (unoptab == neg_optab
3294 || unoptab == one_cmpl_optab)
3295 && class == MODE_INT);
3297 temp = expand_unop (wider_mode, unoptab, xop0, NULL_RTX,
3300 /* If we are generating clz using wider mode, adjust the
result.  (NOTE(review): the continuation of this comment is missing
from the extract.)  */
3302 if (unoptab == clz_optab && temp != 0)
3303 temp = expand_binop (wider_mode, sub_optab, temp,
3304 GEN_INT (GET_MODE_BITSIZE (wider_mode)
3305 - GET_MODE_BITSIZE (mode)),
3306 target, true, OPTAB_DIRECT);
3310 if (class != MODE_INT)
3313 target = gen_reg_rtx (mode);
3314 convert_move (target, temp, 0);
3318 return gen_lowpart (mode, temp);
3321 delete_insns_since (last);
3326 /* One final attempt at implementing negation via subtraction,
3327 this time allowing widening of the operand. */
3328 if (unoptab->code == NEG && !HONOR_SIGNED_ZEROS (mode))
3331 temp = expand_binop (mode,
3332 unoptab == negv_optab ? subv_optab : sub_optab,
3333 CONST0_RTX (mode), op0,
3334 target, unsignedp, OPTAB_LIB_WIDEN);
3342 /* Emit code to compute the absolute value of OP0, with result to
3343 TARGET if convenient. (TARGET may be 0.) The return value says
3344 where the result actually is to be found.
3346 MODE is the mode of the operand; the mode of the result is
3347 different but can be deduced from MODE.
/* Expand abs without emitting branches.  Tries, in order: an abs/absv
   pattern, sign-bit masking for scalar floats, MAX (x, -x), and the
   branch-free shift/xor/sub identity for integers.
   NOTE(review): braces, the declaration of temp and the early returns
   after each successful attempt are missing from this extract.  */
3352 expand_abs_nojump (enum machine_mode mode, rtx op0, rtx target,
3353 int result_unsignedp)
3358 result_unsignedp = 1;
3360 /* First try to do it with a special abs instruction. */
3361 temp = expand_unop (mode, result_unsignedp ? abs_optab : absv_optab,
3366 /* For floating point modes, try clearing the sign bit. */
3367 if (SCALAR_FLOAT_MODE_P (mode))
3369 temp = expand_absneg_bit (ABS, mode, op0, target);
3374 /* If we have a MAX insn, we can do this as MAX (x, -x). */
3375 if (optab_handler (smax_optab, mode)->insn_code != CODE_FOR_nothing
3376 && !HONOR_SIGNED_ZEROS (mode))
3378 rtx last = get_last_insn ();
3380 temp = expand_unop (mode, neg_optab, op0, NULL_RTX, 0);
3382 temp = expand_binop (mode, smax_optab, op0, temp, target, 0,
3388 delete_insns_since (last);
3391 /* If this machine has expensive jumps, we can do integer absolute
3392 value of X as (((signed) x >> (W-1)) ^ x) - ((signed) x >> (W-1)),
3393 where W is the width of MODE. */
3395 if (GET_MODE_CLASS (mode) == MODE_INT && BRANCH_COST >= 2)
/* Arithmetic right shift by W-1 yields 0 for x >= 0, -1 for x < 0.  */
3397 rtx extended = expand_shift (RSHIFT_EXPR, mode, op0,
3398 size_int (GET_MODE_BITSIZE (mode) - 1),
3401 temp = expand_binop (mode, xor_optab, extended, op0, target, 0,
3404 temp = expand_binop (mode, result_unsignedp ? sub_optab : subv_optab,
3405 temp, extended, target, 0, OPTAB_LIB_WIDEN);
/* Expand abs, falling back to compare-and-jump plus negate when the
   branch-free expansion (expand_abs_nojump) fails.
   NOTE(review): braces, the declarations of temp/op1, the SAFE bypass of
   the copy, and the final return are missing from this extract.  */
3415 expand_abs (enum machine_mode mode, rtx op0, rtx target,
3416 int result_unsignedp, int safe)
3421 result_unsignedp = 1;
3423 temp = expand_abs_nojump (mode, op0, target, result_unsignedp);
3427 /* If that does not win, use conditional jump and negate. */
3429 /* It is safe to use the target if it is the same
3430 as the source if this is also a pseudo register */
3431 if (op0 == target && REG_P (op0)
3432 && REGNO (op0) >= FIRST_PSEUDO_REGISTER)
3435 op1 = gen_label_rtx ();
/* Need a fresh, non-volatile target we may clobber across the branch.  */
3436 if (target == 0 || ! safe
3437 || GET_MODE (target) != mode
3438 || (MEM_P (target) && MEM_VOLATILE_P (target))
3440 && REGNO (target) < FIRST_PSEUDO_REGISTER))
3441 target = gen_reg_rtx (mode);
3443 emit_move_insn (target, op0);
/* Skip the negation when the value is already non-negative.  */
3446 do_compare_rtx_and_jump (target, CONST0_RTX (mode), GE, 0, mode,
3447 NULL_RTX, NULL_RTX, op1);
3449 op0 = expand_unop (mode, result_unsignedp ? neg_optab : negv_optab,
3452 emit_move_insn (target, op0);
3458 /* A subroutine of expand_copysign, perform the copysign operation using the
3459 abs and neg primitives advertised to exist on the target. The assumption
3460 is that we have a split register file, and leaving op0 in fp registers,
3461 and not playing with subregs so much, will help the register allocator. */
/* Copysign via abs/neg (see the comment above): extract OP1's sign —
   either with a signbit pattern or by masking the sign bit of its
   integer view — take |OP0|, then conditionally negate.
   NOTE(review): braces, declarations (icode, sign, label, op0_is_abs
   handling) and several early-out paths are missing from this extract.  */
3464 expand_copysign_absneg (enum machine_mode mode, rtx op0, rtx op1, rtx target,
3465 int bitpos, bool op0_is_abs)
3467 enum machine_mode imode;
3474 /* Check if the back end provides an insn that handles signbit for the
3476 icode = (int) signbit_optab->handlers [(int) mode].insn_code;
3477 if (icode != CODE_FOR_nothing)
3479 imode = insn_data[icode].operand[0].mode;
3480 sign = gen_reg_rtx (imode);
3481 emit_unop_insn (icode, sign, op1, UNKNOWN);
/* No signbit pattern: isolate the sign bit of OP1 by masking.  */
3485 HOST_WIDE_INT hi, lo;
3487 if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
3489 imode = int_mode_for_mode (mode);
3490 if (imode == BLKmode)
3492 op1 = gen_lowpart (imode, op1);
/* Multiword float: pick out the word that carries the sign bit.  */
3499 if (FLOAT_WORDS_BIG_ENDIAN)
3500 word = (GET_MODE_BITSIZE (mode) - bitpos) / BITS_PER_WORD;
3502 word = bitpos / BITS_PER_WORD;
3503 bitpos = bitpos % BITS_PER_WORD;
3504 op1 = operand_subword_force (op1, word, mode);
3507 if (bitpos < HOST_BITS_PER_WIDE_INT)
3510 lo = (HOST_WIDE_INT) 1 << bitpos;
3514 hi = (HOST_WIDE_INT) 1 << (bitpos - HOST_BITS_PER_WIDE_INT);
3518 sign = gen_reg_rtx (imode);
3519 sign = expand_binop (imode, and_optab, op1,
3520 immed_double_const (lo, hi, imode),
3521 NULL_RTX, 1, OPTAB_LIB_WIDEN);
/* Take |OP0| unless the caller already knows its sign bit is clear.  */
3526 op0 = expand_unop (mode, abs_optab, op0, target, 0);
3533 if (target == NULL_RTX)
3534 target = copy_to_reg (op0);
3536 emit_move_insn (target, op0);
/* Negate only when the extracted sign is set.  */
3539 label = gen_label_rtx ();
3540 emit_cmp_and_jump_insns (sign, const0_rtx, EQ, NULL_RTX, imode, 1, label);
3542 if (GET_CODE (op0) == CONST_DOUBLE)
3543 op0 = simplify_unary_operation (NEG, mode, op0, mode);
3545 op0 = expand_unop (mode, neg_optab, op0, target, 0);
3547 emit_move_insn (target, op0);
3555 /* A subroutine of expand_copysign, perform the entire copysign operation
3556 with integer bitmasks. BITPOS is the position of the sign bit; OP0_IS_ABS
3557 is true if op0 is known to have its sign bit clear. */
/* Copysign purely with integer bitmasks (see comment above):
   result = (OP0 & ~signbit) | (OP1 & signbit), done in one integer mode
   or word-by-word for multiword floats; the AND of OP0 is skipped when
   OP0_IS_ABS says its sign bit is already clear.
   NOTE(review): braces, sequence bracketing and the BLKmode bail-out
   path are missing from this extract (line numbers jump).  */
3560 expand_copysign_bit (enum machine_mode mode, rtx op0, rtx op1, rtx target,
3561 int bitpos, bool op0_is_abs)
3563 enum machine_mode imode;
3564 HOST_WIDE_INT hi, lo;
3565 int word, nwords, i;
3568 if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
3570 imode = int_mode_for_mode (mode);
3571 if (imode == BLKmode)
/* Multiword float: find the word containing the sign bit.  */
3580 if (FLOAT_WORDS_BIG_ENDIAN)
3581 word = (GET_MODE_BITSIZE (mode) - bitpos) / BITS_PER_WORD;
3583 word = bitpos / BITS_PER_WORD;
3584 bitpos = bitpos % BITS_PER_WORD;
3585 nwords = (GET_MODE_BITSIZE (mode) + BITS_PER_WORD - 1) / BITS_PER_WORD;
/* Build the sign-bit mask as a (lo, hi) HOST_WIDE_INT pair.  */
3588 if (bitpos < HOST_BITS_PER_WIDE_INT)
3591 lo = (HOST_WIDE_INT) 1 << bitpos;
3595 hi = (HOST_WIDE_INT) 1 << (bitpos - HOST_BITS_PER_WIDE_INT);
3599 if (target == 0 || target == op0 || target == op1)
3600 target = gen_reg_rtx (mode);
3606 for (i = 0; i < nwords; ++i)
3608 rtx targ_piece = operand_subword (target, i, 1, mode);
3609 rtx op0_piece = operand_subword_force (op0, i, mode);
/* Only the sign-carrying word gets the mask-and-merge; the rest of
   the words are copied straight from OP0.  */
3614 op0_piece = expand_binop (imode, and_optab, op0_piece,
3615 immed_double_const (~lo, ~hi, imode),
3616 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3618 op1 = expand_binop (imode, and_optab,
3619 operand_subword_force (op1, i, mode),
3620 immed_double_const (lo, hi, imode),
3621 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3623 temp = expand_binop (imode, ior_optab, op0_piece, op1,
3624 targ_piece, 1, OPTAB_LIB_WIDEN);
3625 if (temp != targ_piece)
3626 emit_move_insn (targ_piece, temp);
3629 emit_move_insn (targ_piece, op0_piece);
3632 insns = get_insns ();
3635 emit_no_conflict_block (insns, target, op0, op1, NULL_RTX);
/* Single-word path: mask and merge the whole values at once.  */
3639 op1 = expand_binop (imode, and_optab, gen_lowpart (imode, op1),
3640 immed_double_const (lo, hi, imode),
3641 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3643 op0 = gen_lowpart (imode, op0);
3645 op0 = expand_binop (imode, and_optab, op0,
3646 immed_double_const (~lo, ~hi, imode),
3647 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3649 temp = expand_binop (imode, ior_optab, op0, op1,
3650 gen_lowpart (imode, target), 1, OPTAB_LIB_WIDEN);
3651 target = lowpart_subreg_maybe_copy (mode, temp, imode);
3657 /* Expand the C99 copysign operation. OP0 and OP1 must be the same
3658 scalar floating point mode. Return NULL if we do not know how to
3659 expand the operation inline. */
/* NOTE(review): elided excerpt -- intermediate lines (braces, returns,
   op0_is_abs initialization) are missing; comments cover visible lines.  */
/* Expand C99 copysign(OP0, OP1): try a direct optab, then the abs/neg
   strategy, then raw bit manipulation.  Returns NULL when inline expansion
   is not possible (per the elided comment above).  */
3662 expand_copysign (rtx op0, rtx op1, rtx target)
3664 enum machine_mode mode = GET_MODE (op0);
3665 const struct real_format *fmt;
/* Both operands must share one scalar float mode.  */
3669 gcc_assert (SCALAR_FLOAT_MODE_P (mode));
3670 gcc_assert (GET_MODE (op1) == mode);
3672 /* First try to do it with a special instruction. */
3673 temp = expand_binop (mode, copysign_optab, op0, op1,
3674 target, 0, OPTAB_DIRECT);
/* Formats without signed zero cannot support copysign semantics here.  */
3678 fmt = REAL_MODE_FORMAT (mode);
3679 if (fmt == NULL || !fmt->has_signed_zero)
/* Constant OP0: fold away its sign up front so op0_is_abs (set in elided
   lines) can be true.  */
3683 if (GET_CODE (op0) == CONST_DOUBLE)
3685 if (real_isneg (CONST_DOUBLE_REAL_VALUE (op0)))
3686 op0 = simplify_unary_operation (ABS, mode, op0, mode);
/* abs/neg strategy: needs a readable sign-bit position and either a
   constant OP0 or both neg and abs patterns on the target.  */
3690 if (fmt->signbit_ro >= 0
3691 && (GET_CODE (op0) == CONST_DOUBLE
3692 || (optab_handler (neg_optab, mode)->insn_code != CODE_FOR_nothing
3693 && optab_handler (abs_optab, mode)->insn_code != CODE_FOR_nothing)))
3695 temp = expand_copysign_absneg (mode, op0, op1, target,
3696 fmt->signbit_ro, op0_is_abs);
/* Fall back to bit twiddling; requires a writable sign bit.
   NOTE(review): the visible guard reads "signbit_rw < 0" yet still calls
   expand_copysign_bit with signbit_rw -- an intervening elided line
   (likely "return NULL;") presumably separates them; verify upstream.  */
3701 if (fmt->signbit_rw < 0)
3703 return expand_copysign_bit (mode, op0, op1, target,
3704 fmt->signbit_rw, op0_is_abs);
3707 /* Generate an instruction whose insn-code is INSN_CODE,
3708 with two operands: an output TARGET and an input OP0.
3709 TARGET *must* be nonzero, and the output is always stored there.
3710 CODE is an rtx code such that (CODE OP0) is an rtx that describes
3711 the value that is stored into TARGET. */
/* NOTE(review): elided excerpt; declarations of TEMP/PAT and several
   emit calls are missing between the visible lines.  */
/* Emit the unary insn ICODE computing (CODE OP0) into TARGET, copying
   operands into pseudos when the insn's predicates reject them.  */
3714 emit_unop_insn (int icode, rtx target, rtx op0, enum rtx_code code)
3717 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
3722 /* Now, if insn does not accept our operands, put them into pseudos. */
3724 if (!insn_data[icode].operand[1].predicate (op0, mode0))
3725 op0 = copy_to_mode_reg (mode0, op0);
3727 if (!insn_data[icode].operand[0].predicate (temp, GET_MODE (temp)))
3728 temp = gen_reg_rtx (GET_MODE (temp));
3730 pat = GEN_FCN (icode) (temp, op0);
/* Multi-insn expansions get a REG_EQUAL-style note so optimizers know the
   whole sequence computes (CODE OP0).  */
3732 if (INSN_P (pat) && NEXT_INSN (pat) != NULL_RTX && code != UNKNOWN)
3733 add_equal_note (pat, temp, code, op0, NULL_RTX);
/* Guard (elided) presumably checks temp != target before this copy.  */
3738 emit_move_insn (target, temp);
/* Shared state for no_conflict_move_test (called via note_stores):
   TARGET is the overall destination, FIRST/INSN delimit the insns kept so
   far; a MUST_STAY flag (declared in an elided line) records the verdict.  */
3741 struct no_conflict_data
3743 rtx target, first, insn;
3747 /* Called via note_stores by emit_no_conflict_block and emit_libcall_block.
3748 Set P->must_stay if the currently examined clobber / store has to stay
3749 in the list of insns that constitute the actual no_conflict block /
/* NOTE(review): elided excerpt; opening brace and some lines missing.  */
/* note_stores callback: decide whether the insn currently being examined
   (P0->insn) may be hoisted out of a no_conflict/libcall block, or must
   stay because it feeds or interferes with insns already kept.  */
3752 no_conflict_move_test (rtx dest, const_rtx set, void *p0)
3754 struct no_conflict_data *p= p0;
3756 /* If this inns directly contributes to setting the target, it must stay. */
3757 if (reg_overlap_mentioned_p (p->target, dest))
3758 p->must_stay = true;
3759 /* If we haven't committed to keeping any other insns in the list yet,
3760 there is nothing more to check. */
3761 else if (p->insn == p->first)
3763 /* If this insn sets / clobbers a register that feeds one of the insns
3764 already in the list, this insn has to stay too. */
3765 else if (reg_overlap_mentioned_p (dest, PATTERN (p->first))
3766 || (CALL_P (p->first) && (find_reg_fusage (p->first, USE, dest)))
3767 || reg_used_between_p (dest, p->first, p->insn)
3768 /* Likewise if this insn depends on a register set by a previous
3769 insn in the list, or if it sets a result (presumably a hard
3770 register) that is set or clobbered by a previous insn.
3771 N.B. the modified_*_p (SET_DEST...) tests applied to a MEM
3772 SET_DEST perform the former check on the address, and the latter
3773 check on the MEM. */
3774 || (GET_CODE (set) == SET
3775 && (modified_in_p (SET_SRC (set), p->first)
3776 || modified_in_p (SET_DEST (set), p->first)
3777 || modified_between_p (SET_SRC (set), p->first, p->insn)
3778 || modified_between_p (SET_DEST (set), p->first, p->insn))))
3779 p->must_stay = true;
3782 /* Encapsulate the block starting at FIRST and ending with LAST, which is
3783 logically equivalent to EQUIV, so it gets manipulated as a unit if it
3784 is possible to do so. */
/* NOTE(review): elided excerpt; note-list tails and closing braces are
   among the missing lines.  */
/* Attach REG_LIBCALL/REG_RETVAL notes marking FIRST..LAST as a libcall
   block equivalent to EQUIV -- but only when the span is a single basic
   block (no control-flow insn inside) and cannot trap under
   -fnon-call-exceptions.  */
3787 maybe_encapsulate_block (rtx first, rtx last, rtx equiv)
3789 if (!flag_non_call_exceptions || !may_trap_p (equiv))
3791 /* We can't attach the REG_LIBCALL and REG_RETVAL notes when the
3792 encapsulated region would not be in one basic block, i.e. when
3793 there is a control_flow_insn_p insn between FIRST and LAST. */
3794 bool attach_libcall_retval_notes = true;
3795 rtx insn, next = NEXT_INSN (last);
3797 for (insn = first; insn != next; insn = NEXT_INSN (insn))
3798 if (control_flow_insn_p (insn))
3800 attach_libcall_retval_notes = false;
3804 if (attach_libcall_retval_notes)
/* Cross-link the block ends: FIRST notes LAST, LAST notes FIRST.  */
3806 REG_NOTES (first) = gen_rtx_INSN_LIST (REG_LIBCALL, last,
3808 REG_NOTES (last) = gen_rtx_INSN_LIST (REG_RETVAL, first,
/* Tag every insn in the block with the same libcall id (libcall_id is
   defined in elided lines -- presumably a counter; verify upstream).  */
3810 next = NEXT_INSN (last);
3811 for (insn = first; insn != next; insn = NEXT_INSN (insn))
3812 REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_LIBCALL_ID,
3813 GEN_INT (libcall_id),
3820 /* Emit code to perform a series of operations on a multi-word quantity, one
3823 Such a block is preceded by a CLOBBER of the output, consists of multiple
3824 insns, each setting one word of the output, and followed by a SET copying
3825 the output to itself.
3827 Each of the insns setting words of the output receives a REG_NO_CONFLICT
3828 note indicating that it doesn't conflict with the (also multi-word)
3829 inputs. The entire block is surrounded by REG_LIBCALL and REG_RETVAL
3832 INSNS is a block of code generated to perform the operation, not including
3833 the CLOBBER and final copy. All insns that compute intermediate values
3834 are first emitted, followed by the block as described above.
3836 TARGET, OP0, and OP1 are the output and inputs of the operations,
3837 respectively. OP1 may be zero for a unary operation.
3839 EQUIV, if nonzero, is an expression to be placed into a REG_EQUAL note
3842 If TARGET is not a register, INSNS is simply emitted with no special
3843 processing. Likewise if anything in INSNS is not an INSN or if
3844 there is a libcall block inside INSNS.
3846 The final insn emitted is returned. */
/* NOTE(review): elided excerpt; braces, the insns-list unlink bookkeeping
   and several emit calls are missing between visible lines.  */
/* Re-emit INSNS as a REG_NO_CONFLICT block writing the multi-word TARGET:
   hoist out independent setup insns, emit CLOBBER(target), the word
   stores annotated with REG_NO_CONFLICT notes for OP0/OP1, a final
   self-copy, then wrap the region via maybe_encapsulate_block.  */
3849 emit_no_conflict_block (rtx insns, rtx target, rtx op0, rtx op1, rtx equiv)
3851 rtx prev, next, first, last, insn;
/* Bail to plain emission when TARGET isn't a pseudo, or during reload,
   or when the list contains non-insns / a nested libcall.  */
3853 if (!REG_P (target) || reload_in_progress)
3854 return emit_insn (insns);
3856 for (insn = insns; insn; insn = NEXT_INSN (insn))
3857 if (!NONJUMP_INSN_P (insn)
3858 || find_reg_note (insn, REG_LIBCALL, NULL_RTX))
3859 return emit_insn (insns);
3861 /* First emit all insns that do not store into words of the output and remove
3862 these from the list. */
3863 for (insn = insns; insn; insn = next)
3866 struct no_conflict_data data;
3868 next = NEXT_INSN (insn);
3870 /* Some ports (cris) create a libcall regions at their own. We must
3871 avoid any potential nesting of LIBCALLs. */
3872 if ((note = find_reg_note (insn, REG_LIBCALL, NULL)) != NULL)
3873 remove_note (insn, note);
3874 if ((note = find_reg_note (insn, REG_RETVAL, NULL)) != NULL)
3875 remove_note (insn, note);
3876 if ((note = find_reg_note (insn, REG_LIBCALL_ID, NULL)) != NULL)
3877 remove_note (insn, note);
/* Ask no_conflict_move_test whether this insn can be hoisted.  */
3879 data.target = target;
3883 note_stores (PATTERN (insn), no_conflict_move_test, &data);
3884 if (! data.must_stay)
/* Unlink from the private list (re-emission happens in elided lines).  */
3886 if (PREV_INSN (insn))
3887 NEXT_INSN (PREV_INSN (insn)) = next;
3892 PREV_INSN (next) = PREV_INSN (insn);
/* Remember where the block proper starts in the emitted stream.  */
3898 prev = get_last_insn ();
3900 /* Now write the CLOBBER of the output, followed by the setting of each
3901 of the words, followed by the final copy. */
3902 if (target != op0 && target != op1)
3903 emit_insn (gen_rtx_CLOBBER (VOIDmode, target));
3905 for (insn = insns; insn; insn = next)
3907 next = NEXT_INSN (insn);
/* Each word-store advertises that it does not conflict with the
   (also multi-word) register inputs.  */
3910 if (op1 && REG_P (op1))
3911 REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_NO_CONFLICT, op1,
3914 if (op0 && REG_P (op0))
3915 REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_NO_CONFLICT, op0,
/* Final self-copy (with REG_EQUAL = EQUIV) only when a move pattern
   exists for the target mode; otherwise use the last emitted insn.  */
3919 if (optab_handler (mov_optab, GET_MODE (target))->insn_code
3920 != CODE_FOR_nothing)
3922 last = emit_move_insn (target, target);
3924 set_unique_reg_note (last, REG_EQUAL, equiv);
3928 last = get_last_insn ();
3930 /* Remove any existing REG_EQUAL note from "last", or else it will
3931 be mistaken for a note referring to the full contents of the
3932 alleged libcall value when found together with the REG_RETVAL
3933 note added below. An existing note can come from an insn
3934 expansion at "last". */
3935 remove_note (last, find_reg_note (last, REG_EQUAL, NULL_RTX));
/* FIRST = first insn of the block (all emitted, or after PREV).  */
3939 first = get_insns ();
3941 first = NEXT_INSN (prev);
3943 maybe_encapsulate_block (first, last, equiv);
3948 /* Emit code to make a call to a constant function or a library call.
3950 INSNS is a list containing all insns emitted in the call.
3951 These insns leave the result in RESULT. Our block is to copy RESULT
3952 to TARGET, which is logically equivalent to EQUIV.
3954 We first emit any insns that set a pseudo on the assumption that these are
3955 loading constants into registers; doing so allows them to be safely cse'ed
3956 between blocks. Then we emit all the other insns in the block, followed by
3957 an insn to move RESULT to TARGET. This last insn will have a REQ_EQUAL
3958 note with an operand of EQUIV.
3960 Moving assignments to pseudos outside of the block is done to improve
3961 the generated code, but is not required to generate correct code,
3962 hence being unable to move an assignment is not grounds for not making
3963 a libcall block. There are two reasons why it is safe to leave these
3964 insns inside the block: First, we know that these pseudos cannot be
3965 used in generated RTL outside the block since they are created for
3966 temporary purposes within the block. Second, CSE will not record the
3967 values of anything set inside a libcall block, so we know they must
3968 be dead at the end of the block.
3970 Except for the first group of insns (the ones setting pseudos), the
3971 block is delimited by REG_RETVAL and REG_LIBCALL notes. */
/* NOTE(review): elided excerpt; braces, list-unlink bookkeeping and some
   conditions are missing between visible lines.  */
/* Emit INSNS as a libcall block copying RESULT to TARGET with a REG_EQUAL
   note of EQUIV: hoist constant-loading pseudo sets out first, adjust EH
   notes for -fnon-call-exceptions, then encapsulate the remainder.  */
3973 emit_libcall_block (rtx insns, rtx target, rtx result, rtx equiv)
3975 rtx final_dest = target;
3976 rtx prev, next, first, last, insn;
3978 /* If this is a reg with REG_USERVAR_P set, then it could possibly turn
3979 into a MEM later. Protect the libcall block from this change. */
3980 if (! REG_P (target) || REG_USERVAR_P (target))
3981 target = gen_reg_rtx (GET_MODE (target));
3983 /* If we're using non-call exceptions, a libcall corresponding to an
3984 operation that may trap may also trap. */
3985 if (flag_non_call_exceptions && may_trap_p (equiv))
/* Trapping case: strip no-throw REG_EH_REGION notes (value <= 0).  */
3987 for (insn = insns; insn; insn = NEXT_INSN (insn))
3990 rtx note = find_reg_note (insn, REG_EH_REGION, NULL_RTX);
3992 if (note != 0 && INTVAL (XEXP (note, 0)) <= 0)
3993 remove_note (insn, note);
3997 /* look for any CALL_INSNs in this sequence, and attach a REG_EH_REGION
3998 reg note to indicate that this call cannot throw or execute a nonlocal
3999 goto (unless there is already a REG_EH_REGION note, in which case
4001 for (insn = insns; insn; insn = NEXT_INSN (insn))
4004 rtx note = find_reg_note (insn, REG_EH_REGION, NULL_RTX);
4007 XEXP (note, 0) = constm1_rtx;
4009 REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_EH_REGION, constm1_rtx,
4013 /* First emit all insns that set pseudos. Remove them from the list as
4014 we go. Avoid insns that set pseudos which were referenced in previous
4015 insns. These can be generated by move_by_pieces, for example,
4016 to update an address. Similarly, avoid insns that reference things
4017 set in previous insns. */
4019 for (insn = insns; insn; insn = next)
4021 rtx set = single_set (insn);
4024 /* Some ports (cris) create a libcall regions at their own. We must
4025 avoid any potential nesting of LIBCALLs. */
4026 if ((note = find_reg_note (insn, REG_LIBCALL, NULL)) != NULL)
4027 remove_note (insn, note);
4028 if ((note = find_reg_note (insn, REG_RETVAL, NULL)) != NULL)
4029 remove_note (insn, note);
4030 if ((note = find_reg_note (insn, REG_LIBCALL_ID, NULL)) != NULL)
4031 remove_note (insn, note);
4033 next = NEXT_INSN (insn);
/* Candidate for hoisting: a simple set of a pseudo register.  */
4035 if (set != 0 && REG_P (SET_DEST (set))
4036 && REGNO (SET_DEST (set)) >= FIRST_PSEUDO_REGISTER)
4038 struct no_conflict_data data;
/* const0_rtx target => only the interference checks apply.  */
4040 data.target = const0_rtx;
4044 note_stores (PATTERN (insn), no_conflict_move_test, &data);
4045 if (! data.must_stay)
/* Unlink from the private list (re-emission is in elided lines).  */
4047 if (PREV_INSN (insn))
4048 NEXT_INSN (PREV_INSN (insn)) = next;
4053 PREV_INSN (next) = PREV_INSN (insn);
4059 /* Some ports use a loop to copy large arguments onto the stack.
4060 Don't move anything outside such a loop. */
4065 prev = get_last_insn ();
4067 /* Write the remaining insns followed by the final copy. */
4069 for (insn = insns; insn; insn = next)
4071 next = NEXT_INSN (insn);
/* Copy libcall RESULT into (possibly substituted) TARGET and record the
   equivalence -- but only when a move pattern exists for the mode.  */
4076 last = emit_move_insn (target, result);
4077 if (optab_handler (mov_optab, GET_MODE (target))->insn_code
4078 != CODE_FOR_nothing)
4079 set_unique_reg_note (last, REG_EQUAL, copy_rtx (equiv));
4082 /* Remove any existing REG_EQUAL note from "last", or else it will
4083 be mistaken for a note referring to the full contents of the
4084 libcall value when found together with the REG_RETVAL note added
4085 below. An existing note can come from an insn expansion at
4087 remove_note (last, find_reg_note (last, REG_EQUAL, NULL_RTX));
/* If we substituted a fresh pseudo for a user-variable target above,
   copy through to the caller's real destination.  */
4090 if (final_dest != target)
4091 emit_move_insn (final_dest, target);
4094 first = get_insns ();
4096 first = NEXT_INSN (prev);
4098 maybe_encapsulate_block (first, last, equiv);
4101 /* Nonzero if we can perform a comparison of mode MODE straightforwardly.
4102 PURPOSE describes how this comparison will be used. CODE is the rtx
4103 comparison code we will be using.
4105 ??? Actually, CODE is slightly weaker than that. A target is still
4106 required to implement all of the normal bcc operations, but not
4107 required to implement all (or any) of the unordered bcc operations. */
/* NOTE(review): elided excerpt; the do-loop opener, returns and braces
   are missing between visible lines.  */
/* Return nonzero when a comparison of CODE in MODE (or a wider mode, via
   the elided widening loop ending at GET_MODE_WIDER_MODE below) can be
   performed for the given PURPOSE (jump, store-flag, or cmov).  */
4110 can_compare_p (enum rtx_code code, enum machine_mode mode,
4111 enum can_compare_purpose purpose)
/* Classic cmp + bcc/setcc split: a cmp pattern plus the per-code table.  */
4115 if (optab_handler (cmp_optab, mode)->insn_code != CODE_FOR_nothing)
4117 if (purpose == ccp_jump)
4118 return bcc_gen_fctn[(int) code] != NULL;
4119 else if (purpose == ccp_store_flag)
4120 return setcc_gen_code[(int) code] != CODE_FOR_nothing;
4122 /* There's only one cmov entry point, and it's allowed to fail. */
/* Combined-pattern alternatives: cbranch / cmov / cstore optabs.  */
4125 if (purpose == ccp_jump
4126 && optab_handler (cbranch_optab, mode)->insn_code != CODE_FOR_nothing)
4128 if (purpose == ccp_cmov
4129 && optab_handler (cmov_optab, mode)->insn_code != CODE_FOR_nothing)
4131 if (purpose == ccp_store_flag
4132 && optab_handler (cstore_optab, mode)->insn_code != CODE_FOR_nothing)
/* Try successively wider modes until VOIDmode terminates the loop.  */
4134 mode = GET_MODE_WIDER_MODE (mode);
4136 while (mode != VOIDmode);
4141 /* This function is called when we are going to emit a compare instruction that
4142 compares the values found in *PX and *PY, using the rtl operator COMPARISON.
4144 *PMODE is the mode of the inputs (in case they are const_int).
4145 *PUNSIGNEDP nonzero says that the operands are unsigned;
4146 this matters if they need to be widened.
4148 If they have mode BLKmode, then SIZE specifies the size of both operands.
4150 This function performs all the setup necessary so that the caller only has
4151 to emit a single comparison insn. This setup can involve doing a BLKmode
4152 comparison or emitting a library call to perform the comparison if no insn
4153 is available to handle it.
4154 The values which are passed in through pointers can be modified; the caller
4155 should perform the comparison on the modified values. Constant
4156 comparisons must have already been folded. */
/* NOTE(review): elided excerpt; braces, several declarations (libfunc,
   result, opalign, length_type) and returns are among the missing lines.  */
/* Set up operands *PX/*PY (mode *PMODE, signedness *PUNSIGNEDP) so the
   caller can emit one comparison insn for *PCOMPARISON: handles BLKmode
   memory compares, forces trapping operands to registers, and falls back
   to integer or float library calls when no insn exists.  */
4159 prepare_cmp_insn (rtx *px, rtx *py, enum rtx_code *pcomparison, rtx size,
4160 enum machine_mode *pmode, int *punsignedp,
4161 enum can_compare_purpose purpose)
4163 enum machine_mode mode = *pmode;
4164 rtx x = *px, y = *py;
4165 int unsignedp = *punsignedp;
4168 /* If we are inside an appropriately-short loop and we are optimizing,
4169 force expensive constants into a register. */
4170 if (CONSTANT_P (x) && optimize
4171 && rtx_cost (x, COMPARE) > COSTS_N_INSNS (1))
4172 x = force_reg (mode, x);
4174 if (CONSTANT_P (y) && optimize
4175 && rtx_cost (y, COMPARE) > COSTS_N_INSNS (1))
4176 y = force_reg (mode, y);
4179 /* Make sure if we have a canonical comparison. The RTL
4180 documentation states that canonical comparisons are required only
4181 for targets which have cc0. */
4182 gcc_assert (!CONSTANT_P (x) || CONSTANT_P (y))
4185 /* Don't let both operands fail to indicate the mode. */
4186 if (GET_MODE (x) == VOIDmode && GET_MODE (y) == VOIDmode)
4187 x = force_reg (mode, x);
4189 /* Handle all BLKmode compares. */
4191 if (mode == BLKmode)
4193 enum machine_mode cmp_mode, result_mode;
4194 enum insn_code cmp_code;
/* opalign (declared in elided lines) = min alignment of the two MEMs.  */
4199 = GEN_INT (MIN (MEM_ALIGN (x), MEM_ALIGN (y)) / BITS_PER_UNIT);
4203 /* Try to use a memory block compare insn - either cmpstr
4204 or cmpmem will do. */
4205 for (cmp_mode = GET_CLASS_NARROWEST_MODE (MODE_INT);
4206 cmp_mode != VOIDmode;
4207 cmp_mode = GET_MODE_WIDER_MODE (cmp_mode))
/* Preference order: cmpmem, then cmpstr, then cmpstrn.  */
4209 cmp_code = cmpmem_optab[cmp_mode];
4210 if (cmp_code == CODE_FOR_nothing)
4211 cmp_code = cmpstr_optab[cmp_mode];
4212 if (cmp_code == CODE_FOR_nothing)
4213 cmp_code = cmpstrn_optab[cmp_mode];
4214 if (cmp_code == CODE_FOR_nothing)
4217 /* Must make sure the size fits the insn's mode. */
4218 if ((GET_CODE (size) == CONST_INT
4219 && INTVAL (size) >= (1 << GET_MODE_BITSIZE (cmp_mode)))
4220 || (GET_MODE_BITSIZE (GET_MODE (size))
4221 > GET_MODE_BITSIZE (cmp_mode)))
/* Found a usable block-compare insn: emit it and hand back the
   result pseudo / result mode through *PX / *PMODE.  */
4224 result_mode = insn_data[cmp_code].operand[0].mode;
4225 result = gen_reg_rtx (result_mode);
4226 size = convert_to_mode (cmp_mode, size, 1);
4227 emit_insn (GEN_FCN (cmp_code) (result, x, y, size, opalign));
4231 *pmode = result_mode;
4235 /* Otherwise call a library function, memcmp. */
4236 libfunc = memcmp_libfunc;
4237 length_type = sizetype;
4238 result_mode = TYPE_MODE (integer_type_node);
4239 cmp_mode = TYPE_MODE (length_type);
4240 size = convert_to_mode (TYPE_MODE (length_type), size,
4241 TYPE_UNSIGNED (length_type));
4243 result = emit_library_call_value (libfunc, 0, LCT_PURE_MAKE_BLOCK,
4250 *pmode = result_mode;
4254 /* Don't allow operands to the compare to trap, as that can put the
4255 compare and branch in different basic blocks. */
4256 if (flag_non_call_exceptions)
/* may_trap_p guards (elided) presumably precede each force_reg.  */
4259 x = force_reg (mode, x);
4261 y = force_reg (mode, y);
/* If a real compare insn exists, we are done (return is elided).  */
4266 if (can_compare_p (*pcomparison, mode, purpose))
4269 /* Handle a lib call just for the mode we are using. */
4271 libfunc = optab_libfunc (cmp_optab, mode);
4272 if (libfunc && !SCALAR_FLOAT_MODE_P (mode))
4277 /* If we want unsigned, and this mode has a distinct unsigned
4278 comparison routine, use that. */
4281 ulibfunc = optab_libfunc (ucmp_optab, mode);
4283 if (unsignedp && ulibfunc)
4286 result = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST_MAKE_BLOCK,
4287 targetm.libgcc_cmp_return_mode (),
4288 2, x, mode, y, mode);
4290 /* There are two kinds of comparison routines. Biased routines
4291 return 0/1/2, and unbiased routines return -1/0/1. Other parts
4292 of gcc expect that the comparison operation is equivalent
4293 to the modified comparison. For signed comparisons compare the
4294 result against 1 in the biased case, and zero in the unbiased
4295 case. For unsigned comparisons always compare against 1 after
4296 biasing the unbiased result by adding 1. This gives us a way to
4302 if (!TARGET_LIB_INT_CMP_BIASED)
4305 *px = plus_constant (result, 1);
/* Floating-point fallback: delegate to the FP library-compare helper.  */
4312 gcc_assert (SCALAR_FLOAT_MODE_P (mode));
4313 prepare_float_lib_cmp (px, py, pcomparison, pmode, punsignedp);
4316 /* Before emitting an insn with code ICODE, make sure that X, which is going
4317 to be used for operand OPNUM of the insn, is converted from mode MODE to
4318 WIDER_MODE (UNSIGNEDP determines whether it is an unsigned conversion), and
4319 that it is accepted by the operand predicate. Return the new value. */
/* NOTE(review): elided excerpt; the reload_completed branch body and
   final return are missing.  */
/* Widen X from MODE to WIDER_MODE (per UNSIGNEDP) and force it to satisfy
   operand OPNUM's predicate for insn ICODE; return the adjusted value.  */
4322 prepare_operand (int icode, rtx x, int opnum, enum machine_mode mode,
4323 enum machine_mode wider_mode, int unsignedp)
4325 if (mode != wider_mode)
4326 x = convert_modes (wider_mode, mode, x, unsignedp);
4328 if (!insn_data[icode].operand[opnum].predicate
4329 (x, insn_data[icode].operand[opnum].mode))
/* After reload no new pseudos may be created (elided handling).  */
4331 if (reload_completed)
4333 x = copy_to_mode_reg (insn_data[icode].operand[opnum].mode, x);
4339 /* Subroutine of emit_cmp_and_jump_insns; this function is called when we know
4340 we can do the comparison.
4341 The arguments are the same as for emit_cmp_and_jump_insns; but LABEL may
4342 be NULL_RTX which indicates that only a comparison is to be generated. */
/* NOTE(review): elided excerpt; the do-loop opener, returns after each
   successful emission, and braces are missing.  */
/* Emit the comparison (and branch to LABEL, when non-null) for
   X COMPARISON Y in MODE, trying cbranch, tst-against-zero, and plain cmp
   patterns at MODE and each wider mode in turn.  */
4345 emit_cmp_and_jump_insn_1 (rtx x, rtx y, enum machine_mode mode,
4346 enum rtx_code comparison, int unsignedp, rtx label)
4348 rtx test = gen_rtx_fmt_ee (comparison, mode, x, y);
4349 enum mode_class class = GET_MODE_CLASS (mode);
4350 enum machine_mode wider_mode = mode;
4352 /* Try combined insns first. */
4355 enum insn_code icode;
/* Retarget the test rtx at the current (possibly widened) mode.  */
4356 PUT_MODE (test, wider_mode);
4360 icode = optab_handler (cbranch_optab, wider_mode)->insn_code;
4362 if (icode != CODE_FOR_nothing
4363 && insn_data[icode].operand[0].predicate (test, wider_mode))
4365 x = prepare_operand (icode, x, 1, mode, wider_mode, unsignedp);
4366 y = prepare_operand (icode, y, 2, mode, wider_mode, unsignedp);
4367 emit_jump_insn (GEN_FCN (icode) (test, x, y, label));
4372 /* Handle some compares against zero. */
4373 icode = (int) optab_handler (tst_optab, wider_mode)->insn_code;
4374 if (y == CONST0_RTX (mode) && icode != CODE_FOR_nothing)
4376 x = prepare_operand (icode, x, 0, mode, wider_mode, unsignedp);
4377 emit_insn (GEN_FCN (icode) (x));
/* Branch is emitted only when LABEL is given (guard elided).  */
4379 emit_jump_insn (bcc_gen_fctn[(int) comparison] (label));
4383 /* Handle compares for which there is a directly suitable insn. */
4385 icode = (int) optab_handler (cmp_optab, wider_mode)->insn_code;
4386 if (icode != CODE_FOR_nothing)
4388 x = prepare_operand (icode, x, 0, mode, wider_mode, unsignedp);
4389 y = prepare_operand (icode, y, 1, mode, wider_mode, unsignedp);
4390 emit_insn (GEN_FCN (icode) (x, y));
4392 emit_jump_insn (bcc_gen_fctn[(int) comparison] (label));
/* Stop widening when the class has no wider modes.  */
4396 if (!CLASS_HAS_WIDER_MODES_P (class))
4399 wider_mode = GET_MODE_WIDER_MODE (wider_mode);
4401 while (wider_mode != VOIDmode);
4406 /* Generate code to compare X with Y so that the condition codes are
4407 set and to jump to LABEL if the condition is true. If X is a
4408 constant and Y is not a constant, then the comparison is swapped to
4409 ensure that the comparison RTL has the canonical form.
4411 UNSIGNEDP nonzero says that X and Y are unsigned; this matters if they
4412 need to be widened by emit_cmp_insn. UNSIGNEDP is also used to select
4413 the proper branch condition code.
4415 If X and Y have mode BLKmode, then SIZE specifies the size of both X and Y.
4417 MODE is the mode of the inputs (in case they are const_int).
4419 COMPARISON is the rtl operator to compare with (EQ, NE, GT, etc.). It will
4420 be passed unchanged to emit_cmp_insn, then potentially converted into an
4421 unsigned variant based on UNSIGNEDP to select a proper jump instruction. */
/* NOTE(review): elided excerpt; the operand swap statements and braces
   are missing between visible lines.  */
/* Public entry: compare X with Y and jump to LABEL if COMPARISON holds.
   Canonicalizes operand order, forces constant op0 to a register,
   adjusts signedness, then delegates to prepare_cmp_insn +
   emit_cmp_and_jump_insn_1.  */
4424 emit_cmp_and_jump_insns (rtx x, rtx y, enum rtx_code comparison, rtx size,
4425 enum machine_mode mode, int unsignedp, rtx label)
4427 rtx op0 = x, op1 = y;
4429 /* Swap operands and condition to ensure canonical RTL. */
4430 if (swap_commutative_operands_p (x, y))
4432 /* If we're not emitting a branch, callers are required to pass
4433 operands in an order conforming to canonical RTL. We relax this
4434 for commutative comparisons so callers using EQ don't need to do
4435 swapping by hand. */
4436 gcc_assert (label || (comparison == swap_condition (comparison)));
/* op0/op1 swap itself happens in elided lines.  */
4439 comparison = swap_condition (comparison);
4443 /* If OP0 is still a constant, then both X and Y must be constants.
4444 Force X into a register to create canonical RTL. */
4445 if (CONSTANT_P (op0))
4446 op0 = force_reg (mode, op0);
/* Unsigned guard (elided) presumably precedes this conversion.  */
4450 comparison = unsigned_condition (comparison);
4452 prepare_cmp_insn (&op0, &op1, &comparison, size, &mode, &unsignedp,
4454 emit_cmp_and_jump_insn_1 (op0, op1, mode, comparison, unsignedp, label);
4457 /* Like emit_cmp_and_jump_insns, but generate only the comparison. */
/* Thin wrapper: emit only the comparison (no branch) by passing a null
   label to emit_cmp_and_jump_insns.  */
4460 emit_cmp_insn (rtx x, rtx y, enum rtx_code comparison, rtx size,
4461 enum machine_mode mode, int unsignedp)
4463 emit_cmp_and_jump_insns (x, y, comparison, size, mode, unsignedp, 0);
4466 /* Emit a library call comparison between floating point X and Y.
4467 COMPARISON is the rtl operator to compare with (EQ, NE, GT, etc.). */
/* NOTE(review): elided excerpt; the switch statement around the
   true_rtx/false_rtx assignments, loop breaks, and several writes back
   through the out-parameters are missing.  */
/* Emit a soft-float library comparison of *PX and *PY for *PCOMPARISON:
   pick a libfunc at this or a wider mode (possibly swapping or reversing
   the condition), call it, and rewrite the out-parameters so the caller
   compares the libcall result instead.  */
4470 prepare_float_lib_cmp (rtx *px, rtx *py, enum rtx_code *pcomparison,
4471 enum machine_mode *pmode, int *punsignedp)
4473 enum rtx_code comparison = *pcomparison;
4474 enum rtx_code swapped = swap_condition (comparison);
4475 enum rtx_code reversed = reverse_condition_maybe_unordered (comparison);
4478 enum machine_mode orig_mode = GET_MODE (x);
4479 enum machine_mode mode, cmp_mode;
4480 rtx value, target, insns, equiv;
4482 bool reversed_p = false;
4483 cmp_mode = targetm.libgcc_cmp_return_mode ();
/* Search this mode and wider ones for a usable libfunc: direct code
   first, then the swapped condition, then the reversed one.  */
4485 for (mode = orig_mode;
4487 mode = GET_MODE_WIDER_MODE (mode))
4489 if ((libfunc = optab_libfunc (code_to_optab[comparison], mode)))
4492 if ((libfunc = optab_libfunc (code_to_optab[swapped] , mode)))
4495 tmp = x; x = y; y = tmp;
4496 comparison = swapped;
4500 if ((libfunc = optab_libfunc (code_to_optab[reversed], mode))
4501 && FLOAT_LIB_COMPARE_RETURNS_BOOL (mode, reversed))
4503 comparison = reversed;
4509 gcc_assert (mode != VOIDmode);
/* Widen both operands to the libfunc's mode when needed.  */
4511 if (mode != orig_mode)
4513 x = convert_to_mode (mode, x, 0);
4514 y = convert_to_mode (mode, y, 0);
4517 /* Attach a REG_EQUAL note describing the semantics of the libcall to
4518 the RTL. The allows the RTL optimizers to delete the libcall if the
4519 condition can be determined at compile-time. */
4520 if (comparison == UNORDERED)
/* UNORDERED(x,y) == (x != x) || (y != y) for IEEE NaN semantics.  */
4522 rtx temp = simplify_gen_relational (NE, cmp_mode, mode, x, x);
4523 equiv = simplify_gen_relational (NE, cmp_mode, mode, y, y);
4524 equiv = simplify_gen_ternary (IF_THEN_ELSE, cmp_mode, cmp_mode,
4525 temp, const_true_rtx, equiv);
4529 equiv = simplify_gen_relational (comparison, cmp_mode, mode, x, y);
4530 if (! FLOAT_LIB_COMPARE_RETURNS_BOOL (mode, comparison))
4532 rtx true_rtx, false_rtx;
/* Per-condition return-value encoding of the soft-fp comparison
   routines (the surrounding switch cases are elided).  */
4537 true_rtx = const0_rtx;
4538 false_rtx = const_true_rtx;
4542 true_rtx = const_true_rtx;
4543 false_rtx = const0_rtx;
4547 true_rtx = const1_rtx;
4548 false_rtx = const0_rtx;
4552 true_rtx = const0_rtx;
4553 false_rtx = constm1_rtx;
4557 true_rtx = constm1_rtx;
4558 false_rtx = const0_rtx;
4562 true_rtx = const0_rtx;
4563 false_rtx = const1_rtx;
4569 equiv = simplify_gen_ternary (IF_THEN_ELSE, cmp_mode, cmp_mode,
4570 equiv, true_rtx, false_rtx);
/* Emit the actual call inside a libcall block tagged with EQUIV.  */
4575 value = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
4576 cmp_mode, 2, x, mode, y, mode);
4577 insns = get_insns ();
4580 target = gen_reg_rtx (cmp_mode);
4581 emit_libcall_block (insns, target, value, equiv);
/* Boolean-returning routines reduce to comparing the result with zero.  */
4583 if (comparison == UNORDERED
4584 || FLOAT_LIB_COMPARE_RETURNS_BOOL (mode, comparison))
4585 comparison = reversed_p ? EQ : NE;
4590 *pcomparison = comparison;
4594 /* Generate code to indirectly jump to a location given in the rtx LOC. */
/* Emit an indirect jump to address LOC, copying it into a Pmode register
   first if the indirect_jump pattern's operand predicate rejects it.  */
4597 emit_indirect_jump (rtx loc)
4599 if (!insn_data[(int) CODE_FOR_indirect_jump].operand[0].predicate
4601 loc = copy_to_mode_reg (Pmode, loc);
4603 emit_jump_insn (gen_indirect_jump (loc));
4607 #ifdef HAVE_conditional_move
4609 /* Emit a conditional move instruction if the machine supports one for that
4610 condition and machine mode.
4612 OP0 and OP1 are the operands that should be compared using CODE. CMODE is
4613 the mode to use should they be constants. If it is VOIDmode, they cannot
4616 OP2 should be stored in TARGET if the comparison is true, otherwise OP3
4617 should be stored there. MODE is the mode to use should they be constants.
4618 If it is VOIDmode, they cannot both be constants.
4620 The result is either TARGET (perhaps modified) or NULL_RTX if the operation
4621 is not supported. */
/* NOTE(review): elided excerpt; operand-swap statements, start/end of
   sequence handling, failure returns, and braces are missing.  */
/* Emit TARGET = (OP0 CODE OP1) ? OP2 : OP3 using the target's movcc
   pattern; returns TARGET on success or NULL_RTX when unsupported
   (per the elided comment above).  */
4624 emit_conditional_move (rtx target, enum rtx_code code, rtx op0, rtx op1,
4625 enum machine_mode cmode, rtx op2, rtx op3,
4626 enum machine_mode mode, int unsignedp)
4628 rtx tem, subtarget, comparison, insn;
4629 enum insn_code icode;
4630 enum rtx_code reversed;
4632 /* If one operand is constant, make it the second one. Only do this
4633 if the other operand is not constant as well. */
4635 if (swap_commutative_operands_p (op0, op1))
4640 code = swap_condition (code);
4643 /* get_condition will prefer to generate LT and GT even if the old
4644 comparison was against zero, so undo that canonicalization here since
4645 comparisons against zero are cheaper. */
4646 if (code == LT && op1 == const1_rtx)
4647 code = LE, op1 = const0_rtx;
4648 else if (code == GT && op1 == constm1_rtx)
4649 code = GE, op1 = const0_rtx;
4651 if (cmode == VOIDmode)
4652 cmode = GET_MODE (op0);
/* Canonicalize the data operands too, reversing the condition if legal
   (body of the swap is elided).  */
4654 if (swap_commutative_operands_p (op2, op3)
4655 && ((reversed = reversed_comparison_code_parts (code, op0, op1, NULL))
4664 if (mode == VOIDmode)
4665 mode = GET_MODE (op2);
4667 icode = movcc_gen_code[mode];
/* No conditional-move pattern for this mode => caller gets NULL.  */
4669 if (icode == CODE_FOR_nothing)
4673 target = gen_reg_rtx (mode);
4677 /* If the insn doesn't accept these operands, put them in pseudos. */
4679 if (!insn_data[icode].operand[0].predicate
4680 (subtarget, insn_data[icode].operand[0].mode))
4681 subtarget = gen_reg_rtx (insn_data[icode].operand[0].mode);
4683 if (!insn_data[icode].operand[2].predicate
4684 (op2, insn_data[icode].operand[2].mode))
4685 op2 = copy_to_mode_reg (insn_data[icode].operand[2].mode, op2);
4687 if (!insn_data[icode].operand[3].predicate
4688 (op3, insn_data[icode].operand[3].mode))
4689 op3 = copy_to_mode_reg (insn_data[icode].operand[3].mode, op3);
4691 /* Everything should now be in the suitable form, so emit the compare insn
4692 and then the conditional move. */
4695 = compare_from_rtx (op0, op1, code, unsignedp, cmode, NULL_RTX);
4697 /* ??? Watch for const0_rtx (nop) and const_true_rtx (unconditional)? */
4698 /* We can get const0_rtx or const_true_rtx in some circumstances. Just
4699 return NULL and let the caller figure out how best to deal with this
4701 if (GET_CODE (comparison) != code)
4704 insn = GEN_FCN (icode) (subtarget, comparison, op2, op3);
4706 /* If that failed, then give up. */
/* On success: widen/copy the subtarget back into the real target.  */
4712 if (subtarget != target)
4713 convert_move (target, subtarget, 0);
4718 /* Return nonzero if a conditional move of mode MODE is supported.
4720 This function is for combine so it can tell whether an insn that looks
4721 like a conditional move is actually supported by the hardware. If we
4722 guess wrong we lose a bit on optimization, but that's it. */
4723 /* ??? sparc64 supports conditionally moving integers values based on fp
4724 comparisons, and vice versa. How do we handle them? */
/* Predicate for combine: nonzero iff the target has a movcc pattern for
   MODE (the return statements themselves are elided).  */
4727 can_conditionally_move_p (enum machine_mode mode)
4729 if (movcc_gen_code[mode] != CODE_FOR_nothing)
4737 /* Emit a conditional addition instruction if the machine supports one for that
4738 condition and machine mode.
4740 OP0 and OP1 are the operands that should be compared using CODE. CMODE is
4741 the mode to use should they be constants. If it is VOIDmode, they cannot
4744 OP2 should be stored in TARGET if the comparison is true, otherwise OP2+OP3
4745 should be stored there. MODE is the mode to use should they be constants.
4746 If it is VOIDmode, they cannot both be constants.
4748 The result is either TARGET (perhaps modified) or NULL_RTX if the operation
4749 is not supported. */
/* NOTE(review): elided excerpt; operand-swap statements, failure returns
   and braces are missing -- structure parallels emit_conditional_move.  */
/* Emit TARGET = (OP0 CODE OP1) ? OP2 : OP2+OP3 using the addcc optab;
   returns TARGET on success or NULL_RTX when unsupported (per the elided
   comment above).  */
4752 emit_conditional_add (rtx target, enum rtx_code code, rtx op0, rtx op1,
4753 enum machine_mode cmode, rtx op2, rtx op3,
4754 enum machine_mode mode, int unsignedp)
4756 rtx tem, subtarget, comparison, insn;
4757 enum insn_code icode;
4758 enum rtx_code reversed;
4760 /* If one operand is constant, make it the second one. Only do this
4761 if the other operand is not constant as well. */
4763 if (swap_commutative_operands_p (op0, op1))
4768 code = swap_condition (code);
4771 /* get_condition will prefer to generate LT and GT even if the old
4772 comparison was against zero, so undo that canonicalization here since
4773 comparisons against zero are cheaper. */
4774 if (code == LT && op1 == const1_rtx)
4775 code = LE, op1 = const0_rtx;
4776 else if (code == GT && op1 == constm1_rtx)
4777 code = GE, op1 = const0_rtx;
4779 if (cmode == VOIDmode)
4780 cmode = GET_MODE (op0);
/* Canonicalize the data operands, reversing the condition if legal
   (swap body elided).  */
4782 if (swap_commutative_operands_p (op2, op3)
4783 && ((reversed = reversed_comparison_code_parts (code, op0, op1, NULL))
4792 if (mode == VOIDmode)
4793 mode = GET_MODE (op2);
4795 icode = optab_handler (addcc_optab, mode)->insn_code;
/* No conditional-add pattern for this mode => caller gets NULL.  */
4797 if (icode == CODE_FOR_nothing)
4801 target = gen_reg_rtx (mode);
4803 /* If the insn doesn't accept these operands, put them in pseudos. */
4805 if (!insn_data[icode].operand[0].predicate
4806 (target, insn_data[icode].operand[0].mode))
4807 subtarget = gen_reg_rtx (insn_data[icode].operand[0].mode);
4811 if (!insn_data[icode].operand[2].predicate
4812 (op2, insn_data[icode].operand[2].mode))
4813 op2 = copy_to_mode_reg (insn_data[icode].operand[2].mode, op2);
4815 if (!insn_data[icode].operand[3].predicate
4816 (op3, insn_data[icode].operand[3].mode))
4817 op3 = copy_to_mode_reg (insn_data[icode].operand[3].mode, op3);
4819 /* Everything should now be in the suitable form, so emit the compare insn
4820 and then the conditional move. */
4823 = compare_from_rtx (op0, op1, code, unsignedp, cmode, NULL_RTX);
4825 /* ??? Watch for const0_rtx (nop) and const_true_rtx (unconditional)? */
4826 /* We can get const0_rtx or const_true_rtx in some circumstances. Just
4827 return NULL and let the caller figure out how best to deal with this
4829 if (GET_CODE (comparison) != code)
4832 insn = GEN_FCN (icode) (subtarget, comparison, op2, op3);
4834 /* If that failed, then give up. */
/* On success: widen/copy the subtarget back into the real target.  */
4840 if (subtarget != target)
4841 convert_move (target, subtarget, 0);
4846 /* These functions attempt to generate an insn body, rather than
4847 emitting the insn, but if the gen function already emits them, we
4848 make no attempt to turn them back into naked patterns. */
4850 /* Generate and return an insn body to add Y to X. */
/* NOTE(review): braces and the return type are not visible in this excerpt;
   code left byte-identical.  Asserts check the add pattern's operand
   predicates up front, so callers must pass operands the pattern accepts.  */
4853 gen_add2_insn (rtx x, rtx y)
4855 int icode = (int) optab_handler (add_optab, GET_MODE (x))->insn_code;
4857 gcc_assert (insn_data[icode].operand[0].predicate
4858 (x, insn_data[icode].operand[0].mode));
4859 gcc_assert (insn_data[icode].operand[1].predicate
4860 (x, insn_data[icode].operand[1].mode));
4861 gcc_assert (insn_data[icode].operand[2].predicate
4862 (y, insn_data[icode].operand[2].mode));
4864 return GEN_FCN (icode) (x, x, y);
4867 /* Generate and return an insn body to add r1 and c,
4868 storing the result in r0. */
/* NOTE(review): unlike gen_add2_insn this returns NULL-equivalent on
   predicate failure rather than asserting (the failing return path is among
   the lines missing from this excerpt); code left byte-identical.  */
4871 gen_add3_insn (rtx r0, rtx r1, rtx c)
4873 int icode = (int) optab_handler (add_optab, GET_MODE (r0))->insn_code;
4875 if (icode == CODE_FOR_nothing
4876 || !(insn_data[icode].operand[0].predicate
4877 (r0, insn_data[icode].operand[0].mode))
4878 || !(insn_data[icode].operand[1].predicate
4879 (r1, insn_data[icode].operand[1].mode))
4880 || !(insn_data[icode].operand[2].predicate
4881 (c, insn_data[icode].operand[2].mode)))
4884 return GEN_FCN (icode) (r0, r1, c);
/* Report whether the target has a usable two-address add insn for the
   mode of X with operands X and Y (predicate-checked).  NOTE(review):
   return statements are missing from this excerpt; code left unchanged.  */
4888 have_add2_insn (rtx x, rtx y)
4892 gcc_assert (GET_MODE (x) != VOIDmode);
4894 icode = (int) optab_handler (add_optab, GET_MODE (x))->insn_code;
4896 if (icode == CODE_FOR_nothing)
4899 if (!(insn_data[icode].operand[0].predicate
4900 (x, insn_data[icode].operand[0].mode))
4901 || !(insn_data[icode].operand[1].predicate
4902 (x, insn_data[icode].operand[1].mode))
4903 || !(insn_data[icode].operand[2].predicate
4904 (y, insn_data[icode].operand[2].mode)))
4910 /* Generate and return an insn body to subtract Y from X. */
/* NOTE(review): mirror of gen_add2_insn using sub_optab; braces/return type
   not visible in this excerpt; code left byte-identical.  */
4913 gen_sub2_insn (rtx x, rtx y)
4915 int icode = (int) optab_handler (sub_optab, GET_MODE (x))->insn_code;
4917 gcc_assert (insn_data[icode].operand[0].predicate
4918 (x, insn_data[icode].operand[0].mode));
4919 gcc_assert (insn_data[icode].operand[1].predicate
4920 (x, insn_data[icode].operand[1].mode));
4921 gcc_assert (insn_data[icode].operand[2].predicate
4922 (y, insn_data[icode].operand[2].mode));
4924 return GEN_FCN (icode) (x, x, y);
4927 /* Generate and return an insn body to subtract r1 and c,
4928 storing the result in r0. */
/* NOTE(review): mirror of gen_add3_insn using sub_optab; the failure
   return is among the lines missing from this excerpt.  */
4931 gen_sub3_insn (rtx r0, rtx r1, rtx c)
4933 int icode = (int) optab_handler (sub_optab, GET_MODE (r0))->insn_code;
4935 if (icode == CODE_FOR_nothing
4936 || !(insn_data[icode].operand[0].predicate
4937 (r0, insn_data[icode].operand[0].mode))
4938 || !(insn_data[icode].operand[1].predicate
4939 (r1, insn_data[icode].operand[1].mode))
4940 || !(insn_data[icode].operand[2].predicate
4941 (c, insn_data[icode].operand[2].mode)))
4944 return GEN_FCN (icode) (r0, r1, c);
/* Report whether the target has a usable two-address subtract insn for
   the mode of X (predicate-checked); mirror of have_add2_insn.
   NOTE(review): return statements are missing from this excerpt.  */
4948 have_sub2_insn (rtx x, rtx y)
4952 gcc_assert (GET_MODE (x) != VOIDmode);
4954 icode = (int) optab_handler (sub_optab, GET_MODE (x))->insn_code;
4956 if (icode == CODE_FOR_nothing)
4959 if (!(insn_data[icode].operand[0].predicate
4960 (x, insn_data[icode].operand[0].mode))
4961 || !(insn_data[icode].operand[1].predicate
4962 (x, insn_data[icode].operand[1].mode))
4963 || !(insn_data[icode].operand[2].predicate
4964 (y, insn_data[icode].operand[2].mode)))
4970 /* Generate the body of an instruction to copy Y into X.
4971 It may be a list of insns, if one insn isn't enough. */
/* NOTE(review): the start_sequence/end_sequence wrapper around
   emit_move_insn_1 is not visible in this excerpt — presumably present;
   code left byte-identical.  */
4974 gen_move_insn (rtx x, rtx y)
4979 emit_move_insn_1 (x, y);
4985 /* Return the insn code used to extend FROM_MODE to TO_MODE.
4986 UNSIGNEDP specifies zero-extension instead of sign-extension. If
4987 no such operation exists, CODE_FOR_nothing will be returned. */
4990 can_extend_p (enum machine_mode to_mode, enum machine_mode from_mode,
/* Targets with HAVE_ptr_extend get a special pointer-extension pattern;
   the guarding condition is among the lines missing from this excerpt.  */
4994 #ifdef HAVE_ptr_extend
4996 return CODE_FOR_ptr_extend;
4999 tab = unsignedp ? zext_optab : sext_optab;
5000 return convert_optab_handler (tab, to_mode, from_mode)->insn_code;
5003 /* Generate the body of an insn to extend Y (with mode MFROM)
5004 into X (with mode MTO). Do zero-extension if UNSIGNEDP is nonzero. */
/* NOTE(review): no CODE_FOR_nothing check is visible — callers must have
   verified can_extend_p first.  */
5007 gen_extend_insn (rtx x, rtx y, enum machine_mode mto,
5008 enum machine_mode mfrom, int unsignedp)
5010 enum insn_code icode = can_extend_p (mto, mfrom, unsignedp);
5011 return GEN_FCN (icode) (x, y);
5014 /* can_fix_p and can_float_p say whether the target machine
5015 can directly convert a given fixed point type to
5016 a given floating point type, or vice versa.
5017 The returned value is the CODE_FOR_... value to use,
5018 or CODE_FOR_nothing if these modes cannot be directly converted.
5020 *TRUNCP_PTR is set to 1 if it is necessary to output
5021 an explicit FTRUNC insn before the fix insn; otherwise 0. */
5023 static enum insn_code
5024 can_fix_p (enum machine_mode fixmode, enum machine_mode fltmode,
5025 int unsignedp, int *truncp_ptr)
5028 enum insn_code icode;
/* First preference: a single truncating-fix pattern, no FTRUNC needed.  */
5030 tab = unsignedp ? ufixtrunc_optab : sfixtrunc_optab;
5031 icode = convert_optab_handler (tab, fixmode, fltmode)->insn_code;
5032 if (icode != CODE_FOR_nothing)
5038 /* FIXME: This requires a port to define both FIX and FTRUNC pattern
5039 for this to work. We need to rework the fix* and ftrunc* patterns
5040 and documentation. */
5041 tab = unsignedp ? ufix_optab : sfix_optab;
5042 icode = convert_optab_handler (tab, fixmode, fltmode)->insn_code;
5043 if (icode != CODE_FOR_nothing
5044 && optab_handler (ftrunc_optab, fltmode)->insn_code != CODE_FOR_nothing)
5051 return CODE_FOR_nothing;
/* Return the insn code for converting FIXMODE (integer) to FLTMODE
   (floating), or CODE_FOR_nothing; UNSIGNEDP selects the unsigned optab.  */
5054 static enum insn_code
5055 can_float_p (enum machine_mode fltmode, enum machine_mode fixmode,
5060 tab = unsignedp ? ufloat_optab : sfloat_optab;
5061 return convert_optab_handler (tab, fltmode, fixmode)->insn_code;
5064 /* Generate code to convert FROM to floating point
5065 and store in TO. FROM must be fixed point and not VOIDmode.
5066 UNSIGNEDP nonzero means regard FROM as unsigned.
5067 Normally this is done by correcting the final value
5068 if it is negative. */
/* NOTE(review): many interleaved source lines (braces, labels, returns,
   some arguments) are missing from this excerpt; code left byte-identical.
   Strategy visible here: (1) direct hardware float insn in some mode pair,
   (2) signed-convert-then-adjust for unsigned inputs, (3) library call.  */
5071 expand_float (rtx to, rtx from, int unsignedp)
5073 enum insn_code icode;
5075 enum machine_mode fmode, imode;
5076 bool can_do_signed = false;
5078 /* Crash now, because we won't be able to decide which mode to use. */
5079 gcc_assert (GET_MODE (from) != VOIDmode);
5081 /* Look for an insn to do the conversion. Do it in the specified
5082 modes if possible; otherwise convert either input, output or both to
5083 wider mode. If the integer mode is wider than the mode of FROM,
5084 we can do the conversion signed even if the input is unsigned. */
5086 for (fmode = GET_MODE (to); fmode != VOIDmode;
5087 fmode = GET_MODE_WIDER_MODE (fmode))
5088 for (imode = GET_MODE (from); imode != VOIDmode;
5089 imode = GET_MODE_WIDER_MODE (imode))
5091 int doing_unsigned = unsignedp;
/* A wider FMODE is only acceptable if it can represent FROM exactly.  */
5093 if (fmode != GET_MODE (to)
5094 && significand_size (fmode) < GET_MODE_BITSIZE (GET_MODE (from)))
5097 icode = can_float_p (fmode, imode, unsignedp);
5098 if (icode == CODE_FOR_nothing && unsignedp)
5100 enum insn_code scode = can_float_p (fmode, imode, 0);
5101 if (scode != CODE_FOR_nothing)
5102 can_do_signed = true;
/* A signed convert in a wider integer mode is exact for unsigned input.  */
5103 if (imode != GET_MODE (from))
5104 icode = scode, doing_unsigned = 0;
5107 if (icode != CODE_FOR_nothing)
5109 if (imode != GET_MODE (from))
5110 from = convert_to_mode (imode, from, unsignedp);
5112 if (fmode != GET_MODE (to))
5113 target = gen_reg_rtx (fmode);
5115 emit_unop_insn (icode, target, from,
5116 doing_unsigned ? UNSIGNED_FLOAT : FLOAT);
5119 convert_move (to, target, 0);
5124 /* Unsigned integer, and no way to convert directly. Convert as signed,
5125 then unconditionally adjust the result. For decimal float values we
5126 do this only if we have already determined that a signed conversion
5127 provides sufficient accuracy. */
5128 if (unsignedp && (can_do_signed || !DECIMAL_FLOAT_MODE_P (GET_MODE (to))))
5130 rtx label = gen_label_rtx ();
5132 REAL_VALUE_TYPE offset;
5134 /* Look for a usable floating mode FMODE wider than the source and at
5135 least as wide as the target. Using FMODE will avoid rounding woes
5136 with unsigned values greater than the signed maximum value. */
5138 for (fmode = GET_MODE (to); fmode != VOIDmode;
5139 fmode = GET_MODE_WIDER_MODE (fmode))
5140 if (GET_MODE_BITSIZE (GET_MODE (from)) < GET_MODE_BITSIZE (fmode)
5141 && can_float_p (fmode, GET_MODE (from), 0) != CODE_FOR_nothing)
5144 if (fmode == VOIDmode)
5146 /* There is no such mode. Pretend the target is wide enough. */
5147 fmode = GET_MODE (to);
5149 /* Avoid double-rounding when TO is narrower than FROM. */
5150 if ((significand_size (fmode) + 1)
5151 < GET_MODE_BITSIZE (GET_MODE (from)))
5154 rtx neglabel = gen_label_rtx ();
5156 /* Don't use TARGET if it isn't a register, is a hard register,
5157 or is the wrong mode. */
5159 || REGNO (target) < FIRST_PSEUDO_REGISTER
5160 || GET_MODE (target) != fmode)
5161 target = gen_reg_rtx (fmode);
5163 imode = GET_MODE (from);
5164 do_pending_stack_adjust ();
5166 /* Test whether the sign bit is set. */
5167 emit_cmp_and_jump_insns (from, const0_rtx, LT, NULL_RTX, imode,
5170 /* The sign bit is not set. Convert as signed. */
5171 expand_float (target, from, 0);
5172 emit_jump_insn (gen_jump (label));
5175 /* The sign bit is set.
5176 Convert to a usable (positive signed) value by shifting right
5177 one bit, while remembering if a nonzero bit was shifted
5178 out; i.e., compute (from & 1) | (from >> 1). */
5180 emit_label (neglabel);
5181 temp = expand_binop (imode, and_optab, from, const1_rtx,
5182 NULL_RTX, 1, OPTAB_LIB_WIDEN);
5183 temp1 = expand_shift (RSHIFT_EXPR, imode, from, integer_one_node,
5185 temp = expand_binop (imode, ior_optab, temp, temp1, temp, 1,
5187 expand_float (target, temp, 0);
5189 /* Multiply by 2 to undo the shift above. */
5190 temp = expand_binop (fmode, add_optab, target, target,
5191 target, 0, OPTAB_LIB_WIDEN);
5193 emit_move_insn (target, temp);
5195 do_pending_stack_adjust ();
5201 /* If we are about to do some arithmetic to correct for an
5202 unsigned operand, do it in a pseudo-register. */
5204 if (GET_MODE (to) != fmode
5205 || !REG_P (to) || REGNO (to) < FIRST_PSEUDO_REGISTER)
5206 target = gen_reg_rtx (fmode);
5208 /* Convert as signed integer to floating. */
5209 expand_float (target, from, 0);
5211 /* If FROM is negative (and therefore TO is negative),
5212 correct its value by 2**bitwidth. */
5214 do_pending_stack_adjust ();
5215 emit_cmp_and_jump_insns (from, const0_rtx, GE, NULL_RTX, GET_MODE (from),
5219 real_2expN (&offset, GET_MODE_BITSIZE (GET_MODE (from)), fmode);
5220 temp = expand_binop (fmode, add_optab, target,
5221 CONST_DOUBLE_FROM_REAL_VALUE (offset, fmode),
5222 target, 0, OPTAB_LIB_WIDEN);
5224 emit_move_insn (target, temp);
5226 do_pending_stack_adjust ();
5231 /* No hardware instruction available; call a library routine. */
/* Library floatsi* entry points start at SImode, so widen smaller inputs.  */
5236 convert_optab tab = unsignedp ? ufloat_optab : sfloat_optab;
5238 if (GET_MODE_SIZE (GET_MODE (from)) < GET_MODE_SIZE (SImode))
5239 from = convert_to_mode (SImode, from, unsignedp);
5241 libfunc = convert_optab_libfunc (tab, GET_MODE (to), GET_MODE (from));
5242 gcc_assert (libfunc);
5246 value = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
5247 GET_MODE (to), 1, from,
5249 insns = get_insns ();
5252 emit_libcall_block (insns, target, value,
5253 gen_rtx_FLOAT (GET_MODE (to), from));
5258 /* Copy result to requested destination
5259 if we have been computing in a temp location. */
5263 if (GET_MODE (target) == GET_MODE (to))
5264 emit_move_insn (to, target);
5266 convert_move (to, target, 0);
5270 /* Generate code to convert FROM to fixed point and store in TO. FROM
5271 must be floating point. */
/* NOTE(review): interleaved source lines are missing from this excerpt
   (braces, labels, returns, some arguments); code left byte-identical.  */
5274 expand_fix (rtx to, rtx from, int unsignedp)
5276 enum insn_code icode;
5278 enum machine_mode fmode, imode;
5281 /* We first try to find a pair of modes, one real and one integer, at
5282 least as wide as FROM and TO, respectively, in which we can open-code
5283 this conversion. If the integer mode is wider than the mode of TO,
5284 we can do the conversion either signed or unsigned. */
5286 for (fmode = GET_MODE (from); fmode != VOIDmode;
5287 fmode = GET_MODE_WIDER_MODE (fmode))
5288 for (imode = GET_MODE (to); imode != VOIDmode;
5289 imode = GET_MODE_WIDER_MODE (imode))
5291 int doing_unsigned = unsignedp;
5293 icode = can_fix_p (imode, fmode, unsignedp, &must_trunc);
5294 if (icode == CODE_FOR_nothing && imode != GET_MODE (to) && unsignedp)
5295 icode = can_fix_p (imode, fmode, 0, &must_trunc), doing_unsigned = 0;
5297 if (icode != CODE_FOR_nothing)
5299 if (fmode != GET_MODE (from))
5300 from = convert_to_mode (fmode, from, 0);
/* must_trunc set by can_fix_p: emit an explicit FTRUNC first.  */
5304 rtx temp = gen_reg_rtx (GET_MODE (from));
5305 from = expand_unop (GET_MODE (from), ftrunc_optab, from,
5309 if (imode != GET_MODE (to))
5310 target = gen_reg_rtx (imode);
5312 emit_unop_insn (icode, target, from,
5313 doing_unsigned ? UNSIGNED_FIX : FIX);
5315 convert_move (to, target, unsignedp);
5320 /* For an unsigned conversion, there is one more way to do it.
5321 If we have a signed conversion, we generate code that compares
5322 the real value to the largest representable positive number. If it
5323 is smaller, the conversion is done normally. Otherwise, subtract
5324 one plus the highest signed number, convert, and add it back.
5326 We only need to check all real modes, since we know we didn't find
5327 anything with a wider integer mode.
5329 This code used to extend FP value into mode wider than the destination.
5330 This is needed for decimal float modes which cannot accurately
5331 represent one plus the highest signed number of the same size, but
5332 not for binary modes. Consider, for instance conversion from SFmode
5335 The hot path through the code is dealing with inputs smaller than 2^63
5336 and doing just the conversion, so there is no bits to lose.
5338 In the other path we know the value is positive in the range 2^63..2^64-1
5339 inclusive. (as for other input overflow happens and result is undefined)
5340 So we know that the most important bit set in mantissa corresponds to
5341 2^63. The subtraction of 2^63 should not generate any rounding as it
5342 simply clears out that bit. The rest is trivial. */
5344 if (unsignedp && GET_MODE_BITSIZE (GET_MODE (to)) <= HOST_BITS_PER_WIDE_INT)
5345 for (fmode = GET_MODE (from); fmode != VOIDmode;
5346 fmode = GET_MODE_WIDER_MODE (fmode))
5347 if (CODE_FOR_nothing != can_fix_p (GET_MODE (to), fmode, 0, &must_trunc)
5348 && (!DECIMAL_FLOAT_MODE_P (fmode)
5349 || GET_MODE_BITSIZE (fmode) > GET_MODE_BITSIZE (GET_MODE (to))))
5352 REAL_VALUE_TYPE offset;
5353 rtx limit, lab1, lab2, insn;
5355 bitsize = GET_MODE_BITSIZE (GET_MODE (to));
5356 real_2expN (&offset, bitsize - 1, fmode);
5357 limit = CONST_DOUBLE_FROM_REAL_VALUE (offset, fmode);
5358 lab1 = gen_label_rtx ();
5359 lab2 = gen_label_rtx ();
5361 if (fmode != GET_MODE (from))
5362 from = convert_to_mode (fmode, from, 0);
5364 /* See if we need to do the subtraction. */
5365 do_pending_stack_adjust ();
5366 emit_cmp_and_jump_insns (from, limit, GE, NULL_RTX, GET_MODE (from),
5369 /* If not, do the signed "fix" and branch around fixup code. */
5370 expand_fix (to, from, 0);
5371 emit_jump_insn (gen_jump (lab2));
5374 /* Otherwise, subtract 2**(N-1), convert to signed number,
5375 then add 2**(N-1). Do the addition using XOR since this
5376 will often generate better code. */
5378 target = expand_binop (GET_MODE (from), sub_optab, from, limit,
5379 NULL_RTX, 0, OPTAB_LIB_WIDEN);
5380 expand_fix (to, target, 0);
5381 target = expand_binop (GET_MODE (to), xor_optab, to,
5383 ((HOST_WIDE_INT) 1 << (bitsize - 1),
5385 to, 1, OPTAB_LIB_WIDEN);
5388 emit_move_insn (to, target);
5392 if (optab_handler (mov_optab, GET_MODE (to))->insn_code
5393 != CODE_FOR_nothing)
5395 /* Make a place for a REG_NOTE and add it. */
5396 insn = emit_move_insn (to, to);
5397 set_unique_reg_note (insn,
5399 gen_rtx_fmt_e (UNSIGNED_FIX,
5407 /* We can't do it with an insn, so use a library call. But first ensure
5408 that the mode of TO is at least as wide as SImode, since those are the
5409 only library calls we know about. */
5411 if (GET_MODE_SIZE (GET_MODE (to)) < GET_MODE_SIZE (SImode))
5413 target = gen_reg_rtx (SImode);
5415 expand_fix (target, from, unsignedp);
5423 convert_optab tab = unsignedp ? ufix_optab : sfix_optab;
5424 libfunc = convert_optab_libfunc (tab, GET_MODE (to), GET_MODE (from));
5425 gcc_assert (libfunc);
5429 value = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
5430 GET_MODE (to), 1, from,
5432 insns = get_insns ();
5435 emit_libcall_block (insns, target, value,
5436 gen_rtx_fmt_e (unsignedp ? UNSIGNED_FIX : FIX,
5437 GET_MODE (to), from));
5442 if (GET_MODE (to) == GET_MODE (target))
5443 emit_move_insn (to, target);
5445 convert_move (to, target, 0);
5449 /* Generate code to convert FROM to fixed point and store in TO. FROM
5450 must be floating point, TO must be signed. Use the conversion optab
5451 TAB to do the conversion. */
/* NOTE(review): braces/returns missing from this excerpt; unlike
   expand_fix there is no library fallback visible here — the caller
   presumably checks availability.  Code left byte-identical.  */
5454 expand_sfix_optab (rtx to, rtx from, convert_optab tab)
5456 enum insn_code icode;
5458 enum machine_mode fmode, imode;
5460 /* We first try to find a pair of modes, one real and one integer, at
5461 least as wide as FROM and TO, respectively, in which we can open-code
5462 this conversion. If the integer mode is wider than the mode of TO,
5463 we can do the conversion either signed or unsigned. */
5465 for (fmode = GET_MODE (from); fmode != VOIDmode;
5466 fmode = GET_MODE_WIDER_MODE (fmode))
5467 for (imode = GET_MODE (to); imode != VOIDmode;
5468 imode = GET_MODE_WIDER_MODE (imode))
5470 icode = convert_optab_handler (tab, imode, fmode)->insn_code;
5471 if (icode != CODE_FOR_nothing)
5473 if (fmode != GET_MODE (from))
5474 from = convert_to_mode (fmode, from, 0);
5476 if (imode != GET_MODE (to))
5477 target = gen_reg_rtx (imode);
5479 emit_unop_insn (icode, target, from, UNKNOWN);
5481 convert_move (to, target, 0);
5489 /* Report whether we have an instruction to perform the operation
5490 specified by CODE on operands of mode MODE. */
5492 have_insn_for (enum rtx_code code, enum machine_mode mode)
5494 return (code_to_optab[(int) code] != 0
5495 && (optab_handler (code_to_optab[(int) code], mode)->insn_code
5496 != CODE_FOR_nothing));
5499 /* Create a blank optab. */
/* Every mode's handler starts out as CODE_FOR_nothing.  */
5504 optab op = xcalloc (sizeof (struct optab), 1);
5506 for (i = 0; i < NUM_MACHINE_MODES; i++)
5507 optab_handler (op, i)->insn_code = CODE_FOR_nothing;
/* Create a blank convert_optab: every (to-mode, from-mode) pair starts
   out as CODE_FOR_nothing.  */
5512 static convert_optab
5513 new_convert_optab (void)
5516 convert_optab op = xcalloc (sizeof (struct convert_optab), 1);
5518 for (i = 0; i < NUM_MACHINE_MODES; i++)
5519 for (j = 0; j < NUM_MACHINE_MODES; j++)
5520 convert_optab_handler (op, i, j)->insn_code = CODE_FOR_nothing;
5525 /* Same, but fill in its code as CODE, and write it into the
5526 code_to_optab table. */
5528 init_optab (enum rtx_code code)
5530 optab op = new_optab ();
5532 code_to_optab[(int) code] = op;
5536 /* Same, but fill in its code as CODE, and do _not_ write it into
5537 the code_to_optab table. */
5539 init_optabv (enum rtx_code code)
5541 optab op = new_optab ();
5546 /* Conversion optabs never go in the code_to_optab table. */
5547 static inline convert_optab
5548 init_convert_optab (enum rtx_code code)
5550 convert_optab op = new_convert_optab ();
5555 /* Initialize the libfunc fields of an entire group of entries in some
5556 optab. Each entry is set equal to a string consisting of a leading
5557 pair of underscores followed by a generic operation name followed by
5558 a mode name (downshifted to lowercase) followed by a single character
5559 representing the number of operands for the given operation (which is
5560 usually one of the characters '2', '3', or '4').
5562 OPTABLE is the table in which libfunc fields are to be initialized.
5563 OPNAME is the generic (string) name of the operation.
5564 SUFFIX is the character which specifies the number of operands for
5565 the given generic operation.
5566 MODE is the mode to generate for.
/* NOTE(review): the underscore/suffix-writing lines are missing from this
   excerpt; code left byte-identical.  */
5570 gen_libfunc (optab optable, const char *opname, int suffix, enum machine_mode mode)
5572 unsigned opname_len = strlen (opname);
5573 const char *mname = GET_MODE_NAME (mode);
5574 unsigned mname_len = strlen (mname);
/* 2 underscores + opname + mode + suffix char + NUL.  */
5575 char *libfunc_name = alloca (2 + opname_len + mname_len + 1 + 1);
5582 for (q = opname; *q; )
5584 for (q = mname; *q; q++)
5585 *p++ = TOLOWER (*q);
5589 set_optab_libfunc (optable, mode,
5590 ggc_alloc_string (libfunc_name, p - libfunc_name));
5593 /* Like gen_libfunc, but verify that integer operation is involved. */
/* Only emit libfuncs for integer modes between word_mode and the widest
   supported libgcc size (2*BITS_PER_WORD, or long long if larger).  */
5596 gen_int_libfunc (optab optable, const char *opname, char suffix,
5597 enum machine_mode mode)
5599 int maxsize = 2 * BITS_PER_WORD;
5601 if (GET_MODE_CLASS (mode) != MODE_INT)
5603 if (maxsize < LONG_LONG_TYPE_SIZE)
5604 maxsize = LONG_LONG_TYPE_SIZE;
5605 if (GET_MODE_CLASS (mode) != MODE_INT
5606 || mode < word_mode || GET_MODE_BITSIZE (mode) > maxsize)
5608 gen_libfunc (optable, opname, suffix, mode);
5611 /* Like gen_libfunc, but verify that FP and set decimal prefix if needed. */
5614 gen_fp_libfunc (optab optable, const char *opname, char suffix,
5615 enum machine_mode mode)
5619 if (GET_MODE_CLASS (mode) == MODE_FLOAT)
5620 gen_libfunc (optable, opname, suffix, mode);
5621 if (DECIMAL_FLOAT_MODE_P (mode))
5623 dec_opname = alloca (sizeof (DECIMAL_PREFIX) + strlen (opname));
5624 /* For BID support, change the name to have either a bid_ or dpd_ prefix
5625 depending on the low level floating format used. */
5626 memcpy (dec_opname, DECIMAL_PREFIX, sizeof (DECIMAL_PREFIX) - 1);
5627 strcpy (dec_opname + sizeof (DECIMAL_PREFIX) - 1, opname);
5628 gen_libfunc (optable, dec_opname, suffix, mode);
5632 /* Like gen_libfunc, but verify that FP or INT operation is involved. */
5635 gen_int_fp_libfunc (optab optable, const char *name, char suffix,
5636 enum machine_mode mode)
5638 if (DECIMAL_FLOAT_MODE_P (mode) || GET_MODE_CLASS (mode) == MODE_FLOAT)
5639 gen_fp_libfunc (optable, name, suffix, mode);
5640 if (INTEGRAL_MODE_P (mode))
5641 gen_int_libfunc (optable, name, suffix, mode);
5644 /* Like gen_libfunc, but verify that FP or INT operation is involved
5645 and add 'v' suffix for integer operation. */
/* NOTE(review): the line writing the 'v' into v_name[len] is missing from
   this excerpt; code left byte-identical.  */
5648 gen_intv_fp_libfunc (optab optable, const char *name, char suffix,
5649 enum machine_mode mode)
5651 if (DECIMAL_FLOAT_MODE_P (mode) || GET_MODE_CLASS (mode) == MODE_FLOAT)
5652 gen_fp_libfunc (optable, name, suffix, mode);
5653 if (GET_MODE_CLASS (mode) == MODE_INT)
5655 int len = strlen (name);
5656 char *v_name = alloca (len + 2);
5657 strcpy (v_name, name);
5659 v_name[len + 1] = 0;
5660 gen_int_libfunc (optable, v_name, suffix, mode);
5664 /* Initialize the libfunc fields of an entire group of entries of an
5665 inter-mode-class conversion optab. The string formation rules are
5666 similar to the ones for init_libfuncs, above, but instead of having
5667 a mode name and an operand count these functions have two mode names
5668 and no operand count. */
/* NOTE(review): suffix-writing lines and the opname parameter declaration
   are missing from this excerpt; code left byte-identical.  */
5671 gen_interclass_conv_libfunc (convert_optab tab,
5673 enum machine_mode tmode,
5674 enum machine_mode fmode)
5676 size_t opname_len = strlen (opname);
5677 size_t mname_len = 0;
5679 const char *fname, *tname;
5681 char *libfunc_name, *suffix;
5682 char *nondec_name, *dec_name, *nondec_suffix, *dec_suffix;
5685 /* If this is a decimal conversion, add the current BID vs. DPD prefix that
5686 depends on which underlying decimal floating point format is used. */
5687 const size_t dec_len = sizeof (DECIMAL_PREFIX) - 1;
5689 mname_len = strlen (GET_MODE_NAME (tmode)) + strlen (GET_MODE_NAME (fmode));
/* Build both the plain "__op" and decimal-prefixed "__bid_op"/"__dpd_op"
   name skeletons, then pick one below based on the modes involved.  */
5691 nondec_name = alloca (2 + opname_len + mname_len + 1 + 1);
5692 nondec_name[0] = '_';
5693 nondec_name[1] = '_';
5694 memcpy (&nondec_name[2], opname, opname_len);
5695 nondec_suffix = nondec_name + opname_len + 2;
5697 dec_name = alloca (2 + dec_len + opname_len + mname_len + 1 + 1);
5700 memcpy (&dec_name[2], DECIMAL_PREFIX, dec_len);
5701 memcpy (&dec_name[2+dec_len], opname, opname_len);
5702 dec_suffix = dec_name + dec_len + opname_len + 2;
5704 fname = GET_MODE_NAME (fmode);
5705 tname = GET_MODE_NAME (tmode);
5707 if (DECIMAL_FLOAT_MODE_P(fmode) || DECIMAL_FLOAT_MODE_P(tmode))
5709 libfunc_name = dec_name;
5710 suffix = dec_suffix;
5714 libfunc_name = nondec_name;
5715 suffix = nondec_suffix;
5719 for (q = fname; *q; p++, q++)
5721 for (q = tname; *q; p++, q++)
5726 set_conv_libfunc (tab, tmode, fmode,
5727 ggc_alloc_string (libfunc_name, p - libfunc_name));
5730 /* Same as gen_interclass_conv_libfunc but verify that we are producing
5731 int->fp conversion. */
5734 gen_int_to_fp_conv_libfunc (convert_optab tab,
5736 enum machine_mode tmode,
5737 enum machine_mode fmode)
5739 if (GET_MODE_CLASS (fmode) != MODE_INT)
5741 if (GET_MODE_CLASS (tmode) != MODE_FLOAT && !DECIMAL_FLOAT_MODE_P (tmode))
5743 gen_interclass_conv_libfunc (tab, opname, tmode, fmode);
5746 /* ufloat_optab is special by using floatun for FP and floatuns decimal fp
/* The passed-in OPNAME is deliberately ignored; the correct name is
   selected here based on whether the target mode is decimal FP.  */
5750 gen_ufloat_conv_libfunc (convert_optab tab,
5751 const char *opname ATTRIBUTE_UNUSED,
5752 enum machine_mode tmode,
5753 enum machine_mode fmode)
5755 if (DECIMAL_FLOAT_MODE_P (tmode))
5756 gen_int_to_fp_conv_libfunc (tab, "floatuns", tmode, fmode);
5758 gen_int_to_fp_conv_libfunc (tab, "floatun", tmode, fmode);
5761 /* Same as gen_interclass_conv_libfunc but verify that we are producing
5762 an int->fp conversion with no decimal floating point involved (the
checks below require an integer FMODE and a binary-float TMODE). */
5765 gen_int_to_fp_nondecimal_conv_libfunc (convert_optab tab,
5767 enum machine_mode tmode,
5768 enum machine_mode fmode)
5770 if (GET_MODE_CLASS (fmode) != MODE_INT)
5772 if (GET_MODE_CLASS (tmode) != MODE_FLOAT)
5774 gen_interclass_conv_libfunc (tab, opname, tmode, fmode);
5777 /* Same as gen_interclass_conv_libfunc but verify that we are producing
5778 an fp->int conversion.  (NOTE(review): the visible checks accept
decimal FP source modes, contrary to what this comment used to claim.) */
5781 gen_fp_to_int_conv_libfunc (convert_optab tab,
5783 enum machine_mode tmode,
5784 enum machine_mode fmode)
5786 if (GET_MODE_CLASS (fmode) != MODE_FLOAT && !DECIMAL_FLOAT_MODE_P (fmode))
5788 if (GET_MODE_CLASS (tmode) != MODE_INT)
5790 gen_interclass_conv_libfunc (tab, opname, tmode, fmode);
5793 /* Initialize the libfunc fields of an intra-mode-class conversion optab.
5794 The string formation rules are
5795 similar to the ones for init_libfunc, above. */
/* NOTE(review): suffix-writing lines and the braces are missing from this
   excerpt; structure parallels gen_interclass_conv_libfunc above.  */
5798 gen_intraclass_conv_libfunc (convert_optab tab, const char *opname,
5799 enum machine_mode tmode, enum machine_mode fmode)
5801 size_t opname_len = strlen (opname);
5802 size_t mname_len = 0;
5804 const char *fname, *tname;
5806 char *nondec_name, *dec_name, *nondec_suffix, *dec_suffix;
5807 char *libfunc_name, *suffix;
5810 /* If this is a decimal conversion, add the current BID vs. DPD prefix that
5811 depends on which underlying decimal floating point format is used. */
5812 const size_t dec_len = sizeof (DECIMAL_PREFIX) - 1;
5814 mname_len = strlen (GET_MODE_NAME (tmode)) + strlen (GET_MODE_NAME (fmode));
5816 nondec_name = alloca (2 + opname_len + mname_len + 1 + 1);
5817 nondec_name[0] = '_';
5818 nondec_name[1] = '_';
5819 memcpy (&nondec_name[2], opname, opname_len);
5820 nondec_suffix = nondec_name + opname_len + 2;
5822 dec_name = alloca (2 + dec_len + opname_len + mname_len + 1 + 1);
5825 memcpy (&dec_name[2], DECIMAL_PREFIX, dec_len);
5826 memcpy (&dec_name[2 + dec_len], opname, opname_len);
5827 dec_suffix = dec_name + dec_len + opname_len + 2;
5829 fname = GET_MODE_NAME (fmode);
5830 tname = GET_MODE_NAME (tmode);
5832 if (DECIMAL_FLOAT_MODE_P(fmode) || DECIMAL_FLOAT_MODE_P(tmode))
5834 libfunc_name = dec_name;
5835 suffix = dec_suffix;
5839 libfunc_name = nondec_name;
5840 suffix = nondec_suffix;
5844 for (q = fname; *q; p++, q++)
5846 for (q = tname; *q; p++, q++)
5852 set_conv_libfunc (tab, tmode, fmode,
5853 ggc_alloc_string (libfunc_name, p - libfunc_name));
5856 /* Pick proper libcall for trunc_optab. We need to choose if we do
5857 truncation or extension and interclass or intraclass. */
5860 gen_trunc_conv_libfunc (convert_optab tab,
5862 enum machine_mode tmode,
5863 enum machine_mode fmode)
5865 if (GET_MODE_CLASS (tmode) != MODE_FLOAT && !DECIMAL_FLOAT_MODE_P (tmode))
5867 if (GET_MODE_CLASS (fmode) != MODE_FLOAT && !DECIMAL_FLOAT_MODE_P (fmode))
/* Crossing between binary and decimal float classes is interclass.  */
5872 if ((GET_MODE_CLASS (tmode) == MODE_FLOAT && DECIMAL_FLOAT_MODE_P (fmode))
5873 || (GET_MODE_CLASS (fmode) == MODE_FLOAT && DECIMAL_FLOAT_MODE_P (tmode)))
5874 gen_interclass_conv_libfunc (tab, opname, tmode, fmode);
/* A "trunc" libfunc only makes sense when FMODE is wider than TMODE.  */
5876 if (GET_MODE_PRECISION (fmode) <= GET_MODE_PRECISION (tmode))
5879 if ((GET_MODE_CLASS (tmode) == MODE_FLOAT
5880 && GET_MODE_CLASS (fmode) == MODE_FLOAT)
5881 || (DECIMAL_FLOAT_MODE_P (fmode) && DECIMAL_FLOAT_MODE_P (tmode)))
5882 gen_intraclass_conv_libfunc (tab, opname, tmode, fmode);
5885 /* Pick proper libcall for extend_optab. We need to choose if we do
5886 truncation or extension and interclass or intraclass. */
5889 gen_extend_conv_libfunc (convert_optab tab,
5890 const char *opname ATTRIBUTE_UNUSED,
5891 enum machine_mode tmode,
5892 enum machine_mode fmode)
5894 if (GET_MODE_CLASS (tmode) != MODE_FLOAT && !DECIMAL_FLOAT_MODE_P (tmode))
5896 if (GET_MODE_CLASS (fmode) != MODE_FLOAT && !DECIMAL_FLOAT_MODE_P (fmode))
/* Crossing between binary and decimal float classes is interclass.  */
5901 if ((GET_MODE_CLASS (tmode) == MODE_FLOAT && DECIMAL_FLOAT_MODE_P (fmode))
5902 || (GET_MODE_CLASS (fmode) == MODE_FLOAT && DECIMAL_FLOAT_MODE_P (tmode)))
5903 gen_interclass_conv_libfunc (tab, opname, tmode, fmode);
/* An "extend" libfunc only makes sense when FMODE is narrower than TMODE.  */
5905 if (GET_MODE_PRECISION (fmode) > GET_MODE_PRECISION (tmode))
5908 if ((GET_MODE_CLASS (tmode) == MODE_FLOAT
5909 && GET_MODE_CLASS (fmode) == MODE_FLOAT)
5910 || (DECIMAL_FLOAT_MODE_P (fmode) && DECIMAL_FLOAT_MODE_P (tmode)))
5911 gen_intraclass_conv_libfunc (tab, opname, tmode, fmode);
/* Build the SYMBOL_REF rtx for the library routine NAME, with section
   flags encoded via targetm.encode_section_info on a dummy FUNCTION_DECL.
   NOTE(review): the return statement is missing from this excerpt.  */
5915 init_one_libfunc (const char *name)
5919 /* Create a FUNCTION_DECL that can be passed to
5920 targetm.encode_section_info. */
5921 /* ??? We don't have any type information except for this is
5922 a function. Pretend this is "int foo()". */
5923 tree decl = build_decl (FUNCTION_DECL, get_identifier (name),
5924 build_function_type (integer_type_node, NULL_TREE));
5925 DECL_ARTIFICIAL (decl) = 1;
5926 DECL_EXTERNAL (decl) = 1;
5927 TREE_PUBLIC (decl) = 1;
5929 symbol = XEXP (DECL_RTL (decl), 0);
5931 /* Zap the nonsensical SYMBOL_REF_DECL for this. What we're left with
5932 are the flags assigned by targetm.encode_section_info. */
5933 SET_SYMBOL_REF_DECL (symbol, 0);
5938 /* Call this to reset the function entry for one optab (OPTABLE) in mode
5939 MODE to NAME, which should be either 0 or a string constant. */
/* NOTE(review): the hash-table key uses the optab's offset from
   optab_table[0]; braces and some lines missing from this excerpt.  */
5941 set_optab_libfunc (optab optable, enum machine_mode mode, const char *name)
5944 struct libfunc_entry e;
5945 struct libfunc_entry **slot;
5946 e.optab = (size_t) (optab_table[0] - optable);
5951 val = init_one_libfunc (name);
5954 slot = (struct libfunc_entry **) htab_find_slot (libfunc_hash, &e, INSERT);
5956 *slot = ggc_alloc (sizeof (struct libfunc_entry));
5957 (*slot)->optab = (size_t) (optab_table[0] - optable);
5958 (*slot)->mode1 = mode;
5959 (*slot)->mode2 = VOIDmode;
5960 (*slot)->libfunc = val;
5963 /* Call this to reset the function entry for one conversion optab
5964 (OPTABLE) from mode FMODE to mode TMODE to NAME, which should be
5965 either 0 or a string constant. */
5967 set_conv_libfunc (convert_optab optable, enum machine_mode tmode,
5968 enum machine_mode fmode, const char *name)
5971 struct libfunc_entry e;
5972 struct libfunc_entry **slot;
/* Hash key: index of the conversion optab plus both modes (set below).
   Same operand order as set_optab_libfunc above.  */
5973 e.optab = (size_t) (convert_optab_table[0] - optable);
/* Elided: the NAME == 0 handling; otherwise intern the symbol.  */
5978 val = init_one_libfunc (name);
5981 slot = (struct libfunc_entry **) htab_find_slot (libfunc_hash, &e, INSERT);
/* Conversion entries record both the to-mode and from-mode.  */
5983 *slot = ggc_alloc (sizeof (struct libfunc_entry));
5984 (*slot)->optab = (size_t) (convert_optab_table[0] - optable);
5985 (*slot)->mode1 = tmode;
5986 (*slot)->mode2 = fmode;
5987 (*slot)->libfunc = val;
5990 /* Call this to initialize the contents of the optabs
5991 appropriately for the current target machine. */
/* NOTE(review): the function declarator, loop-index declarations, and
   many braces/blank lines were elided in this extraction; the visible
   body is the sequence of table initializations only.  */
5997 enum machine_mode int_mode;
/* The libfunc hash maps (optab, mode[, mode]) keys to SYMBOL_REFs; it
   is GC-allocated so entries survive collection.  */
5999 libfunc_hash = htab_create_ggc (10, hash_libfunc, eq_libfunc, NULL);
6000 /* Start by initializing all tables to contain CODE_FOR_nothing. */
6002 for (i = 0; i < NUM_RTX_CODE; i++)
6003 setcc_gen_code[i] = CODE_FOR_nothing;
6005 #ifdef HAVE_conditional_move
6006 for (i = 0; i < NUM_MACHINE_MODES; i++)
6007 movcc_gen_code[i] = CODE_FOR_nothing;
6010 for (i = 0; i < NUM_MACHINE_MODES; i++)
6012 vcond_gen_code[i] = CODE_FOR_nothing;
6013 vcondu_gen_code[i] = CODE_FOR_nothing;
/* Scalar arithmetic and logical optabs; init_optab records the rtx
   code each optab corresponds to (UNKNOWN when there is none).  */
6016 add_optab = init_optab (PLUS);
6017 addv_optab = init_optabv (PLUS);
6018 sub_optab = init_optab (MINUS);
6019 subv_optab = init_optabv (MINUS);
6020 smul_optab = init_optab (MULT);
6021 smulv_optab = init_optabv (MULT);
6022 smul_highpart_optab = init_optab (UNKNOWN);
6023 umul_highpart_optab = init_optab (UNKNOWN);
6024 smul_widen_optab = init_optab (UNKNOWN);
6025 umul_widen_optab = init_optab (UNKNOWN);
6026 usmul_widen_optab = init_optab (UNKNOWN);
6027 smadd_widen_optab = init_optab (UNKNOWN);
6028 umadd_widen_optab = init_optab (UNKNOWN);
6029 smsub_widen_optab = init_optab (UNKNOWN);
6030 umsub_widen_optab = init_optab (UNKNOWN);
6031 sdiv_optab = init_optab (DIV);
6032 sdivv_optab = init_optabv (DIV);
6033 sdivmod_optab = init_optab (UNKNOWN);
6034 udiv_optab = init_optab (UDIV);
6035 udivmod_optab = init_optab (UNKNOWN);
6036 smod_optab = init_optab (MOD);
6037 umod_optab = init_optab (UMOD);
6038 fmod_optab = init_optab (UNKNOWN);
6039 remainder_optab = init_optab (UNKNOWN);
6040 ftrunc_optab = init_optab (UNKNOWN);
6041 and_optab = init_optab (AND);
6042 ior_optab = init_optab (IOR);
6043 xor_optab = init_optab (XOR);
6044 ashl_optab = init_optab (ASHIFT);
6045 ashr_optab = init_optab (ASHIFTRT);
6046 lshr_optab = init_optab (LSHIFTRT);
6047 rotl_optab = init_optab (ROTATE);
6048 rotr_optab = init_optab (ROTATERT);
6049 smin_optab = init_optab (SMIN);
6050 smax_optab = init_optab (SMAX);
6051 umin_optab = init_optab (UMIN);
6052 umax_optab = init_optab (UMAX);
6053 pow_optab = init_optab (UNKNOWN);
6054 atan2_optab = init_optab (UNKNOWN);
6056 /* These three have codes assigned exclusively for the sake of
6058 mov_optab = init_optab (SET);
6059 movstrict_optab = init_optab (STRICT_LOW_PART);
6060 cmp_optab = init_optab (COMPARE);
6062 storent_optab = init_optab (UNKNOWN);
6064 ucmp_optab = init_optab (UNKNOWN);
6065 tst_optab = init_optab (UNKNOWN);
/* Comparison-code optabs.  */
6067 eq_optab = init_optab (EQ);
6068 ne_optab = init_optab (NE);
6069 gt_optab = init_optab (GT);
6070 ge_optab = init_optab (GE);
6071 lt_optab = init_optab (LT);
6072 le_optab = init_optab (LE);
6073 unord_optab = init_optab (UNORDERED);
/* Unary operations and math builtins.  */
6075 neg_optab = init_optab (NEG);
6076 negv_optab = init_optabv (NEG);
6077 abs_optab = init_optab (ABS);
6078 absv_optab = init_optabv (ABS);
6079 addcc_optab = init_optab (UNKNOWN);
6080 one_cmpl_optab = init_optab (NOT);
6081 bswap_optab = init_optab (BSWAP);
6082 ffs_optab = init_optab (FFS);
6083 clz_optab = init_optab (CLZ);
6084 ctz_optab = init_optab (CTZ);
6085 popcount_optab = init_optab (POPCOUNT);
6086 parity_optab = init_optab (PARITY);
6087 sqrt_optab = init_optab (SQRT);
6088 floor_optab = init_optab (UNKNOWN);
6089 ceil_optab = init_optab (UNKNOWN);
6090 round_optab = init_optab (UNKNOWN);
6091 btrunc_optab = init_optab (UNKNOWN);
6092 nearbyint_optab = init_optab (UNKNOWN);
6093 rint_optab = init_optab (UNKNOWN);
6094 sincos_optab = init_optab (UNKNOWN);
6095 sin_optab = init_optab (UNKNOWN);
6096 asin_optab = init_optab (UNKNOWN);
6097 cos_optab = init_optab (UNKNOWN);
6098 acos_optab = init_optab (UNKNOWN);
6099 exp_optab = init_optab (UNKNOWN);
6100 exp10_optab = init_optab (UNKNOWN);
6101 exp2_optab = init_optab (UNKNOWN);
6102 expm1_optab = init_optab (UNKNOWN);
6103 ldexp_optab = init_optab (UNKNOWN);
6104 scalb_optab = init_optab (UNKNOWN);
6105 logb_optab = init_optab (UNKNOWN);
6106 ilogb_optab = init_optab (UNKNOWN);
6107 log_optab = init_optab (UNKNOWN);
6108 log10_optab = init_optab (UNKNOWN);
6109 log2_optab = init_optab (UNKNOWN);
6110 log1p_optab = init_optab (UNKNOWN);
6111 tan_optab = init_optab (UNKNOWN);
6112 atan_optab = init_optab (UNKNOWN);
6113 copysign_optab = init_optab (UNKNOWN);
6114 signbit_optab = init_optab (UNKNOWN);
6116 isinf_optab = init_optab (UNKNOWN);
6118 strlen_optab = init_optab (UNKNOWN);
6119 cbranch_optab = init_optab (UNKNOWN);
6120 cmov_optab = init_optab (UNKNOWN);
6121 cstore_optab = init_optab (UNKNOWN);
6122 push_optab = init_optab (UNKNOWN);
/* Vector reduction and widening-sum optabs.  */
6124 reduc_smax_optab = init_optab (UNKNOWN);
6125 reduc_umax_optab = init_optab (UNKNOWN);
6126 reduc_smin_optab = init_optab (UNKNOWN);
6127 reduc_umin_optab = init_optab (UNKNOWN);
6128 reduc_splus_optab = init_optab (UNKNOWN);
6129 reduc_uplus_optab = init_optab (UNKNOWN);
6131 ssum_widen_optab = init_optab (UNKNOWN);
6132 usum_widen_optab = init_optab (UNKNOWN);
6133 sdot_prod_optab = init_optab (UNKNOWN);
6134 udot_prod_optab = init_optab (UNKNOWN);
/* Vector shuffle/pack/unpack optabs.  */
6136 vec_extract_optab = init_optab (UNKNOWN);
6137 vec_extract_even_optab = init_optab (UNKNOWN);
6138 vec_extract_odd_optab = init_optab (UNKNOWN);
6139 vec_interleave_high_optab = init_optab (UNKNOWN);
6140 vec_interleave_low_optab = init_optab (UNKNOWN);
6141 vec_set_optab = init_optab (UNKNOWN);
6142 vec_init_optab = init_optab (UNKNOWN);
6143 vec_shl_optab = init_optab (UNKNOWN);
6144 vec_shr_optab = init_optab (UNKNOWN);
6145 vec_realign_load_optab = init_optab (UNKNOWN);
6146 movmisalign_optab = init_optab (UNKNOWN);
6147 vec_widen_umult_hi_optab = init_optab (UNKNOWN);
6148 vec_widen_umult_lo_optab = init_optab (UNKNOWN);
6149 vec_widen_smult_hi_optab = init_optab (UNKNOWN);
6150 vec_widen_smult_lo_optab = init_optab (UNKNOWN);
6151 vec_unpacks_hi_optab = init_optab (UNKNOWN);
6152 vec_unpacks_lo_optab = init_optab (UNKNOWN);
6153 vec_unpacku_hi_optab = init_optab (UNKNOWN);
6154 vec_unpacku_lo_optab = init_optab (UNKNOWN);
6155 vec_unpacks_float_hi_optab = init_optab (UNKNOWN);
6156 vec_unpacks_float_lo_optab = init_optab (UNKNOWN);
6157 vec_unpacku_float_hi_optab = init_optab (UNKNOWN);
6158 vec_unpacku_float_lo_optab = init_optab (UNKNOWN);
6159 vec_pack_trunc_optab = init_optab (UNKNOWN);
6160 vec_pack_usat_optab = init_optab (UNKNOWN);
6161 vec_pack_ssat_optab = init_optab (UNKNOWN);
6162 vec_pack_ufix_trunc_optab = init_optab (UNKNOWN);
6163 vec_pack_sfix_trunc_optab = init_optab (UNKNOWN);
6165 powi_optab = init_optab (UNKNOWN);
/* Conversion optabs (mode-to-mode), keyed by two machine modes.  */
6168 sext_optab = init_convert_optab (SIGN_EXTEND);
6169 zext_optab = init_convert_optab (ZERO_EXTEND);
6170 trunc_optab = init_convert_optab (TRUNCATE);
6171 sfix_optab = init_convert_optab (FIX);
6172 ufix_optab = init_convert_optab (UNSIGNED_FIX);
6173 sfixtrunc_optab = init_convert_optab (UNKNOWN);
6174 ufixtrunc_optab = init_convert_optab (UNKNOWN);
6175 sfloat_optab = init_convert_optab (FLOAT);
6176 ufloat_optab = init_convert_optab (UNSIGNED_FLOAT);
6177 lrint_optab = init_convert_optab (UNKNOWN);
6178 lround_optab = init_convert_optab (UNKNOWN);
6179 lfloor_optab = init_convert_optab (UNKNOWN);
6180 lceil_optab = init_convert_optab (UNKNOWN);
/* Per-mode insn-code tables: block ops, string ops and sync/atomic
   patterns all start out unsupported.  */
6182 for (i = 0; i < NUM_MACHINE_MODES; i++)
6184 movmem_optab[i] = CODE_FOR_nothing;
6185 cmpstr_optab[i] = CODE_FOR_nothing;
6186 cmpstrn_optab[i] = CODE_FOR_nothing;
6187 cmpmem_optab[i] = CODE_FOR_nothing;
6188 setmem_optab[i] = CODE_FOR_nothing;
6190 sync_add_optab[i] = CODE_FOR_nothing;
6191 sync_sub_optab[i] = CODE_FOR_nothing;
6192 sync_ior_optab[i] = CODE_FOR_nothing;
6193 sync_and_optab[i] = CODE_FOR_nothing;
6194 sync_xor_optab[i] = CODE_FOR_nothing;
6195 sync_nand_optab[i] = CODE_FOR_nothing;
6196 sync_old_add_optab[i] = CODE_FOR_nothing;
6197 sync_old_sub_optab[i] = CODE_FOR_nothing;
6198 sync_old_ior_optab[i] = CODE_FOR_nothing;
6199 sync_old_and_optab[i] = CODE_FOR_nothing;
6200 sync_old_xor_optab[i] = CODE_FOR_nothing;
6201 sync_old_nand_optab[i] = CODE_FOR_nothing;
6202 sync_new_add_optab[i] = CODE_FOR_nothing;
6203 sync_new_sub_optab[i] = CODE_FOR_nothing;
6204 sync_new_ior_optab[i] = CODE_FOR_nothing;
6205 sync_new_and_optab[i] = CODE_FOR_nothing;
6206 sync_new_xor_optab[i] = CODE_FOR_nothing;
6207 sync_new_nand_optab[i] = CODE_FOR_nothing;
6208 sync_compare_and_swap[i] = CODE_FOR_nothing;
6209 sync_compare_and_swap_cc[i] = CODE_FOR_nothing;
6210 sync_lock_test_and_set[i] = CODE_FOR_nothing;
6211 sync_lock_release[i] = CODE_FOR_nothing;
6213 reload_in_optab[i] = reload_out_optab[i] = CODE_FOR_nothing;
6216 /* Fill in the optabs with the insns we support. */
6219 /* Initialize the optabs with the names of the library functions. */
/* Each optab gets a libcall base name, an operand-count suffix
   ('2'/'3'/'4'), and a generator that builds per-mode names.  */
6220 add_optab->libcall_basename = "add";
6221 add_optab->libcall_suffix = '3';
6222 add_optab->libcall_gen = gen_int_fp_libfunc;
6223 addv_optab->libcall_basename = "add";
6224 addv_optab->libcall_suffix = '3';
6225 addv_optab->libcall_gen = gen_intv_fp_libfunc;
6226 sub_optab->libcall_basename = "sub";
6227 sub_optab->libcall_suffix = '3';
6228 sub_optab->libcall_gen = gen_int_fp_libfunc;
6229 subv_optab->libcall_basename = "sub";
6230 subv_optab->libcall_suffix = '3';
6231 subv_optab->libcall_gen = gen_intv_fp_libfunc;
6232 smul_optab->libcall_basename = "mul";
6233 smul_optab->libcall_suffix = '3';
6234 smul_optab->libcall_gen = gen_int_fp_libfunc;
6235 smulv_optab->libcall_basename = "mul";
6236 smulv_optab->libcall_suffix = '3';
6237 smulv_optab->libcall_gen = gen_intv_fp_libfunc;
6238 sdiv_optab->libcall_basename = "div";
6239 sdiv_optab->libcall_suffix = '3';
6240 sdiv_optab->libcall_gen = gen_int_fp_libfunc;
6241 sdivv_optab->libcall_basename = "divv";
6242 sdivv_optab->libcall_suffix = '3';
6243 sdivv_optab->libcall_gen = gen_int_libfunc;
6244 udiv_optab->libcall_basename = "udiv";
6245 udiv_optab->libcall_suffix = '3';
6246 udiv_optab->libcall_gen = gen_int_libfunc;
6247 sdivmod_optab->libcall_basename = "divmod";
6248 sdivmod_optab->libcall_suffix = '4';
6249 sdivmod_optab->libcall_gen = gen_int_libfunc;
6250 udivmod_optab->libcall_basename = "udivmod";
6251 udivmod_optab->libcall_suffix = '4';
6252 udivmod_optab->libcall_gen = gen_int_libfunc;
6253 smod_optab->libcall_basename = "mod";
6254 smod_optab->libcall_suffix = '3';
6255 smod_optab->libcall_gen = gen_int_libfunc;
6256 umod_optab->libcall_basename = "umod";
6257 umod_optab->libcall_suffix = '3';
6258 umod_optab->libcall_gen = gen_int_libfunc;
6259 ftrunc_optab->libcall_basename = "ftrunc";
6260 ftrunc_optab->libcall_suffix = '2';
6261 ftrunc_optab->libcall_gen = gen_fp_libfunc;
6262 and_optab->libcall_basename = "and";
6263 and_optab->libcall_suffix = '3';
6264 and_optab->libcall_gen = gen_int_libfunc;
6265 ior_optab->libcall_basename = "ior";
6266 ior_optab->libcall_suffix = '3';
6267 ior_optab->libcall_gen = gen_int_libfunc;
6268 xor_optab->libcall_basename = "xor";
6269 xor_optab->libcall_suffix = '3';
6270 xor_optab->libcall_gen = gen_int_libfunc;
6271 ashl_optab->libcall_basename = "ashl";
6272 ashl_optab->libcall_suffix = '3';
6273 ashl_optab->libcall_gen = gen_int_libfunc;
6274 ashr_optab->libcall_basename = "ashr";
6275 ashr_optab->libcall_suffix = '3';
6276 ashr_optab->libcall_gen = gen_int_libfunc;
6277 lshr_optab->libcall_basename = "lshr";
6278 lshr_optab->libcall_suffix = '3';
6279 lshr_optab->libcall_gen = gen_int_libfunc;
6280 smin_optab->libcall_basename = "min";
6281 smin_optab->libcall_suffix = '3';
6282 smin_optab->libcall_gen = gen_int_fp_libfunc;
6283 smax_optab->libcall_basename = "max";
6284 smax_optab->libcall_suffix = '3';
6285 smax_optab->libcall_gen = gen_int_fp_libfunc;
6286 umin_optab->libcall_basename = "umin";
6287 umin_optab->libcall_suffix = '3';
6288 umin_optab->libcall_gen = gen_int_libfunc;
6289 umax_optab->libcall_basename = "umax";
6290 umax_optab->libcall_suffix = '3';
6291 umax_optab->libcall_gen = gen_int_libfunc;
6292 neg_optab->libcall_basename = "neg";
6293 neg_optab->libcall_suffix = '2';
6294 neg_optab->libcall_gen = gen_int_fp_libfunc;
6295 negv_optab->libcall_basename = "neg";
6296 negv_optab->libcall_suffix = '2';
6297 negv_optab->libcall_gen = gen_intv_fp_libfunc;
6298 one_cmpl_optab->libcall_basename = "one_cmpl";
6299 one_cmpl_optab->libcall_suffix = '2';
6300 one_cmpl_optab->libcall_gen = gen_int_libfunc;
6301 ffs_optab->libcall_basename = "ffs";
6302 ffs_optab->libcall_suffix = '2';
6303 ffs_optab->libcall_gen = gen_int_libfunc;
6304 clz_optab->libcall_basename = "clz";
6305 clz_optab->libcall_suffix = '2';
6306 clz_optab->libcall_gen = gen_int_libfunc;
6307 ctz_optab->libcall_basename = "ctz";
6308 ctz_optab->libcall_suffix = '2';
6309 ctz_optab->libcall_gen = gen_int_libfunc;
6310 popcount_optab->libcall_basename = "popcount";
6311 popcount_optab->libcall_suffix = '2';
6312 popcount_optab->libcall_gen = gen_int_libfunc;
6313 parity_optab->libcall_basename = "parity";
6314 parity_optab->libcall_suffix = '2';
6315 parity_optab->libcall_gen = gen_int_libfunc;
6317 /* Comparison libcalls for integers MUST come in pairs,
6319 cmp_optab->libcall_basename = "cmp";
6320 cmp_optab->libcall_suffix = '2';
6321 cmp_optab->libcall_gen = gen_int_fp_libfunc;
6322 ucmp_optab->libcall_basename = "ucmp";
6323 ucmp_optab->libcall_suffix = '2';
6324 ucmp_optab->libcall_gen = gen_int_libfunc;
6326 /* EQ etc are floating point only. */
6327 eq_optab->libcall_basename = "eq";
6328 eq_optab->libcall_suffix = '2';
6329 eq_optab->libcall_gen = gen_fp_libfunc;
6330 ne_optab->libcall_basename = "ne";
6331 ne_optab->libcall_suffix = '2';
6332 ne_optab->libcall_gen = gen_fp_libfunc;
6333 gt_optab->libcall_basename = "gt";
6334 gt_optab->libcall_suffix = '2';
6335 gt_optab->libcall_gen = gen_fp_libfunc;
6336 ge_optab->libcall_basename = "ge";
6337 ge_optab->libcall_suffix = '2';
6338 ge_optab->libcall_gen = gen_fp_libfunc;
6339 lt_optab->libcall_basename = "lt";
6340 lt_optab->libcall_suffix = '2';
6341 lt_optab->libcall_gen = gen_fp_libfunc;
6342 le_optab->libcall_basename = "le";
6343 le_optab->libcall_suffix = '2';
6344 le_optab->libcall_gen = gen_fp_libfunc;
6345 unord_optab->libcall_basename = "unord";
6346 unord_optab->libcall_suffix = '2';
6347 unord_optab->libcall_gen = gen_fp_libfunc;
6349 powi_optab->libcall_basename = "powi";
6350 powi_optab->libcall_suffix = '2';
6351 powi_optab->libcall_gen = gen_fp_libfunc;
/* Conversion libcall naming.  */
6354 sfloat_optab->libcall_basename = "float";
6355 sfloat_optab->libcall_gen = gen_int_to_fp_conv_libfunc;
6356 ufloat_optab->libcall_gen = gen_ufloat_conv_libfunc;
6357 sfix_optab->libcall_basename = "fix";
6358 sfix_optab->libcall_gen = gen_fp_to_int_conv_libfunc;
6359 ufix_optab->libcall_basename = "fixuns";
6360 ufix_optab->libcall_gen = gen_fp_to_int_conv_libfunc;
6361 lrint_optab->libcall_basename = "lrint";
6362 lrint_optab->libcall_gen = gen_int_to_fp_nondecimal_conv_libfunc;
6363 lround_optab->libcall_basename = "lround";
6364 lround_optab->libcall_gen = gen_int_to_fp_nondecimal_conv_libfunc;
6365 lfloor_optab->libcall_basename = "lfloor";
6366 lfloor_optab->libcall_gen = gen_int_to_fp_nondecimal_conv_libfunc;
6367 lceil_optab->libcall_basename = "lceil";
6368 lceil_optab->libcall_gen = gen_int_to_fp_nondecimal_conv_libfunc;
6370 /* trunc_optab is also used for FLOAT_EXTEND. */
6371 sext_optab->libcall_basename = "extend";
6372 sext_optab->libcall_gen = gen_extend_conv_libfunc;
6373 trunc_optab->libcall_basename = "trunc";
6374 trunc_optab->libcall_gen = gen_trunc_conv_libfunc;
6376 /* The ffs function operates on `int'. Fall back on it if we do not
6377 have a libgcc2 function for that width. */
6378 if (INT_TYPE_SIZE < BITS_PER_WORD)
6380 int_mode = mode_for_size (INT_TYPE_SIZE, MODE_INT, 0);
6381 set_optab_libfunc (ffs_optab, mode_for_size (INT_TYPE_SIZE, MODE_INT, 0),
6385 /* Explicitly initialize the bswap libfuncs since we need them to be
6386 valid for things other than word_mode. */
6387 set_optab_libfunc (bswap_optab, SImode, "__bswapsi2");
6388 set_optab_libfunc (bswap_optab, DImode, "__bswapdi2");
6390 /* Use cabs for double complex abs, since systems generally have cabs.
6391 Don't define any libcall for float complex, so that cabs will be used. */
6392 if (complex_double_type_node)
6393 set_optab_libfunc (abs_optab, TYPE_MODE (complex_double_type_node), "cabs");
/* Miscellaneous runtime-support libfuncs referenced directly by rtx.  */
6395 abort_libfunc = init_one_libfunc ("abort");
6396 memcpy_libfunc = init_one_libfunc ("memcpy");
6397 memmove_libfunc = init_one_libfunc ("memmove");
6398 memcmp_libfunc = init_one_libfunc ("memcmp");
6399 memset_libfunc = init_one_libfunc ("memset");
6400 setbits_libfunc = init_one_libfunc ("__setbits");
6402 #ifndef DONT_USE_BUILTIN_SETJMP
6403 setjmp_libfunc = init_one_libfunc ("__builtin_setjmp");
6404 longjmp_libfunc = init_one_libfunc ("__builtin_longjmp");
6406 setjmp_libfunc = init_one_libfunc ("setjmp");
6407 longjmp_libfunc = init_one_libfunc ("longjmp");
6409 unwind_sjlj_register_libfunc = init_one_libfunc ("_Unwind_SjLj_Register");
6410 unwind_sjlj_unregister_libfunc
6411 = init_one_libfunc ("_Unwind_SjLj_Unregister");
6413 /* For function entry/exit instrumentation. */
6414 profile_function_entry_libfunc
6415 = init_one_libfunc ("__cyg_profile_func_enter");
6416 profile_function_exit_libfunc
6417 = init_one_libfunc ("__cyg_profile_func_exit");
6419 gcov_flush_libfunc = init_one_libfunc ("__gcov_flush");
/* Reusable placeholder rtx for conditional-trap comparisons; the
   operands/code are filled in by gen_cond_trap.  */
6421 if (HAVE_conditional_trap)
6422 trap_rtx = gen_rtx_fmt_ee (EQ, VOIDmode, NULL_RTX, NULL_RTX);
6424 /* Allow the target to add more libcalls or rename some, etc. */
6425 targetm.init_libfuncs ();
6428 /* Print information about the current contents of the optabs on
/* Debug helper: dumps, to stderr, every registered libcall name for
   both the arithmetic optabs (one mode) and the conversion optabs
   (two modes).  Elided lines include locals, null-checks on L, and
   the mode-name/symbol-name fprintf arguments.  */
6432 debug_optab_libfuncs (void)
6438 /* Dump the arithmetic optabs. */
6439 for (i = 0; i != (int) OTI_MAX; i++)
6440 for (j = 0; j < NUM_MACHINE_MODES; ++j)
6446 l = optab_libfunc (optab_table[i], j);
/* Every recorded libfunc must be a SYMBOL_REF.  */
6449 gcc_assert (GET_CODE (l) == SYMBOL_REF);
6450 fprintf (stderr, "%s\t%s:\t%s\n",
6451 GET_RTX_NAME (o->code),
6457 /* Dump the conversion optabs. */
6458 for (i = 0; i < (int) COI_MAX; ++i)
6459 for (j = 0; j < NUM_MACHINE_MODES; ++j)
6460 for (k = 0; k < NUM_MACHINE_MODES; ++k)
6465 o = convert_optab_table[i];
6466 l = convert_optab_libfunc (o, j, k);
6469 gcc_assert (GET_CODE (l) == SYMBOL_REF);
6470 fprintf (stderr, "%s\t%s\t%s:\t%s\n",
6471 GET_RTX_NAME (o->code),
6480 /* Generate insns to trap with code TCODE if OP1 and OP2 satisfy condition
6481 CODE. Return 0 on failure. */
6484 gen_cond_trap (enum rtx_code code ATTRIBUTE_UNUSED, rtx op1,
6485 rtx op2 ATTRIBUTE_UNUSED, rtx tcode ATTRIBUTE_UNUSED)
6487 enum machine_mode mode = GET_MODE (op1);
6488 enum insn_code icode;
/* Bail out (return 0 — the return statements were elided here) when
   the target has no conditional trap or OP1's mode is unknown.  */
6491 if (!HAVE_conditional_trap)
6494 if (mode == VOIDmode)
/* A compare pattern for MODE is required to set the condition.  */
6497 icode = optab_handler (cmp_optab, mode)->insn_code;
6498 if (icode == CODE_FOR_nothing)
6502 op1 = prepare_operand (icode, op1, 0, mode, mode, 0)
6503 op2 = prepare_operand (icode, op2, 1, mode, mode, 0);
6509 emit_insn (GEN_FCN (icode) (op1, op2));
/* Reuse the shared trap_rtx placeholder (set up in init_optabs),
   stamping it with the requested comparison code.  */
6511 PUT_CODE (trap_rtx, code);
6512 gcc_assert (HAVE_conditional_trap);
6513 insn = gen_conditional_trap (trap_rtx, tcode);
6517 insn = get_insns ();
6524 /* Return rtx code for TCODE. Use UNSIGNEDP to select signed
6525 or unsigned operation code. */
6527 static enum rtx_code
6528 get_rtx_code (enum tree_code tcode, bool unsignedp)
/* Maps tree comparison codes to rtx codes; most of the switch
   (case labels, EQ/NE and the unordered variants) was elided in
   this extraction — only the signed/unsigned relational picks
   remain visible.  */
6540 code = unsignedp ? LTU : LT;
6543 code = unsignedp ? LEU : LE;
6546 code = unsignedp ? GTU : GT;
6549 code = unsignedp ? GEU : GE;
6552 case UNORDERED_EXPR:
6583 /* Return comparison rtx for COND. Use UNSIGNEDP to select signed or
6584 unsigned operators. Do not generate compare instruction. */
6587 vector_compare_rtx (tree cond, bool unsignedp, enum insn_code icode)
6589 enum rtx_code rcode;
6591 rtx rtx_op0, rtx_op1;
6593 /* This is unlikely. While generating VEC_COND_EXPR, auto vectorizer
6594 ensures that condition is a relational operation. */
6595 gcc_assert (COMPARISON_CLASS_P (cond));
6597 rcode = get_rtx_code (TREE_CODE (cond), unsignedp);
6598 t_op0 = TREE_OPERAND (cond, 0);
6599 t_op1 = TREE_OPERAND (cond, 1);
6601 /* Expand operands. */
6602 rtx_op0 = expand_expr (t_op0, NULL_RTX, TYPE_MODE (TREE_TYPE (t_op0)),
6604 rtx_op1 = expand_expr (t_op1, NULL_RTX, TYPE_MODE (TREE_TYPE (t_op1)),
/* Force each operand into a register when it fails the vcond
   pattern's predicate (operands 4 and 5 of the vcond insn).  */
6607 if (!insn_data[icode].operand[4].predicate (rtx_op0, GET_MODE (rtx_op0))
6608 && GET_MODE (rtx_op0) != VOIDmode)
6609 rtx_op0 = force_reg (GET_MODE (rtx_op0), rtx_op0);
6611 if (!insn_data[icode].operand[5].predicate (rtx_op1, GET_MODE (rtx_op1))
6612 && GET_MODE (rtx_op1) != VOIDmode)
6613 rtx_op1 = force_reg (GET_MODE (rtx_op1), rtx_op1);
/* Return the bare comparison rtx; the caller emits the insn.  */
6615 return gen_rtx_fmt_ee (rcode, VOIDmode, rtx_op0, rtx_op1);
6618 /* Return insn code for VEC_COND_EXPR EXPR. */
6620 static inline enum insn_code
6621 get_vcond_icode (tree expr, enum machine_mode mode)
6623 enum insn_code icode = CODE_FOR_nothing;
/* Pick the unsigned (vcondu) or signed (vcond) pattern table
   based on the expression's signedness.  */
6625 if (TYPE_UNSIGNED (TREE_TYPE (expr)))
6626 icode = vcondu_gen_code[mode];
6628 icode = vcond_gen_code[mode];
6632 /* Return TRUE iff, appropriate vector insns are available
6633 for vector cond expr expr in VMODE mode. */
6636 expand_vec_cond_expr_p (tree expr, enum machine_mode vmode)
/* The true/false returns around this test were elided here.  */
6638 if (get_vcond_icode (expr, vmode) == CODE_FOR_nothing)
6643 /* Generate insns for VEC_COND_EXPR. */
6646 expand_vec_cond_expr (tree vec_cond_expr, rtx target)
6648 enum insn_code icode;
6649 rtx comparison, rtx_op1, rtx_op2, cc_op0, cc_op1;
6650 enum machine_mode mode = TYPE_MODE (TREE_TYPE (vec_cond_expr));
6651 bool unsignedp = TYPE_UNSIGNED (TREE_TYPE (vec_cond_expr));
6653 icode = get_vcond_icode (vec_cond_expr, mode);
6654 if (icode == CODE_FOR_nothing)
/* Allocate a result register if TARGET is absent or unusable for
   the pattern.  */
6657 if (!target || !insn_data[icode].operand[0].predicate (target, mode))
6658 target = gen_reg_rtx (mode);
6660 /* Get comparison rtx. First expand both cond expr operands. */
6661 comparison = vector_compare_rtx (TREE_OPERAND (vec_cond_expr, 0),
6663 cc_op0 = XEXP (comparison, 0);
6664 cc_op1 = XEXP (comparison, 1);
6665 /* Expand both operands and force them in reg, if required. */
6666 rtx_op1 = expand_normal (TREE_OPERAND (vec_cond_expr, 1));
6667 if (!insn_data[icode].operand[1].predicate (rtx_op1, mode)
6668 && mode != VOIDmode)
6669 rtx_op1 = force_reg (mode, rtx_op1);
6671 rtx_op2 = expand_normal (TREE_OPERAND (vec_cond_expr, 2));
6672 if (!insn_data[icode].operand[2].predicate (rtx_op2, mode)
6673 && mode != VOIDmode)
6674 rtx_op2 = force_reg (mode, rtx_op2);
6676 /* Emit instruction! */
6677 emit_insn (GEN_FCN (icode) (target, rtx_op1, rtx_op2,
6678 comparison, cc_op0, cc_op1));
6684 /* This is an internal subroutine of the other compare_and_swap expanders.
6685 MEM, OLD_VAL and NEW_VAL are as you'd expect for a compare-and-swap
6686 operation. TARGET is an optional place to store the value result of
6687 the operation. ICODE is the particular instruction to expand. Return
6688 the result of the operation. */
6691 expand_val_compare_and_swap_1 (rtx mem, rtx old_val, rtx new_val,
6692 rtx target, enum insn_code icode)
6694 enum machine_mode mode = GET_MODE (mem);
6697 if (!target || !insn_data[icode].operand[0].predicate (target, mode))
6698 target = gen_reg_rtx (mode);
/* Widen/narrow OLD_VAL to MEM's mode (unsigned extension), then make
   sure it satisfies the pattern's operand-2 predicate.  */
6700 if (GET_MODE (old_val) != VOIDmode && GET_MODE (old_val) != mode)
6701 old_val = convert_modes (mode, GET_MODE (old_val), old_val, 1);
6702 if (!insn_data[icode].operand[2].predicate (old_val, mode))
6703 old_val = force_reg (mode, old_val);
/* Same treatment for NEW_VAL (operand 3).  */
6705 if (GET_MODE (new_val) != VOIDmode && GET_MODE (new_val) != mode)
6706 new_val = convert_modes (mode, GET_MODE (new_val), new_val, 1);
6707 if (!insn_data[icode].operand[3].predicate (new_val, mode))
6708 new_val = force_reg (mode, new_val);
/* Pattern may fail at expansion time; NULL means caller must fall
   back (the failure return here was elided).  */
6710 insn = GEN_FCN (icode) (target, mem, old_val, new_val);
6711 if (insn == NULL_RTX)
6718 /* Expand a compare-and-swap operation and return its value. */
6721 expand_val_compare_and_swap (rtx mem, rtx old_val, rtx new_val, rtx target)
6723 enum machine_mode mode = GET_MODE (mem);
6724 enum insn_code icode = sync_compare_and_swap[mode];
/* No pattern for this mode: the elided line returns failure.  */
6726 if (icode == CODE_FOR_nothing)
6729 return expand_val_compare_and_swap_1 (mem, old_val, new_val, target, icode);
6732 /* Expand a compare-and-swap operation and store true into the result if
6733 the operation was successful and false otherwise. Return the result.
6734 Unlike other routines, TARGET is not optional. */
6737 expand_bool_compare_and_swap (rtx mem, rtx old_val, rtx new_val, rtx target)
6739 enum machine_mode mode = GET_MODE (mem);
6740 enum insn_code icode;
6741 rtx subtarget, label0, label1;
6743 /* If the target supports a compare-and-swap pattern that simultaneously
6744 sets some flag for success, then use it. Otherwise use the regular
6745 compare-and-swap and follow that immediately with a compare insn. */
6746 icode = sync_compare_and_swap_cc[mode];
/* NOTE(review): this appears to be a switch on ICODE whose `switch`
   header and `default:`/`break` lines were elided.  */
6750 subtarget = expand_val_compare_and_swap_1 (mem, old_val, new_val,
6752 if (subtarget != NULL_RTX)
6756 case CODE_FOR_nothing:
6757 icode = sync_compare_and_swap[mode];
6758 if (icode == CODE_FOR_nothing)
6761 /* Ensure that if old_val == mem, that we're not comparing
6762 against an old value. */
6763 if (MEM_P (old_val))
6764 old_val = force_reg (mode, old_val);
6766 subtarget = expand_val_compare_and_swap_1 (mem, old_val, new_val,
6768 if (subtarget == NULL_RTX)
/* Compare the returned value against the expected one to set the
   condition codes for the setcc/branch sequence below.  */
6771 emit_cmp_insn (subtarget, old_val, EQ, const0_rtx, mode, true);
6774 /* If the target has a sane STORE_FLAG_VALUE, then go ahead and use a
6775 setcc instruction from the beginning. We don't work too hard here,
6776 but it's nice to not be stupid about initial code gen either. */
6777 if (STORE_FLAG_VALUE == 1)
6779 icode = setcc_gen_code[EQ];
6780 if (icode != CODE_FOR_nothing)
6782 enum machine_mode cmode = insn_data[icode].operand[0].mode;
/* The setcc result may have to land in a temp of the pattern's
   preferred mode before being converted into TARGET.  */
6786 if (!insn_data[icode].operand[0].predicate (target, cmode))
6787 subtarget = gen_reg_rtx (cmode);
6789 insn = GEN_FCN (icode) (subtarget);
6793 if (GET_MODE (target) != GET_MODE (subtarget))
/* Zero-extend the flag value into TARGET's mode.  */
6795 convert_move (target, subtarget, 1);
6803 /* Without an appropriate setcc instruction, use a set of branches to
6804 get 1 and 0 stored into target. Presumably if the target has a
6805 STORE_FLAG_VALUE that isn't 1, then this will get cleaned up by ifcvt. */
6807 label0 = gen_label_rtx ();
6808 label1 = gen_label_rtx ();
6810 emit_jump_insn (bcc_gen_fctn[EQ] (label0));
6811 emit_move_insn (target, const0_rtx);
6812 emit_jump_insn (gen_jump (label1));
6814 emit_label (label0);
6815 emit_move_insn (target, const1_rtx);
6816 emit_label (label1);
6821 /* This is a helper function for the other atomic operations. This function
6822 emits a loop that contains SEQ that iterates until a compare-and-swap
6823 operation at the end succeeds. MEM is the memory to be modified. SEQ is
6824 a set of instructions that takes a value from OLD_REG as an input and
6825 produces a value in NEW_REG as an output. Before SEQ, OLD_REG will be
6826 set to the current contents of MEM. After SEQ, a compare-and-swap will
6827 attempt to update MEM with NEW_REG. The function returns true when the
6828 loop was generated successfully. */
6831 expand_compare_and_swap_loop (rtx mem, rtx old_reg, rtx new_reg, rtx seq)
6833 enum machine_mode mode = GET_MODE (mem);
6834 enum insn_code icode;
6835 rtx label, cmp_reg, subtarget;
6837 /* The loop we want to generate looks like
6843 cmp_reg = compare-and-swap(mem, old_reg, new_reg)
6844 if (cmp_reg != old_reg)
6847 Note that we only do the plain load from memory once. Subsequent
6848 iterations use the value loaded by the compare-and-swap pattern. */
6850 label = gen_label_rtx ();
6851 cmp_reg = gen_reg_rtx (mode);
/* Initial plain load; thereafter CMP_REG carries the CAS result.  */
6853 emit_move_insn (cmp_reg, mem);
6855 emit_move_insn (old_reg, cmp_reg);
6859 /* If the target supports a compare-and-swap pattern that simultaneously
6860 sets some flag for success, then use it. Otherwise use the regular
6861 compare-and-swap and follow that immediately with a compare insn. */
6862 icode = sync_compare_and_swap_cc[mode];
/* NOTE(review): switch-on-ICODE structure; the `switch` header and
   `break`/`default` lines were elided in this extraction.  */
6866 subtarget = expand_val_compare_and_swap_1 (mem, old_reg, new_reg,
6868 if (subtarget != NULL_RTX)
/* The _cc pattern was asked to put its result directly in CMP_REG.  */
6870 gcc_assert (subtarget == cmp_reg);
6875 case CODE_FOR_nothing:
6876 icode = sync_compare_and_swap[mode];
6877 if (icode == CODE_FOR_nothing)
6880 subtarget = expand_val_compare_and_swap_1 (mem, old_reg, new_reg,
6882 if (subtarget == NULL_RTX)
6884 if (subtarget != cmp_reg)
6885 emit_move_insn (cmp_reg, subtarget);
/* Set flags for the retry branch below.  */
6887 emit_cmp_insn (cmp_reg, old_reg, EQ, const0_rtx, mode, true);
6890 /* ??? Mark this jump predicted not taken? */
6891 emit_jump_insn (bcc_gen_fctn[NE] (label));
6896 /* This function generates the atomic operation MEM CODE= VAL. In this
6897 case, we do not care about any resulting value. Returns NULL if we
6898 cannot generate the operation. */
6901 expand_sync_operation (rtx mem, rtx val, enum rtx_code code)
6903 enum machine_mode mode = GET_MODE (mem);
6904 enum insn_code icode;
6907 /* Look to see if the target supports the operation directly. */
/* NOTE(review): this is a switch on CODE; the `switch` header, case
   labels (PLUS/IOR/XOR/AND/NOT/MINUS) and `break`s were elided.  */
6911 icode = sync_add_optab[mode];
6914 icode = sync_ior_optab[mode];
6917 icode = sync_xor_optab[mode];
6920 icode = sync_and_optab[mode];
6923 icode = sync_nand_optab[mode];
/* MINUS: when no sync_sub pattern exists, rewrite as an atomic add
   of the negated value.  */
6927 icode = sync_sub_optab[mode];
6928 if (icode == CODE_FOR_nothing)
6930 icode = sync_add_optab[mode];
6931 if (icode != CODE_FOR_nothing)
6933 val = expand_simple_unop (mode, NEG, val, NULL_RTX, 1);
6943 /* Generate the direct operation, if present. */
6944 if (icode != CODE_FOR_nothing)
6946 if (GET_MODE (val) != VOIDmode && GET_MODE (val) != mode)
6947 val = convert_modes (mode, GET_MODE (val), val, 1);
6948 if (!insn_data[icode].operand[1].predicate (val, mode))
6949 val = force_reg (mode, val);
6951 insn = GEN_FCN (icode) (mem, val);
6959 /* Failing that, generate a compare-and-swap loop in which we perform the
6960 operation with normal arithmetic instructions. */
6961 if (sync_compare_and_swap[mode] != CODE_FOR_nothing)
6963 rtx t0 = gen_reg_rtx (mode), t1;
/* NAND-style ops invert first; then apply CODE with ordinary
   arithmetic and hand the loop body to the CAS-loop helper.  */
6970 t1 = expand_simple_unop (mode, NOT, t1, NULL_RTX, true);
6973 t1 = expand_simple_binop (mode, code, t1, val, NULL_RTX,
6974 true, OPTAB_LIB_WIDEN);
6976 insn = get_insns ();
6979 if (t1 != NULL && expand_compare_and_swap_loop (mem, t0, t1, insn))
6986 /* This function generates the atomic operation MEM CODE= VAL. In this
6987 case, we do care about the resulting value: if AFTER is true then
6988 return the value MEM holds after the operation, if AFTER is false
6989 then return the value MEM holds before the operation. TARGET is an
6990 optional place for the result value to be stored. */
/* NOTE(review): the return type is not visible in this excerpt --
   presumably returns an rtx holding the requested old/new value, or
   NULL on failure; confirm against the complete file.  */
6993 expand_sync_fetch_operation (rtx mem, rtx val, enum rtx_code code,
6994 bool after, rtx target)
6996 enum machine_mode mode = GET_MODE (mem);
6997 enum insn_code old_code, new_code, icode;
7001 /* Look to see if the target supports the operation directly. */
/* NOTE(review): the switch on CODE and its case labels are elided in
   this excerpt; each arm below picks up the target's "return old
   value" (sync_old_*) and "return new value" (sync_new_*) patterns
   for the current mode.  */
7005 old_code = sync_old_add_optab[mode];
7006 new_code = sync_new_add_optab[mode];
7009 old_code = sync_old_ior_optab[mode];
7010 new_code = sync_new_ior_optab[mode];
7013 old_code = sync_old_xor_optab[mode];
7014 new_code = sync_new_xor_optab[mode];
7017 old_code = sync_old_and_optab[mode];
7018 new_code = sync_new_and_optab[mode];
7021 old_code = sync_old_nand_optab[mode];
7022 new_code = sync_new_nand_optab[mode];
/* MINUS: if the target has no fetch-and-sub pattern in either form,
   negate VAL and fall back to fetch-and-add.  (The accompanying
   "code = PLUS" adjustment is presumably among the elided lines --
   confirm against the full file.)  */
7026 old_code = sync_old_sub_optab[mode];
7027 new_code = sync_new_sub_optab[mode];
7028 if (old_code == CODE_FOR_nothing && new_code == CODE_FOR_nothing)
7030 old_code = sync_old_add_optab[mode];
7031 new_code = sync_new_add_optab[mode];
7032 if (old_code != CODE_FOR_nothing || new_code != CODE_FOR_nothing)
7034 val = expand_simple_unop (mode, NEG, val, NULL_RTX, 1);
7044 /* If the target does support the proper new/old operation, great. But
7045 if we only support the opposite old/new operation, check to see if we
7046 can compensate. In the case in which the old value is supported, then
7047 we can always perform the operation again with normal arithmetic. In
7048 the case in which the new value is supported, then we can only handle
7049 this in the case the operation is reversible. */
/* NOTE(review): the assignments choosing ICODE from old_code/new_code
   (keyed on AFTER) are elided here.  PLUS, MINUS and XOR are the
   reversible codes: given one of {old value, new value} and VAL the
   other can be recomputed, so the opposite-flavor pattern is usable.  */
7054 if (icode == CODE_FOR_nothing)
7057 if (icode != CODE_FOR_nothing)
7064 if (icode == CODE_FOR_nothing
7065 && (code == PLUS || code == MINUS || code == XOR))
7068 if (icode != CODE_FOR_nothing)
7073 /* If we found something supported, great. */
7074 if (icode != CODE_FOR_nothing)
/* Legitimize TARGET and VAL against the chosen pattern's operand
   predicates (operand 0 is the result, operand 2 is the value).  */
7076 if (!target || !insn_data[icode].operand[0].predicate (target, mode))
7077 target = gen_reg_rtx (mode);
7079 if (GET_MODE (val) != VOIDmode && GET_MODE (val) != mode)
7080 val = convert_modes (mode, GET_MODE (val), val, 1);
7081 if (!insn_data[icode].operand[2].predicate (val, mode))
7082 val = force_reg (mode, val);
7084 insn = GEN_FCN (icode) (target, mem, val);
7089 /* If we need to compensate for using an operation with the
7090 wrong return value, do so now. */
/* NOTE(review): the branches preceding this MINUS arm (e.g. the PLUS
   and NAND compensation paths) are elided in this excerpt.  For the
   visible path, TARGET is recomputed with ordinary arithmetic from
   the value the insn actually returned.  */
7097 else if (code == MINUS)
7102 target = expand_simple_unop (mode, NOT, target, NULL_RTX, true);
7103 target = expand_simple_binop (mode, code, target, val, NULL_RTX,
7104 true, OPTAB_LIB_WIDEN);
7111 /* Failing that, generate a compare-and-swap loop in which we perform the
7112 operation with normal arithmetic instructions. */
7113 if (sync_compare_and_swap[mode] != CODE_FOR_nothing)
7115 rtx t0 = gen_reg_rtx (mode), t1;
7117 if (!target || !register_operand (target, mode))
7118 target = gen_reg_rtx (mode);
/* T0 is the value observed in MEM (its load is elided); copying it
   to TARGET here presumably serves the !AFTER (fetch-before) case --
   confirm against the full file.  */
7123 emit_move_insn (target, t0);
7127 t1 = expand_simple_unop (mode, NOT, t1, NULL_RTX, true);
7130 t1 = expand_simple_binop (mode, code, t1, val, NULL_RTX,
7131 true, OPTAB_LIB_WIDEN);
/* T1 is the freshly computed new value; copying it to TARGET
   presumably serves the AFTER (fetch-after) case.  */
7133 emit_move_insn (target, t1);
7135 insn = get_insns ();
7138 if (t1 != NULL && expand_compare_and_swap_loop (mem, t0, t1, insn))
7145 /* This function expands a test-and-set operation. Ideally we atomically
7146 store VAL in MEM and return the previous value in MEM. Some targets
7147 may not support this operation and only support VAL with the constant 1;
7148 in this case the return value will be 0/1, but the exact value
7149 stored in MEM is target defined. TARGET is an optional place to stick
7150 the return value. */
/* NOTE(review): the return type and the declaration of INSN are not
   visible in this excerpt -- presumably returns the rtx holding the
   previous value, or NULL when neither expansion is available;
   confirm against the complete file.  */
7153 expand_sync_lock_test_and_set (rtx mem, rtx val, rtx target)
7155 enum machine_mode mode = GET_MODE (mem);
7156 enum insn_code icode;
7159 /* If the target supports the test-and-set directly, great. */
7160 icode = sync_lock_test_and_set[mode];
7161 if (icode != CODE_FOR_nothing)
/* Legitimize TARGET and VAL for the pattern's operand predicates
   (operand 0 is the result, operand 2 is the stored value), widening
   VAL to the memory mode if it arrived narrower.  */
7163 if (!target || !insn_data[icode].operand[0].predicate (target, mode))
7164 target = gen_reg_rtx (mode);
7166 if (GET_MODE (val) != VOIDmode && GET_MODE (val) != mode)
7167 val = convert_modes (mode, GET_MODE (val), val, 1);
7168 if (!insn_data[icode].operand[2].predicate (val, mode))
7169 val = force_reg (mode, val);
7171 insn = GEN_FCN (icode) (target, mem, val);
7179 /* Otherwise, use a compare-and-swap loop for the exchange. */
/* The loop stores VAL unconditionally; passing NULL_RTX as the new
   value computed per-iteration means a plain exchange.  */
7180 if (sync_compare_and_swap[mode] != CODE_FOR_nothing)
7182 if (!target || !register_operand (target, mode))
7183 target = gen_reg_rtx (mode)
7184 if (GET_MODE (val) != VOIDmode && GET_MODE (val) != mode)
7185 val = convert_modes (mode, GET_MODE (val), val, 1);
7186 if (expand_compare_and_swap_loop (mem, target, val, NULL_RTX))
7193 #include "gt-optabs.h"