1 /* Expand the basic unary and binary arithmetic operations, for GNU compiler.
2 Copyright (C) 1987, 1988, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
3 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006 Free Software Foundation, Inc.
5 This file is part of GCC.
7 GCC is free software; you can redistribute it and/or modify it under
8 the terms of the GNU General Public License as published by the Free
9 Software Foundation; either version 2, or (at your option) any later
12 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
13 WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING. If not, write to the Free
19 Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
25 #include "coretypes.h"
29 /* Include insn-config.h before expr.h so that HAVE_conditional_move
30 is properly defined. */
31 #include "insn-config.h"
45 #include "basic-block.h"
48 /* Each optab contains info on how this target machine
49 can perform a particular operation
50 for all sizes and kinds of operands.
52 The operation to be performed is often specified
53 by passing one of these optabs as an argument.
55 See expr.h for documentation of these optabs. */
57 optab optab_table[OTI_MAX];
59 rtx libfunc_table[LTI_MAX];
61 /* Tables of patterns for converting one mode to another. */
62 convert_optab convert_optab_table[COI_MAX];
64 /* Contains the optab used for each rtx code. */
65 optab code_to_optab[NUM_RTX_CODE + 1];
67 /* Indexed by the rtx-code for a conditional (eg. EQ, LT,...)
68 gives the gen_function to make a branch to test that condition. */
70 rtxfun bcc_gen_fctn[NUM_RTX_CODE];
72 /* Indexed by the rtx-code for a conditional (eg. EQ, LT,...)
73 gives the insn code to make a store-condition insn
74 to test that condition. */
76 enum insn_code setcc_gen_code[NUM_RTX_CODE];
78 #ifdef HAVE_conditional_move
79 /* Indexed by the machine mode, gives the insn code to make a conditional
80 move insn. This is not indexed by the rtx-code like bcc_gen_fctn and
81 setcc_gen_code to cut down on the number of named patterns. Consider a day
82 when a lot more rtx codes are conditional (eg: for the ARM). */
84 enum insn_code movcc_gen_code[NUM_MACHINE_MODES];
87 /* Indexed by the machine mode, gives the insn code for vector conditional
90 enum insn_code vcond_gen_code[NUM_MACHINE_MODES];
91 enum insn_code vcondu_gen_code[NUM_MACHINE_MODES];
93 /* The insn generating function can not take an rtx_code argument.
94 TRAP_RTX is used as an rtx argument. Its code is replaced with
95 the code to be used in the trap insn and all other fields are ignored. */
96 static GTY(()) rtx trap_rtx;
98 static int add_equal_note (rtx, rtx, enum rtx_code, rtx, rtx);
99 static rtx widen_operand (rtx, enum machine_mode, enum machine_mode, int,
101 static void prepare_cmp_insn (rtx *, rtx *, enum rtx_code *, rtx,
102 enum machine_mode *, int *,
103 enum can_compare_purpose);
104 static enum insn_code can_fix_p (enum machine_mode, enum machine_mode, int,
106 static enum insn_code can_float_p (enum machine_mode, enum machine_mode, int);
107 static optab new_optab (void);
108 static convert_optab new_convert_optab (void);
109 static inline optab init_optab (enum rtx_code);
110 static inline optab init_optabv (enum rtx_code);
111 static inline convert_optab init_convert_optab (enum rtx_code);
112 static void init_libfuncs (optab, int, int, const char *, int);
113 static void init_integral_libfuncs (optab, const char *, int);
114 static void init_floating_libfuncs (optab, const char *, int);
115 static void init_interclass_conv_libfuncs (convert_optab, const char *,
116 enum mode_class, enum mode_class);
117 static void init_intraclass_conv_libfuncs (convert_optab, const char *,
118 enum mode_class, bool);
119 static void emit_cmp_and_jump_insn_1 (rtx, rtx, enum machine_mode,
120 enum rtx_code, int, rtx);
121 static void prepare_float_lib_cmp (rtx *, rtx *, enum rtx_code *,
122 enum machine_mode *, int *);
123 static rtx widen_clz (enum machine_mode, rtx, rtx);
124 static rtx expand_parity (enum machine_mode, rtx, rtx);
125 static enum rtx_code get_rtx_code (enum tree_code, bool);
126 static rtx vector_compare_rtx (tree, bool, enum insn_code);
128 #ifndef HAVE_conditional_trap
129 #define HAVE_conditional_trap 0
130 #define gen_conditional_trap(a,b) (gcc_unreachable (), NULL_RTX)
133 /* Add a REG_EQUAL note to the last insn in INSNS. TARGET is being set to
134 the result of operation CODE applied to OP0 (and OP1 if it is a binary
137 If the last insn does not set TARGET, don't do anything, but return 1.
139 If a previous insn sets TARGET and TARGET is one of OP0 or OP1,
140 don't add the REG_EQUAL note but return 0. Our caller can then try
141 again, ensuring that TARGET is not one of the operands. */
144 add_equal_note (rtx insns, rtx target, enum rtx_code code, rtx op0, rtx op1)
146 rtx last_insn, insn, set;
149 gcc_assert (insns && INSN_P (insns) && NEXT_INSN (insns));
151 if (GET_RTX_CLASS (code) != RTX_COMM_ARITH
152 && GET_RTX_CLASS (code) != RTX_BIN_ARITH
153 && GET_RTX_CLASS (code) != RTX_COMM_COMPARE
154 && GET_RTX_CLASS (code) != RTX_COMPARE
155 && GET_RTX_CLASS (code) != RTX_UNARY)
158 if (GET_CODE (target) == ZERO_EXTRACT)
161 for (last_insn = insns;
162 NEXT_INSN (last_insn) != NULL_RTX;
163 last_insn = NEXT_INSN (last_insn))
166 set = single_set (last_insn);
170 if (! rtx_equal_p (SET_DEST (set), target)
171 /* For a STRICT_LOW_PART, the REG_NOTE applies to what is inside it. */
172 && (GET_CODE (SET_DEST (set)) != STRICT_LOW_PART
173 || ! rtx_equal_p (XEXP (SET_DEST (set), 0), target)))
176 /* If TARGET is in OP0 or OP1, check if anything in SEQ sets TARGET
177 besides the last insn. */
178 if (reg_overlap_mentioned_p (target, op0)
179 || (op1 && reg_overlap_mentioned_p (target, op1)))
181 insn = PREV_INSN (last_insn);
182 while (insn != NULL_RTX)
184 if (reg_set_p (target, insn))
187 insn = PREV_INSN (insn);
191 if (GET_RTX_CLASS (code) == RTX_UNARY)
192 note = gen_rtx_fmt_e (code, GET_MODE (target), copy_rtx (op0));
194 note = gen_rtx_fmt_ee (code, GET_MODE (target), copy_rtx (op0), copy_rtx (op1));
196 set_unique_reg_note (last_insn, REG_EQUAL, note);
201 /* Widen OP to MODE and return the rtx for the widened operand. UNSIGNEDP
202 says whether OP is signed or unsigned. NO_EXTEND is nonzero if we need
203 not actually do a sign-extend or zero-extend, but can leave the
204 higher-order bits of the result rtx undefined, for example, in the case
205 of logical operations, but not right shifts. */
208 widen_operand (rtx op, enum machine_mode mode, enum machine_mode oldmode,
209 int unsignedp, int no_extend)
213 /* If we don't have to extend and this is a constant, return it. */
214 if (no_extend && GET_MODE (op) == VOIDmode)
217 /* If we must extend do so. If OP is a SUBREG for a promoted object, also
218 extend since it will be more efficient to do so unless the signedness of
219 a promoted object differs from our extension. */
221 || (GET_CODE (op) == SUBREG && SUBREG_PROMOTED_VAR_P (op)
222 && SUBREG_PROMOTED_UNSIGNED_P (op) == unsignedp))
223 return convert_modes (mode, oldmode, op, unsignedp);
225 /* If MODE is no wider than a single word, we return a paradoxical
227 if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
228 return gen_rtx_SUBREG (mode, force_reg (GET_MODE (op), op), 0);
230 /* Otherwise, get an object of MODE, clobber it, and set the low-order
233 result = gen_reg_rtx (mode);
234 emit_insn (gen_rtx_CLOBBER (VOIDmode, result));
235 emit_move_insn (gen_lowpart (GET_MODE (op), result), op);
239 /* Return the optab used for computing the operation given by
240 the tree code, CODE. This function is not always usable (for
241 example, it cannot give complete results for multiplication
242 or division) but probably ought to be relied on more widely
243 throughout the expander. */
245 optab_for_tree_code (enum tree_code code, tree type)
257 return one_cmpl_optab;
266 return TYPE_UNSIGNED (type) ? umod_optab : smod_optab;
274 return TYPE_UNSIGNED (type) ? udiv_optab : sdiv_optab;
280 return TYPE_UNSIGNED (type) ? lshr_optab : ashr_optab;
289 return TYPE_UNSIGNED (type) ? umax_optab : smax_optab;
292 return TYPE_UNSIGNED (type) ? umin_optab : smin_optab;
294 case REALIGN_LOAD_EXPR:
295 return vec_realign_load_optab;
298 return TYPE_UNSIGNED (type) ? usum_widen_optab : ssum_widen_optab;
301 return TYPE_UNSIGNED (type) ? udot_prod_optab : sdot_prod_optab;
304 return TYPE_UNSIGNED (type) ? reduc_umax_optab : reduc_smax_optab;
307 return TYPE_UNSIGNED (type) ? reduc_umin_optab : reduc_smin_optab;
309 case REDUC_PLUS_EXPR:
310 return TYPE_UNSIGNED (type) ? reduc_uplus_optab : reduc_splus_optab;
312 case VEC_LSHIFT_EXPR:
313 return vec_shl_optab;
315 case VEC_RSHIFT_EXPR:
316 return vec_shr_optab;
318 case VEC_WIDEN_MULT_HI_EXPR:
319 return TYPE_UNSIGNED (type) ?
320 vec_widen_umult_hi_optab : vec_widen_smult_hi_optab;
322 case VEC_WIDEN_MULT_LO_EXPR:
323 return TYPE_UNSIGNED (type) ?
324 vec_widen_umult_lo_optab : vec_widen_smult_lo_optab;
326 case VEC_UNPACK_HI_EXPR:
327 return TYPE_UNSIGNED (type) ?
328 vec_unpacku_hi_optab : vec_unpacks_hi_optab;
330 case VEC_UNPACK_LO_EXPR:
331 return TYPE_UNSIGNED (type) ?
332 vec_unpacku_lo_optab : vec_unpacks_lo_optab;
334 case VEC_PACK_MOD_EXPR:
335 return vec_pack_mod_optab;
337 case VEC_PACK_SAT_EXPR:
338 return TYPE_UNSIGNED (type) ? vec_pack_usat_optab : vec_pack_ssat_optab;
344 trapv = flag_trapv && INTEGRAL_TYPE_P (type) && !TYPE_UNSIGNED (type);
348 return trapv ? addv_optab : add_optab;
351 return trapv ? subv_optab : sub_optab;
354 return trapv ? smulv_optab : smul_optab;
357 return trapv ? negv_optab : neg_optab;
360 return trapv ? absv_optab : abs_optab;
368 /* Expand vector widening operations.
370 There are two different classes of operations handled here:
371 1) Operations whose result is wider than all the arguments to the operation.
372 Examples: VEC_UNPACK_HI/LO_EXPR, VEC_WIDEN_MULT_HI/LO_EXPR
373 In this case OP0 and optionally OP1 would be initialized,
374 but WIDE_OP wouldn't (not relevant for this case).
375 2) Operations whose result is of the same size as the last argument to the
376 operation, but wider than all the other arguments to the operation.
377 Examples: WIDEN_SUM_EXPR, VEC_DOT_PROD_EXPR.
378 In the case WIDE_OP, OP0 and optionally OP1 would be initialized.
380 E.g, when called to expand the following operations, this is how
381 the arguments will be initialized:
383 widening-sum 2 oprnd0 - oprnd1
384 widening-dot-product 3 oprnd0 oprnd1 oprnd2
385 widening-mult 2 oprnd0 oprnd1 -
386 type-promotion (vec-unpack) 1 oprnd0 - - */
389 expand_widen_pattern_expr (tree exp, rtx op0, rtx op1, rtx wide_op, rtx target,
392 tree oprnd0, oprnd1, oprnd2;
393 enum machine_mode wmode = 0, tmode0, tmode1 = 0;
394 optab widen_pattern_optab;
396 enum machine_mode xmode0, xmode1 = 0, wxmode = 0;
399 rtx xop0, xop1, wxop;
400 int nops = TREE_CODE_LENGTH (TREE_CODE (exp));
402 oprnd0 = TREE_OPERAND (exp, 0);
403 tmode0 = TYPE_MODE (TREE_TYPE (oprnd0));
404 widen_pattern_optab =
405 optab_for_tree_code (TREE_CODE (exp), TREE_TYPE (oprnd0));
406 icode = (int) widen_pattern_optab->handlers[(int) tmode0].insn_code;
407 gcc_assert (icode != CODE_FOR_nothing);
408 xmode0 = insn_data[icode].operand[1].mode;
412 oprnd1 = TREE_OPERAND (exp, 1);
413 tmode1 = TYPE_MODE (TREE_TYPE (oprnd1));
414 xmode1 = insn_data[icode].operand[2].mode;
417 /* The last operand is of a wider mode than the rest of the operands. */
425 gcc_assert (tmode1 == tmode0);
427 oprnd2 = TREE_OPERAND (exp, 2);
428 wmode = TYPE_MODE (TREE_TYPE (oprnd2));
429 wxmode = insn_data[icode].operand[3].mode;
433 wmode = wxmode = insn_data[icode].operand[0].mode;
436 || ! (*insn_data[icode].operand[0].predicate) (target, wmode))
437 temp = gen_reg_rtx (wmode);
445 /* In case the insn wants input operands in modes different from
446 those of the actual operands, convert the operands. It would
447 seem that we don't need to convert CONST_INTs, but we do, so
448 that they're properly zero-extended, sign-extended or truncated
451 if (GET_MODE (op0) != xmode0 && xmode0 != VOIDmode)
452 xop0 = convert_modes (xmode0,
453 GET_MODE (op0) != VOIDmode
459 if (GET_MODE (op1) != xmode1 && xmode1 != VOIDmode)
460 xop1 = convert_modes (xmode1,
461 GET_MODE (op1) != VOIDmode
467 if (GET_MODE (wide_op) != wxmode && wxmode != VOIDmode)
468 wxop = convert_modes (wxmode,
469 GET_MODE (wide_op) != VOIDmode
474 /* Now, if insn's predicates don't allow our operands, put them into
477 if (! (*insn_data[icode].operand[1].predicate) (xop0, xmode0)
478 && xmode0 != VOIDmode)
479 xop0 = copy_to_mode_reg (xmode0, xop0);
483 if (! (*insn_data[icode].operand[2].predicate) (xop1, xmode1)
484 && xmode1 != VOIDmode)
485 xop1 = copy_to_mode_reg (xmode1, xop1);
489 if (! (*insn_data[icode].operand[3].predicate) (wxop, wxmode)
490 && wxmode != VOIDmode)
491 wxop = copy_to_mode_reg (wxmode, wxop);
493 pat = GEN_FCN (icode) (temp, xop0, xop1, wxop);
496 pat = GEN_FCN (icode) (temp, xop0, xop1);
502 if (! (*insn_data[icode].operand[2].predicate) (wxop, wxmode)
503 && wxmode != VOIDmode)
504 wxop = copy_to_mode_reg (wxmode, wxop);
506 pat = GEN_FCN (icode) (temp, xop0, wxop);
509 pat = GEN_FCN (icode) (temp, xop0);
516 /* Generate code to perform an operation specified by TERNARY_OPTAB
517 on operands OP0, OP1 and OP2, with result having machine-mode MODE.
519 UNSIGNEDP is for the case where we have to widen the operands
520 to perform the operation. It says to use zero-extension.
522 If TARGET is nonzero, the value
523 is generated there, if it is convenient to do so.
524 In all cases an rtx is returned for the locus of the value;
525 this may or may not be TARGET. */
528 expand_ternary_op (enum machine_mode mode, optab ternary_optab, rtx op0,
529 rtx op1, rtx op2, rtx target, int unsignedp)
531 int icode = (int) ternary_optab->handlers[(int) mode].insn_code;
532 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
533 enum machine_mode mode1 = insn_data[icode].operand[2].mode;
534 enum machine_mode mode2 = insn_data[icode].operand[3].mode;
537 rtx xop0 = op0, xop1 = op1, xop2 = op2;
539 gcc_assert (ternary_optab->handlers[(int) mode].insn_code
540 != CODE_FOR_nothing);
542 if (!target || !insn_data[icode].operand[0].predicate (target, mode))
543 temp = gen_reg_rtx (mode);
547 /* In case the insn wants input operands in modes different from
548 those of the actual operands, convert the operands. It would
549 seem that we don't need to convert CONST_INTs, but we do, so
550 that they're properly zero-extended, sign-extended or truncated
553 if (GET_MODE (op0) != mode0 && mode0 != VOIDmode)
554 xop0 = convert_modes (mode0,
555 GET_MODE (op0) != VOIDmode
560 if (GET_MODE (op1) != mode1 && mode1 != VOIDmode)
561 xop1 = convert_modes (mode1,
562 GET_MODE (op1) != VOIDmode
567 if (GET_MODE (op2) != mode2 && mode2 != VOIDmode)
568 xop2 = convert_modes (mode2,
569 GET_MODE (op2) != VOIDmode
574 /* Now, if insn's predicates don't allow our operands, put them into
577 if (!insn_data[icode].operand[1].predicate (xop0, mode0)
578 && mode0 != VOIDmode)
579 xop0 = copy_to_mode_reg (mode0, xop0);
581 if (!insn_data[icode].operand[2].predicate (xop1, mode1)
582 && mode1 != VOIDmode)
583 xop1 = copy_to_mode_reg (mode1, xop1);
585 if (!insn_data[icode].operand[3].predicate (xop2, mode2)
586 && mode2 != VOIDmode)
587 xop2 = copy_to_mode_reg (mode2, xop2);
589 pat = GEN_FCN (icode) (temp, xop0, xop1, xop2);
596 /* Like expand_binop, but return a constant rtx if the result can be
597 calculated at compile time. The arguments and return value are
598 otherwise the same as for expand_binop. */
601 simplify_expand_binop (enum machine_mode mode, optab binoptab,
602 rtx op0, rtx op1, rtx target, int unsignedp,
603 enum optab_methods methods)
605 if (CONSTANT_P (op0) && CONSTANT_P (op1))
607 rtx x = simplify_binary_operation (binoptab->code, mode, op0, op1);
613 return expand_binop (mode, binoptab, op0, op1, target, unsignedp, methods);
616 /* Like simplify_expand_binop, but always put the result in TARGET.
617 Return true if the expansion succeeded. */
620 force_expand_binop (enum machine_mode mode, optab binoptab,
621 rtx op0, rtx op1, rtx target, int unsignedp,
622 enum optab_methods methods)
624 rtx x = simplify_expand_binop (mode, binoptab, op0, op1,
625 target, unsignedp, methods);
629 emit_move_insn (target, x);
633 /* Generate insns for VEC_LSHIFT_EXPR, VEC_RSHIFT_EXPR. */
636 expand_vec_shift_expr (tree vec_shift_expr, rtx target)
638 enum insn_code icode;
639 rtx rtx_op1, rtx_op2;
640 enum machine_mode mode1;
641 enum machine_mode mode2;
642 enum machine_mode mode = TYPE_MODE (TREE_TYPE (vec_shift_expr));
643 tree vec_oprnd = TREE_OPERAND (vec_shift_expr, 0);
644 tree shift_oprnd = TREE_OPERAND (vec_shift_expr, 1);
648 switch (TREE_CODE (vec_shift_expr))
650 case VEC_RSHIFT_EXPR:
651 shift_optab = vec_shr_optab;
653 case VEC_LSHIFT_EXPR:
654 shift_optab = vec_shl_optab;
660 icode = (int) shift_optab->handlers[(int) mode].insn_code;
661 gcc_assert (icode != CODE_FOR_nothing);
663 mode1 = insn_data[icode].operand[1].mode;
664 mode2 = insn_data[icode].operand[2].mode;
666 rtx_op1 = expand_expr (vec_oprnd, NULL_RTX, VOIDmode, EXPAND_NORMAL);
667 if (!(*insn_data[icode].operand[1].predicate) (rtx_op1, mode1)
668 && mode1 != VOIDmode)
669 rtx_op1 = force_reg (mode1, rtx_op1);
671 rtx_op2 = expand_expr (shift_oprnd, NULL_RTX, VOIDmode, EXPAND_NORMAL);
672 if (!(*insn_data[icode].operand[2].predicate) (rtx_op2, mode2)
673 && mode2 != VOIDmode)
674 rtx_op2 = force_reg (mode2, rtx_op2);
677 || ! (*insn_data[icode].operand[0].predicate) (target, mode))
678 target = gen_reg_rtx (mode);
680 /* Emit instruction */
681 pat = GEN_FCN (icode) (target, rtx_op1, rtx_op2);
688 /* This subroutine of expand_doubleword_shift handles the cases in which
689 the effective shift value is >= BITS_PER_WORD. The arguments and return
690 value are the same as for the parent routine, except that SUPERWORD_OP1
691 is the shift count to use when shifting OUTOF_INPUT into INTO_TARGET.
692 INTO_TARGET may be null if the caller has decided to calculate it. */
695 expand_superword_shift (optab binoptab, rtx outof_input, rtx superword_op1,
696 rtx outof_target, rtx into_target,
697 int unsignedp, enum optab_methods methods)
699 if (into_target != 0)
700 if (!force_expand_binop (word_mode, binoptab, outof_input, superword_op1,
701 into_target, unsignedp, methods))
704 if (outof_target != 0)
706 /* For a signed right shift, we must fill OUTOF_TARGET with copies
707 of the sign bit, otherwise we must fill it with zeros. */
708 if (binoptab != ashr_optab)
709 emit_move_insn (outof_target, CONST0_RTX (word_mode));
711 if (!force_expand_binop (word_mode, binoptab,
712 outof_input, GEN_INT (BITS_PER_WORD - 1),
713 outof_target, unsignedp, methods))
719 /* This subroutine of expand_doubleword_shift handles the cases in which
720 the effective shift value is < BITS_PER_WORD. The arguments and return
721 value are the same as for the parent routine. */
724 expand_subword_shift (enum machine_mode op1_mode, optab binoptab,
725 rtx outof_input, rtx into_input, rtx op1,
726 rtx outof_target, rtx into_target,
727 int unsignedp, enum optab_methods methods,
728 unsigned HOST_WIDE_INT shift_mask)
730 optab reverse_unsigned_shift, unsigned_shift;
733 reverse_unsigned_shift = (binoptab == ashl_optab ? lshr_optab : ashl_optab);
734 unsigned_shift = (binoptab == ashl_optab ? ashl_optab : lshr_optab);
736 /* The low OP1 bits of INTO_TARGET come from the high bits of OUTOF_INPUT.
737 We therefore need to shift OUTOF_INPUT by (BITS_PER_WORD - OP1) bits in
738 the opposite direction to BINOPTAB. */
739 if (CONSTANT_P (op1) || shift_mask >= BITS_PER_WORD)
741 carries = outof_input;
742 tmp = immed_double_const (BITS_PER_WORD, 0, op1_mode);
743 tmp = simplify_expand_binop (op1_mode, sub_optab, tmp, op1,
748 /* We must avoid shifting by BITS_PER_WORD bits since that is either
749 the same as a zero shift (if shift_mask == BITS_PER_WORD - 1) or
750 has unknown behavior. Do a single shift first, then shift by the
751 remainder. It's OK to use ~OP1 as the remainder if shift counts
752 are truncated to the mode size. */
753 carries = expand_binop (word_mode, reverse_unsigned_shift,
754 outof_input, const1_rtx, 0, unsignedp, methods);
755 if (shift_mask == BITS_PER_WORD - 1)
757 tmp = immed_double_const (-1, -1, op1_mode);
758 tmp = simplify_expand_binop (op1_mode, xor_optab, op1, tmp,
763 tmp = immed_double_const (BITS_PER_WORD - 1, 0, op1_mode);
764 tmp = simplify_expand_binop (op1_mode, sub_optab, tmp, op1,
768 if (tmp == 0 || carries == 0)
770 carries = expand_binop (word_mode, reverse_unsigned_shift,
771 carries, tmp, 0, unsignedp, methods);
775 /* Shift INTO_INPUT logically by OP1. This is the last use of INTO_INPUT
776 so the result can go directly into INTO_TARGET if convenient. */
777 tmp = expand_binop (word_mode, unsigned_shift, into_input, op1,
778 into_target, unsignedp, methods);
782 /* Now OR in the bits carried over from OUTOF_INPUT. */
783 if (!force_expand_binop (word_mode, ior_optab, tmp, carries,
784 into_target, unsignedp, methods))
787 /* Use a standard word_mode shift for the out-of half. */
788 if (outof_target != 0)
789 if (!force_expand_binop (word_mode, binoptab, outof_input, op1,
790 outof_target, unsignedp, methods))
797 #ifdef HAVE_conditional_move
798 /* Try implementing expand_doubleword_shift using conditional moves.
799 The shift is by < BITS_PER_WORD if (CMP_CODE CMP1 CMP2) is true,
800 otherwise it is by >= BITS_PER_WORD. SUBWORD_OP1 and SUPERWORD_OP1
801 are the shift counts to use in the former and latter case. All other
802 arguments are the same as the parent routine. */
805 expand_doubleword_shift_condmove (enum machine_mode op1_mode, optab binoptab,
806 enum rtx_code cmp_code, rtx cmp1, rtx cmp2,
807 rtx outof_input, rtx into_input,
808 rtx subword_op1, rtx superword_op1,
809 rtx outof_target, rtx into_target,
810 int unsignedp, enum optab_methods methods,
811 unsigned HOST_WIDE_INT shift_mask)
813 rtx outof_superword, into_superword;
815 /* Put the superword version of the output into OUTOF_SUPERWORD and
817 outof_superword = outof_target != 0 ? gen_reg_rtx (word_mode) : 0;
818 if (outof_target != 0 && subword_op1 == superword_op1)
820 /* The value INTO_TARGET >> SUBWORD_OP1, which we later store in
821 OUTOF_TARGET, is the same as the value of INTO_SUPERWORD. */
822 into_superword = outof_target;
823 if (!expand_superword_shift (binoptab, outof_input, superword_op1,
824 outof_superword, 0, unsignedp, methods))
829 into_superword = gen_reg_rtx (word_mode);
830 if (!expand_superword_shift (binoptab, outof_input, superword_op1,
831 outof_superword, into_superword,
836 /* Put the subword version directly in OUTOF_TARGET and INTO_TARGET. */
837 if (!expand_subword_shift (op1_mode, binoptab,
838 outof_input, into_input, subword_op1,
839 outof_target, into_target,
840 unsignedp, methods, shift_mask))
843 /* Select between them. Do the INTO half first because INTO_SUPERWORD
844 might be the current value of OUTOF_TARGET. */
845 if (!emit_conditional_move (into_target, cmp_code, cmp1, cmp2, op1_mode,
846 into_target, into_superword, word_mode, false))
849 if (outof_target != 0)
850 if (!emit_conditional_move (outof_target, cmp_code, cmp1, cmp2, op1_mode,
851 outof_target, outof_superword,
859 /* Expand a doubleword shift (ashl, ashr or lshr) using word-mode shifts.
860 OUTOF_INPUT and INTO_INPUT are the two word-sized halves of the first
861 input operand; the shift moves bits in the direction OUTOF_INPUT->
862 INTO_TARGET. OUTOF_TARGET and INTO_TARGET are the equivalent words
863 of the target. OP1 is the shift count and OP1_MODE is its mode.
864 If OP1 is constant, it will have been truncated as appropriate
865 and is known to be nonzero.
867 If SHIFT_MASK is zero, the result of word shifts is undefined when the
868 shift count is outside the range [0, BITS_PER_WORD). This routine must
869 avoid generating such shifts for OP1s in the range [0, BITS_PER_WORD * 2).
871 If SHIFT_MASK is nonzero, all word-mode shift counts are effectively
872 masked by it and shifts in the range [BITS_PER_WORD, SHIFT_MASK) will
873 fill with zeros or sign bits as appropriate.
875 If SHIFT_MASK is BITS_PER_WORD - 1, this routine will synthesize
876 a doubleword shift whose equivalent mask is BITS_PER_WORD * 2 - 1.
877 Doing this preserves semantics required by SHIFT_COUNT_TRUNCATED.
878 In all other cases, shifts by values outside [0, BITS_PER_UNIT * 2)
881 BINOPTAB, UNSIGNEDP and METHODS are as for expand_binop. This function
882 may not use INTO_INPUT after modifying INTO_TARGET, and similarly for
883 OUTOF_INPUT and OUTOF_TARGET. OUTOF_TARGET can be null if the parent
884 function wants to calculate it itself.
886 Return true if the shift could be successfully synthesized. */
889 expand_doubleword_shift (enum machine_mode op1_mode, optab binoptab,
890 rtx outof_input, rtx into_input, rtx op1,
891 rtx outof_target, rtx into_target,
892 int unsignedp, enum optab_methods methods,
893 unsigned HOST_WIDE_INT shift_mask)
895 rtx superword_op1, tmp, cmp1, cmp2;
896 rtx subword_label, done_label;
897 enum rtx_code cmp_code;
899 /* See if word-mode shifts by BITS_PER_WORD...BITS_PER_WORD * 2 - 1 will
900 fill the result with sign or zero bits as appropriate. If so, the value
901 of OUTOF_TARGET will always be (SHIFT OUTOF_INPUT OP1). Recursively call
902 this routine to calculate INTO_TARGET (which depends on both OUTOF_INPUT
903 and INTO_INPUT), then emit code to set up OUTOF_TARGET.
905 This isn't worthwhile for constant shifts since the optimizers will
906 cope better with in-range shift counts. */
907 if (shift_mask >= BITS_PER_WORD
909 && !CONSTANT_P (op1))
911 if (!expand_doubleword_shift (op1_mode, binoptab,
912 outof_input, into_input, op1,
914 unsignedp, methods, shift_mask))
916 if (!force_expand_binop (word_mode, binoptab, outof_input, op1,
917 outof_target, unsignedp, methods))
922 /* Set CMP_CODE, CMP1 and CMP2 so that the rtx (CMP_CODE CMP1 CMP2)
923 is true when the effective shift value is less than BITS_PER_WORD.
924 Set SUPERWORD_OP1 to the shift count that should be used to shift
925 OUTOF_INPUT into INTO_TARGET when the condition is false. */
926 tmp = immed_double_const (BITS_PER_WORD, 0, op1_mode);
927 if (!CONSTANT_P (op1) && shift_mask == BITS_PER_WORD - 1)
929 /* Set CMP1 to OP1 & BITS_PER_WORD. The result is zero iff OP1
930 is a subword shift count. */
931 cmp1 = simplify_expand_binop (op1_mode, and_optab, op1, tmp,
933 cmp2 = CONST0_RTX (op1_mode);
939 /* Set CMP1 to OP1 - BITS_PER_WORD. */
940 cmp1 = simplify_expand_binop (op1_mode, sub_optab, op1, tmp,
942 cmp2 = CONST0_RTX (op1_mode);
944 superword_op1 = cmp1;
949 /* If we can compute the condition at compile time, pick the
950 appropriate subroutine. */
951 tmp = simplify_relational_operation (cmp_code, SImode, op1_mode, cmp1, cmp2);
952 if (tmp != 0 && GET_CODE (tmp) == CONST_INT)
954 if (tmp == const0_rtx)
955 return expand_superword_shift (binoptab, outof_input, superword_op1,
956 outof_target, into_target,
959 return expand_subword_shift (op1_mode, binoptab,
960 outof_input, into_input, op1,
961 outof_target, into_target,
962 unsignedp, methods, shift_mask);
965 #ifdef HAVE_conditional_move
966 /* Try using conditional moves to generate straight-line code. */
968 rtx start = get_last_insn ();
969 if (expand_doubleword_shift_condmove (op1_mode, binoptab,
970 cmp_code, cmp1, cmp2,
971 outof_input, into_input,
973 outof_target, into_target,
974 unsignedp, methods, shift_mask))
976 delete_insns_since (start);
980 /* As a last resort, use branches to select the correct alternative. */
981 subword_label = gen_label_rtx ();
982 done_label = gen_label_rtx ();
985 do_compare_rtx_and_jump (cmp1, cmp2, cmp_code, false, op1_mode,
986 0, 0, subword_label);
989 if (!expand_superword_shift (binoptab, outof_input, superword_op1,
990 outof_target, into_target,
994 emit_jump_insn (gen_jump (done_label));
996 emit_label (subword_label);
998 if (!expand_subword_shift (op1_mode, binoptab,
999 outof_input, into_input, op1,
1000 outof_target, into_target,
1001 unsignedp, methods, shift_mask))
1004 emit_label (done_label);
1008 /* Subroutine of expand_binop. Perform a double word multiplication of
1009 operands OP0 and OP1 both of mode MODE, which is exactly twice as wide
1010 as the target's word_mode. This function return NULL_RTX if anything
1011 goes wrong, in which case it may have already emitted instructions
1012 which need to be deleted.
1014 If we want to multiply two two-word values and have normal and widening
1015 multiplies of single-word values, we can do this with three smaller
1016 multiplications. Note that we do not make a REG_NO_CONFLICT block here
1017 because we are not operating on one word at a time.
1019 The multiplication proceeds as follows:
1020 _______________________
1021 [__op0_high_|__op0_low__]
1022 _______________________
1023 * [__op1_high_|__op1_low__]
1024 _______________________________________________
1025 _______________________
1026 (1) [__op0_low__*__op1_low__]
1027 _______________________
1028 (2a) [__op0_low__*__op1_high_]
1029 _______________________
1030 (2b) [__op0_high_*__op1_low__]
1031 _______________________
1032 (3) [__op0_high_*__op1_high_]
1035 This gives a 4-word result. Since we are only interested in the
1036 lower 2 words, partial result (3) and the upper words of (2a) and
1037 (2b) don't need to be calculated. Hence (2a) and (2b) can be
1038 calculated using non-widening multiplication.
1040 (1), however, needs to be calculated with an unsigned widening
1041 multiplication. If this operation is not directly supported we
1042 try using a signed widening multiplication and adjust the result.
1043 This adjustment works as follows:
1045 If both operands are positive then no adjustment is needed.
1047 If the operands have different signs, for example op0_low < 0 and
1048 op1_low >= 0, the instruction treats the most significant bit of
1049 op0_low as a sign bit instead of a bit with significance
1050 2**(BITS_PER_WORD-1), i.e. the instruction multiplies op1_low
1051 with 2**BITS_PER_WORD - op0_low, and two's complements the
1052 result. Conclusion: We need to add op1_low * 2**BITS_PER_WORD to
1055 Similarly, if both operands are negative, we need to add
1056 (op0_low + op1_low) * 2**BITS_PER_WORD.
1058 We use a trick to adjust quickly. We logically shift op0_low right
1059 (op1_low) BITS_PER_WORD-1 steps to get 0 or 1, and add this to
1060 op0_high (op1_high) before it is used to calculate 2b (2a). If no
1061 logical shift exists, we do an arithmetic right shift and subtract
1065 expand_doubleword_mult (enum machine_mode mode, rtx op0, rtx op1, rtx target,
1066 bool umulp, enum optab_methods methods)
1068 int low = (WORDS_BIG_ENDIAN ? 1 : 0);
1069 int high = (WORDS_BIG_ENDIAN ? 0 : 1);
1070 rtx wordm1 = umulp ? NULL_RTX : GEN_INT (BITS_PER_WORD - 1);
1071 rtx product, adjust, product_high, temp;
1073 rtx op0_high = operand_subword_force (op0, high, mode);
1074 rtx op0_low = operand_subword_force (op0, low, mode);
1075 rtx op1_high = operand_subword_force (op1, high, mode);
1076 rtx op1_low = operand_subword_force (op1, low, mode);
1078 /* If we're using an unsigned multiply to directly compute the product
1079 of the low-order words of the operands and perform any required
1080 adjustments of the operands, we begin by trying two more multiplications
1081 and then computing the appropriate sum.
1083 We have checked above that the required addition is provided.
1084 Full-word addition will normally always succeed, especially if
1085 it is provided at all, so we don't worry about its failure. The
1086 multiplication may well fail, however, so we do handle that. */
1090 /* ??? This could be done with emit_store_flag where available. */
1091 temp = expand_binop (word_mode, lshr_optab, op0_low, wordm1,
1092 NULL_RTX, 1, methods);
1094 op0_high = expand_binop (word_mode, add_optab, op0_high, temp,
1095 NULL_RTX, 0, OPTAB_DIRECT);
1098 temp = expand_binop (word_mode, ashr_optab, op0_low, wordm1,
1099 NULL_RTX, 0, methods);
1102 op0_high = expand_binop (word_mode, sub_optab, op0_high, temp,
1103 NULL_RTX, 0, OPTAB_DIRECT);
1110 adjust = expand_binop (word_mode, smul_optab, op0_high, op1_low,
1111 NULL_RTX, 0, OPTAB_DIRECT);
1115 /* OP0_HIGH should now be dead. */
1119 /* ??? This could be done with emit_store_flag where available. */
1120 temp = expand_binop (word_mode, lshr_optab, op1_low, wordm1,
1121 NULL_RTX, 1, methods);
1123 op1_high = expand_binop (word_mode, add_optab, op1_high, temp,
1124 NULL_RTX, 0, OPTAB_DIRECT);
1127 temp = expand_binop (word_mode, ashr_optab, op1_low, wordm1,
1128 NULL_RTX, 0, methods);
1131 op1_high = expand_binop (word_mode, sub_optab, op1_high, temp,
1132 NULL_RTX, 0, OPTAB_DIRECT);
1139 temp = expand_binop (word_mode, smul_optab, op1_high, op0_low,
1140 NULL_RTX, 0, OPTAB_DIRECT);
1144 /* OP1_HIGH should now be dead. */
1146 adjust = expand_binop (word_mode, add_optab, adjust, temp,
1147 adjust, 0, OPTAB_DIRECT);
1149 if (target && !REG_P (target))
1153 product = expand_binop (mode, umul_widen_optab, op0_low, op1_low,
1154 target, 1, OPTAB_DIRECT);
1156 product = expand_binop (mode, smul_widen_optab, op0_low, op1_low,
1157 target, 1, OPTAB_DIRECT);
1162 product_high = operand_subword (product, high, 1, mode);
1163 adjust = expand_binop (word_mode, add_optab, product_high, adjust,
1164 REG_P (product_high) ? product_high : adjust,
1166 emit_move_insn (product_high, adjust);
1170 /* Wrapper around expand_binop which takes an rtx code to specify
1171 the operation to perform, not an optab pointer. All other
1172 arguments are the same. */
1174 expand_simple_binop (enum machine_mode mode, enum rtx_code code, rtx op0,
1175 rtx op1, rtx target, int unsignedp,
1176 enum optab_methods methods)
1178 optab binop = code_to_optab[(int) code];
1181 return expand_binop (mode, binop, op0, op1, target, unsignedp, methods);
1184 /* Return whether OP0 and OP1 should be swapped when expanding a commutative
1185 binop. Order them according to commutative_operand_precedence and, if
1186 possible, try to put TARGET or a pseudo first. */
1188 swap_commutative_operands_with_target (rtx target, rtx op0, rtx op1)
1190 int op0_prec = commutative_operand_precedence (op0);
1191 int op1_prec = commutative_operand_precedence (op1);
1193 if (op0_prec < op1_prec)
1196 if (op0_prec > op1_prec)
1199 /* With equal precedence, both orders are ok, but it is better if the
1200 first operand is TARGET, or if both TARGET and OP0 are pseudos. */
1201 if (target == 0 || REG_P (target))
1202 return (REG_P (op1) && !REG_P (op0)) || target == op1;
1204 return rtx_equal_p (op1, target);
1208 /* Generate code to perform an operation specified by BINOPTAB
1209 on operands OP0 and OP1, with result having machine-mode MODE.
1211 UNSIGNEDP is for the case where we have to widen the operands
1212 to perform the operation. It says to use zero-extension.
1214 If TARGET is nonzero, the value
1215 is generated there, if it is convenient to do so.
1216 In all cases an rtx is returned for the locus of the value;
1217 this may or may not be TARGET. */
1220 expand_binop (enum machine_mode mode, optab binoptab, rtx op0, rtx op1,
1221 rtx target, int unsignedp, enum optab_methods methods)
1223 enum optab_methods next_methods
1224 = (methods == OPTAB_LIB || methods == OPTAB_LIB_WIDEN
1225 ? OPTAB_WIDEN : methods);
1226 enum mode_class class;
1227 enum machine_mode wider_mode;
1229 int commutative_op = 0;
1230 int shift_op = (binoptab->code == ASHIFT
1231 || binoptab->code == ASHIFTRT
1232 || binoptab->code == LSHIFTRT
1233 || binoptab->code == ROTATE
1234 || binoptab->code == ROTATERT);
1235 rtx entry_last = get_last_insn ();
1237 bool first_pass_p = true;
1239 class = GET_MODE_CLASS (mode);
1241 /* If subtracting an integer constant, convert this into an addition of
1242 the negated constant. */
1244 if (binoptab == sub_optab && GET_CODE (op1) == CONST_INT)
1246 op1 = negate_rtx (mode, op1);
1247 binoptab = add_optab;
1250 /* If we are inside an appropriately-short loop and we are optimizing,
1251 force expensive constants into a register. */
1252 if (CONSTANT_P (op0) && optimize
1253 && rtx_cost (op0, binoptab->code) > COSTS_N_INSNS (1))
1255 if (GET_MODE (op0) != VOIDmode)
1256 op0 = convert_modes (mode, VOIDmode, op0, unsignedp);
1257 op0 = force_reg (mode, op0);
1260 if (CONSTANT_P (op1) && optimize
1261 && ! shift_op && rtx_cost (op1, binoptab->code) > COSTS_N_INSNS (1))
1263 if (GET_MODE (op1) != VOIDmode)
1264 op1 = convert_modes (mode, VOIDmode, op1, unsignedp);
1265 op1 = force_reg (mode, op1);
1268 /* Record where to delete back to if we backtrack. */
1269 last = get_last_insn ();
1271 /* If operation is commutative,
1272 try to make the first operand a register.
1273 Even better, try to make it the same as the target.
1274 Also try to make the last operand a constant. */
1275 if (GET_RTX_CLASS (binoptab->code) == RTX_COMM_ARITH
1276 || binoptab == smul_widen_optab
1277 || binoptab == umul_widen_optab
1278 || binoptab == smul_highpart_optab
1279 || binoptab == umul_highpart_optab)
1283 if (swap_commutative_operands_with_target (target, op0, op1))
1293 /* If we can do it with a three-operand insn, do so. */
1295 if (methods != OPTAB_MUST_WIDEN
1296 && binoptab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
1298 int icode = (int) binoptab->handlers[(int) mode].insn_code;
1299 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
1300 enum machine_mode mode1 = insn_data[icode].operand[2].mode;
1301 enum machine_mode tmp_mode;
1303 rtx xop0 = op0, xop1 = op1;
1308 temp = gen_reg_rtx (mode);
1310 /* If it is a commutative operator and the modes would match
1311 if we would swap the operands, we can save the conversions. */
1314 if (GET_MODE (op0) != mode0 && GET_MODE (op1) != mode1
1315 && GET_MODE (op0) == mode1 && GET_MODE (op1) == mode0)
1319 tmp = op0; op0 = op1; op1 = tmp;
1320 tmp = xop0; xop0 = xop1; xop1 = tmp;
1324 /* In case the insn wants input operands in modes different from
1325 those of the actual operands, convert the operands. It would
1326 seem that we don't need to convert CONST_INTs, but we do, so
1327 that they're properly zero-extended, sign-extended or truncated
1330 if (GET_MODE (op0) != mode0 && mode0 != VOIDmode)
1331 xop0 = convert_modes (mode0,
1332 GET_MODE (op0) != VOIDmode
1337 if (GET_MODE (op1) != mode1 && mode1 != VOIDmode)
1338 xop1 = convert_modes (mode1,
1339 GET_MODE (op1) != VOIDmode
1344 /* Now, if insn's predicates don't allow our operands, put them into
1347 if (!insn_data[icode].operand[1].predicate (xop0, mode0)
1348 && mode0 != VOIDmode)
1349 xop0 = copy_to_mode_reg (mode0, xop0);
1351 if (!insn_data[icode].operand[2].predicate (xop1, mode1)
1352 && mode1 != VOIDmode)
1353 xop1 = copy_to_mode_reg (mode1, xop1);
1355 if (binoptab == vec_pack_mod_optab
1356 || binoptab == vec_pack_usat_optab
1357 || binoptab == vec_pack_ssat_optab)
1359 /* The mode of the result is different then the mode of the
1361 tmp_mode = insn_data[icode].operand[0].mode;
1362 if (GET_MODE_NUNITS (tmp_mode) != 2 * GET_MODE_NUNITS (mode))
1368 if (!insn_data[icode].operand[0].predicate (temp, tmp_mode))
1369 temp = gen_reg_rtx (tmp_mode);
1371 pat = GEN_FCN (icode) (temp, xop0, xop1);
1374 /* If PAT is composed of more than one insn, try to add an appropriate
1375 REG_EQUAL note to it. If we can't because TEMP conflicts with an
1376 operand, call ourselves again, this time without a target. */
1377 if (INSN_P (pat) && NEXT_INSN (pat) != NULL_RTX
1378 && ! add_equal_note (pat, temp, binoptab->code, xop0, xop1))
1380 delete_insns_since (last);
1381 return expand_binop (mode, binoptab, op0, op1, NULL_RTX,
1382 unsignedp, methods);
1389 delete_insns_since (last);
1392 /* If we were trying to rotate by a constant value, and that didn't
1393 work, try rotating the other direction before falling back to
1394 shifts and bitwise-or. */
1396 && (binoptab == rotl_optab || binoptab == rotr_optab)
1397 && class == MODE_INT
1398 && GET_CODE (op1) == CONST_INT
1400 && (unsigned int) INTVAL (op1) < GET_MODE_BITSIZE (mode))
1402 first_pass_p = false;
1403 op1 = GEN_INT (GET_MODE_BITSIZE (mode) - INTVAL (op1));
1404 binoptab = binoptab == rotl_optab ? rotr_optab : rotl_optab;
1408 /* If this is a multiply, see if we can do a widening operation that
1409 takes operands of this mode and makes a wider mode. */
1411 if (binoptab == smul_optab
1412 && GET_MODE_WIDER_MODE (mode) != VOIDmode
1413 && (((unsignedp ? umul_widen_optab : smul_widen_optab)
1414 ->handlers[(int) GET_MODE_WIDER_MODE (mode)].insn_code)
1415 != CODE_FOR_nothing))
1417 temp = expand_binop (GET_MODE_WIDER_MODE (mode),
1418 unsignedp ? umul_widen_optab : smul_widen_optab,
1419 op0, op1, NULL_RTX, unsignedp, OPTAB_DIRECT);
1423 if (GET_MODE_CLASS (mode) == MODE_INT
1424 && TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode),
1425 GET_MODE_BITSIZE (GET_MODE (temp))))
1426 return gen_lowpart (mode, temp);
1428 return convert_to_mode (mode, temp, unsignedp);
1432 /* Look for a wider mode of the same class for which we think we
1433 can open-code the operation. Check for a widening multiply at the
1434 wider mode as well. */
1436 if (CLASS_HAS_WIDER_MODES_P (class)
1437 && methods != OPTAB_DIRECT && methods != OPTAB_LIB)
1438 for (wider_mode = GET_MODE_WIDER_MODE (mode);
1439 wider_mode != VOIDmode;
1440 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
1442 if (binoptab->handlers[(int) wider_mode].insn_code != CODE_FOR_nothing
1443 || (binoptab == smul_optab
1444 && GET_MODE_WIDER_MODE (wider_mode) != VOIDmode
1445 && (((unsignedp ? umul_widen_optab : smul_widen_optab)
1446 ->handlers[(int) GET_MODE_WIDER_MODE (wider_mode)].insn_code)
1447 != CODE_FOR_nothing)))
1449 rtx xop0 = op0, xop1 = op1;
1452 /* For certain integer operations, we need not actually extend
1453 the narrow operands, as long as we will truncate
1454 the results to the same narrowness. */
1456 if ((binoptab == ior_optab || binoptab == and_optab
1457 || binoptab == xor_optab
1458 || binoptab == add_optab || binoptab == sub_optab
1459 || binoptab == smul_optab || binoptab == ashl_optab)
1460 && class == MODE_INT)
1463 xop0 = widen_operand (xop0, wider_mode, mode, unsignedp, no_extend);
1465 /* The second operand of a shift must always be extended. */
1466 xop1 = widen_operand (xop1, wider_mode, mode, unsignedp,
1467 no_extend && binoptab != ashl_optab);
1469 temp = expand_binop (wider_mode, binoptab, xop0, xop1, NULL_RTX,
1470 unsignedp, OPTAB_DIRECT);
1473 if (class != MODE_INT
1474 || !TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode),
1475 GET_MODE_BITSIZE (wider_mode)))
1478 target = gen_reg_rtx (mode);
1479 convert_move (target, temp, 0);
1483 return gen_lowpart (mode, temp);
1486 delete_insns_since (last);
1490 /* These can be done a word at a time. */
1491 if ((binoptab == and_optab || binoptab == ior_optab || binoptab == xor_optab)
1492 && class == MODE_INT
1493 && GET_MODE_SIZE (mode) > UNITS_PER_WORD
1494 && binoptab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing)
1500 /* If TARGET is the same as one of the operands, the REG_EQUAL note
1501 won't be accurate, so use a new target. */
1502 if (target == 0 || target == op0 || target == op1)
1503 target = gen_reg_rtx (mode);
1507 /* Do the actual arithmetic. */
1508 for (i = 0; i < GET_MODE_BITSIZE (mode) / BITS_PER_WORD; i++)
1510 rtx target_piece = operand_subword (target, i, 1, mode);
1511 rtx x = expand_binop (word_mode, binoptab,
1512 operand_subword_force (op0, i, mode),
1513 operand_subword_force (op1, i, mode),
1514 target_piece, unsignedp, next_methods);
1519 if (target_piece != x)
1520 emit_move_insn (target_piece, x);
1523 insns = get_insns ();
1526 if (i == GET_MODE_BITSIZE (mode) / BITS_PER_WORD)
1528 if (binoptab->code != UNKNOWN)
1530 = gen_rtx_fmt_ee (binoptab->code, mode,
1531 copy_rtx (op0), copy_rtx (op1));
1535 emit_no_conflict_block (insns, target, op0, op1, equiv_value);
1540 /* Synthesize double word shifts from single word shifts. */
1541 if ((binoptab == lshr_optab || binoptab == ashl_optab
1542 || binoptab == ashr_optab)
1543 && class == MODE_INT
1544 && (GET_CODE (op1) == CONST_INT || !optimize_size)
1545 && GET_MODE_SIZE (mode) == 2 * UNITS_PER_WORD
1546 && binoptab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing
1547 && ashl_optab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing
1548 && lshr_optab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing)
1550 unsigned HOST_WIDE_INT shift_mask, double_shift_mask;
1551 enum machine_mode op1_mode;
1553 double_shift_mask = targetm.shift_truncation_mask (mode);
1554 shift_mask = targetm.shift_truncation_mask (word_mode);
1555 op1_mode = GET_MODE (op1) != VOIDmode ? GET_MODE (op1) : word_mode;
1557 /* Apply the truncation to constant shifts. */
1558 if (double_shift_mask > 0 && GET_CODE (op1) == CONST_INT)
1559 op1 = GEN_INT (INTVAL (op1) & double_shift_mask);
1561 if (op1 == CONST0_RTX (op1_mode))
1564 /* Make sure that this is a combination that expand_doubleword_shift
1565 can handle. See the comments there for details. */
1566 if (double_shift_mask == 0
1567 || (shift_mask == BITS_PER_WORD - 1
1568 && double_shift_mask == BITS_PER_WORD * 2 - 1))
1570 rtx insns, equiv_value;
1571 rtx into_target, outof_target;
1572 rtx into_input, outof_input;
1573 int left_shift, outof_word;
1575 /* If TARGET is the same as one of the operands, the REG_EQUAL note
1576 won't be accurate, so use a new target. */
1577 if (target == 0 || target == op0 || target == op1)
1578 target = gen_reg_rtx (mode);
1582 /* OUTOF_* is the word we are shifting bits away from, and
1583 INTO_* is the word that we are shifting bits towards, thus
1584 they differ depending on the direction of the shift and
1585 WORDS_BIG_ENDIAN. */
1587 left_shift = binoptab == ashl_optab;
1588 outof_word = left_shift ^ ! WORDS_BIG_ENDIAN;
1590 outof_target = operand_subword (target, outof_word, 1, mode);
1591 into_target = operand_subword (target, 1 - outof_word, 1, mode);
1593 outof_input = operand_subword_force (op0, outof_word, mode);
1594 into_input = operand_subword_force (op0, 1 - outof_word, mode);
1596 if (expand_doubleword_shift (op1_mode, binoptab,
1597 outof_input, into_input, op1,
1598 outof_target, into_target,
1599 unsignedp, next_methods, shift_mask))
1601 insns = get_insns ();
1604 equiv_value = gen_rtx_fmt_ee (binoptab->code, mode, op0, op1);
1605 emit_no_conflict_block (insns, target, op0, op1, equiv_value);
1612 /* Synthesize double word rotates from single word shifts. */
1613 if ((binoptab == rotl_optab || binoptab == rotr_optab)
1614 && class == MODE_INT
1615 && GET_CODE (op1) == CONST_INT
1616 && GET_MODE_SIZE (mode) == 2 * UNITS_PER_WORD
1617 && ashl_optab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing
1618 && lshr_optab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing)
1621 rtx into_target, outof_target;
1622 rtx into_input, outof_input;
1624 int shift_count, left_shift, outof_word;
1626 /* If TARGET is the same as one of the operands, the REG_EQUAL note
1627 won't be accurate, so use a new target. Do this also if target is not
1628 a REG, first because having a register instead may open optimization
1629 opportunities, and second because if target and op0 happen to be MEMs
1630 designating the same location, we would risk clobbering it too early
1631 in the code sequence we generate below. */
1632 if (target == 0 || target == op0 || target == op1 || ! REG_P (target))
1633 target = gen_reg_rtx (mode);
1637 shift_count = INTVAL (op1);
1639 /* OUTOF_* is the word we are shifting bits away from, and
1640 INTO_* is the word that we are shifting bits towards, thus
1641 they differ depending on the direction of the shift and
1642 WORDS_BIG_ENDIAN. */
1644 left_shift = (binoptab == rotl_optab);
1645 outof_word = left_shift ^ ! WORDS_BIG_ENDIAN;
1647 outof_target = operand_subword (target, outof_word, 1, mode);
1648 into_target = operand_subword (target, 1 - outof_word, 1, mode);
1650 outof_input = operand_subword_force (op0, outof_word, mode);
1651 into_input = operand_subword_force (op0, 1 - outof_word, mode);
1653 if (shift_count == BITS_PER_WORD)
1655 /* This is just a word swap. */
1656 emit_move_insn (outof_target, into_input);
1657 emit_move_insn (into_target, outof_input);
1662 rtx into_temp1, into_temp2, outof_temp1, outof_temp2;
1663 rtx first_shift_count, second_shift_count;
1664 optab reverse_unsigned_shift, unsigned_shift;
1666 reverse_unsigned_shift = (left_shift ^ (shift_count < BITS_PER_WORD)
1667 ? lshr_optab : ashl_optab);
1669 unsigned_shift = (left_shift ^ (shift_count < BITS_PER_WORD)
1670 ? ashl_optab : lshr_optab);
1672 if (shift_count > BITS_PER_WORD)
1674 first_shift_count = GEN_INT (shift_count - BITS_PER_WORD);
1675 second_shift_count = GEN_INT (2 * BITS_PER_WORD - shift_count);
1679 first_shift_count = GEN_INT (BITS_PER_WORD - shift_count);
1680 second_shift_count = GEN_INT (shift_count);
1683 into_temp1 = expand_binop (word_mode, unsigned_shift,
1684 outof_input, first_shift_count,
1685 NULL_RTX, unsignedp, next_methods);
1686 into_temp2 = expand_binop (word_mode, reverse_unsigned_shift,
1687 into_input, second_shift_count,
1688 NULL_RTX, unsignedp, next_methods);
1690 if (into_temp1 != 0 && into_temp2 != 0)
1691 inter = expand_binop (word_mode, ior_optab, into_temp1, into_temp2,
1692 into_target, unsignedp, next_methods);
1696 if (inter != 0 && inter != into_target)
1697 emit_move_insn (into_target, inter);
1699 outof_temp1 = expand_binop (word_mode, unsigned_shift,
1700 into_input, first_shift_count,
1701 NULL_RTX, unsignedp, next_methods);
1702 outof_temp2 = expand_binop (word_mode, reverse_unsigned_shift,
1703 outof_input, second_shift_count,
1704 NULL_RTX, unsignedp, next_methods);
1706 if (inter != 0 && outof_temp1 != 0 && outof_temp2 != 0)
1707 inter = expand_binop (word_mode, ior_optab,
1708 outof_temp1, outof_temp2,
1709 outof_target, unsignedp, next_methods);
1711 if (inter != 0 && inter != outof_target)
1712 emit_move_insn (outof_target, inter);
1715 insns = get_insns ();
1720 /* One may be tempted to wrap the insns in a REG_NO_CONFLICT
1721 block to help the register allocator a bit. But a multi-word
1722 rotate will need all the input bits when setting the output
1723 bits, so there clearly is a conflict between the input and
1724 output registers. So we can't use a no-conflict block here. */
1730 /* These can be done a word at a time by propagating carries. */
1731 if ((binoptab == add_optab || binoptab == sub_optab)
1732 && class == MODE_INT
1733 && GET_MODE_SIZE (mode) >= 2 * UNITS_PER_WORD
1734 && binoptab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing)
1737 optab otheroptab = binoptab == add_optab ? sub_optab : add_optab;
1738 const unsigned int nwords = GET_MODE_BITSIZE (mode) / BITS_PER_WORD;
1739 rtx carry_in = NULL_RTX, carry_out = NULL_RTX;
1740 rtx xop0, xop1, xtarget;
1742 /* We can handle either a 1 or -1 value for the carry. If STORE_FLAG
1743 value is one of those, use it. Otherwise, use 1 since it is the
1744 one easiest to get. */
1745 #if STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1
1746 int normalizep = STORE_FLAG_VALUE;
1751 /* Prepare the operands. */
1752 xop0 = force_reg (mode, op0);
1753 xop1 = force_reg (mode, op1);
1755 xtarget = gen_reg_rtx (mode);
1757 if (target == 0 || !REG_P (target))
1760 /* Indicate for flow that the entire target reg is being set. */
1762 emit_insn (gen_rtx_CLOBBER (VOIDmode, xtarget));
1764 /* Do the actual arithmetic. */
1765 for (i = 0; i < nwords; i++)
1767 int index = (WORDS_BIG_ENDIAN ? nwords - i - 1 : i);
1768 rtx target_piece = operand_subword (xtarget, index, 1, mode);
1769 rtx op0_piece = operand_subword_force (xop0, index, mode);
1770 rtx op1_piece = operand_subword_force (xop1, index, mode);
1773 /* Main add/subtract of the input operands. */
1774 x = expand_binop (word_mode, binoptab,
1775 op0_piece, op1_piece,
1776 target_piece, unsignedp, next_methods);
1782 /* Store carry from main add/subtract. */
1783 carry_out = gen_reg_rtx (word_mode);
1784 carry_out = emit_store_flag_force (carry_out,
1785 (binoptab == add_optab
1788 word_mode, 1, normalizep);
1795 /* Add/subtract previous carry to main result. */
1796 newx = expand_binop (word_mode,
1797 normalizep == 1 ? binoptab : otheroptab,
1799 NULL_RTX, 1, next_methods);
1803 /* Get out carry from adding/subtracting carry in. */
1804 rtx carry_tmp = gen_reg_rtx (word_mode);
1805 carry_tmp = emit_store_flag_force (carry_tmp,
1806 (binoptab == add_optab
1809 word_mode, 1, normalizep);
1811 /* Logical-ior the two poss. carry together. */
1812 carry_out = expand_binop (word_mode, ior_optab,
1813 carry_out, carry_tmp,
1814 carry_out, 0, next_methods);
1818 emit_move_insn (target_piece, newx);
1822 if (x != target_piece)
1823 emit_move_insn (target_piece, x);
1826 carry_in = carry_out;
1829 if (i == GET_MODE_BITSIZE (mode) / (unsigned) BITS_PER_WORD)
1831 if (mov_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing
1832 || ! rtx_equal_p (target, xtarget))
1834 rtx temp = emit_move_insn (target, xtarget);
1836 set_unique_reg_note (temp,
1838 gen_rtx_fmt_ee (binoptab->code, mode,
1849 delete_insns_since (last);
1852 /* Attempt to synthesize double word multiplies using a sequence of word
1853 mode multiplications. We first attempt to generate a sequence using a
1854 more efficient unsigned widening multiply, and if that fails we then
1855 try using a signed widening multiply. */
1857 if (binoptab == smul_optab
1858 && class == MODE_INT
1859 && GET_MODE_SIZE (mode) == 2 * UNITS_PER_WORD
1860 && smul_optab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing
1861 && add_optab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing)
1863 rtx product = NULL_RTX;
1865 if (umul_widen_optab->handlers[(int) mode].insn_code
1866 != CODE_FOR_nothing)
1868 product = expand_doubleword_mult (mode, op0, op1, target,
1871 delete_insns_since (last);
1874 if (product == NULL_RTX
1875 && smul_widen_optab->handlers[(int) mode].insn_code
1876 != CODE_FOR_nothing)
1878 product = expand_doubleword_mult (mode, op0, op1, target,
1881 delete_insns_since (last);
1884 if (product != NULL_RTX)
1886 if (mov_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
1888 temp = emit_move_insn (target ? target : product, product);
1889 set_unique_reg_note (temp,
1891 gen_rtx_fmt_ee (MULT, mode,
1899 /* It can't be open-coded in this mode.
1900 Use a library call if one is available and caller says that's ok. */
1902 if (binoptab->handlers[(int) mode].libfunc
1903 && (methods == OPTAB_LIB || methods == OPTAB_LIB_WIDEN))
1907 enum machine_mode op1_mode = mode;
1914 op1_mode = word_mode;
1915 /* Specify unsigned here,
1916 since negative shift counts are meaningless. */
1917 op1x = convert_to_mode (word_mode, op1, 1);
1920 if (GET_MODE (op0) != VOIDmode
1921 && GET_MODE (op0) != mode)
1922 op0 = convert_to_mode (mode, op0, unsignedp);
1924 /* Pass 1 for NO_QUEUE so we don't lose any increments
1925 if the libcall is cse'd or moved. */
1926 value = emit_library_call_value (binoptab->handlers[(int) mode].libfunc,
1927 NULL_RTX, LCT_CONST, mode, 2,
1928 op0, mode, op1x, op1_mode);
1930 insns = get_insns ();
1933 target = gen_reg_rtx (mode);
1934 emit_libcall_block (insns, target, value,
1935 gen_rtx_fmt_ee (binoptab->code, mode, op0, op1));
1940 delete_insns_since (last);
1942 /* It can't be done in this mode. Can we do it in a wider mode? */
1944 if (! (methods == OPTAB_WIDEN || methods == OPTAB_LIB_WIDEN
1945 || methods == OPTAB_MUST_WIDEN))
1947 /* Caller says, don't even try. */
1948 delete_insns_since (entry_last);
1952 /* Compute the value of METHODS to pass to recursive calls.
1953 Don't allow widening to be tried recursively. */
1955 methods = (methods == OPTAB_LIB_WIDEN ? OPTAB_LIB : OPTAB_DIRECT);
1957 /* Look for a wider mode of the same class for which it appears we can do
1960 if (CLASS_HAS_WIDER_MODES_P (class))
1962 for (wider_mode = GET_MODE_WIDER_MODE (mode);
1963 wider_mode != VOIDmode;
1964 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
1966 if ((binoptab->handlers[(int) wider_mode].insn_code
1967 != CODE_FOR_nothing)
1968 || (methods == OPTAB_LIB
1969 && binoptab->handlers[(int) wider_mode].libfunc))
1971 rtx xop0 = op0, xop1 = op1;
1974 /* For certain integer operations, we need not actually extend
1975 the narrow operands, as long as we will truncate
1976 the results to the same narrowness. */
1978 if ((binoptab == ior_optab || binoptab == and_optab
1979 || binoptab == xor_optab
1980 || binoptab == add_optab || binoptab == sub_optab
1981 || binoptab == smul_optab || binoptab == ashl_optab)
1982 && class == MODE_INT)
1985 xop0 = widen_operand (xop0, wider_mode, mode,
1986 unsignedp, no_extend);
1988 /* The second operand of a shift must always be extended. */
1989 xop1 = widen_operand (xop1, wider_mode, mode, unsignedp,
1990 no_extend && binoptab != ashl_optab);
1992 temp = expand_binop (wider_mode, binoptab, xop0, xop1, NULL_RTX,
1993 unsignedp, methods);
1996 if (class != MODE_INT
1997 || !TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode),
1998 GET_MODE_BITSIZE (wider_mode)))
2001 target = gen_reg_rtx (mode);
2002 convert_move (target, temp, 0);
2006 return gen_lowpart (mode, temp);
2009 delete_insns_since (last);
2014 delete_insns_since (entry_last);
2018 /* Expand a binary operator which has both signed and unsigned forms.
2019 UOPTAB is the optab for unsigned operations, and SOPTAB is for
2022 If we widen unsigned operands, we may use a signed wider operation instead
2023 of an unsigned wider operation, since the result would be the same. */
2026 sign_expand_binop (enum machine_mode mode, optab uoptab, optab soptab,
2027 rtx op0, rtx op1, rtx target, int unsignedp,
2028 enum optab_methods methods)
2031 optab direct_optab = unsignedp ? uoptab : soptab;
2032 struct optab wide_soptab;
2034 /* Do it without widening, if possible. */
2035 temp = expand_binop (mode, direct_optab, op0, op1, target,
2036 unsignedp, OPTAB_DIRECT);
2037 if (temp || methods == OPTAB_DIRECT)
2040 /* Try widening to a signed int. Make a fake signed optab that
2041 hides any signed insn for direct use. */
2042 wide_soptab = *soptab;
2043 wide_soptab.handlers[(int) mode].insn_code = CODE_FOR_nothing;
2044 wide_soptab.handlers[(int) mode].libfunc = 0;
2046 temp = expand_binop (mode, &wide_soptab, op0, op1, target,
2047 unsignedp, OPTAB_WIDEN);
2049 /* For unsigned operands, try widening to an unsigned int. */
2050 if (temp == 0 && unsignedp)
2051 temp = expand_binop (mode, uoptab, op0, op1, target,
2052 unsignedp, OPTAB_WIDEN);
2053 if (temp || methods == OPTAB_WIDEN)
2056 /* Use the right width lib call if that exists. */
2057 temp = expand_binop (mode, direct_optab, op0, op1, target, unsignedp, OPTAB_LIB);
2058 if (temp || methods == OPTAB_LIB)
2061 /* Must widen and use a lib call, use either signed or unsigned. */
2062 temp = expand_binop (mode, &wide_soptab, op0, op1, target,
2063 unsignedp, methods);
2067 return expand_binop (mode, uoptab, op0, op1, target,
2068 unsignedp, methods);
2072 /* Generate code to perform an operation specified by UNOPPTAB
2073 on operand OP0, with two results to TARG0 and TARG1.
2074 We assume that the order of the operands for the instruction
2075 is TARG0, TARG1, OP0.
2077 Either TARG0 or TARG1 may be zero, but what that means is that
2078 the result is not actually wanted. We will generate it into
2079 a dummy pseudo-reg and discard it. They may not both be zero.
2081 Returns 1 if this operation can be performed; 0 if not. */
2084 expand_twoval_unop (optab unoptab, rtx op0, rtx targ0, rtx targ1,
2087 enum machine_mode mode = GET_MODE (targ0 ? targ0 : targ1);
2088 enum mode_class class;
2089 enum machine_mode wider_mode;
2090 rtx entry_last = get_last_insn ();
2093 class = GET_MODE_CLASS (mode);
2096 targ0 = gen_reg_rtx (mode);
2098 targ1 = gen_reg_rtx (mode);
2100 /* Record where to go back to if we fail. */
2101 last = get_last_insn ();
2103 if (unoptab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
2105 int icode = (int) unoptab->handlers[(int) mode].insn_code;
2106 enum machine_mode mode0 = insn_data[icode].operand[2].mode;
2110 if (GET_MODE (xop0) != VOIDmode
2111 && GET_MODE (xop0) != mode0)
2112 xop0 = convert_to_mode (mode0, xop0, unsignedp);
2114 /* Now, if insn doesn't accept these operands, put them into pseudos. */
2115 if (!insn_data[icode].operand[2].predicate (xop0, mode0))
2116 xop0 = copy_to_mode_reg (mode0, xop0);
2118 /* We could handle this, but we should always be called with a pseudo
2119 for our targets and all insns should take them as outputs. */
2120 gcc_assert (insn_data[icode].operand[0].predicate (targ0, mode));
2121 gcc_assert (insn_data[icode].operand[1].predicate (targ1, mode));
2123 pat = GEN_FCN (icode) (targ0, targ1, xop0);
2130 delete_insns_since (last);
2133 /* It can't be done in this mode. Can we do it in a wider mode? */
2135 if (CLASS_HAS_WIDER_MODES_P (class))
2137 for (wider_mode = GET_MODE_WIDER_MODE (mode);
2138 wider_mode != VOIDmode;
2139 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
2141 if (unoptab->handlers[(int) wider_mode].insn_code
2142 != CODE_FOR_nothing)
2144 rtx t0 = gen_reg_rtx (wider_mode);
2145 rtx t1 = gen_reg_rtx (wider_mode);
2146 rtx cop0 = convert_modes (wider_mode, mode, op0, unsignedp);
2148 if (expand_twoval_unop (unoptab, cop0, t0, t1, unsignedp))
2150 convert_move (targ0, t0, unsignedp);
2151 convert_move (targ1, t1, unsignedp);
2155 delete_insns_since (last);
2160 delete_insns_since (entry_last);
2164 /* Generate code to perform an operation specified by BINOPTAB
2165 on operands OP0 and OP1, with two results to TARG1 and TARG2.
2166 We assume that the order of the operands for the instruction
2167 is TARG0, OP0, OP1, TARG1, which would fit a pattern like
2168 [(set TARG0 (operate OP0 OP1)) (set TARG1 (operate ...))].
2170 Either TARG0 or TARG1 may be zero, but what that means is that
2171 the result is not actually wanted. We will generate it into
2172 a dummy pseudo-reg and discard it. They may not both be zero.
2174 Returns 1 if this operation can be performed; 0 if not. */
2177 expand_twoval_binop (optab binoptab, rtx op0, rtx op1, rtx targ0, rtx targ1,
2180 enum machine_mode mode = GET_MODE (targ0 ? targ0 : targ1);
2181 enum mode_class class;
2182 enum machine_mode wider_mode;
2183 rtx entry_last = get_last_insn ();
2186 class = GET_MODE_CLASS (mode);
2188 /* If we are inside an appropriately-short loop and we are optimizing,
2189 force expensive constants into a register. */
2190 if (CONSTANT_P (op0) && optimize
2191 && rtx_cost (op0, binoptab->code) > COSTS_N_INSNS (1))
2192 op0 = force_reg (mode, op0);
2194 if (CONSTANT_P (op1) && optimize
2195 && rtx_cost (op1, binoptab->code) > COSTS_N_INSNS (1))
2196 op1 = force_reg (mode, op1);
2199 targ0 = gen_reg_rtx (mode);
2201 targ1 = gen_reg_rtx (mode);
2203 /* Record where to go back to if we fail. */
2204 last = get_last_insn ();
2206 if (binoptab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
2208 int icode = (int) binoptab->handlers[(int) mode].insn_code;
2209 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
2210 enum machine_mode mode1 = insn_data[icode].operand[2].mode;
2212 rtx xop0 = op0, xop1 = op1;
2214 /* In case the insn wants input operands in modes different from
2215 those of the actual operands, convert the operands. It would
2216 seem that we don't need to convert CONST_INTs, but we do, so
2217 that they're properly zero-extended, sign-extended or truncated
2220 if (GET_MODE (op0) != mode0 && mode0 != VOIDmode)
2221 xop0 = convert_modes (mode0,
2222 GET_MODE (op0) != VOIDmode
2227 if (GET_MODE (op1) != mode1 && mode1 != VOIDmode)
2228 xop1 = convert_modes (mode1,
2229 GET_MODE (op1) != VOIDmode
2234 /* Now, if insn doesn't accept these operands, put them into pseudos. */
2235 if (!insn_data[icode].operand[1].predicate (xop0, mode0))
2236 xop0 = copy_to_mode_reg (mode0, xop0);
2238 if (!insn_data[icode].operand[2].predicate (xop1, mode1))
2239 xop1 = copy_to_mode_reg (mode1, xop1);
2241 /* We could handle this, but we should always be called with a pseudo
2242 for our targets and all insns should take them as outputs. */
2243 gcc_assert (insn_data[icode].operand[0].predicate (targ0, mode));
2244 gcc_assert (insn_data[icode].operand[3].predicate (targ1, mode));
2246 pat = GEN_FCN (icode) (targ0, xop0, xop1, targ1);
2253 delete_insns_since (last);
2256 /* It can't be done in this mode. Can we do it in a wider mode? */
2258 if (CLASS_HAS_WIDER_MODES_P (class))
2260 for (wider_mode = GET_MODE_WIDER_MODE (mode);
2261 wider_mode != VOIDmode;
2262 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
2264 if (binoptab->handlers[(int) wider_mode].insn_code
2265 != CODE_FOR_nothing)
2267 rtx t0 = gen_reg_rtx (wider_mode);
2268 rtx t1 = gen_reg_rtx (wider_mode);
2269 rtx cop0 = convert_modes (wider_mode, mode, op0, unsignedp);
2270 rtx cop1 = convert_modes (wider_mode, mode, op1, unsignedp);
2272 if (expand_twoval_binop (binoptab, cop0, cop1,
2275 convert_move (targ0, t0, unsignedp);
2276 convert_move (targ1, t1, unsignedp);
2280 delete_insns_since (last);
2285 delete_insns_since (entry_last);
2289 /* Expand the two-valued library call indicated by BINOPTAB, but
2290 preserve only one of the values. If TARG0 is non-NULL, the first
2291 value is placed into TARG0; otherwise the second value is placed
2292 into TARG1. Exactly one of TARG0 and TARG1 must be non-NULL. The
2293 value stored into TARG0 or TARG1 is equivalent to (CODE OP0 OP1).
2294 This routine assumes that the value returned by the library call is
2295 as if the return value was of an integral mode twice as wide as the
2296 mode of OP0. Returns 1 if the call was successful. */
2299 expand_twoval_binop_libfunc (optab binoptab, rtx op0, rtx op1,
2300 rtx targ0, rtx targ1, enum rtx_code code)
2302 enum machine_mode mode;
2303 enum machine_mode libval_mode;
2307 /* Exactly one of TARG0 or TARG1 should be non-NULL. */
2308 gcc_assert (!targ0 != !targ1);
2310 mode = GET_MODE (op0);
2311 if (!binoptab->handlers[(int) mode].libfunc)
2314 /* The value returned by the library function will have twice as
2315 many bits as the nominal MODE. */
2316 libval_mode = smallest_mode_for_size (2 * GET_MODE_BITSIZE (mode),
2319 libval = emit_library_call_value (binoptab->handlers[(int) mode].libfunc,
2320 NULL_RTX, LCT_CONST,
2324 /* Get the part of VAL containing the value that we want. */
2325 libval = simplify_gen_subreg (mode, libval, libval_mode,
2326 targ0 ? 0 : GET_MODE_SIZE (mode));
2327 insns = get_insns ();
2329 /* Move the into the desired location. */
2330 emit_libcall_block (insns, targ0 ? targ0 : targ1, libval,
2331 gen_rtx_fmt_ee (code, mode, op0, op1));
2337 /* Wrapper around expand_unop which takes an rtx code to specify
2338 the operation to perform, not an optab pointer. All other
2339 arguments are the same. */
2341 expand_simple_unop (enum machine_mode mode, enum rtx_code code, rtx op0,
2342 rtx target, int unsignedp)
2344 optab unop = code_to_optab[(int) code];
2347 return expand_unop (mode, unop, op0, target, unsignedp);
2353 (clz:wide (zero_extend:wide x)) - ((width wide) - (width narrow)). */
2355 widen_clz (enum machine_mode mode, rtx op0, rtx target)
2357 enum mode_class class = GET_MODE_CLASS (mode);
2358 if (CLASS_HAS_WIDER_MODES_P (class))
2360 enum machine_mode wider_mode;
2361 for (wider_mode = GET_MODE_WIDER_MODE (mode);
2362 wider_mode != VOIDmode;
2363 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
2365 if (clz_optab->handlers[(int) wider_mode].insn_code
2366 != CODE_FOR_nothing)
2368 rtx xop0, temp, last;
2370 last = get_last_insn ();
2373 target = gen_reg_rtx (mode);
2374 xop0 = widen_operand (op0, wider_mode, mode, true, false);
2375 temp = expand_unop (wider_mode, clz_optab, xop0, NULL_RTX, true);
2377 temp = expand_binop (wider_mode, sub_optab, temp,
2378 GEN_INT (GET_MODE_BITSIZE (wider_mode)
2379 - GET_MODE_BITSIZE (mode)),
2380 target, true, OPTAB_DIRECT);
2382 delete_insns_since (last);
2391 /* Try calculating (parity x) as (and (popcount x) 1), where
2392 popcount can also be done in a wider mode. */
2394 expand_parity (enum machine_mode mode, rtx op0, rtx target)
2396 enum mode_class class = GET_MODE_CLASS (mode);
2397 if (CLASS_HAS_WIDER_MODES_P (class))
2399 enum machine_mode wider_mode;
2400 for (wider_mode = mode; wider_mode != VOIDmode;
2401 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
2403 if (popcount_optab->handlers[(int) wider_mode].insn_code
2404 != CODE_FOR_nothing)
2406 rtx xop0, temp, last;
2408 last = get_last_insn ();
2411 target = gen_reg_rtx (mode);
2412 xop0 = widen_operand (op0, wider_mode, mode, true, false);
2413 temp = expand_unop (wider_mode, popcount_optab, xop0, NULL_RTX,
2416 temp = expand_binop (wider_mode, and_optab, temp, const1_rtx,
2417 target, true, OPTAB_DIRECT);
2419 delete_insns_since (last);
2428 /* Extract the OMODE lowpart from VAL, which has IMODE. Under certain
2429 conditions, VAL may already be a SUBREG against which we cannot generate
2430 a further SUBREG. In this case, we expect forcing the value into a
2431 register will work around the situation. */
2434 lowpart_subreg_maybe_copy (enum machine_mode omode, rtx val,
2435 enum machine_mode imode)
2438 ret = lowpart_subreg (omode, val, imode);
2441 val = force_reg (imode, val);
2442 ret = lowpart_subreg (omode, val, imode);
2443 gcc_assert (ret != NULL);
2448 /* Expand a floating point absolute value or negation operation via a
2449 logical operation on the sign bit. */
2452 expand_absneg_bit (enum rtx_code code, enum machine_mode mode,
2453 rtx op0, rtx target)
2455 const struct real_format *fmt;
2456 int bitpos, word, nwords, i;
2457 enum machine_mode imode;
2458 HOST_WIDE_INT hi, lo;
2461 /* The format has to have a simple sign bit. */
2462 fmt = REAL_MODE_FORMAT (mode);
2466 bitpos = fmt->signbit_rw;
2470 /* Don't create negative zeros if the format doesn't support them. */
2471 if (code == NEG && !fmt->has_signed_zero)
2474 if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
2476 imode = int_mode_for_mode (mode);
2477 if (imode == BLKmode)
2486 if (FLOAT_WORDS_BIG_ENDIAN)
2487 word = (GET_MODE_BITSIZE (mode) - bitpos) / BITS_PER_WORD;
2489 word = bitpos / BITS_PER_WORD;
2490 bitpos = bitpos % BITS_PER_WORD;
2491 nwords = (GET_MODE_BITSIZE (mode) + BITS_PER_WORD - 1) / BITS_PER_WORD;
2494 if (bitpos < HOST_BITS_PER_WIDE_INT)
2497 lo = (HOST_WIDE_INT) 1 << bitpos;
2501 hi = (HOST_WIDE_INT) 1 << (bitpos - HOST_BITS_PER_WIDE_INT);
2507 if (target == 0 || target == op0)
2508 target = gen_reg_rtx (mode);
2514 for (i = 0; i < nwords; ++i)
2516 rtx targ_piece = operand_subword (target, i, 1, mode);
2517 rtx op0_piece = operand_subword_force (op0, i, mode);
2521 temp = expand_binop (imode, code == ABS ? and_optab : xor_optab,
2523 immed_double_const (lo, hi, imode),
2524 targ_piece, 1, OPTAB_LIB_WIDEN);
2525 if (temp != targ_piece)
2526 emit_move_insn (targ_piece, temp);
2529 emit_move_insn (targ_piece, op0_piece);
2532 insns = get_insns ();
2535 temp = gen_rtx_fmt_e (code, mode, copy_rtx (op0));
2536 emit_no_conflict_block (insns, target, op0, NULL_RTX, temp);
2540 temp = expand_binop (imode, code == ABS ? and_optab : xor_optab,
2541 gen_lowpart (imode, op0),
2542 immed_double_const (lo, hi, imode),
2543 gen_lowpart (imode, target), 1, OPTAB_LIB_WIDEN);
2544 target = lowpart_subreg_maybe_copy (mode, temp, imode);
2546 set_unique_reg_note (get_last_insn (), REG_EQUAL,
2547 gen_rtx_fmt_e (code, mode, copy_rtx (op0)));
2553 /* Generate code to perform an operation specified by UNOPTAB
2554 on operand OP0, with result having machine-mode MODE.
2556 UNSIGNEDP is for the case where we have to widen the operands
2557 to perform the operation. It says to use zero-extension.
2559 If TARGET is nonzero, the value
2560 is generated there, if it is convenient to do so.
2561 In all cases an rtx is returned for the locus of the value;
2562 this may or may not be TARGET. */
2565 expand_unop (enum machine_mode mode, optab unoptab, rtx op0, rtx target,
2568 enum mode_class class;
2569 enum machine_mode wider_mode;
2571 rtx last = get_last_insn ();
2574 class = GET_MODE_CLASS (mode);
2576 if (unoptab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
2578 int icode = (int) unoptab->handlers[(int) mode].insn_code;
2579 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
2585 temp = gen_reg_rtx (mode);
2587 if (GET_MODE (xop0) != VOIDmode
2588 && GET_MODE (xop0) != mode0)
2589 xop0 = convert_to_mode (mode0, xop0, unsignedp);
2591 /* Now, if insn doesn't accept our operand, put it into a pseudo. */
2593 if (!insn_data[icode].operand[1].predicate (xop0, mode0))
2594 xop0 = copy_to_mode_reg (mode0, xop0);
2596 if (!insn_data[icode].operand[0].predicate (temp, mode))
2597 temp = gen_reg_rtx (mode);
2599 pat = GEN_FCN (icode) (temp, xop0);
2602 if (INSN_P (pat) && NEXT_INSN (pat) != NULL_RTX
2603 && ! add_equal_note (pat, temp, unoptab->code, xop0, NULL_RTX))
2605 delete_insns_since (last);
2606 return expand_unop (mode, unoptab, op0, NULL_RTX, unsignedp);
2614 delete_insns_since (last);
2617 /* It can't be done in this mode. Can we open-code it in a wider mode? */
2619 /* Widening clz needs special treatment. */
2620 if (unoptab == clz_optab)
2622 temp = widen_clz (mode, op0, target);
2629 /* We can't widen a bswap. */
2630 if (unoptab == bswap_optab)
2633 if (CLASS_HAS_WIDER_MODES_P (class))
2634 for (wider_mode = GET_MODE_WIDER_MODE (mode);
2635 wider_mode != VOIDmode;
2636 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
2638 if (unoptab->handlers[(int) wider_mode].insn_code != CODE_FOR_nothing)
2642 /* For certain operations, we need not actually extend
2643 the narrow operand, as long as we will truncate the
2644 results to the same narrowness. */
2646 xop0 = widen_operand (xop0, wider_mode, mode, unsignedp,
2647 (unoptab == neg_optab
2648 || unoptab == one_cmpl_optab)
2649 && class == MODE_INT);
2651 temp = expand_unop (wider_mode, unoptab, xop0, NULL_RTX,
2656 if (class != MODE_INT
2657 || !TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode),
2658 GET_MODE_BITSIZE (wider_mode)))
2661 target = gen_reg_rtx (mode);
2662 convert_move (target, temp, 0);
2666 return gen_lowpart (mode, temp);
2669 delete_insns_since (last);
2673 /* These can be done a word at a time. */
2674 if (unoptab == one_cmpl_optab
2675 && class == MODE_INT
2676 && GET_MODE_SIZE (mode) > UNITS_PER_WORD
2677 && unoptab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing)
2682 if (target == 0 || target == op0)
2683 target = gen_reg_rtx (mode);
2687 /* Do the actual arithmetic. */
2688 for (i = 0; i < GET_MODE_BITSIZE (mode) / BITS_PER_WORD; i++)
2690 rtx target_piece = operand_subword (target, i, 1, mode);
2691 rtx x = expand_unop (word_mode, unoptab,
2692 operand_subword_force (op0, i, mode),
2693 target_piece, unsignedp);
2695 if (target_piece != x)
2696 emit_move_insn (target_piece, x);
2699 insns = get_insns ();
2702 emit_no_conflict_block (insns, target, op0, NULL_RTX,
2703 gen_rtx_fmt_e (unoptab->code, mode,
2708 if (unoptab->code == NEG)
2710 /* Try negating floating point values by flipping the sign bit. */
2711 if (SCALAR_FLOAT_MODE_P (mode))
2713 temp = expand_absneg_bit (NEG, mode, op0, target);
2718 /* If there is no negation pattern, and we have no negative zero,
2719 try subtracting from zero. */
2720 if (!HONOR_SIGNED_ZEROS (mode))
2722 temp = expand_binop (mode, (unoptab == negv_optab
2723 ? subv_optab : sub_optab),
2724 CONST0_RTX (mode), op0, target,
2725 unsignedp, OPTAB_DIRECT);
2731 /* Try calculating parity (x) as popcount (x) % 2. */
2732 if (unoptab == parity_optab)
2734 temp = expand_parity (mode, op0, target);
2740 /* Now try a library call in this mode. */
2741 if (unoptab->handlers[(int) mode].libfunc)
2745 enum machine_mode outmode = mode;
2747 /* All of these functions return small values. Thus we choose to
2748 have them return something that isn't a double-word. */
2749 if (unoptab == ffs_optab || unoptab == clz_optab || unoptab == ctz_optab
2750 || unoptab == popcount_optab || unoptab == parity_optab)
2752 = GET_MODE (hard_libcall_value (TYPE_MODE (integer_type_node)));
2756 /* Pass 1 for NO_QUEUE so we don't lose any increments
2757 if the libcall is cse'd or moved. */
2758 value = emit_library_call_value (unoptab->handlers[(int) mode].libfunc,
2759 NULL_RTX, LCT_CONST, outmode,
2761 insns = get_insns ();
2764 target = gen_reg_rtx (outmode);
2765 emit_libcall_block (insns, target, value,
2766 gen_rtx_fmt_e (unoptab->code, outmode, op0));
2771 /* It can't be done in this mode. Can we do it in a wider mode? */
2773 if (CLASS_HAS_WIDER_MODES_P (class))
2775 for (wider_mode = GET_MODE_WIDER_MODE (mode);
2776 wider_mode != VOIDmode;
2777 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
2779 if ((unoptab->handlers[(int) wider_mode].insn_code
2780 != CODE_FOR_nothing)
2781 || unoptab->handlers[(int) wider_mode].libfunc)
2785 /* For certain operations, we need not actually extend
2786 the narrow operand, as long as we will truncate the
2787 results to the same narrowness. */
2789 xop0 = widen_operand (xop0, wider_mode, mode, unsignedp,
2790 (unoptab == neg_optab
2791 || unoptab == one_cmpl_optab)
2792 && class == MODE_INT);
2794 temp = expand_unop (wider_mode, unoptab, xop0, NULL_RTX,
2797 /* If we are generating clz using wider mode, adjust the
2799 if (unoptab == clz_optab && temp != 0)
2800 temp = expand_binop (wider_mode, sub_optab, temp,
2801 GEN_INT (GET_MODE_BITSIZE (wider_mode)
2802 - GET_MODE_BITSIZE (mode)),
2803 target, true, OPTAB_DIRECT);
2807 if (class != MODE_INT)
2810 target = gen_reg_rtx (mode);
2811 convert_move (target, temp, 0);
2815 return gen_lowpart (mode, temp);
2818 delete_insns_since (last);
2823 /* One final attempt at implementing negation via subtraction,
2824 this time allowing widening of the operand. */
2825 if (unoptab->code == NEG && !HONOR_SIGNED_ZEROS (mode))
2828 temp = expand_binop (mode,
2829 unoptab == negv_optab ? subv_optab : sub_optab,
2830 CONST0_RTX (mode), op0,
2831 target, unsignedp, OPTAB_LIB_WIDEN);
2839 /* Emit code to compute the absolute value of OP0, with result to
2840 TARGET if convenient. (TARGET may be 0.) The return value says
2841 where the result actually is to be found.
2843 MODE is the mode of the operand; the mode of the result is
2844 different but can be deduced from MODE.
2849 expand_abs_nojump (enum machine_mode mode, rtx op0, rtx target,
2850 int result_unsignedp)
2855 result_unsignedp = 1;
2857 /* First try to do it with a special abs instruction. */
2858 temp = expand_unop (mode, result_unsignedp ? abs_optab : absv_optab,
2863 /* For floating point modes, try clearing the sign bit. */
2864 if (SCALAR_FLOAT_MODE_P (mode))
2866 temp = expand_absneg_bit (ABS, mode, op0, target);
2871 /* If we have a MAX insn, we can do this as MAX (x, -x). */
2872 if (smax_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing
2873 && !HONOR_SIGNED_ZEROS (mode))
2875 rtx last = get_last_insn ();
2877 temp = expand_unop (mode, neg_optab, op0, NULL_RTX, 0);
2879 temp = expand_binop (mode, smax_optab, op0, temp, target, 0,
2885 delete_insns_since (last);
2888 /* If this machine has expensive jumps, we can do integer absolute
2889 value of X as (((signed) x >> (W-1)) ^ x) - ((signed) x >> (W-1)),
2890 where W is the width of MODE. */
2892 if (GET_MODE_CLASS (mode) == MODE_INT && BRANCH_COST >= 2)
2894 rtx extended = expand_shift (RSHIFT_EXPR, mode, op0,
2895 size_int (GET_MODE_BITSIZE (mode) - 1),
2898 temp = expand_binop (mode, xor_optab, extended, op0, target, 0,
2901 temp = expand_binop (mode, result_unsignedp ? sub_optab : subv_optab,
2902 temp, extended, target, 0, OPTAB_LIB_WIDEN);
2912 expand_abs (enum machine_mode mode, rtx op0, rtx target,
2913 int result_unsignedp, int safe)
2918 result_unsignedp = 1;
2920 temp = expand_abs_nojump (mode, op0, target, result_unsignedp);
2924 /* If that does not win, use conditional jump and negate. */
2926 /* It is safe to use the target if it is the same
2927 as the source if this is also a pseudo register */
2928 if (op0 == target && REG_P (op0)
2929 && REGNO (op0) >= FIRST_PSEUDO_REGISTER)
2932 op1 = gen_label_rtx ();
2933 if (target == 0 || ! safe
2934 || GET_MODE (target) != mode
2935 || (MEM_P (target) && MEM_VOLATILE_P (target))
2937 && REGNO (target) < FIRST_PSEUDO_REGISTER))
2938 target = gen_reg_rtx (mode);
2940 emit_move_insn (target, op0);
2943 do_compare_rtx_and_jump (target, CONST0_RTX (mode), GE, 0, mode,
2944 NULL_RTX, NULL_RTX, op1);
2946 op0 = expand_unop (mode, result_unsignedp ? neg_optab : negv_optab,
2949 emit_move_insn (target, op0);
2955 /* A subroutine of expand_copysign, perform the copysign operation using the
2956 abs and neg primitives advertised to exist on the target. The assumption
2957 is that we have a split register file, and leaving op0 in fp registers,
2958 and not playing with subregs so much, will help the register allocator. */
2961 expand_copysign_absneg (enum machine_mode mode, rtx op0, rtx op1, rtx target,
2962 int bitpos, bool op0_is_abs)
2964 enum machine_mode imode;
2965 HOST_WIDE_INT hi, lo;
2974 op0 = expand_unop (mode, abs_optab, op0, target, 0);
2981 if (target == NULL_RTX)
2982 target = copy_to_reg (op0);
2984 emit_move_insn (target, op0);
2987 if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
2989 imode = int_mode_for_mode (mode);
2990 if (imode == BLKmode)
2992 op1 = gen_lowpart (imode, op1);
2997 if (FLOAT_WORDS_BIG_ENDIAN)
2998 word = (GET_MODE_BITSIZE (mode) - bitpos) / BITS_PER_WORD;
3000 word = bitpos / BITS_PER_WORD;
3001 bitpos = bitpos % BITS_PER_WORD;
3002 op1 = operand_subword_force (op1, word, mode);
3005 if (bitpos < HOST_BITS_PER_WIDE_INT)
3008 lo = (HOST_WIDE_INT) 1 << bitpos;
3012 hi = (HOST_WIDE_INT) 1 << (bitpos - HOST_BITS_PER_WIDE_INT);
3016 op1 = expand_binop (imode, and_optab, op1,
3017 immed_double_const (lo, hi, imode),
3018 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3020 label = gen_label_rtx ();
3021 emit_cmp_and_jump_insns (op1, const0_rtx, EQ, NULL_RTX, imode, 1, label);
3023 if (GET_CODE (op0) == CONST_DOUBLE)
3024 op0 = simplify_unary_operation (NEG, mode, op0, mode);
3026 op0 = expand_unop (mode, neg_optab, op0, target, 0);
3028 emit_move_insn (target, op0);
3036 /* A subroutine of expand_copysign, perform the entire copysign operation
3037 with integer bitmasks. BITPOS is the position of the sign bit; OP0_IS_ABS
3038 is true if op0 is known to have its sign bit clear. */
3041 expand_copysign_bit (enum machine_mode mode, rtx op0, rtx op1, rtx target,
3042 int bitpos, bool op0_is_abs)
3044 enum machine_mode imode;
3045 HOST_WIDE_INT hi, lo;
3046 int word, nwords, i;
3049 if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
3051 imode = int_mode_for_mode (mode);
3052 if (imode == BLKmode)
3061 if (FLOAT_WORDS_BIG_ENDIAN)
3062 word = (GET_MODE_BITSIZE (mode) - bitpos) / BITS_PER_WORD;
3064 word = bitpos / BITS_PER_WORD;
3065 bitpos = bitpos % BITS_PER_WORD;
3066 nwords = (GET_MODE_BITSIZE (mode) + BITS_PER_WORD - 1) / BITS_PER_WORD;
3069 if (bitpos < HOST_BITS_PER_WIDE_INT)
3072 lo = (HOST_WIDE_INT) 1 << bitpos;
3076 hi = (HOST_WIDE_INT) 1 << (bitpos - HOST_BITS_PER_WIDE_INT);
3080 if (target == 0 || target == op0 || target == op1)
3081 target = gen_reg_rtx (mode);
3087 for (i = 0; i < nwords; ++i)
3089 rtx targ_piece = operand_subword (target, i, 1, mode);
3090 rtx op0_piece = operand_subword_force (op0, i, mode);
3095 op0_piece = expand_binop (imode, and_optab, op0_piece,
3096 immed_double_const (~lo, ~hi, imode),
3097 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3099 op1 = expand_binop (imode, and_optab,
3100 operand_subword_force (op1, i, mode),
3101 immed_double_const (lo, hi, imode),
3102 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3104 temp = expand_binop (imode, ior_optab, op0_piece, op1,
3105 targ_piece, 1, OPTAB_LIB_WIDEN);
3106 if (temp != targ_piece)
3107 emit_move_insn (targ_piece, temp);
3110 emit_move_insn (targ_piece, op0_piece);
3113 insns = get_insns ();
3116 emit_no_conflict_block (insns, target, op0, op1, NULL_RTX);
3120 op1 = expand_binop (imode, and_optab, gen_lowpart (imode, op1),
3121 immed_double_const (lo, hi, imode),
3122 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3124 op0 = gen_lowpart (imode, op0);
3126 op0 = expand_binop (imode, and_optab, op0,
3127 immed_double_const (~lo, ~hi, imode),
3128 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3130 temp = expand_binop (imode, ior_optab, op0, op1,
3131 gen_lowpart (imode, target), 1, OPTAB_LIB_WIDEN);
3132 target = lowpart_subreg_maybe_copy (mode, temp, imode);
3138 /* Expand the C99 copysign operation. OP0 and OP1 must be the same
3139 scalar floating point mode. Return NULL if we do not know how to
3140 expand the operation inline. */
3143 expand_copysign (rtx op0, rtx op1, rtx target)
3145 enum machine_mode mode = GET_MODE (op0);
3146 const struct real_format *fmt;
3150 gcc_assert (SCALAR_FLOAT_MODE_P (mode));
3151 gcc_assert (GET_MODE (op1) == mode);
3153 /* First try to do it with a special instruction. */
3154 temp = expand_binop (mode, copysign_optab, op0, op1,
3155 target, 0, OPTAB_DIRECT);
3159 fmt = REAL_MODE_FORMAT (mode);
3160 if (fmt == NULL || !fmt->has_signed_zero)
3164 if (GET_CODE (op0) == CONST_DOUBLE)
3166 if (real_isneg (CONST_DOUBLE_REAL_VALUE (op0)))
3167 op0 = simplify_unary_operation (ABS, mode, op0, mode);
3171 if (fmt->signbit_ro >= 0
3172 && (GET_CODE (op0) == CONST_DOUBLE
3173 || (neg_optab->handlers[mode].insn_code != CODE_FOR_nothing
3174 && abs_optab->handlers[mode].insn_code != CODE_FOR_nothing)))
3176 temp = expand_copysign_absneg (mode, op0, op1, target,
3177 fmt->signbit_ro, op0_is_abs);
3182 if (fmt->signbit_rw < 0)
3184 return expand_copysign_bit (mode, op0, op1, target,
3185 fmt->signbit_rw, op0_is_abs);
3188 /* Generate an instruction whose insn-code is INSN_CODE,
3189 with two operands: an output TARGET and an input OP0.
3190 TARGET *must* be nonzero, and the output is always stored there.
3191 CODE is an rtx code such that (CODE OP0) is an rtx that describes
3192 the value that is stored into TARGET. */
3195 emit_unop_insn (int icode, rtx target, rtx op0, enum rtx_code code)
3198 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
3203 /* Now, if insn does not accept our operands, put them into pseudos. */
3205 if (!insn_data[icode].operand[1].predicate (op0, mode0))
3206 op0 = copy_to_mode_reg (mode0, op0);
3208 if (!insn_data[icode].operand[0].predicate (temp, GET_MODE (temp)))
3209 temp = gen_reg_rtx (GET_MODE (temp));
3211 pat = GEN_FCN (icode) (temp, op0);
3213 if (INSN_P (pat) && NEXT_INSN (pat) != NULL_RTX && code != UNKNOWN)
3214 add_equal_note (pat, temp, code, op0, NULL_RTX);
3219 emit_move_insn (target, temp);
3222 struct no_conflict_data
3224 rtx target, first, insn;
3228 /* Called via note_stores by emit_no_conflict_block and emit_libcall_block.
3229 Set P->must_stay if the currently examined clobber / store has to stay
3230 in the list of insns that constitute the actual no_conflict block /
3233 no_conflict_move_test (rtx dest, rtx set, void *p0)
3235 struct no_conflict_data *p= p0;
3237 /* If this inns directly contributes to setting the target, it must stay. */
3238 if (reg_overlap_mentioned_p (p->target, dest))
3239 p->must_stay = true;
3240 /* If we haven't committed to keeping any other insns in the list yet,
3241 there is nothing more to check. */
3242 else if (p->insn == p->first)
3244 /* If this insn sets / clobbers a register that feeds one of the insns
3245 already in the list, this insn has to stay too. */
3246 else if (reg_overlap_mentioned_p (dest, PATTERN (p->first))
3247 || (CALL_P (p->first) && (find_reg_fusage (p->first, USE, dest)))
3248 || reg_used_between_p (dest, p->first, p->insn)
3249 /* Likewise if this insn depends on a register set by a previous
3250 insn in the list, or if it sets a result (presumably a hard
3251 register) that is set or clobbered by a previous insn.
3252 N.B. the modified_*_p (SET_DEST...) tests applied to a MEM
3253 SET_DEST perform the former check on the address, and the latter
3254 check on the MEM. */
3255 || (GET_CODE (set) == SET
3256 && (modified_in_p (SET_SRC (set), p->first)
3257 || modified_in_p (SET_DEST (set), p->first)
3258 || modified_between_p (SET_SRC (set), p->first, p->insn)
3259 || modified_between_p (SET_DEST (set), p->first, p->insn))))
3260 p->must_stay = true;
3263 /* Encapsulate the block starting at FIRST and ending with LAST, which is
3264 logically equivalent to EQUIV, so it gets manipulated as a unit if it
3265 is possible to do so. */
3268 maybe_encapsulate_block (rtx first, rtx last, rtx equiv)
3270 if (!flag_non_call_exceptions || !may_trap_p (equiv))
3272 /* We can't attach the REG_LIBCALL and REG_RETVAL notes when the
3273 encapsulated region would not be in one basic block, i.e. when
3274 there is a control_flow_insn_p insn between FIRST and LAST. */
3275 bool attach_libcall_retval_notes = true;
3276 rtx insn, next = NEXT_INSN (last);
3278 for (insn = first; insn != next; insn = NEXT_INSN (insn))
3279 if (control_flow_insn_p (insn))
3281 attach_libcall_retval_notes = false;
3285 if (attach_libcall_retval_notes)
3287 REG_NOTES (first) = gen_rtx_INSN_LIST (REG_LIBCALL, last,
3289 REG_NOTES (last) = gen_rtx_INSN_LIST (REG_RETVAL, first,
3295 /* Emit code to perform a series of operations on a multi-word quantity, one
3298 Such a block is preceded by a CLOBBER of the output, consists of multiple
3299 insns, each setting one word of the output, and followed by a SET copying
3300 the output to itself.
3302 Each of the insns setting words of the output receives a REG_NO_CONFLICT
3303 note indicating that it doesn't conflict with the (also multi-word)
3304 inputs. The entire block is surrounded by REG_LIBCALL and REG_RETVAL
3307 INSNS is a block of code generated to perform the operation, not including
3308 the CLOBBER and final copy. All insns that compute intermediate values
3309 are first emitted, followed by the block as described above.
3311 TARGET, OP0, and OP1 are the output and inputs of the operations,
3312 respectively. OP1 may be zero for a unary operation.
3314 EQUIV, if nonzero, is an expression to be placed into a REG_EQUAL note
3317 If TARGET is not a register, INSNS is simply emitted with no special
3318 processing. Likewise if anything in INSNS is not an INSN or if
3319 there is a libcall block inside INSNS.
3321 The final insn emitted is returned. */
3324 emit_no_conflict_block (rtx insns, rtx target, rtx op0, rtx op1, rtx equiv)
3326 rtx prev, next, first, last, insn;
3328 if (!REG_P (target) || reload_in_progress)
3329 return emit_insn (insns);
3331 for (insn = insns; insn; insn = NEXT_INSN (insn))
3332 if (!NONJUMP_INSN_P (insn)
3333 || find_reg_note (insn, REG_LIBCALL, NULL_RTX))
3334 return emit_insn (insns);
3336 /* First emit all insns that do not store into words of the output and remove
3337 these from the list. */
3338 for (insn = insns; insn; insn = next)
3341 struct no_conflict_data data;
3343 next = NEXT_INSN (insn);
3345 /* Some ports (cris) create a libcall regions at their own. We must
3346 avoid any potential nesting of LIBCALLs. */
3347 if ((note = find_reg_note (insn, REG_LIBCALL, NULL)) != NULL)
3348 remove_note (insn, note);
3349 if ((note = find_reg_note (insn, REG_RETVAL, NULL)) != NULL)
3350 remove_note (insn, note);
3352 data.target = target;
3356 note_stores (PATTERN (insn), no_conflict_move_test, &data);
3357 if (! data.must_stay)
3359 if (PREV_INSN (insn))
3360 NEXT_INSN (PREV_INSN (insn)) = next;
3365 PREV_INSN (next) = PREV_INSN (insn);
3371 prev = get_last_insn ();
3373 /* Now write the CLOBBER of the output, followed by the setting of each
3374 of the words, followed by the final copy. */
3375 if (target != op0 && target != op1)
3376 emit_insn (gen_rtx_CLOBBER (VOIDmode, target));
3378 for (insn = insns; insn; insn = next)
3380 next = NEXT_INSN (insn);
3383 if (op1 && REG_P (op1))
3384 REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_NO_CONFLICT, op1,
3387 if (op0 && REG_P (op0))
3388 REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_NO_CONFLICT, op0,
3392 if (mov_optab->handlers[(int) GET_MODE (target)].insn_code
3393 != CODE_FOR_nothing)
3395 last = emit_move_insn (target, target);
3397 set_unique_reg_note (last, REG_EQUAL, equiv);
3401 last = get_last_insn ();
3403 /* Remove any existing REG_EQUAL note from "last", or else it will
3404 be mistaken for a note referring to the full contents of the
3405 alleged libcall value when found together with the REG_RETVAL
3406 note added below. An existing note can come from an insn
3407 expansion at "last". */
3408 remove_note (last, find_reg_note (last, REG_EQUAL, NULL_RTX));
3412 first = get_insns ();
3414 first = NEXT_INSN (prev);
3416 maybe_encapsulate_block (first, last, equiv);
3421 /* Emit code to make a call to a constant function or a library call.
3423 INSNS is a list containing all insns emitted in the call.
3424 These insns leave the result in RESULT. Our block is to copy RESULT
3425 to TARGET, which is logically equivalent to EQUIV.
3427 We first emit any insns that set a pseudo on the assumption that these are
3428 loading constants into registers; doing so allows them to be safely cse'ed
3429 between blocks. Then we emit all the other insns in the block, followed by
3430 an insn to move RESULT to TARGET. This last insn will have a REQ_EQUAL
3431 note with an operand of EQUIV.
3433 Moving assignments to pseudos outside of the block is done to improve
3434 the generated code, but is not required to generate correct code,
3435 hence being unable to move an assignment is not grounds for not making
3436 a libcall block. There are two reasons why it is safe to leave these
3437 insns inside the block: First, we know that these pseudos cannot be
3438 used in generated RTL outside the block since they are created for
3439 temporary purposes within the block. Second, CSE will not record the
3440 values of anything set inside a libcall block, so we know they must
3441 be dead at the end of the block.
3443 Except for the first group of insns (the ones setting pseudos), the
3444 block is delimited by REG_RETVAL and REG_LIBCALL notes. */
3447 emit_libcall_block (rtx insns, rtx target, rtx result, rtx equiv)
3449 rtx final_dest = target;
3450 rtx prev, next, first, last, insn;
3452 /* If this is a reg with REG_USERVAR_P set, then it could possibly turn
3453 into a MEM later. Protect the libcall block from this change. */
3454 if (! REG_P (target) || REG_USERVAR_P (target))
3455 target = gen_reg_rtx (GET_MODE (target));
3457 /* If we're using non-call exceptions, a libcall corresponding to an
3458 operation that may trap may also trap. */
3459 if (flag_non_call_exceptions && may_trap_p (equiv))
3461 for (insn = insns; insn; insn = NEXT_INSN (insn))
3464 rtx note = find_reg_note (insn, REG_EH_REGION, NULL_RTX);
3466 if (note != 0 && INTVAL (XEXP (note, 0)) <= 0)
3467 remove_note (insn, note);
3471 /* look for any CALL_INSNs in this sequence, and attach a REG_EH_REGION
3472 reg note to indicate that this call cannot throw or execute a nonlocal
3473 goto (unless there is already a REG_EH_REGION note, in which case
3475 for (insn = insns; insn; insn = NEXT_INSN (insn))
3478 rtx note = find_reg_note (insn, REG_EH_REGION, NULL_RTX);
3481 XEXP (note, 0) = constm1_rtx;
3483 REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_EH_REGION, constm1_rtx,
3487 /* First emit all insns that set pseudos. Remove them from the list as
3488 we go. Avoid insns that set pseudos which were referenced in previous
3489 insns. These can be generated by move_by_pieces, for example,
3490 to update an address. Similarly, avoid insns that reference things
3491 set in previous insns. */
3493 for (insn = insns; insn; insn = next)
3495 rtx set = single_set (insn);
3498 /* Some ports (cris) create a libcall regions at their own. We must
3499 avoid any potential nesting of LIBCALLs. */
3500 if ((note = find_reg_note (insn, REG_LIBCALL, NULL)) != NULL)
3501 remove_note (insn, note);
3502 if ((note = find_reg_note (insn, REG_RETVAL, NULL)) != NULL)
3503 remove_note (insn, note);
3505 next = NEXT_INSN (insn);
3507 if (set != 0 && REG_P (SET_DEST (set))
3508 && REGNO (SET_DEST (set)) >= FIRST_PSEUDO_REGISTER)
3510 struct no_conflict_data data;
3512 data.target = const0_rtx;
3516 note_stores (PATTERN (insn), no_conflict_move_test, &data);
3517 if (! data.must_stay)
3519 if (PREV_INSN (insn))
3520 NEXT_INSN (PREV_INSN (insn)) = next;
3525 PREV_INSN (next) = PREV_INSN (insn);
3531 /* Some ports use a loop to copy large arguments onto the stack.
3532 Don't move anything outside such a loop. */
3537 prev = get_last_insn ();
3539 /* Write the remaining insns followed by the final copy. */
3541 for (insn = insns; insn; insn = next)
3543 next = NEXT_INSN (insn);
3548 last = emit_move_insn (target, result);
3549 if (mov_optab->handlers[(int) GET_MODE (target)].insn_code
3550 != CODE_FOR_nothing)
3551 set_unique_reg_note (last, REG_EQUAL, copy_rtx (equiv));
3554 /* Remove any existing REG_EQUAL note from "last", or else it will
3555 be mistaken for a note referring to the full contents of the
3556 libcall value when found together with the REG_RETVAL note added
3557 below. An existing note can come from an insn expansion at
3559 remove_note (last, find_reg_note (last, REG_EQUAL, NULL_RTX));
3562 if (final_dest != target)
3563 emit_move_insn (final_dest, target);
3566 first = get_insns ();
3568 first = NEXT_INSN (prev);
3570 maybe_encapsulate_block (first, last, equiv);
3573 /* Nonzero if we can perform a comparison of mode MODE straightforwardly.
3574 PURPOSE describes how this comparison will be used. CODE is the rtx
3575 comparison code we will be using.
3577 ??? Actually, CODE is slightly weaker than that. A target is still
3578 required to implement all of the normal bcc operations, but not
3579 required to implement all (or any) of the unordered bcc operations. */
3582 can_compare_p (enum rtx_code code, enum machine_mode mode,
3583 enum can_compare_purpose purpose)
3587 if (cmp_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
3589 if (purpose == ccp_jump)
3590 return bcc_gen_fctn[(int) code] != NULL;
3591 else if (purpose == ccp_store_flag)
3592 return setcc_gen_code[(int) code] != CODE_FOR_nothing;
3594 /* There's only one cmov entry point, and it's allowed to fail. */
3597 if (purpose == ccp_jump
3598 && cbranch_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
3600 if (purpose == ccp_cmov
3601 && cmov_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
3603 if (purpose == ccp_store_flag
3604 && cstore_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
3606 mode = GET_MODE_WIDER_MODE (mode);
3608 while (mode != VOIDmode);
3613 /* This function is called when we are going to emit a compare instruction that
3614 compares the values found in *PX and *PY, using the rtl operator COMPARISON.
3616 *PMODE is the mode of the inputs (in case they are const_int).
3617 *PUNSIGNEDP nonzero says that the operands are unsigned;
3618 this matters if they need to be widened.
3620 If they have mode BLKmode, then SIZE specifies the size of both operands.
3622 This function performs all the setup necessary so that the caller only has
3623 to emit a single comparison insn. This setup can involve doing a BLKmode
3624 comparison or emitting a library call to perform the comparison if no insn
3625 is available to handle it.
3626 The values which are passed in through pointers can be modified; the caller
3627 should perform the comparison on the modified values. Constant
3628 comparisons must have already been folded. */
3631 prepare_cmp_insn (rtx *px, rtx *py, enum rtx_code *pcomparison, rtx size,
3632 enum machine_mode *pmode, int *punsignedp,
3633 enum can_compare_purpose purpose)
3635 enum machine_mode mode = *pmode;
3636 rtx x = *px, y = *py;
3637 int unsignedp = *punsignedp;
3639 /* If we are inside an appropriately-short loop and we are optimizing,
3640 force expensive constants into a register. */
3641 if (CONSTANT_P (x) && optimize
3642 && rtx_cost (x, COMPARE) > COSTS_N_INSNS (1))
3643 x = force_reg (mode, x);
3645 if (CONSTANT_P (y) && optimize
3646 && rtx_cost (y, COMPARE) > COSTS_N_INSNS (1))
3647 y = force_reg (mode, y);
3650 /* Make sure if we have a canonical comparison. The RTL
3651 documentation states that canonical comparisons are required only
3652 for targets which have cc0. */
3653 gcc_assert (!CONSTANT_P (x) || CONSTANT_P (y));
3656 /* Don't let both operands fail to indicate the mode. */
3657 if (GET_MODE (x) == VOIDmode && GET_MODE (y) == VOIDmode)
3658 x = force_reg (mode, x);
3660 /* Handle all BLKmode compares. */
3662 if (mode == BLKmode)
3664 enum machine_mode cmp_mode, result_mode;
3665 enum insn_code cmp_code;
3670 = GEN_INT (MIN (MEM_ALIGN (x), MEM_ALIGN (y)) / BITS_PER_UNIT);
3674 /* Try to use a memory block compare insn - either cmpstr
3675 or cmpmem will do. */
3676 for (cmp_mode = GET_CLASS_NARROWEST_MODE (MODE_INT);
3677 cmp_mode != VOIDmode;
3678 cmp_mode = GET_MODE_WIDER_MODE (cmp_mode))
3680 cmp_code = cmpmem_optab[cmp_mode];
3681 if (cmp_code == CODE_FOR_nothing)
3682 cmp_code = cmpstr_optab[cmp_mode];
3683 if (cmp_code == CODE_FOR_nothing)
3684 cmp_code = cmpstrn_optab[cmp_mode];
3685 if (cmp_code == CODE_FOR_nothing)
3688 /* Must make sure the size fits the insn's mode. */
3689 if ((GET_CODE (size) == CONST_INT
3690 && INTVAL (size) >= (1 << GET_MODE_BITSIZE (cmp_mode)))
3691 || (GET_MODE_BITSIZE (GET_MODE (size))
3692 > GET_MODE_BITSIZE (cmp_mode)))
3695 result_mode = insn_data[cmp_code].operand[0].mode;
3696 result = gen_reg_rtx (result_mode);
3697 size = convert_to_mode (cmp_mode, size, 1);
3698 emit_insn (GEN_FCN (cmp_code) (result, x, y, size, opalign));
3702 *pmode = result_mode;
3706 /* Otherwise call a library function, memcmp. */
3707 libfunc = memcmp_libfunc;
3708 length_type = sizetype;
3709 result_mode = TYPE_MODE (integer_type_node);
3710 cmp_mode = TYPE_MODE (length_type);
3711 size = convert_to_mode (TYPE_MODE (length_type), size,
3712 TYPE_UNSIGNED (length_type));
3714 result = emit_library_call_value (libfunc, 0, LCT_PURE_MAKE_BLOCK,
3721 *pmode = result_mode;
3725 /* Don't allow operands to the compare to trap, as that can put the
3726 compare and branch in different basic blocks. */
3727 if (flag_non_call_exceptions)
3730 x = force_reg (mode, x);
3732 y = force_reg (mode, y);
3737 if (can_compare_p (*pcomparison, mode, purpose))
3740 /* Handle a lib call just for the mode we are using. */
3742 if (cmp_optab->handlers[(int) mode].libfunc && !SCALAR_FLOAT_MODE_P (mode))
3744 rtx libfunc = cmp_optab->handlers[(int) mode].libfunc;
3747 /* If we want unsigned, and this mode has a distinct unsigned
3748 comparison routine, use that. */
3749 if (unsignedp && ucmp_optab->handlers[(int) mode].libfunc)
3750 libfunc = ucmp_optab->handlers[(int) mode].libfunc;
3752 result = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST_MAKE_BLOCK,
3753 word_mode, 2, x, mode, y, mode);
3755 /* There are two kinds of comparison routines. Biased routines
3756 return 0/1/2, and unbiased routines return -1/0/1. Other parts
3757 of gcc expect that the comparison operation is equivalent
3758 to the modified comparison. For signed comparisons compare the
3759 result against 1 in the biased case, and zero in the unbiased
3760 case. For unsigned comparisons always compare against 1 after
3761 biasing the unbiased result by adding 1. This gives us a way to
3767 if (!TARGET_LIB_INT_CMP_BIASED)
3770 *px = plus_constant (result, 1);
3777 gcc_assert (SCALAR_FLOAT_MODE_P (mode));
3778 prepare_float_lib_cmp (px, py, pcomparison, pmode, punsignedp);
3781 /* Before emitting an insn with code ICODE, make sure that X, which is going
3782 to be used for operand OPNUM of the insn, is converted from mode MODE to
3783 WIDER_MODE (UNSIGNEDP determines whether it is an unsigned conversion), and
3784 that it is accepted by the operand predicate. Return the new value. */
3787 prepare_operand (int icode, rtx x, int opnum, enum machine_mode mode,
3788 enum machine_mode wider_mode, int unsignedp)
3790 if (mode != wider_mode)
3791 x = convert_modes (wider_mode, mode, x, unsignedp);
3793 if (!insn_data[icode].operand[opnum].predicate
3794 (x, insn_data[icode].operand[opnum].mode))
3798 x = copy_to_mode_reg (insn_data[icode].operand[opnum].mode, x);
3804 /* Subroutine of emit_cmp_and_jump_insns; this function is called when we know
3805 we can do the comparison.
3806 The arguments are the same as for emit_cmp_and_jump_insns; but LABEL may
3807 be NULL_RTX which indicates that only a comparison is to be generated. */
3810 emit_cmp_and_jump_insn_1 (rtx x, rtx y, enum machine_mode mode,
3811 enum rtx_code comparison, int unsignedp, rtx label)
3813 rtx test = gen_rtx_fmt_ee (comparison, mode, x, y);
3814 enum mode_class class = GET_MODE_CLASS (mode);
3815 enum machine_mode wider_mode = mode;
3817 /* Try combined insns first. */
3820 enum insn_code icode;
3821 PUT_MODE (test, wider_mode);
3825 icode = cbranch_optab->handlers[(int) wider_mode].insn_code;
3827 if (icode != CODE_FOR_nothing
3828 && insn_data[icode].operand[0].predicate (test, wider_mode))
3830 x = prepare_operand (icode, x, 1, mode, wider_mode, unsignedp);
3831 y = prepare_operand (icode, y, 2, mode, wider_mode, unsignedp);
3832 emit_jump_insn (GEN_FCN (icode) (test, x, y, label));
3837 /* Handle some compares against zero. */
3838 icode = (int) tst_optab->handlers[(int) wider_mode].insn_code;
3839 if (y == CONST0_RTX (mode) && icode != CODE_FOR_nothing)
3841 x = prepare_operand (icode, x, 0, mode, wider_mode, unsignedp);
3842 emit_insn (GEN_FCN (icode) (x));
3844 emit_jump_insn (bcc_gen_fctn[(int) comparison] (label));
3848 /* Handle compares for which there is a directly suitable insn. */
3850 icode = (int) cmp_optab->handlers[(int) wider_mode].insn_code;
3851 if (icode != CODE_FOR_nothing)
3853 x = prepare_operand (icode, x, 0, mode, wider_mode, unsignedp);
3854 y = prepare_operand (icode, y, 1, mode, wider_mode, unsignedp);
3855 emit_insn (GEN_FCN (icode) (x, y));
3857 emit_jump_insn (bcc_gen_fctn[(int) comparison] (label));
3861 if (!CLASS_HAS_WIDER_MODES_P (class))
3864 wider_mode = GET_MODE_WIDER_MODE (wider_mode);
3866 while (wider_mode != VOIDmode);
3871 /* Generate code to compare X with Y so that the condition codes are
3872 set and to jump to LABEL if the condition is true. If X is a
3873 constant and Y is not a constant, then the comparison is swapped to
3874 ensure that the comparison RTL has the canonical form.
3876 UNSIGNEDP nonzero says that X and Y are unsigned; this matters if they
3877 need to be widened by emit_cmp_insn. UNSIGNEDP is also used to select
3878 the proper branch condition code.
3880 If X and Y have mode BLKmode, then SIZE specifies the size of both X and Y.
3882 MODE is the mode of the inputs (in case they are const_int).
3884 COMPARISON is the rtl operator to compare with (EQ, NE, GT, etc.). It will
3885 be passed unchanged to emit_cmp_insn, then potentially converted into an
3886 unsigned variant based on UNSIGNEDP to select a proper jump instruction. */
3889 emit_cmp_and_jump_insns (rtx x, rtx y, enum rtx_code comparison, rtx size,
3890 enum machine_mode mode, int unsignedp, rtx label)
3892 rtx op0 = x, op1 = y;
3894 /* Swap operands and condition to ensure canonical RTL. */
3895 if (swap_commutative_operands_p (x, y))
3897 /* If we're not emitting a branch, this means some caller
3902 comparison = swap_condition (comparison);
3906 /* If OP0 is still a constant, then both X and Y must be constants.
3907 Force X into a register to create canonical RTL. */
3908 if (CONSTANT_P (op0))
3909 op0 = force_reg (mode, op0);
3913 comparison = unsigned_condition (comparison);
3915 prepare_cmp_insn (&op0, &op1, &comparison, size, &mode, &unsignedp,
3917 emit_cmp_and_jump_insn_1 (op0, op1, mode, comparison, unsignedp, label);
3920 /* Like emit_cmp_and_jump_insns, but generate only the comparison. */
3923 emit_cmp_insn (rtx x, rtx y, enum rtx_code comparison, rtx size,
3924 enum machine_mode mode, int unsignedp)
3926 emit_cmp_and_jump_insns (x, y, comparison, size, mode, unsignedp, 0);
3929 /* Emit a library call comparison between floating point X and Y.
3930 COMPARISON is the rtl operator to compare with (EQ, NE, GT, etc.). */
3933 prepare_float_lib_cmp (rtx *px, rtx *py, enum rtx_code *pcomparison,
3934 enum machine_mode *pmode, int *punsignedp)
3936 enum rtx_code comparison = *pcomparison;
3937 enum rtx_code swapped = swap_condition (comparison);
3938 enum rtx_code reversed = reverse_condition_maybe_unordered (comparison);
3941 enum machine_mode orig_mode = GET_MODE (x);
3942 enum machine_mode mode;
3943 rtx value, target, insns, equiv;
3945 bool reversed_p = false;
3947 for (mode = orig_mode;
3949 mode = GET_MODE_WIDER_MODE (mode))
3951 if ((libfunc = code_to_optab[comparison]->handlers[mode].libfunc))
3954 if ((libfunc = code_to_optab[swapped]->handlers[mode].libfunc))
3957 tmp = x; x = y; y = tmp;
3958 comparison = swapped;
3962 if ((libfunc = code_to_optab[reversed]->handlers[mode].libfunc)
3963 && FLOAT_LIB_COMPARE_RETURNS_BOOL (mode, reversed))
3965 comparison = reversed;
3971 gcc_assert (mode != VOIDmode);
3973 if (mode != orig_mode)
3975 x = convert_to_mode (mode, x, 0);
3976 y = convert_to_mode (mode, y, 0);
3979 /* Attach a REG_EQUAL note describing the semantics of the libcall to
3980 the RTL. The allows the RTL optimizers to delete the libcall if the
3981 condition can be determined at compile-time. */
3982 if (comparison == UNORDERED)
3984 rtx temp = simplify_gen_relational (NE, word_mode, mode, x, x);
3985 equiv = simplify_gen_relational (NE, word_mode, mode, y, y);
3986 equiv = simplify_gen_ternary (IF_THEN_ELSE, word_mode, word_mode,
3987 temp, const_true_rtx, equiv);
3991 equiv = simplify_gen_relational (comparison, word_mode, mode, x, y);
3992 if (! FLOAT_LIB_COMPARE_RETURNS_BOOL (mode, comparison))
3994 rtx true_rtx, false_rtx;
3999 true_rtx = const0_rtx;
4000 false_rtx = const_true_rtx;
4004 true_rtx = const_true_rtx;
4005 false_rtx = const0_rtx;
4009 true_rtx = const1_rtx;
4010 false_rtx = const0_rtx;
4014 true_rtx = const0_rtx;
4015 false_rtx = constm1_rtx;
4019 true_rtx = constm1_rtx;
4020 false_rtx = const0_rtx;
4024 true_rtx = const0_rtx;
4025 false_rtx = const1_rtx;
4031 equiv = simplify_gen_ternary (IF_THEN_ELSE, word_mode, word_mode,
4032 equiv, true_rtx, false_rtx);
4037 value = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
4038 word_mode, 2, x, mode, y, mode);
4039 insns = get_insns ();
4042 target = gen_reg_rtx (word_mode);
4043 emit_libcall_block (insns, target, value, equiv);
4045 if (comparison == UNORDERED
4046 || FLOAT_LIB_COMPARE_RETURNS_BOOL (mode, comparison))
4047 comparison = reversed_p ? EQ : NE;
4052 *pcomparison = comparison;
4056 /* Generate code to indirectly jump to a location given in the rtx LOC. */
4059 emit_indirect_jump (rtx loc)
4061 if (!insn_data[(int) CODE_FOR_indirect_jump].operand[0].predicate
4063 loc = copy_to_mode_reg (Pmode, loc);
4065 emit_jump_insn (gen_indirect_jump (loc));
4069 #ifdef HAVE_conditional_move
4071 /* Emit a conditional move instruction if the machine supports one for that
4072 condition and machine mode.
4074 OP0 and OP1 are the operands that should be compared using CODE. CMODE is
4075 the mode to use should they be constants. If it is VOIDmode, they cannot
4078 OP2 should be stored in TARGET if the comparison is true, otherwise OP3
4079 should be stored there. MODE is the mode to use should they be constants.
4080 If it is VOIDmode, they cannot both be constants.
4082 The result is either TARGET (perhaps modified) or NULL_RTX if the operation
4083 is not supported. */
4086 emit_conditional_move (rtx target, enum rtx_code code, rtx op0, rtx op1,
4087 enum machine_mode cmode, rtx op2, rtx op3,
4088 enum machine_mode mode, int unsignedp)
4090 rtx tem, subtarget, comparison, insn;
4091 enum insn_code icode;
4092 enum rtx_code reversed;
4094 /* If one operand is constant, make it the second one. Only do this
4095 if the other operand is not constant as well. */
4097 if (swap_commutative_operands_p (op0, op1))
4102 code = swap_condition (code);
4105 /* get_condition will prefer to generate LT and GT even if the old
4106 comparison was against zero, so undo that canonicalization here since
4107 comparisons against zero are cheaper. */
4108 if (code == LT && op1 == const1_rtx)
4109 code = LE, op1 = const0_rtx;
4110 else if (code == GT && op1 == constm1_rtx)
4111 code = GE, op1 = const0_rtx;
4113 if (cmode == VOIDmode)
4114 cmode = GET_MODE (op0);
4116 if (swap_commutative_operands_p (op2, op3)
4117 && ((reversed = reversed_comparison_code_parts (code, op0, op1, NULL))
4126 if (mode == VOIDmode)
4127 mode = GET_MODE (op2);
4129 icode = movcc_gen_code[mode];
4131 if (icode == CODE_FOR_nothing)
4135 target = gen_reg_rtx (mode);
4139 /* If the insn doesn't accept these operands, put them in pseudos. */
4141 if (!insn_data[icode].operand[0].predicate
4142 (subtarget, insn_data[icode].operand[0].mode))
4143 subtarget = gen_reg_rtx (insn_data[icode].operand[0].mode);
4145 if (!insn_data[icode].operand[2].predicate
4146 (op2, insn_data[icode].operand[2].mode))
4147 op2 = copy_to_mode_reg (insn_data[icode].operand[2].mode, op2);
4149 if (!insn_data[icode].operand[3].predicate
4150 (op3, insn_data[icode].operand[3].mode))
4151 op3 = copy_to_mode_reg (insn_data[icode].operand[3].mode, op3);
4153 /* Everything should now be in the suitable form, so emit the compare insn
4154 and then the conditional move. */
4157 = compare_from_rtx (op0, op1, code, unsignedp, cmode, NULL_RTX);
4159 /* ??? Watch for const0_rtx (nop) and const_true_rtx (unconditional)? */
4160 /* We can get const0_rtx or const_true_rtx in some circumstances. Just
4161 return NULL and let the caller figure out how best to deal with this
4163 if (GET_CODE (comparison) != code)
4166 insn = GEN_FCN (icode) (subtarget, comparison, op2, op3);
4168 /* If that failed, then give up. */
4174 if (subtarget != target)
4175 convert_move (target, subtarget, 0);
4180 /* Return nonzero if a conditional move of mode MODE is supported.
4182 This function is for combine so it can tell whether an insn that looks
4183 like a conditional move is actually supported by the hardware. If we
4184 guess wrong we lose a bit on optimization, but that's it. */
4185 /* ??? sparc64 supports conditionally moving integers values based on fp
4186 comparisons, and vice versa. How do we handle them? */
4189 can_conditionally_move_p (enum machine_mode mode)
4191 if (movcc_gen_code[mode] != CODE_FOR_nothing)
4197 #endif /* HAVE_conditional_move */
4199 /* Emit a conditional addition instruction if the machine supports one for that
4200 condition and machine mode.
4202 OP0 and OP1 are the operands that should be compared using CODE. CMODE is
4203 the mode to use should they be constants. If it is VOIDmode, they cannot
4206 OP2 should be stored in TARGET if the comparison is true, otherwise OP2+OP3
4207 should be stored there. MODE is the mode to use should they be constants.
4208 If it is VOIDmode, they cannot both be constants.
4210 The result is either TARGET (perhaps modified) or NULL_RTX if the operation
4211 is not supported. */
4214 emit_conditional_add (rtx target, enum rtx_code code, rtx op0, rtx op1,
4215 enum machine_mode cmode, rtx op2, rtx op3,
4216 enum machine_mode mode, int unsignedp)
4218 rtx tem, subtarget, comparison, insn;
4219 enum insn_code icode;
4220 enum rtx_code reversed;
4222 /* If one operand is constant, make it the second one. Only do this
4223 if the other operand is not constant as well. */
4225 if (swap_commutative_operands_p (op0, op1))
4230 code = swap_condition (code);
4233 /* get_condition will prefer to generate LT and GT even if the old
4234 comparison was against zero, so undo that canonicalization here since
4235 comparisons against zero are cheaper. */
4236 if (code == LT && op1 == const1_rtx)
4237 code = LE, op1 = const0_rtx;
4238 else if (code == GT && op1 == constm1_rtx)
4239 code = GE, op1 = const0_rtx;
4241 if (cmode == VOIDmode)
4242 cmode = GET_MODE (op0);
4244 if (swap_commutative_operands_p (op2, op3)
4245 && ((reversed = reversed_comparison_code_parts (code, op0, op1, NULL))
4254 if (mode == VOIDmode)
4255 mode = GET_MODE (op2);
4257 icode = addcc_optab->handlers[(int) mode].insn_code;
4259 if (icode == CODE_FOR_nothing)
4263 target = gen_reg_rtx (mode);
4265 /* If the insn doesn't accept these operands, put them in pseudos. */
4267 if (!insn_data[icode].operand[0].predicate
4268 (target, insn_data[icode].operand[0].mode))
4269 subtarget = gen_reg_rtx (insn_data[icode].operand[0].mode);
4273 if (!insn_data[icode].operand[2].predicate
4274 (op2, insn_data[icode].operand[2].mode))
4275 op2 = copy_to_mode_reg (insn_data[icode].operand[2].mode, op2);
4277 if (!insn_data[icode].operand[3].predicate
4278 (op3, insn_data[icode].operand[3].mode))
4279 op3 = copy_to_mode_reg (insn_data[icode].operand[3].mode, op3);
4281 /* Everything should now be in the suitable form, so emit the compare insn
4282 and then the conditional move. */
4285 = compare_from_rtx (op0, op1, code, unsignedp, cmode, NULL_RTX);
4287 /* ??? Watch for const0_rtx (nop) and const_true_rtx (unconditional)? */
4288 /* We can get const0_rtx or const_true_rtx in some circumstances. Just
4289 return NULL and let the caller figure out how best to deal with this
4291 if (GET_CODE (comparison) != code)
4294 insn = GEN_FCN (icode) (subtarget, comparison, op2, op3);
4296 /* If that failed, then give up. */
4302 if (subtarget != target)
4303 convert_move (target, subtarget, 0);
4308 /* These functions attempt to generate an insn body, rather than
4309 emitting the insn, but if the gen function already emits them, we
4310 make no attempt to turn them back into naked patterns. */
4312 /* Generate and return an insn body to add Y to X. */
4315 gen_add2_insn (rtx x, rtx y)
4317 int icode = (int) add_optab->handlers[(int) GET_MODE (x)].insn_code;
4319 gcc_assert (insn_data[icode].operand[0].predicate
4320 (x, insn_data[icode].operand[0].mode));
4321 gcc_assert (insn_data[icode].operand[1].predicate
4322 (x, insn_data[icode].operand[1].mode));
4323 gcc_assert (insn_data[icode].operand[2].predicate
4324 (y, insn_data[icode].operand[2].mode));
4326 return GEN_FCN (icode) (x, x, y);
4329 /* Generate and return an insn body to add r1 and c,
4330 storing the result in r0. */
4332 gen_add3_insn (rtx r0, rtx r1, rtx c)
4334 int icode = (int) add_optab->handlers[(int) GET_MODE (r0)].insn_code;
4336 if (icode == CODE_FOR_nothing
4337 || !(insn_data[icode].operand[0].predicate
4338 (r0, insn_data[icode].operand[0].mode))
4339 || !(insn_data[icode].operand[1].predicate
4340 (r1, insn_data[icode].operand[1].mode))
4341 || !(insn_data[icode].operand[2].predicate
4342 (c, insn_data[icode].operand[2].mode)))
4345 return GEN_FCN (icode) (r0, r1, c);
4349 have_add2_insn (rtx x, rtx y)
4353 gcc_assert (GET_MODE (x) != VOIDmode);
4355 icode = (int) add_optab->handlers[(int) GET_MODE (x)].insn_code;
4357 if (icode == CODE_FOR_nothing)
4360 if (!(insn_data[icode].operand[0].predicate
4361 (x, insn_data[icode].operand[0].mode))
4362 || !(insn_data[icode].operand[1].predicate
4363 (x, insn_data[icode].operand[1].mode))
4364 || !(insn_data[icode].operand[2].predicate
4365 (y, insn_data[icode].operand[2].mode)))
4371 /* Generate and return an insn body to subtract Y from X. */
4374 gen_sub2_insn (rtx x, rtx y)
4376 int icode = (int) sub_optab->handlers[(int) GET_MODE (x)].insn_code;
4378 gcc_assert (insn_data[icode].operand[0].predicate
4379 (x, insn_data[icode].operand[0].mode));
4380 gcc_assert (insn_data[icode].operand[1].predicate
4381 (x, insn_data[icode].operand[1].mode));
4382 gcc_assert (insn_data[icode].operand[2].predicate
4383 (y, insn_data[icode].operand[2].mode));
4385 return GEN_FCN (icode) (x, x, y);
4388 /* Generate and return an insn body to subtract r1 and c,
4389 storing the result in r0. */
4391 gen_sub3_insn (rtx r0, rtx r1, rtx c)
4393 int icode = (int) sub_optab->handlers[(int) GET_MODE (r0)].insn_code;
4395 if (icode == CODE_FOR_nothing
4396 || !(insn_data[icode].operand[0].predicate
4397 (r0, insn_data[icode].operand[0].mode))
4398 || !(insn_data[icode].operand[1].predicate
4399 (r1, insn_data[icode].operand[1].mode))
4400 || !(insn_data[icode].operand[2].predicate
4401 (c, insn_data[icode].operand[2].mode)))
4404 return GEN_FCN (icode) (r0, r1, c);
4408 have_sub2_insn (rtx x, rtx y)
4412 gcc_assert (GET_MODE (x) != VOIDmode);
4414 icode = (int) sub_optab->handlers[(int) GET_MODE (x)].insn_code;
4416 if (icode == CODE_FOR_nothing)
4419 if (!(insn_data[icode].operand[0].predicate
4420 (x, insn_data[icode].operand[0].mode))
4421 || !(insn_data[icode].operand[1].predicate
4422 (x, insn_data[icode].operand[1].mode))
4423 || !(insn_data[icode].operand[2].predicate
4424 (y, insn_data[icode].operand[2].mode)))
4430 /* Generate the body of an instruction to copy Y into X.
4431 It may be a list of insns, if one insn isn't enough. */
4434 gen_move_insn (rtx x, rtx y)
4439 emit_move_insn_1 (x, y);
4445 /* Return the insn code used to extend FROM_MODE to TO_MODE.
4446 UNSIGNEDP specifies zero-extension instead of sign-extension. If
4447 no such operation exists, CODE_FOR_nothing will be returned. */
4450 can_extend_p (enum machine_mode to_mode, enum machine_mode from_mode,
4454 #ifdef HAVE_ptr_extend
4456 return CODE_FOR_ptr_extend;
4459 tab = unsignedp ? zext_optab : sext_optab;
4460 return tab->handlers[to_mode][from_mode].insn_code;
4463 /* Generate the body of an insn to extend Y (with mode MFROM)
4464 into X (with mode MTO). Do zero-extension if UNSIGNEDP is nonzero. */
4467 gen_extend_insn (rtx x, rtx y, enum machine_mode mto,
4468 enum machine_mode mfrom, int unsignedp)
4470 enum insn_code icode = can_extend_p (mto, mfrom, unsignedp);
4471 return GEN_FCN (icode) (x, y);
4474 /* can_fix_p and can_float_p say whether the target machine
4475 can directly convert a given fixed point type to
4476 a given floating point type, or vice versa.
4477 The returned value is the CODE_FOR_... value to use,
4478 or CODE_FOR_nothing if these modes cannot be directly converted.
4480 *TRUNCP_PTR is set to 1 if it is necessary to output
4481 an explicit FTRUNC insn before the fix insn; otherwise 0. */
4483 static enum insn_code
4484 can_fix_p (enum machine_mode fixmode, enum machine_mode fltmode,
4485 int unsignedp, int *truncp_ptr)
4488 enum insn_code icode;
4490 tab = unsignedp ? ufixtrunc_optab : sfixtrunc_optab;
4491 icode = tab->handlers[fixmode][fltmode].insn_code;
4492 if (icode != CODE_FOR_nothing)
4498 /* FIXME: This requires a port to define both FIX and FTRUNC pattern
4499 for this to work. We need to rework the fix* and ftrunc* patterns
4500 and documentation. */
4501 tab = unsignedp ? ufix_optab : sfix_optab;
4502 icode = tab->handlers[fixmode][fltmode].insn_code;
4503 if (icode != CODE_FOR_nothing
4504 && ftrunc_optab->handlers[fltmode].insn_code != CODE_FOR_nothing)
4511 return CODE_FOR_nothing;
4514 static enum insn_code
4515 can_float_p (enum machine_mode fltmode, enum machine_mode fixmode,
4520 tab = unsignedp ? ufloat_optab : sfloat_optab;
4521 return tab->handlers[fltmode][fixmode].insn_code;
4524 /* Generate code to convert FROM to floating point
4525 and store in TO. FROM must be fixed point and not VOIDmode.
4526 UNSIGNEDP nonzero means regard FROM as unsigned.
4527 Normally this is done by correcting the final value
4528 if it is negative. */
4531 expand_float (rtx to, rtx from, int unsignedp)
4533 enum insn_code icode;
4535 enum machine_mode fmode, imode;
4536 bool can_do_signed = false;
4538 /* Crash now, because we won't be able to decide which mode to use. */
4539 gcc_assert (GET_MODE (from) != VOIDmode);
4541 /* Look for an insn to do the conversion. Do it in the specified
4542 modes if possible; otherwise convert either input, output or both to
4543 wider mode. If the integer mode is wider than the mode of FROM,
4544 we can do the conversion signed even if the input is unsigned. */
4546 for (fmode = GET_MODE (to); fmode != VOIDmode;
4547 fmode = GET_MODE_WIDER_MODE (fmode))
4548 for (imode = GET_MODE (from); imode != VOIDmode;
4549 imode = GET_MODE_WIDER_MODE (imode))
4551 int doing_unsigned = unsignedp;
4553 if (fmode != GET_MODE (to)
4554 && significand_size (fmode) < GET_MODE_BITSIZE (GET_MODE (from)))
4557 icode = can_float_p (fmode, imode, unsignedp);
4558 if (icode == CODE_FOR_nothing && unsignedp)
4560 enum insn_code scode = can_float_p (fmode, imode, 0);
4561 if (scode != CODE_FOR_nothing)
4562 can_do_signed = true;
4563 if (imode != GET_MODE (from))
4564 icode = scode, doing_unsigned = 0;
4567 if (icode != CODE_FOR_nothing)
4569 if (imode != GET_MODE (from))
4570 from = convert_to_mode (imode, from, unsignedp);
4572 if (fmode != GET_MODE (to))
4573 target = gen_reg_rtx (fmode);
4575 emit_unop_insn (icode, target, from,
4576 doing_unsigned ? UNSIGNED_FLOAT : FLOAT);
4579 convert_move (to, target, 0);
4584 /* Unsigned integer, and no way to convert directly. For binary
4585 floating point modes, convert as signed, then conditionally adjust
4587 if (unsignedp && can_do_signed && !DECIMAL_FLOAT_MODE_P (GET_MODE (to)))
4589 rtx label = gen_label_rtx ();
4591 REAL_VALUE_TYPE offset;
4593 /* Look for a usable floating mode FMODE wider than the source and at
4594 least as wide as the target. Using FMODE will avoid rounding woes
4595 with unsigned values greater than the signed maximum value. */
4597 for (fmode = GET_MODE (to); fmode != VOIDmode;
4598 fmode = GET_MODE_WIDER_MODE (fmode))
4599 if (GET_MODE_BITSIZE (GET_MODE (from)) < GET_MODE_BITSIZE (fmode)
4600 && can_float_p (fmode, GET_MODE (from), 0) != CODE_FOR_nothing)
4603 if (fmode == VOIDmode)
4605 /* There is no such mode. Pretend the target is wide enough. */
4606 fmode = GET_MODE (to);
4608 /* Avoid double-rounding when TO is narrower than FROM. */
4609 if ((significand_size (fmode) + 1)
4610 < GET_MODE_BITSIZE (GET_MODE (from)))
4613 rtx neglabel = gen_label_rtx ();
4615 /* Don't use TARGET if it isn't a register, is a hard register,
4616 or is the wrong mode. */
4618 || REGNO (target) < FIRST_PSEUDO_REGISTER
4619 || GET_MODE (target) != fmode)
4620 target = gen_reg_rtx (fmode);
4622 imode = GET_MODE (from);
4623 do_pending_stack_adjust ();
4625 /* Test whether the sign bit is set. */
4626 emit_cmp_and_jump_insns (from, const0_rtx, LT, NULL_RTX, imode,
4629 /* The sign bit is not set. Convert as signed. */
4630 expand_float (target, from, 0);
4631 emit_jump_insn (gen_jump (label));
4634 /* The sign bit is set.
4635 Convert to a usable (positive signed) value by shifting right
4636 one bit, while remembering if a nonzero bit was shifted
4637 out; i.e., compute (from & 1) | (from >> 1). */
4639 emit_label (neglabel);
4640 temp = expand_binop (imode, and_optab, from, const1_rtx,
4641 NULL_RTX, 1, OPTAB_LIB_WIDEN);
4642 temp1 = expand_shift (RSHIFT_EXPR, imode, from, integer_one_node,
4644 temp = expand_binop (imode, ior_optab, temp, temp1, temp, 1,
4646 expand_float (target, temp, 0);
4648 /* Multiply by 2 to undo the shift above. */
4649 temp = expand_binop (fmode, add_optab, target, target,
4650 target, 0, OPTAB_LIB_WIDEN);
4652 emit_move_insn (target, temp);
4654 do_pending_stack_adjust ();
4660 /* If we are about to do some arithmetic to correct for an
4661 unsigned operand, do it in a pseudo-register. */
4663 if (GET_MODE (to) != fmode
4664 || !REG_P (to) || REGNO (to) < FIRST_PSEUDO_REGISTER)
4665 target = gen_reg_rtx (fmode);
4667 /* Convert as signed integer to floating. */
4668 expand_float (target, from, 0);
4670 /* If FROM is negative (and therefore TO is negative),
4671 correct its value by 2**bitwidth. */
4673 do_pending_stack_adjust ();
4674 emit_cmp_and_jump_insns (from, const0_rtx, GE, NULL_RTX, GET_MODE (from),
4678 real_2expN (&offset, GET_MODE_BITSIZE (GET_MODE (from)));
4679 temp = expand_binop (fmode, add_optab, target,
4680 CONST_DOUBLE_FROM_REAL_VALUE (offset, fmode),
4681 target, 0, OPTAB_LIB_WIDEN);
4683 emit_move_insn (target, temp);
4685 do_pending_stack_adjust ();
4690 /* No hardware instruction available; call a library routine. */
4695 convert_optab tab = unsignedp ? ufloat_optab : sfloat_optab;
4697 if (GET_MODE_SIZE (GET_MODE (from)) < GET_MODE_SIZE (SImode))
4698 from = convert_to_mode (SImode, from, unsignedp);
4700 libfunc = tab->handlers[GET_MODE (to)][GET_MODE (from)].libfunc;
4701 gcc_assert (libfunc);
4705 value = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
4706 GET_MODE (to), 1, from,
4708 insns = get_insns ();
4711 emit_libcall_block (insns, target, value,
4712 gen_rtx_FLOAT (GET_MODE (to), from));
4717 /* Copy result to requested destination
4718 if we have been computing in a temp location. */
4722 if (GET_MODE (target) == GET_MODE (to))
4723 emit_move_insn (to, target);
4725 convert_move (to, target, 0);
4729 /* Generate code to convert FROM to fixed point and store in TO. FROM
4730 must be floating point. */
4733 expand_fix (rtx to, rtx from, int unsignedp)
4735 enum insn_code icode;
4737 enum machine_mode fmode, imode;
4740 /* We first try to find a pair of modes, one real and one integer, at
4741 least as wide as FROM and TO, respectively, in which we can open-code
4742 this conversion. If the integer mode is wider than the mode of TO,
4743 we can do the conversion either signed or unsigned. */
4745 for (fmode = GET_MODE (from); fmode != VOIDmode;
4746 fmode = GET_MODE_WIDER_MODE (fmode))
4747 for (imode = GET_MODE (to); imode != VOIDmode;
4748 imode = GET_MODE_WIDER_MODE (imode))
4750 int doing_unsigned = unsignedp;
4752 icode = can_fix_p (imode, fmode, unsignedp, &must_trunc);
4753 if (icode == CODE_FOR_nothing && imode != GET_MODE (to) && unsignedp)
4754 icode = can_fix_p (imode, fmode, 0, &must_trunc), doing_unsigned = 0;
4756 if (icode != CODE_FOR_nothing)
4758 if (fmode != GET_MODE (from))
4759 from = convert_to_mode (fmode, from, 0);
4763 rtx temp = gen_reg_rtx (GET_MODE (from));
4764 from = expand_unop (GET_MODE (from), ftrunc_optab, from,
4768 if (imode != GET_MODE (to))
4769 target = gen_reg_rtx (imode);
4771 emit_unop_insn (icode, target, from,
4772 doing_unsigned ? UNSIGNED_FIX : FIX);
4774 convert_move (to, target, unsignedp);
4779 /* For an unsigned conversion, there is one more way to do it.
4780 If we have a signed conversion, we generate code that compares
4781 the real value to the largest representable positive number. If if
4782 is smaller, the conversion is done normally. Otherwise, subtract
4783 one plus the highest signed number, convert, and add it back.
4785 We only need to check all real modes, since we know we didn't find
4786 anything with a wider integer mode.
4788 This code used to extend FP value into mode wider than the destination.
4789 This is not needed. Consider, for instance conversion from SFmode
4792 The hot path through the code is dealing with inputs smaller than 2^63
4793 and doing just the conversion, so there is no bits to lose.
4795 In the other path we know the value is positive in the range 2^63..2^64-1
4796 inclusive. (as for other imput overflow happens and result is undefined)
4797 So we know that the most important bit set in mantissa corresponds to
4798 2^63. The subtraction of 2^63 should not generate any rounding as it
4799 simply clears out that bit. The rest is trivial. */
4801 if (unsignedp && GET_MODE_BITSIZE (GET_MODE (to)) <= HOST_BITS_PER_WIDE_INT)
4802 for (fmode = GET_MODE (from); fmode != VOIDmode;
4803 fmode = GET_MODE_WIDER_MODE (fmode))
4804 if (CODE_FOR_nothing != can_fix_p (GET_MODE (to), fmode, 0,
4808 REAL_VALUE_TYPE offset;
4809 rtx limit, lab1, lab2, insn;
4811 bitsize = GET_MODE_BITSIZE (GET_MODE (to));
4812 real_2expN (&offset, bitsize - 1);
4813 limit = CONST_DOUBLE_FROM_REAL_VALUE (offset, fmode);
4814 lab1 = gen_label_rtx ();
4815 lab2 = gen_label_rtx ();
4817 if (fmode != GET_MODE (from))
4818 from = convert_to_mode (fmode, from, 0);
4820 /* See if we need to do the subtraction. */
4821 do_pending_stack_adjust ();
4822 emit_cmp_and_jump_insns (from, limit, GE, NULL_RTX, GET_MODE (from),
4825 /* If not, do the signed "fix" and branch around fixup code. */
4826 expand_fix (to, from, 0);
4827 emit_jump_insn (gen_jump (lab2));
4830 /* Otherwise, subtract 2**(N-1), convert to signed number,
4831 then add 2**(N-1). Do the addition using XOR since this
4832 will often generate better code. */
4834 target = expand_binop (GET_MODE (from), sub_optab, from, limit,
4835 NULL_RTX, 0, OPTAB_LIB_WIDEN);
4836 expand_fix (to, target, 0);
4837 target = expand_binop (GET_MODE (to), xor_optab, to,
4839 ((HOST_WIDE_INT) 1 << (bitsize - 1),
4841 to, 1, OPTAB_LIB_WIDEN);
4844 emit_move_insn (to, target);
4848 if (mov_optab->handlers[(int) GET_MODE (to)].insn_code
4849 != CODE_FOR_nothing)
4851 /* Make a place for a REG_NOTE and add it. */
4852 insn = emit_move_insn (to, to);
4853 set_unique_reg_note (insn,
4855 gen_rtx_fmt_e (UNSIGNED_FIX,
4863 /* We can't do it with an insn, so use a library call. But first ensure
4864 that the mode of TO is at least as wide as SImode, since those are the
4865 only library calls we know about. */
4867 if (GET_MODE_SIZE (GET_MODE (to)) < GET_MODE_SIZE (SImode))
4869 target = gen_reg_rtx (SImode);
4871 expand_fix (target, from, unsignedp);
4879 convert_optab tab = unsignedp ? ufix_optab : sfix_optab;
4880 libfunc = tab->handlers[GET_MODE (to)][GET_MODE (from)].libfunc;
4881 gcc_assert (libfunc);
4885 value = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
4886 GET_MODE (to), 1, from,
4888 insns = get_insns ();
4891 emit_libcall_block (insns, target, value,
4892 gen_rtx_fmt_e (unsignedp ? UNSIGNED_FIX : FIX,
4893 GET_MODE (to), from));
4898 if (GET_MODE (to) == GET_MODE (target))
4899 emit_move_insn (to, target);
4901 convert_move (to, target, 0);
4905 /* Generate code to convert FROM to fixed point and store in TO. FROM
4906 must be floating point, TO must be signed. Use the conversion optab
4907 TAB to do the conversion. */
4910 expand_sfix_optab (rtx to, rtx from, convert_optab tab)
4912 enum insn_code icode;
4914 enum machine_mode fmode, imode;
4916 /* We first try to find a pair of modes, one real and one integer, at
4917 least as wide as FROM and TO, respectively, in which we can open-code
4918 this conversion. If the integer mode is wider than the mode of TO,
4919 we can do the conversion either signed or unsigned. */
4921 for (fmode = GET_MODE (from); fmode != VOIDmode;
4922 fmode = GET_MODE_WIDER_MODE (fmode))
4923 for (imode = GET_MODE (to); imode != VOIDmode;
4924 imode = GET_MODE_WIDER_MODE (imode))
4926 icode = tab->handlers[imode][fmode].insn_code;
4927 if (icode != CODE_FOR_nothing)
4929 if (fmode != GET_MODE (from))
4930 from = convert_to_mode (fmode, from, 0);
4932 if (imode != GET_MODE (to))
4933 target = gen_reg_rtx (imode);
4935 emit_unop_insn (icode, target, from, UNKNOWN);
4937 convert_move (to, target, 0);
4945 /* Report whether we have an instruction to perform the operation
4946 specified by CODE on operands of mode MODE. */
4948 have_insn_for (enum rtx_code code, enum machine_mode mode)
4950 return (code_to_optab[(int) code] != 0
4951 && (code_to_optab[(int) code]->handlers[(int) mode].insn_code
4952 != CODE_FOR_nothing));
4955 /* Create a blank optab. */
4960 optab op = ggc_alloc (sizeof (struct optab));
4961 for (i = 0; i < NUM_MACHINE_MODES; i++)
4963 op->handlers[i].insn_code = CODE_FOR_nothing;
4964 op->handlers[i].libfunc = 0;
4970 static convert_optab
4971 new_convert_optab (void)
4974 convert_optab op = ggc_alloc (sizeof (struct convert_optab));
4975 for (i = 0; i < NUM_MACHINE_MODES; i++)
4976 for (j = 0; j < NUM_MACHINE_MODES; j++)
4978 op->handlers[i][j].insn_code = CODE_FOR_nothing;
4979 op->handlers[i][j].libfunc = 0;
4984 /* Same, but fill in its code as CODE, and write it into the
4985 code_to_optab table. */
4987 init_optab (enum rtx_code code)
4989 optab op = new_optab ();
4991 code_to_optab[(int) code] = op;
4995 /* Same, but fill in its code as CODE, and do _not_ write it into
4996 the code_to_optab table. */
4998 init_optabv (enum rtx_code code)
5000 optab op = new_optab ();
5005 /* Conversion optabs never go in the code_to_optab table. */
5006 static inline convert_optab
5007 init_convert_optab (enum rtx_code code)
5009 convert_optab op = new_convert_optab ();
5014 /* Initialize the libfunc fields of an entire group of entries in some
5015 optab. Each entry is set equal to a string consisting of a leading
5016 pair of underscores followed by a generic operation name followed by
5017 a mode name (downshifted to lowercase) followed by a single character
5018 representing the number of operands for the given operation (which is
5019 usually one of the characters '2', '3', or '4').
5021 OPTABLE is the table in which libfunc fields are to be initialized.
5022 FIRST_MODE is the first machine mode index in the given optab to
5024 LAST_MODE is the last machine mode index in the given optab to
5026 OPNAME is the generic (string) name of the operation.
5027 SUFFIX is the character which specifies the number of operands for
5028 the given generic operation.
5032 init_libfuncs (optab optable, int first_mode, int last_mode,
5033 const char *opname, int suffix)
5036 unsigned opname_len = strlen (opname);
5038 for (mode = first_mode; (int) mode <= (int) last_mode;
5039 mode = (enum machine_mode) ((int) mode + 1))
5041 const char *mname = GET_MODE_NAME (mode);
5042 unsigned mname_len = strlen (mname);
5043 char *libfunc_name = alloca (2 + opname_len + mname_len + 1 + 1);
5050 for (q = opname; *q; )
5052 for (q = mname; *q; q++)
5053 *p++ = TOLOWER (*q);
5057 optable->handlers[(int) mode].libfunc
5058 = init_one_libfunc (ggc_alloc_string (libfunc_name, p - libfunc_name));
5062 /* Initialize the libfunc fields of an entire group of entries in some
5063 optab which correspond to all integer mode operations. The parameters
5064 have the same meaning as similarly named ones for the `init_libfuncs'
5065 routine. (See above). */
5068 init_integral_libfuncs (optab optable, const char *opname, int suffix)
5070 int maxsize = 2*BITS_PER_WORD;
5071 if (maxsize < LONG_LONG_TYPE_SIZE)
5072 maxsize = LONG_LONG_TYPE_SIZE;
5073 init_libfuncs (optable, word_mode,
5074 mode_for_size (maxsize, MODE_INT, 0),
5078 /* Initialize the libfunc fields of an entire group of entries in some
5079 optab which correspond to all real mode operations. The parameters
5080 have the same meaning as similarly named ones for the `init_libfuncs'
5081 routine. (See above). */
5084 init_floating_libfuncs (optab optable, const char *opname, int suffix)
5086 init_libfuncs (optable, MIN_MODE_FLOAT, MAX_MODE_FLOAT, opname, suffix);
5087 init_libfuncs (optable, MIN_MODE_DECIMAL_FLOAT, MAX_MODE_DECIMAL_FLOAT,
5091 /* Initialize the libfunc fields of an entire group of entries of an
5092 inter-mode-class conversion optab. The string formation rules are
5093 similar to the ones for init_libfuncs, above, but instead of having
5094 a mode name and an operand count these functions have two mode names
5095 and no operand count. */
5097 init_interclass_conv_libfuncs (convert_optab tab, const char *opname,
5098 enum mode_class from_class,
5099 enum mode_class to_class)
5101 enum machine_mode first_from_mode = GET_CLASS_NARROWEST_MODE (from_class);
5102 enum machine_mode first_to_mode = GET_CLASS_NARROWEST_MODE (to_class);
5103 size_t opname_len = strlen (opname);
5104 size_t max_mname_len = 0;
5106 enum machine_mode fmode, tmode;
5107 const char *fname, *tname;
5109 char *libfunc_name, *suffix;
5112 for (fmode = first_from_mode;
5114 fmode = GET_MODE_WIDER_MODE (fmode))
5115 max_mname_len = MAX (max_mname_len, strlen (GET_MODE_NAME (fmode)));
5117 for (tmode = first_to_mode;
5119 tmode = GET_MODE_WIDER_MODE (tmode))
5120 max_mname_len = MAX (max_mname_len, strlen (GET_MODE_NAME (tmode)));
5122 libfunc_name = alloca (2 + opname_len + 2*max_mname_len + 1 + 1);
5123 libfunc_name[0] = '_';
5124 libfunc_name[1] = '_';
5125 memcpy (&libfunc_name[2], opname, opname_len);
5126 suffix = libfunc_name + opname_len + 2;
5128 for (fmode = first_from_mode; fmode != VOIDmode;
5129 fmode = GET_MODE_WIDER_MODE (fmode))
5130 for (tmode = first_to_mode; tmode != VOIDmode;
5131 tmode = GET_MODE_WIDER_MODE (tmode))
5133 fname = GET_MODE_NAME (fmode);
5134 tname = GET_MODE_NAME (tmode);
5137 for (q = fname; *q; p++, q++)
5139 for (q = tname; *q; p++, q++)
5144 tab->handlers[tmode][fmode].libfunc
5145 = init_one_libfunc (ggc_alloc_string (libfunc_name,
5150 /* Initialize the libfunc fields of an entire group of entries of an
5151 intra-mode-class conversion optab. The string formation rules are
5152 similar to the ones for init_libfunc, above. WIDENING says whether
5153 the optab goes from narrow to wide modes or vice versa. These functions
5154 have two mode names _and_ an operand count. */
5156 init_intraclass_conv_libfuncs (convert_optab tab, const char *opname,
5157 enum mode_class class, bool widening)
5159 enum machine_mode first_mode = GET_CLASS_NARROWEST_MODE (class);
5160 size_t opname_len = strlen (opname);
5161 size_t max_mname_len = 0;
5163 enum machine_mode nmode, wmode;
5164 const char *nname, *wname;
5166 char *libfunc_name, *suffix;
5169 for (nmode = first_mode; nmode != VOIDmode;
5170 nmode = GET_MODE_WIDER_MODE (nmode))
5171 max_mname_len = MAX (max_mname_len, strlen (GET_MODE_NAME (nmode)));
5173 libfunc_name = alloca (2 + opname_len + 2*max_mname_len + 1 + 1);
5174 libfunc_name[0] = '_';
5175 libfunc_name[1] = '_';
5176 memcpy (&libfunc_name[2], opname, opname_len);
5177 suffix = libfunc_name + opname_len + 2;
5179 for (nmode = first_mode; nmode != VOIDmode;
5180 nmode = GET_MODE_WIDER_MODE (nmode))
5181 for (wmode = GET_MODE_WIDER_MODE (nmode); wmode != VOIDmode;
5182 wmode = GET_MODE_WIDER_MODE (wmode))
5184 nname = GET_MODE_NAME (nmode);
5185 wname = GET_MODE_NAME (wmode);
5188 for (q = widening ? nname : wname; *q; p++, q++)
5190 for (q = widening ? wname : nname; *q; p++, q++)
5196 tab->handlers[widening ? wmode : nmode]
5197 [widening ? nmode : wmode].libfunc
5198 = init_one_libfunc (ggc_alloc_string (libfunc_name,
5205 init_one_libfunc (const char *name)
5209 /* Create a FUNCTION_DECL that can be passed to
5210 targetm.encode_section_info. */
5211 /* ??? We don't have any type information except for this is
5212 a function. Pretend this is "int foo()". */
5213 tree decl = build_decl (FUNCTION_DECL, get_identifier (name),
5214 build_function_type (integer_type_node, NULL_TREE));
5215 DECL_ARTIFICIAL (decl) = 1;
5216 DECL_EXTERNAL (decl) = 1;
5217 TREE_PUBLIC (decl) = 1;
5219 symbol = XEXP (DECL_RTL (decl), 0);
5221 /* Zap the nonsensical SYMBOL_REF_DECL for this. What we're left with
5222 are the flags assigned by targetm.encode_section_info. */
5223 SET_SYMBOL_REF_DECL (symbol, 0);
5228 /* Call this to reset the function entry for one optab (OPTABLE) in mode
5229 MODE to NAME, which should be either 0 or a string constant. */
5231 set_optab_libfunc (optab optable, enum machine_mode mode, const char *name)
5234 optable->handlers[mode].libfunc = init_one_libfunc (name);
5236 optable->handlers[mode].libfunc = 0;
5239 /* Call this to reset the function entry for one conversion optab
5240 (OPTABLE) from mode FMODE to mode TMODE to NAME, which should be
5241 either 0 or a string constant. */
5243 set_conv_libfunc (convert_optab optable, enum machine_mode tmode,
5244 enum machine_mode fmode, const char *name)
5247 optable->handlers[tmode][fmode].libfunc = init_one_libfunc (name);
5249 optable->handlers[tmode][fmode].libfunc = 0;
5252 /* Call this once to initialize the contents of the optabs
5253 appropriately for the current target machine. */
5260 /* Start by initializing all tables to contain CODE_FOR_nothing. */
5262 for (i = 0; i < NUM_RTX_CODE; i++)
5263 setcc_gen_code[i] = CODE_FOR_nothing;
5265 #ifdef HAVE_conditional_move
5266 for (i = 0; i < NUM_MACHINE_MODES; i++)
5267 movcc_gen_code[i] = CODE_FOR_nothing;
5270 for (i = 0; i < NUM_MACHINE_MODES; i++)
5272 vcond_gen_code[i] = CODE_FOR_nothing;
5273 vcondu_gen_code[i] = CODE_FOR_nothing;
5276 add_optab = init_optab (PLUS);
5277 addv_optab = init_optabv (PLUS);
5278 sub_optab = init_optab (MINUS);
5279 subv_optab = init_optabv (MINUS);
5280 smul_optab = init_optab (MULT);
5281 smulv_optab = init_optabv (MULT);
5282 smul_highpart_optab = init_optab (UNKNOWN);
5283 umul_highpart_optab = init_optab (UNKNOWN);
5284 smul_widen_optab = init_optab (UNKNOWN);
5285 umul_widen_optab = init_optab (UNKNOWN);
5286 usmul_widen_optab = init_optab (UNKNOWN);
5287 sdiv_optab = init_optab (DIV);
5288 sdivv_optab = init_optabv (DIV);
5289 sdivmod_optab = init_optab (UNKNOWN);
5290 udiv_optab = init_optab (UDIV);
5291 udivmod_optab = init_optab (UNKNOWN);
5292 smod_optab = init_optab (MOD);
5293 umod_optab = init_optab (UMOD);
5294 fmod_optab = init_optab (UNKNOWN);
5295 remainder_optab = init_optab (UNKNOWN);
5296 ftrunc_optab = init_optab (UNKNOWN);
5297 and_optab = init_optab (AND);
5298 ior_optab = init_optab (IOR);
5299 xor_optab = init_optab (XOR);
5300 ashl_optab = init_optab (ASHIFT);
5301 ashr_optab = init_optab (ASHIFTRT);
5302 lshr_optab = init_optab (LSHIFTRT);
5303 rotl_optab = init_optab (ROTATE);
5304 rotr_optab = init_optab (ROTATERT);
5305 smin_optab = init_optab (SMIN);
5306 smax_optab = init_optab (SMAX);
5307 umin_optab = init_optab (UMIN);
5308 umax_optab = init_optab (UMAX);
5309 pow_optab = init_optab (UNKNOWN);
5310 atan2_optab = init_optab (UNKNOWN);
5312 /* These three have codes assigned exclusively for the sake of
5314 mov_optab = init_optab (SET);
5315 movstrict_optab = init_optab (STRICT_LOW_PART);
5316 cmp_optab = init_optab (COMPARE);
5318 ucmp_optab = init_optab (UNKNOWN);
5319 tst_optab = init_optab (UNKNOWN);
5321 eq_optab = init_optab (EQ);
5322 ne_optab = init_optab (NE);
5323 gt_optab = init_optab (GT);
5324 ge_optab = init_optab (GE);
5325 lt_optab = init_optab (LT);
5326 le_optab = init_optab (LE);
5327 unord_optab = init_optab (UNORDERED);
5329 neg_optab = init_optab (NEG);
5330 negv_optab = init_optabv (NEG);
5331 abs_optab = init_optab (ABS);
5332 absv_optab = init_optabv (ABS);
5333 addcc_optab = init_optab (UNKNOWN);
5334 one_cmpl_optab = init_optab (NOT);
5335 bswap_optab = init_optab (BSWAP);
5336 ffs_optab = init_optab (FFS);
5337 clz_optab = init_optab (CLZ);
5338 ctz_optab = init_optab (CTZ);
5339 popcount_optab = init_optab (POPCOUNT);
5340 parity_optab = init_optab (PARITY);
5341 sqrt_optab = init_optab (SQRT);
5342 floor_optab = init_optab (UNKNOWN);
5343 ceil_optab = init_optab (UNKNOWN);
5344 round_optab = init_optab (UNKNOWN);
5345 btrunc_optab = init_optab (UNKNOWN);
5346 nearbyint_optab = init_optab (UNKNOWN);
5347 rint_optab = init_optab (UNKNOWN);
5348 sincos_optab = init_optab (UNKNOWN);
5349 sin_optab = init_optab (UNKNOWN);
5350 asin_optab = init_optab (UNKNOWN);
5351 cos_optab = init_optab (UNKNOWN);
5352 acos_optab = init_optab (UNKNOWN);
5353 exp_optab = init_optab (UNKNOWN);
5354 exp10_optab = init_optab (UNKNOWN);
5355 exp2_optab = init_optab (UNKNOWN);
5356 expm1_optab = init_optab (UNKNOWN);
5357 ldexp_optab = init_optab (UNKNOWN);
5358 logb_optab = init_optab (UNKNOWN);
5359 ilogb_optab = init_optab (UNKNOWN);
5360 log_optab = init_optab (UNKNOWN);
5361 log10_optab = init_optab (UNKNOWN);
5362 log2_optab = init_optab (UNKNOWN);
5363 log1p_optab = init_optab (UNKNOWN);
5364 tan_optab = init_optab (UNKNOWN);
5365 atan_optab = init_optab (UNKNOWN);
5366 copysign_optab = init_optab (UNKNOWN);
5368 strlen_optab = init_optab (UNKNOWN);
5369 cbranch_optab = init_optab (UNKNOWN);
5370 cmov_optab = init_optab (UNKNOWN);
5371 cstore_optab = init_optab (UNKNOWN);
5372 push_optab = init_optab (UNKNOWN);
5374 reduc_smax_optab = init_optab (UNKNOWN);
5375 reduc_umax_optab = init_optab (UNKNOWN);
5376 reduc_smin_optab = init_optab (UNKNOWN);
5377 reduc_umin_optab = init_optab (UNKNOWN);
5378 reduc_splus_optab = init_optab (UNKNOWN);
5379 reduc_uplus_optab = init_optab (UNKNOWN);
5381 ssum_widen_optab = init_optab (UNKNOWN);
5382 usum_widen_optab = init_optab (UNKNOWN);
5383 sdot_prod_optab = init_optab (UNKNOWN);
5384 udot_prod_optab = init_optab (UNKNOWN);
5386 vec_extract_optab = init_optab (UNKNOWN);
5387 vec_set_optab = init_optab (UNKNOWN);
5388 vec_init_optab = init_optab (UNKNOWN);
5389 vec_shl_optab = init_optab (UNKNOWN);
5390 vec_shr_optab = init_optab (UNKNOWN);
5391 vec_realign_load_optab = init_optab (UNKNOWN);
5392 movmisalign_optab = init_optab (UNKNOWN);
5393 vec_widen_umult_hi_optab = init_optab (UNKNOWN);
5394 vec_widen_umult_lo_optab = init_optab (UNKNOWN);
5395 vec_widen_smult_hi_optab = init_optab (UNKNOWN);
5396 vec_widen_smult_lo_optab = init_optab (UNKNOWN);
5397 vec_unpacks_hi_optab = init_optab (UNKNOWN);
5398 vec_unpacks_lo_optab = init_optab (UNKNOWN);
5399 vec_unpacku_hi_optab = init_optab (UNKNOWN);
5400 vec_unpacku_lo_optab = init_optab (UNKNOWN);
5401 vec_pack_mod_optab = init_optab (UNKNOWN);
5402 vec_pack_usat_optab = init_optab (UNKNOWN);
5403 vec_pack_ssat_optab = init_optab (UNKNOWN);
5405 powi_optab = init_optab (UNKNOWN);
5408 sext_optab = init_convert_optab (SIGN_EXTEND);
5409 zext_optab = init_convert_optab (ZERO_EXTEND);
5410 trunc_optab = init_convert_optab (TRUNCATE);
5411 sfix_optab = init_convert_optab (FIX);
5412 ufix_optab = init_convert_optab (UNSIGNED_FIX);
5413 sfixtrunc_optab = init_convert_optab (UNKNOWN);
5414 ufixtrunc_optab = init_convert_optab (UNKNOWN);
5415 sfloat_optab = init_convert_optab (FLOAT);
5416 ufloat_optab = init_convert_optab (UNSIGNED_FLOAT);
5417 lrint_optab = init_convert_optab (UNKNOWN);
5418 lround_optab = init_convert_optab (UNKNOWN);
5419 lfloor_optab = init_convert_optab (UNKNOWN);
5420 lceil_optab = init_convert_optab (UNKNOWN);
5422 for (i = 0; i < NUM_MACHINE_MODES; i++)
5424 movmem_optab[i] = CODE_FOR_nothing;
5425 cmpstr_optab[i] = CODE_FOR_nothing;
5426 cmpstrn_optab[i] = CODE_FOR_nothing;
5427 cmpmem_optab[i] = CODE_FOR_nothing;
5428 setmem_optab[i] = CODE_FOR_nothing;
5430 sync_add_optab[i] = CODE_FOR_nothing;
5431 sync_sub_optab[i] = CODE_FOR_nothing;
5432 sync_ior_optab[i] = CODE_FOR_nothing;
5433 sync_and_optab[i] = CODE_FOR_nothing;
5434 sync_xor_optab[i] = CODE_FOR_nothing;
5435 sync_nand_optab[i] = CODE_FOR_nothing;
5436 sync_old_add_optab[i] = CODE_FOR_nothing;
5437 sync_old_sub_optab[i] = CODE_FOR_nothing;
5438 sync_old_ior_optab[i] = CODE_FOR_nothing;
5439 sync_old_and_optab[i] = CODE_FOR_nothing;
5440 sync_old_xor_optab[i] = CODE_FOR_nothing;
5441 sync_old_nand_optab[i] = CODE_FOR_nothing;
5442 sync_new_add_optab[i] = CODE_FOR_nothing;
5443 sync_new_sub_optab[i] = CODE_FOR_nothing;
5444 sync_new_ior_optab[i] = CODE_FOR_nothing;
5445 sync_new_and_optab[i] = CODE_FOR_nothing;
5446 sync_new_xor_optab[i] = CODE_FOR_nothing;
5447 sync_new_nand_optab[i] = CODE_FOR_nothing;
5448 sync_compare_and_swap[i] = CODE_FOR_nothing;
5449 sync_compare_and_swap_cc[i] = CODE_FOR_nothing;
5450 sync_lock_test_and_set[i] = CODE_FOR_nothing;
5451 sync_lock_release[i] = CODE_FOR_nothing;
5453 reload_in_optab[i] = reload_out_optab[i] = CODE_FOR_nothing;
5456 /* Fill in the optabs with the insns we support. */
5459 /* Initialize the optabs with the names of the library functions. */
5460 init_integral_libfuncs (add_optab, "add", '3');
5461 init_floating_libfuncs (add_optab, "add", '3');
5462 init_integral_libfuncs (addv_optab, "addv", '3');
5463 init_floating_libfuncs (addv_optab, "add", '3');
5464 init_integral_libfuncs (sub_optab, "sub", '3');
5465 init_floating_libfuncs (sub_optab, "sub", '3');
5466 init_integral_libfuncs (subv_optab, "subv", '3');
5467 init_floating_libfuncs (subv_optab, "sub", '3');
5468 init_integral_libfuncs (smul_optab, "mul", '3');
5469 init_floating_libfuncs (smul_optab, "mul", '3');
5470 init_integral_libfuncs (smulv_optab, "mulv", '3');
5471 init_floating_libfuncs (smulv_optab, "mul", '3');
5472 init_integral_libfuncs (sdiv_optab, "div", '3');
5473 init_floating_libfuncs (sdiv_optab, "div", '3');
5474 init_integral_libfuncs (sdivv_optab, "divv", '3');
5475 init_integral_libfuncs (udiv_optab, "udiv", '3');
5476 init_integral_libfuncs (sdivmod_optab, "divmod", '4');
5477 init_integral_libfuncs (udivmod_optab, "udivmod", '4');
5478 init_integral_libfuncs (smod_optab, "mod", '3');
5479 init_integral_libfuncs (umod_optab, "umod", '3');
5480 init_floating_libfuncs (ftrunc_optab, "ftrunc", '2');
5481 init_integral_libfuncs (and_optab, "and", '3');
5482 init_integral_libfuncs (ior_optab, "ior", '3');
5483 init_integral_libfuncs (xor_optab, "xor", '3');
5484 init_integral_libfuncs (ashl_optab, "ashl", '3');
5485 init_integral_libfuncs (ashr_optab, "ashr", '3');
5486 init_integral_libfuncs (lshr_optab, "lshr", '3');
5487 init_integral_libfuncs (smin_optab, "min", '3');
5488 init_floating_libfuncs (smin_optab, "min", '3');
5489 init_integral_libfuncs (smax_optab, "max", '3');
5490 init_floating_libfuncs (smax_optab, "max", '3');
5491 init_integral_libfuncs (umin_optab, "umin", '3');
5492 init_integral_libfuncs (umax_optab, "umax", '3');
5493 init_integral_libfuncs (neg_optab, "neg", '2');
5494 init_floating_libfuncs (neg_optab, "neg", '2');
5495 init_integral_libfuncs (negv_optab, "negv", '2');
5496 init_floating_libfuncs (negv_optab, "neg", '2');
5497 init_integral_libfuncs (one_cmpl_optab, "one_cmpl", '2');
5498 init_integral_libfuncs (ffs_optab, "ffs", '2');
5499 init_integral_libfuncs (clz_optab, "clz", '2');
5500 init_integral_libfuncs (ctz_optab, "ctz", '2');
5501 init_integral_libfuncs (popcount_optab, "popcount", '2');
5502 init_integral_libfuncs (parity_optab, "parity", '2');
5504 /* Comparison libcalls for integers MUST come in pairs,
5506 init_integral_libfuncs (cmp_optab, "cmp", '2');
5507 init_integral_libfuncs (ucmp_optab, "ucmp", '2');
5508 init_floating_libfuncs (cmp_optab, "cmp", '2');
5510 /* EQ etc are floating point only. */
5511 init_floating_libfuncs (eq_optab, "eq", '2');
5512 init_floating_libfuncs (ne_optab, "ne", '2');
5513 init_floating_libfuncs (gt_optab, "gt", '2');
5514 init_floating_libfuncs (ge_optab, "ge", '2');
5515 init_floating_libfuncs (lt_optab, "lt", '2');
5516 init_floating_libfuncs (le_optab, "le", '2');
5517 init_floating_libfuncs (unord_optab, "unord", '2');
5519 init_floating_libfuncs (powi_optab, "powi", '2');
5522 init_interclass_conv_libfuncs (sfloat_optab, "float",
5523 MODE_INT, MODE_FLOAT);
5524 init_interclass_conv_libfuncs (sfloat_optab, "float",
5525 MODE_INT, MODE_DECIMAL_FLOAT);
5526 init_interclass_conv_libfuncs (ufloat_optab, "floatun",
5527 MODE_INT, MODE_FLOAT);
5528 init_interclass_conv_libfuncs (ufloat_optab, "floatun",
5529 MODE_INT, MODE_DECIMAL_FLOAT);
5530 init_interclass_conv_libfuncs (sfix_optab, "fix",
5531 MODE_FLOAT, MODE_INT);
5532 init_interclass_conv_libfuncs (sfix_optab, "fix",
5533 MODE_DECIMAL_FLOAT, MODE_INT);
5534 init_interclass_conv_libfuncs (ufix_optab, "fixuns",
5535 MODE_FLOAT, MODE_INT);
5536 init_interclass_conv_libfuncs (ufix_optab, "fixuns",
5537 MODE_DECIMAL_FLOAT, MODE_INT);
5538 init_interclass_conv_libfuncs (ufloat_optab, "floatuns",
5539 MODE_INT, MODE_DECIMAL_FLOAT);
5540 init_interclass_conv_libfuncs (lrint_optab, "lrint",
5541 MODE_INT, MODE_FLOAT);
5542 init_interclass_conv_libfuncs (lround_optab, "lround",
5543 MODE_INT, MODE_FLOAT);
5544 init_interclass_conv_libfuncs (lfloor_optab, "lfloor",
5545 MODE_INT, MODE_FLOAT);
5546 init_interclass_conv_libfuncs (lceil_optab, "lceil",
5547 MODE_INT, MODE_FLOAT);
5549 /* sext_optab is also used for FLOAT_EXTEND. */
5550 init_intraclass_conv_libfuncs (sext_optab, "extend", MODE_FLOAT, true);
5551 init_intraclass_conv_libfuncs (sext_optab, "extend", MODE_DECIMAL_FLOAT, true);
5552 init_interclass_conv_libfuncs (sext_optab, "extend", MODE_FLOAT, MODE_DECIMAL_FLOAT);
5553 init_interclass_conv_libfuncs (sext_optab, "extend", MODE_DECIMAL_FLOAT, MODE_FLOAT);
5554 init_intraclass_conv_libfuncs (trunc_optab, "trunc", MODE_FLOAT, false);
5555 init_intraclass_conv_libfuncs (trunc_optab, "trunc", MODE_DECIMAL_FLOAT, false);
5556 init_interclass_conv_libfuncs (trunc_optab, "trunc", MODE_FLOAT, MODE_DECIMAL_FLOAT);
5557 init_interclass_conv_libfuncs (trunc_optab, "trunc", MODE_DECIMAL_FLOAT, MODE_FLOAT);
5559 /* Explicitly initialize the bswap libfuncs since we need them to be
5560 valid for things other than word_mode. */
5561 set_optab_libfunc (bswap_optab, SImode, "__bswapsi2");
5562 set_optab_libfunc (bswap_optab, DImode, "__bswapdi2");
5564 /* Use cabs for double complex abs, since systems generally have cabs.
5565 Don't define any libcall for float complex, so that cabs will be used. */
5566 if (complex_double_type_node)
5567 abs_optab->handlers[TYPE_MODE (complex_double_type_node)].libfunc
5568 = init_one_libfunc ("cabs");
5570 /* The ffs function operates on `int'. */
5571 ffs_optab->handlers[(int) mode_for_size (INT_TYPE_SIZE, MODE_INT, 0)].libfunc
5572 = init_one_libfunc ("ffs");
5574 abort_libfunc = init_one_libfunc ("abort");
5575 memcpy_libfunc = init_one_libfunc ("memcpy");
5576 memmove_libfunc = init_one_libfunc ("memmove");
5577 memcmp_libfunc = init_one_libfunc ("memcmp");
5578 memset_libfunc = init_one_libfunc ("memset");
5579 setbits_libfunc = init_one_libfunc ("__setbits");
5581 #ifndef DONT_USE_BUILTIN_SETJMP
5582 setjmp_libfunc = init_one_libfunc ("__builtin_setjmp");
5583 longjmp_libfunc = init_one_libfunc ("__builtin_longjmp");
5585 setjmp_libfunc = init_one_libfunc ("setjmp");
5586 longjmp_libfunc = init_one_libfunc ("longjmp");
5588 unwind_sjlj_register_libfunc = init_one_libfunc ("_Unwind_SjLj_Register");
5589 unwind_sjlj_unregister_libfunc
5590 = init_one_libfunc ("_Unwind_SjLj_Unregister");
5592 /* For function entry/exit instrumentation. */
5593 profile_function_entry_libfunc
5594 = init_one_libfunc ("__cyg_profile_func_enter");
5595 profile_function_exit_libfunc
5596 = init_one_libfunc ("__cyg_profile_func_exit");
5598 gcov_flush_libfunc = init_one_libfunc ("__gcov_flush");
5600 if (HAVE_conditional_trap)
5601 trap_rtx = gen_rtx_fmt_ee (EQ, VOIDmode, NULL_RTX, NULL_RTX);
5603 /* Allow the target to add more libcalls or rename some, etc. */
5604 targetm.init_libfuncs ();
5609 /* Print information about the current contents of the optabs on
5613 debug_optab_libfuncs (void)
5619 /* Dump the arithmetic optabs. */
5620 for (i = 0; i != (int) OTI_MAX; i++)
5621 for (j = 0; j < NUM_MACHINE_MODES; ++j)
5624 struct optab_handlers *h;
5627 h = &o->handlers[j];
5630 gcc_assert (GET_CODE (h->libfunc) = SYMBOL_REF);
5631 fprintf (stderr, "%s\t%s:\t%s\n",
5632 GET_RTX_NAME (o->code),
5634 XSTR (h->libfunc, 0));
5638 /* Dump the conversion optabs. */
5639 for (i = 0; i < (int) COI_MAX; ++i)
5640 for (j = 0; j < NUM_MACHINE_MODES; ++j)
5641 for (k = 0; k < NUM_MACHINE_MODES; ++k)
5644 struct optab_handlers *h;
5646 o = &convert_optab_table[i];
5647 h = &o->handlers[j][k];
5650 gcc_assert (GET_CODE (h->libfunc) = SYMBOL_REF);
5651 fprintf (stderr, "%s\t%s\t%s:\t%s\n",
5652 GET_RTX_NAME (o->code),
5655 XSTR (h->libfunc, 0));
5663 /* Generate insns to trap with code TCODE if OP1 and OP2 satisfy condition
5664 CODE. Return 0 on failure. */
5667 gen_cond_trap (enum rtx_code code ATTRIBUTE_UNUSED, rtx op1,
5668 rtx op2 ATTRIBUTE_UNUSED, rtx tcode ATTRIBUTE_UNUSED)
5670 enum machine_mode mode = GET_MODE (op1);
5671 enum insn_code icode;
5674 if (!HAVE_conditional_trap)
5677 if (mode == VOIDmode)
5680 icode = cmp_optab->handlers[(int) mode].insn_code;
5681 if (icode == CODE_FOR_nothing)
5685 op1 = prepare_operand (icode, op1, 0, mode, mode, 0);
5686 op2 = prepare_operand (icode, op2, 1, mode, mode, 0);
5692 emit_insn (GEN_FCN (icode) (op1, op2));
5694 PUT_CODE (trap_rtx, code);
5695 gcc_assert (HAVE_conditional_trap);
5696 insn = gen_conditional_trap (trap_rtx, tcode);
5700 insn = get_insns ();
5707 /* Return rtx code for TCODE. Use UNSIGNEDP to select signed
5708 or unsigned operation code. */
5710 static enum rtx_code
5711 get_rtx_code (enum tree_code tcode, bool unsignedp)
5723 code = unsignedp ? LTU : LT;
5726 code = unsignedp ? LEU : LE;
5729 code = unsignedp ? GTU : GT;
5732 code = unsignedp ? GEU : GE;
5735 case UNORDERED_EXPR:
5766 /* Return comparison rtx for COND. Use UNSIGNEDP to select signed or
5767 unsigned operators. Do not generate compare instruction. */
5770 vector_compare_rtx (tree cond, bool unsignedp, enum insn_code icode)
5772 enum rtx_code rcode;
5774 rtx rtx_op0, rtx_op1;
5776 /* This is unlikely. While generating VEC_COND_EXPR, auto vectorizer
5777 ensures that condition is a relational operation. */
5778 gcc_assert (COMPARISON_CLASS_P (cond));
5780 rcode = get_rtx_code (TREE_CODE (cond), unsignedp);
5781 t_op0 = TREE_OPERAND (cond, 0);
5782 t_op1 = TREE_OPERAND (cond, 1);
5784 /* Expand operands. */
5785 rtx_op0 = expand_expr (t_op0, NULL_RTX, TYPE_MODE (TREE_TYPE (t_op0)), 1);
5786 rtx_op1 = expand_expr (t_op1, NULL_RTX, TYPE_MODE (TREE_TYPE (t_op1)), 1);
5788 if (!insn_data[icode].operand[4].predicate (rtx_op0, GET_MODE (rtx_op0))
5789 && GET_MODE (rtx_op0) != VOIDmode)
5790 rtx_op0 = force_reg (GET_MODE (rtx_op0), rtx_op0);
5792 if (!insn_data[icode].operand[5].predicate (rtx_op1, GET_MODE (rtx_op1))
5793 && GET_MODE (rtx_op1) != VOIDmode)
5794 rtx_op1 = force_reg (GET_MODE (rtx_op1), rtx_op1);
5796 return gen_rtx_fmt_ee (rcode, VOIDmode, rtx_op0, rtx_op1);
5799 /* Return insn code for VEC_COND_EXPR EXPR. */
5801 static inline enum insn_code
5802 get_vcond_icode (tree expr, enum machine_mode mode)
5804 enum insn_code icode = CODE_FOR_nothing;
5806 if (TYPE_UNSIGNED (TREE_TYPE (expr)))
5807 icode = vcondu_gen_code[mode];
5809 icode = vcond_gen_code[mode];
5813 /* Return TRUE iff, appropriate vector insns are available
5814 for vector cond expr expr in VMODE mode. */
5817 expand_vec_cond_expr_p (tree expr, enum machine_mode vmode)
5819 if (get_vcond_icode (expr, vmode) == CODE_FOR_nothing)
5824 /* Generate insns for VEC_COND_EXPR. */
5827 expand_vec_cond_expr (tree vec_cond_expr, rtx target)
5829 enum insn_code icode;
5830 rtx comparison, rtx_op1, rtx_op2, cc_op0, cc_op1;
5831 enum machine_mode mode = TYPE_MODE (TREE_TYPE (vec_cond_expr));
5832 bool unsignedp = TYPE_UNSIGNED (TREE_TYPE (vec_cond_expr));
5834 icode = get_vcond_icode (vec_cond_expr, mode);
5835 if (icode == CODE_FOR_nothing)
5838 if (!target || !insn_data[icode].operand[0].predicate (target, mode))
5839 target = gen_reg_rtx (mode);
5841 /* Get comparison rtx. First expand both cond expr operands. */
5842 comparison = vector_compare_rtx (TREE_OPERAND (vec_cond_expr, 0),
5844 cc_op0 = XEXP (comparison, 0);
5845 cc_op1 = XEXP (comparison, 1);
5846 /* Expand both operands and force them in reg, if required. */
5847 rtx_op1 = expand_expr (TREE_OPERAND (vec_cond_expr, 1),
5848 NULL_RTX, VOIDmode, EXPAND_NORMAL);
5849 if (!insn_data[icode].operand[1].predicate (rtx_op1, mode)
5850 && mode != VOIDmode)
5851 rtx_op1 = force_reg (mode, rtx_op1);
5853 rtx_op2 = expand_expr (TREE_OPERAND (vec_cond_expr, 2),
5854 NULL_RTX, VOIDmode, EXPAND_NORMAL);
5855 if (!insn_data[icode].operand[2].predicate (rtx_op2, mode)
5856 && mode != VOIDmode)
5857 rtx_op2 = force_reg (mode, rtx_op2);
5859 /* Emit instruction! */
5860 emit_insn (GEN_FCN (icode) (target, rtx_op1, rtx_op2,
5861 comparison, cc_op0, cc_op1));
5867 /* This is an internal subroutine of the other compare_and_swap expanders.
5868 MEM, OLD_VAL and NEW_VAL are as you'd expect for a compare-and-swap
5869 operation. TARGET is an optional place to store the value result of
5870 the operation. ICODE is the particular instruction to expand. Return
5871 the result of the operation. */
5874 expand_val_compare_and_swap_1 (rtx mem, rtx old_val, rtx new_val,
5875 rtx target, enum insn_code icode)
5877 enum machine_mode mode = GET_MODE (mem);
5880 if (!target || !insn_data[icode].operand[0].predicate (target, mode))
5881 target = gen_reg_rtx (mode);
5883 if (GET_MODE (old_val) != VOIDmode && GET_MODE (old_val) != mode)
5884 old_val = convert_modes (mode, GET_MODE (old_val), old_val, 1);
5885 if (!insn_data[icode].operand[2].predicate (old_val, mode))
5886 old_val = force_reg (mode, old_val);
5888 if (GET_MODE (new_val) != VOIDmode && GET_MODE (new_val) != mode)
5889 new_val = convert_modes (mode, GET_MODE (new_val), new_val, 1);
5890 if (!insn_data[icode].operand[3].predicate (new_val, mode))
5891 new_val = force_reg (mode, new_val);
5893 insn = GEN_FCN (icode) (target, mem, old_val, new_val);
5894 if (insn == NULL_RTX)
5901 /* Expand a compare-and-swap operation and return its value. */
5904 expand_val_compare_and_swap (rtx mem, rtx old_val, rtx new_val, rtx target)
5906 enum machine_mode mode = GET_MODE (mem);
5907 enum insn_code icode = sync_compare_and_swap[mode];
5909 if (icode == CODE_FOR_nothing)
5912 return expand_val_compare_and_swap_1 (mem, old_val, new_val, target, icode);
5915 /* Expand a compare-and-swap operation and store true into the result if
5916 the operation was successful and false otherwise. Return the result.
5917 Unlike other routines, TARGET is not optional. */
5920 expand_bool_compare_and_swap (rtx mem, rtx old_val, rtx new_val, rtx target)
5922 enum machine_mode mode = GET_MODE (mem);
5923 enum insn_code icode;
5924 rtx subtarget, label0, label1;
5926 /* If the target supports a compare-and-swap pattern that simultaneously
5927 sets some flag for success, then use it. Otherwise use the regular
5928 compare-and-swap and follow that immediately with a compare insn. */
5929 icode = sync_compare_and_swap_cc[mode];
5933 subtarget = expand_val_compare_and_swap_1 (mem, old_val, new_val,
5935 if (subtarget != NULL_RTX)
5939 case CODE_FOR_nothing:
5940 icode = sync_compare_and_swap[mode];
5941 if (icode == CODE_FOR_nothing)
5944 /* Ensure that if old_val == mem, that we're not comparing
5945 against an old value. */
5946 if (MEM_P (old_val))
5947 old_val = force_reg (mode, old_val);
5949 subtarget = expand_val_compare_and_swap_1 (mem, old_val, new_val,
5951 if (subtarget == NULL_RTX)
5954 emit_cmp_insn (subtarget, old_val, EQ, const0_rtx, mode, true);
5957 /* If the target has a sane STORE_FLAG_VALUE, then go ahead and use a
5958 setcc instruction from the beginning. We don't work too hard here,
5959 but it's nice to not be stupid about initial code gen either. */
5960 if (STORE_FLAG_VALUE == 1)
5962 icode = setcc_gen_code[EQ];
5963 if (icode != CODE_FOR_nothing)
5965 enum machine_mode cmode = insn_data[icode].operand[0].mode;
5969 if (!insn_data[icode].operand[0].predicate (target, cmode))
5970 subtarget = gen_reg_rtx (cmode);
5972 insn = GEN_FCN (icode) (subtarget);
5976 if (GET_MODE (target) != GET_MODE (subtarget))
5978 convert_move (target, subtarget, 1);
5986 /* Without an appropriate setcc instruction, use a set of branches to
5987 get 1 and 0 stored into target. Presumably if the target has a
5988 STORE_FLAG_VALUE that isn't 1, then this will get cleaned up by ifcvt. */
5990 label0 = gen_label_rtx ();
5991 label1 = gen_label_rtx ();
5993 emit_jump_insn (bcc_gen_fctn[EQ] (label0));
5994 emit_move_insn (target, const0_rtx);
5995 emit_jump_insn (gen_jump (label1));
5997 emit_label (label0);
5998 emit_move_insn (target, const1_rtx);
5999 emit_label (label1);
6004 /* This is a helper function for the other atomic operations. This function
6005 emits a loop that contains SEQ that iterates until a compare-and-swap
6006 operation at the end succeeds. MEM is the memory to be modified. SEQ is
6007 a set of instructions that takes a value from OLD_REG as an input and
6008 produces a value in NEW_REG as an output. Before SEQ, OLD_REG will be
6009 set to the current contents of MEM. After SEQ, a compare-and-swap will
6010 attempt to update MEM with NEW_REG. The function returns true when the
6011 loop was generated successfully. */
6014 expand_compare_and_swap_loop (rtx mem, rtx old_reg, rtx new_reg, rtx seq)
6016 enum machine_mode mode = GET_MODE (mem);
6017 enum insn_code icode;
6018 rtx label, cmp_reg, subtarget;
6020 /* The loop we want to generate looks like
6026 cmp_reg = compare-and-swap(mem, old_reg, new_reg)
6027 if (cmp_reg != old_reg)
6030 Note that we only do the plain load from memory once. Subsequent
6031 iterations use the value loaded by the compare-and-swap pattern. */
6033 label = gen_label_rtx ();
6034 cmp_reg = gen_reg_rtx (mode);
6036 emit_move_insn (cmp_reg, mem);
6038 emit_move_insn (old_reg, cmp_reg);
6042 /* If the target supports a compare-and-swap pattern that simultaneously
6043 sets some flag for success, then use it. Otherwise use the regular
6044 compare-and-swap and follow that immediately with a compare insn. */
6045 icode = sync_compare_and_swap_cc[mode];
6049 subtarget = expand_val_compare_and_swap_1 (mem, old_reg, new_reg,
6051 if (subtarget != NULL_RTX)
6053 gcc_assert (subtarget == cmp_reg);
6058 case CODE_FOR_nothing:
6059 icode = sync_compare_and_swap[mode];
6060 if (icode == CODE_FOR_nothing)
6063 subtarget = expand_val_compare_and_swap_1 (mem, old_reg, new_reg,
6065 if (subtarget == NULL_RTX)
6067 if (subtarget != cmp_reg)
6068 emit_move_insn (cmp_reg, subtarget);
6070 emit_cmp_insn (cmp_reg, old_reg, EQ, const0_rtx, mode, true);
6073 /* ??? Mark this jump predicted not taken? */
6074 emit_jump_insn (bcc_gen_fctn[NE] (label));
6079 /* This function generates the atomic operation MEM CODE= VAL. In this
6080 case, we do not care about any resulting value. Returns NULL if we
6081 cannot generate the operation. */
6084 expand_sync_operation (rtx mem, rtx val, enum rtx_code code)
6086 enum machine_mode mode = GET_MODE (mem);
6087 enum insn_code icode;
6090 /* Look to see if the target supports the operation directly. */
6094 icode = sync_add_optab[mode];
6097 icode = sync_ior_optab[mode];
6100 icode = sync_xor_optab[mode];
6103 icode = sync_and_optab[mode];
6106 icode = sync_nand_optab[mode];
6110 icode = sync_sub_optab[mode];
6111 if (icode == CODE_FOR_nothing)
6113 icode = sync_add_optab[mode];
6114 if (icode != CODE_FOR_nothing)
6116 val = expand_simple_unop (mode, NEG, val, NULL_RTX, 1);
6126 /* Generate the direct operation, if present. */
6127 if (icode != CODE_FOR_nothing)
6129 if (GET_MODE (val) != VOIDmode && GET_MODE (val) != mode)
6130 val = convert_modes (mode, GET_MODE (val), val, 1);
6131 if (!insn_data[icode].operand[1].predicate (val, mode))
6132 val = force_reg (mode, val);
6134 insn = GEN_FCN (icode) (mem, val);
6142 /* Failing that, generate a compare-and-swap loop in which we perform the
6143 operation with normal arithmetic instructions. */
6144 if (sync_compare_and_swap[mode] != CODE_FOR_nothing)
6146 rtx t0 = gen_reg_rtx (mode), t1;
6153 t1 = expand_simple_unop (mode, NOT, t1, NULL_RTX, true);
6156 t1 = expand_simple_binop (mode, code, t1, val, NULL_RTX,
6157 true, OPTAB_LIB_WIDEN);
6159 insn = get_insns ();
6162 if (t1 != NULL && expand_compare_and_swap_loop (mem, t0, t1, insn))
6169 /* This function generates the atomic operation MEM CODE= VAL. In this
6170 case, we do care about the resulting value: if AFTER is true then
6171 return the value MEM holds after the operation, if AFTER is false
6172 then return the value MEM holds before the operation. TARGET is an
6173 optional place for the result value to be stored. */
6176 expand_sync_fetch_operation (rtx mem, rtx val, enum rtx_code code,
6177 bool after, rtx target)
6179 enum machine_mode mode = GET_MODE (mem);
6180 enum insn_code old_code, new_code, icode;
6184 /* Look to see if the target supports the operation directly. */
6188 old_code = sync_old_add_optab[mode];
6189 new_code = sync_new_add_optab[mode];
6192 old_code = sync_old_ior_optab[mode];
6193 new_code = sync_new_ior_optab[mode];
6196 old_code = sync_old_xor_optab[mode];
6197 new_code = sync_new_xor_optab[mode];
6200 old_code = sync_old_and_optab[mode];
6201 new_code = sync_new_and_optab[mode];
6204 old_code = sync_old_nand_optab[mode];
6205 new_code = sync_new_nand_optab[mode];
6209 old_code = sync_old_sub_optab[mode];
6210 new_code = sync_new_sub_optab[mode];
6211 if (old_code == CODE_FOR_nothing && new_code == CODE_FOR_nothing)
6213 old_code = sync_old_add_optab[mode];
6214 new_code = sync_new_add_optab[mode];
6215 if (old_code != CODE_FOR_nothing || new_code != CODE_FOR_nothing)
6217 val = expand_simple_unop (mode, NEG, val, NULL_RTX, 1);
6227 /* If the target does supports the proper new/old operation, great. But
6228 if we only support the opposite old/new operation, check to see if we
6229 can compensate. In the case in which the old value is supported, then
6230 we can always perform the operation again with normal arithmetic. In
6231 the case in which the new value is supported, then we can only handle
6232 this in the case the operation is reversible. */
6237 if (icode == CODE_FOR_nothing)
6240 if (icode != CODE_FOR_nothing)
6247 if (icode == CODE_FOR_nothing
6248 && (code == PLUS || code == MINUS || code == XOR))
6251 if (icode != CODE_FOR_nothing)
6256 /* If we found something supported, great. */
6257 if (icode != CODE_FOR_nothing)
6259 if (!target || !insn_data[icode].operand[0].predicate (target, mode))
6260 target = gen_reg_rtx (mode);
6262 if (GET_MODE (val) != VOIDmode && GET_MODE (val) != mode)
6263 val = convert_modes (mode, GET_MODE (val), val, 1);
6264 if (!insn_data[icode].operand[2].predicate (val, mode))
6265 val = force_reg (mode, val);
6267 insn = GEN_FCN (icode) (target, mem, val);
6272 /* If we need to compensate for using an operation with the
6273 wrong return value, do so now. */
6280 else if (code == MINUS)
6285 target = expand_simple_unop (mode, NOT, target, NULL_RTX, true);
6286 target = expand_simple_binop (mode, code, target, val, NULL_RTX,
6287 true, OPTAB_LIB_WIDEN);
6294 /* Failing that, generate a compare-and-swap loop in which we perform the
6295 operation with normal arithmetic instructions. */
6296 if (sync_compare_and_swap[mode] != CODE_FOR_nothing)
6298 rtx t0 = gen_reg_rtx (mode), t1;
6300 if (!target || !register_operand (target, mode))
6301 target = gen_reg_rtx (mode);
6306 emit_move_insn (target, t0);
6310 t1 = expand_simple_unop (mode, NOT, t1, NULL_RTX, true);
6313 t1 = expand_simple_binop (mode, code, t1, val, NULL_RTX,
6314 true, OPTAB_LIB_WIDEN);
6316 emit_move_insn (target, t1);
6318 insn = get_insns ();
6321 if (t1 != NULL && expand_compare_and_swap_loop (mem, t0, t1, insn))
6328 /* This function expands a test-and-set operation. Ideally we atomically
6329 store VAL in MEM and return the previous value in MEM. Some targets
6330 may not support this operation and only support VAL with the constant 1;
6331 in this case while the return value will be 0/1, but the exact value
6332 stored in MEM is target defined. TARGET is an option place to stick
6333 the return value. */
6336 expand_sync_lock_test_and_set (rtx mem, rtx val, rtx target)
6338 enum machine_mode mode = GET_MODE (mem);
6339 enum insn_code icode;
6342 /* If the target supports the test-and-set directly, great. */
6343 icode = sync_lock_test_and_set[mode];
6344 if (icode != CODE_FOR_nothing)
6346 if (!target || !insn_data[icode].operand[0].predicate (target, mode))
6347 target = gen_reg_rtx (mode);
6349 if (GET_MODE (val) != VOIDmode && GET_MODE (val) != mode)
6350 val = convert_modes (mode, GET_MODE (val), val, 1);
6351 if (!insn_data[icode].operand[2].predicate (val, mode))
6352 val = force_reg (mode, val);
6354 insn = GEN_FCN (icode) (target, mem, val);
6362 /* Otherwise, use a compare-and-swap loop for the exchange. */
6363 if (sync_compare_and_swap[mode] != CODE_FOR_nothing)
6365 if (!target || !register_operand (target, mode))
6366 target = gen_reg_rtx (mode);
6367 if (GET_MODE (val) != VOIDmode && GET_MODE (val) != mode)
6368 val = convert_modes (mode, GET_MODE (val), val, 1);
6369 if (expand_compare_and_swap_loop (mem, target, val, NULL_RTX))
6376 #include "gt-optabs.h"