1 /* Expand the basic unary and binary arithmetic operations, for GNU compiler.
2 Copyright (C) 1987, 1988, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
3 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007
4 Free Software Foundation, Inc.
6 This file is part of GCC.
8 GCC is free software; you can redistribute it and/or modify it under
9 the terms of the GNU General Public License as published by the Free
10 Software Foundation; either version 2, or (at your option) any later
version.
13 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
14 WARRANTY; without even the implied warranty of MERCHANTABILITY or
15 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
for more details.
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING. If not, write to the Free
20 Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
02110-1301, USA.
26 #include "coretypes.h"
30 /* Include insn-config.h before expr.h so that HAVE_conditional_move
31 is properly defined. */
32 #include "insn-config.h"
46 #include "basic-block.h"
49 /* Each optab contains info on how this target machine
50 can perform a particular operation
51 for all sizes and kinds of operands.
53 The operation to be performed is often specified
54 by passing one of these optabs as an argument.
56 See expr.h for documentation of these optabs. */
58 optab optab_table[OTI_MAX];
/* Table of library-call rtxes, one per LTI_* index.  NOTE(review): the
   original explanatory comment for this table is not visible in this
   extract -- verify against upstream optabs.c.  */
60 rtx libfunc_table[LTI_MAX];
62 /* Tables of patterns for converting one mode to another. */
63 convert_optab convert_optab_table[COI_MAX];
65 /* Contains the optab used for each rtx code. */
66 optab code_to_optab[NUM_RTX_CODE + 1];
68 /* Indexed by the rtx-code for a conditional (eg. EQ, LT,...)
69 gives the gen_function to make a branch to test that condition. */
71 rtxfun bcc_gen_fctn[NUM_RTX_CODE];
73 /* Indexed by the rtx-code for a conditional (eg. EQ, LT,...)
74 gives the insn code to make a store-condition insn
75 to test that condition. */
77 enum insn_code setcc_gen_code[NUM_RTX_CODE];
79 #ifdef HAVE_conditional_move
80 /* Indexed by the machine mode, gives the insn code to make a conditional
81 move insn. This is not indexed by the rtx-code like bcc_gen_fctn and
82 setcc_gen_code to cut down on the number of named patterns. Consider a day
83 when a lot more rtx codes are conditional (eg: for the ARM). */
85 enum insn_code movcc_gen_code[NUM_MACHINE_MODES];
/* NOTE(review): the matching #endif for HAVE_conditional_move is not
   visible in this extract -- presumably lost in extraction; verify.  */
/* NOTE(review): the comment below is truncated mid-sentence; upstream
   continues "...move insn on the target machine" or similar -- verify.  */
88 /* Indexed by the machine mode, gives the insn code for vector conditional
91 enum insn_code vcond_gen_code[NUM_MACHINE_MODES];
92 enum insn_code vcondu_gen_code[NUM_MACHINE_MODES];
94 /* The insn generating function can not take an rtx_code argument.
95 TRAP_RTX is used as an rtx argument. Its code is replaced with
96 the code to be used in the trap insn and all other fields are ignored. */
97 static GTY(()) rtx trap_rtx;
/* Forward declarations of local helpers.  NOTE(review): several
   prototypes below are cut mid-parameter-list (widen_operand,
   prepare_cmp_insn's trailing args, can_fix_p) -- verify upstream.  */
99 static int add_equal_note (rtx, rtx, enum rtx_code, rtx, rtx);
100 static rtx widen_operand (rtx, enum machine_mode, enum machine_mode, int,
102 static void prepare_cmp_insn (rtx *, rtx *, enum rtx_code *, rtx,
103 enum machine_mode *, int *,
104 enum can_compare_purpose);
105 static enum insn_code can_fix_p (enum machine_mode, enum machine_mode, int,
107 static enum insn_code can_float_p (enum machine_mode, enum machine_mode, int);
108 static optab new_optab (void);
109 static convert_optab new_convert_optab (void);
110 static inline optab init_optab (enum rtx_code);
111 static inline optab init_optabv (enum rtx_code);
112 static inline convert_optab init_convert_optab (enum rtx_code);
113 static void init_libfuncs (optab, int, int, const char *, int);
114 static void init_integral_libfuncs (optab, const char *, int);
115 static void init_floating_libfuncs (optab, const char *, int);
116 static void init_interclass_conv_libfuncs (convert_optab, const char *,
117 enum mode_class, enum mode_class);
118 static void init_intraclass_conv_libfuncs (convert_optab, const char *,
119 enum mode_class, bool);
120 static void emit_cmp_and_jump_insn_1 (rtx, rtx, enum machine_mode,
121 enum rtx_code, int, rtx);
122 static void prepare_float_lib_cmp (rtx *, rtx *, enum rtx_code *,
123 enum machine_mode *, int *);
124 static rtx widen_clz (enum machine_mode, rtx, rtx);
125 static rtx expand_parity (enum machine_mode, rtx, rtx);
126 static enum rtx_code get_rtx_code (enum tree_code, bool);
127 static rtx vector_compare_rtx (tree, bool, enum insn_code);
129 /* Current libcall id. It doesn't matter what these are, as long
130 as they are unique to each libcall that is emitted. */
131 static HOST_WIDE_INT libcall_id = 0;
133 #ifndef HAVE_conditional_trap
134 #define HAVE_conditional_trap 0
135 #define gen_conditional_trap(a,b) (gcc_unreachable (), NULL_RTX)
/* NOTE(review): the #endif closing HAVE_conditional_trap is not visible
   in this extract -- verify against upstream.  */
138 /* Prefixes for the current version of decimal floating point (BID vs. DPD) */
139 #if ENABLE_DECIMAL_BID_FORMAT
140 #define DECIMAL_PREFIX "bid_"
/* NOTE(review): the #else separating the two DECIMAL_PREFIX definitions
   and the closing #endif are not visible in this extract -- verify.  */
142 #define DECIMAL_PREFIX "dpd_"
146 /* Add a REG_EQUAL note to the last insn in INSNS. TARGET is being set to
147 the result of operation CODE applied to OP0 (and OP1 if it is a binary
150 If the last insn does not set TARGET, don't do anything, but return 1.
152 If a previous insn sets TARGET and TARGET is one of OP0 or OP1,
153 don't add the REG_EQUAL note but return 0. Our caller can then try
154 again, ensuring that TARGET is not one of the operands. */
/* NOTE(review): this extract is line-sampled -- the return type line,
   opening/closing braces, the declaration of "note", and several early
   "return" statements are missing relative to upstream optabs.c.  */
157 add_equal_note (rtx insns, rtx target, enum rtx_code code, rtx op0, rtx op1)
159 rtx last_insn, insn, set;
/* INSNS must be a real insn sequence with at least two insns.  */
162 gcc_assert (insns && INSN_P (insns) && NEXT_INSN (insns));
/* Only arithmetic, comparison and unary codes get a REG_EQUAL note.  */
164 if (GET_RTX_CLASS (code) != RTX_COMM_ARITH
165 && GET_RTX_CLASS (code) != RTX_BIN_ARITH
166 && GET_RTX_CLASS (code) != RTX_COMM_COMPARE
167 && GET_RTX_CLASS (code) != RTX_COMPARE
168 && GET_RTX_CLASS (code) != RTX_UNARY)
171 if (GET_CODE (target) == ZERO_EXTRACT)
/* Walk to the final insn of the sequence.  */
174 for (last_insn = insns;
175 NEXT_INSN (last_insn) != NULL_RTX;
176 last_insn = NEXT_INSN (last_insn))
179 set = single_set (last_insn);
183 if (! rtx_equal_p (SET_DEST (set), target)
184 /* For a STRICT_LOW_PART, the REG_NOTE applies to what is inside it. */
185 && (GET_CODE (SET_DEST (set)) != STRICT_LOW_PART
186 || ! rtx_equal_p (XEXP (SET_DEST (set), 0), target)))
189 /* If TARGET is in OP0 or OP1, check if anything in SEQ sets TARGET
190 besides the last insn. */
191 if (reg_overlap_mentioned_p (target, op0)
192 || (op1 && reg_overlap_mentioned_p (target, op1)))
194 insn = PREV_INSN (last_insn);
195 while (insn != NULL_RTX)
197 if (reg_set_p (target, insn))
200 insn = PREV_INSN (insn);
/* Build the note as (CODE OP0) or (CODE OP0 OP1) in TARGET's mode.  */
204 if (GET_RTX_CLASS (code) == RTX_UNARY)
205 note = gen_rtx_fmt_e (code, GET_MODE (target), copy_rtx (op0));
207 note = gen_rtx_fmt_ee (code, GET_MODE (target), copy_rtx (op0), copy_rtx (op1));
209 set_unique_reg_note (last_insn, REG_EQUAL, note);
214 /* Widen OP to MODE and return the rtx for the widened operand. UNSIGNEDP
215 says whether OP is signed or unsigned. NO_EXTEND is nonzero if we need
216 not actually do a sign-extend or zero-extend, but can leave the
217 higher-order bits of the result rtx undefined, for example, in the case
218 of logical operations, but not right shifts. */
/* NOTE(review): this extract is line-sampled -- the return type, braces,
   the first half of the "if" condition guarding convert_modes, and the
   final "return result;" are missing relative to upstream optabs.c.  */
221 widen_operand (rtx op, enum machine_mode mode, enum machine_mode oldmode,
222 int unsignedp, int no_extend)
226 /* If we don't have to extend and this is a constant, return it. */
227 if (no_extend && GET_MODE (op) == VOIDmode)
230 /* If we must extend do so. If OP is a SUBREG for a promoted object, also
231 extend since it will be more efficient to do so unless the signedness of
232 a promoted object differs from our extension. */
234 || (GET_CODE (op) == SUBREG && SUBREG_PROMOTED_VAR_P (op)
235 && SUBREG_PROMOTED_UNSIGNED_P (op) == unsignedp))
236 return convert_modes (mode, oldmode, op, unsignedp);
238 /* If MODE is no wider than a single word, we return a paradoxical
240 if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
241 return gen_rtx_SUBREG (mode, force_reg (GET_MODE (op), op), 0);
243 /* Otherwise, get an object of MODE, clobber it, and set the low-order
246 result = gen_reg_rtx (mode);
/* Clobber RESULT first so only the low part needs a real move.  */
247 emit_insn (gen_rtx_CLOBBER (VOIDmode, result));
248 emit_move_insn (gen_lowpart (GET_MODE (op), result), op);
252 /* Return the optab used for computing the operation given by
253 the tree code, CODE. This function is not always usable (for
254 example, it cannot give complete results for multiplication
255 or division) but probably ought to be relied on more widely
256 throughout the expander. */
/* NOTE(review): this extract is line-sampled -- the return type, braces,
   the opening "switch (code)", and many "case" labels preceding the
   "return" lines below are missing relative to upstream optabs.c; each
   orphaned "return" line corresponds to a sampled-out case label.  */
258 optab_for_tree_code (enum tree_code code, tree type)
270 return one_cmpl_optab;
279 return TYPE_UNSIGNED (type) ? umod_optab : smod_optab;
287 return TYPE_UNSIGNED (type) ? udiv_optab : sdiv_optab;
293 return TYPE_UNSIGNED (type) ? lshr_optab : ashr_optab;
302 return TYPE_UNSIGNED (type) ? umax_optab : smax_optab;
305 return TYPE_UNSIGNED (type) ? umin_optab : smin_optab;
307 case REALIGN_LOAD_EXPR:
308 return vec_realign_load_optab;
311 return TYPE_UNSIGNED (type) ? usum_widen_optab : ssum_widen_optab;
314 return TYPE_UNSIGNED (type) ? udot_prod_optab : sdot_prod_optab;
317 return TYPE_UNSIGNED (type) ? reduc_umax_optab : reduc_smax_optab;
320 return TYPE_UNSIGNED (type) ? reduc_umin_optab : reduc_smin_optab;
322 case REDUC_PLUS_EXPR:
323 return TYPE_UNSIGNED (type) ? reduc_uplus_optab : reduc_splus_optab;
325 case VEC_LSHIFT_EXPR:
326 return vec_shl_optab;
328 case VEC_RSHIFT_EXPR:
329 return vec_shr_optab;
331 case VEC_WIDEN_MULT_HI_EXPR:
332 return TYPE_UNSIGNED (type) ?
333 vec_widen_umult_hi_optab : vec_widen_smult_hi_optab;
335 case VEC_WIDEN_MULT_LO_EXPR:
336 return TYPE_UNSIGNED (type) ?
337 vec_widen_umult_lo_optab : vec_widen_smult_lo_optab;
339 case VEC_UNPACK_HI_EXPR:
340 return TYPE_UNSIGNED (type) ?
341 vec_unpacku_hi_optab : vec_unpacks_hi_optab;
343 case VEC_UNPACK_LO_EXPR:
344 return TYPE_UNSIGNED (type) ?
345 vec_unpacku_lo_optab : vec_unpacks_lo_optab;
347 case VEC_UNPACK_FLOAT_HI_EXPR:
348 /* The signedness is determined from input operand. */
349 return TYPE_UNSIGNED (type) ?
350 vec_unpacku_float_hi_optab : vec_unpacks_float_hi_optab;
352 case VEC_UNPACK_FLOAT_LO_EXPR:
353 /* The signedness is determined from input operand. */
354 return TYPE_UNSIGNED (type) ?
355 vec_unpacku_float_lo_optab : vec_unpacks_float_lo_optab;
357 case VEC_PACK_TRUNC_EXPR:
358 return vec_pack_trunc_optab;
360 case VEC_PACK_SAT_EXPR:
361 return TYPE_UNSIGNED (type) ? vec_pack_usat_optab : vec_pack_ssat_optab;
363 case VEC_PACK_FIX_TRUNC_EXPR:
364 /* The signedness is determined from output operand. */
365 return TYPE_UNSIGNED (type) ?
366 vec_pack_ufix_trunc_optab : vec_pack_sfix_trunc_optab;
/* For the remaining codes, trapping-on-overflow variants are chosen
   when the type is integral and traps on overflow.  */
372 trapv = INTEGRAL_TYPE_P (type) && TYPE_OVERFLOW_TRAPS (type);
376 return trapv ? addv_optab : add_optab;
379 return trapv ? subv_optab : sub_optab;
382 return trapv ? smulv_optab : smul_optab;
385 return trapv ? negv_optab : neg_optab;
388 return trapv ? absv_optab : abs_optab;
390 case VEC_EXTRACT_EVEN_EXPR:
391 return vec_extract_even_optab;
393 case VEC_EXTRACT_ODD_EXPR:
394 return vec_extract_odd_optab;
396 case VEC_INTERLEAVE_HIGH_EXPR:
397 return vec_interleave_high_optab;
399 case VEC_INTERLEAVE_LOW_EXPR:
400 return vec_interleave_low_optab;
408 /* Expand vector widening operations.
410 There are two different classes of operations handled here:
411 1) Operations whose result is wider than all the arguments to the operation.
412 Examples: VEC_UNPACK_HI/LO_EXPR, VEC_WIDEN_MULT_HI/LO_EXPR
413 In this case OP0 and optionally OP1 would be initialized,
414 but WIDE_OP wouldn't (not relevant for this case).
415 2) Operations whose result is of the same size as the last argument to the
416 operation, but wider than all the other arguments to the operation.
417 Examples: WIDEN_SUM_EXPR, VEC_DOT_PROD_EXPR.
418 In the case WIDE_OP, OP0 and optionally OP1 would be initialized.
420 E.g, when called to expand the following operations, this is how
421 the arguments will be initialized:
422 nr-of-oprnds oprnd0 oprnd1 wide-oprnd
423 widening-sum 2 oprnd0 - oprnd1
424 widening-dot-product 3 oprnd0 oprnd1 oprnd2
425 widening-mult 2 oprnd0 oprnd1 -
426 type-promotion (vec-unpack) 1 oprnd0 - - */
/* NOTE(review): this extract is line-sampled -- the return type, braces,
   several "if (nops >= 2)" style guards, the trailing arguments of the
   convert_modes calls, and the emit/return tail of the function are
   missing relative to upstream optabs.c; verify before editing.  */
429 expand_widen_pattern_expr (tree exp, rtx op0, rtx op1, rtx wide_op, rtx target,
432 tree oprnd0, oprnd1, oprnd2;
433 enum machine_mode wmode = 0, tmode0, tmode1 = 0;
434 optab widen_pattern_optab;
436 enum machine_mode xmode0, xmode1 = 0, wxmode = 0;
439 rtx xop0, xop1, wxop;
440 int nops = TREE_OPERAND_LENGTH (exp);
/* Look up the widening pattern for this tree code on operand 0's mode.  */
442 oprnd0 = TREE_OPERAND (exp, 0);
443 tmode0 = TYPE_MODE (TREE_TYPE (oprnd0));
444 widen_pattern_optab =
445 optab_for_tree_code (TREE_CODE (exp), TREE_TYPE (oprnd0));
446 icode = (int) widen_pattern_optab->handlers[(int) tmode0].insn_code;
447 gcc_assert (icode != CODE_FOR_nothing);
448 xmode0 = insn_data[icode].operand[1].mode;
452 oprnd1 = TREE_OPERAND (exp, 1);
453 tmode1 = TYPE_MODE (TREE_TYPE (oprnd1));
454 xmode1 = insn_data[icode].operand[2].mode;
457 /* The last operand is of a wider mode than the rest of the operands. */
465 gcc_assert (tmode1 == tmode0);
467 oprnd2 = TREE_OPERAND (exp, 2);
468 wmode = TYPE_MODE (TREE_TYPE (oprnd2));
469 wxmode = insn_data[icode].operand[3].mode;
473 wmode = wxmode = insn_data[icode].operand[0].mode;
476 || ! (*insn_data[icode].operand[0].predicate) (target, wmode))
477 temp = gen_reg_rtx (wmode);
485 /* In case the insn wants input operands in modes different from
486 those of the actual operands, convert the operands. It would
487 seem that we don't need to convert CONST_INTs, but we do, so
488 that they're properly zero-extended, sign-extended or truncated
491 if (GET_MODE (op0) != xmode0 && xmode0 != VOIDmode)
492 xop0 = convert_modes (xmode0,
493 GET_MODE (op0) != VOIDmode
499 if (GET_MODE (op1) != xmode1 && xmode1 != VOIDmode)
500 xop1 = convert_modes (xmode1,
501 GET_MODE (op1) != VOIDmode
507 if (GET_MODE (wide_op) != wxmode && wxmode != VOIDmode)
508 wxop = convert_modes (wxmode,
509 GET_MODE (wide_op) != VOIDmode
514 /* Now, if insn's predicates don't allow our operands, put them into
517 if (! (*insn_data[icode].operand[1].predicate) (xop0, xmode0)
518 && xmode0 != VOIDmode)
519 xop0 = copy_to_mode_reg (xmode0, xop0);
523 if (! (*insn_data[icode].operand[2].predicate) (xop1, xmode1)
524 && xmode1 != VOIDmode)
525 xop1 = copy_to_mode_reg (xmode1, xop1);
529 if (! (*insn_data[icode].operand[3].predicate) (wxop, wxmode)
530 && wxmode != VOIDmode)
531 wxop = copy_to_mode_reg (wxmode, wxop);
/* Generate the insn with however many operands this pattern takes.  */
533 pat = GEN_FCN (icode) (temp, xop0, xop1, wxop);
536 pat = GEN_FCN (icode) (temp, xop0, xop1);
542 if (! (*insn_data[icode].operand[2].predicate) (wxop, wxmode)
543 && wxmode != VOIDmode)
544 wxop = copy_to_mode_reg (wxmode, wxop);
546 pat = GEN_FCN (icode) (temp, xop0, wxop);
549 pat = GEN_FCN (icode) (temp, xop0);
556 /* Generate code to perform an operation specified by TERNARY_OPTAB
557 on operands OP0, OP1 and OP2, with result having machine-mode MODE.
559 UNSIGNEDP is for the case where we have to widen the operands
560 to perform the operation. It says to use zero-extension.
562 If TARGET is nonzero, the value
563 is generated there, if it is convenient to do so.
564 In all cases an rtx is returned for the locus of the value;
565 this may or may not be TARGET. */
/* NOTE(review): this extract is line-sampled -- the return type, braces,
   the "else temp = target;" branch, the trailing arguments of the
   convert_modes calls, and the emit/return tail are missing relative to
   upstream optabs.c; verify before editing.  */
568 expand_ternary_op (enum machine_mode mode, optab ternary_optab, rtx op0,
569 rtx op1, rtx op2, rtx target, int unsignedp)
571 int icode = (int) ternary_optab->handlers[(int) mode].insn_code;
572 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
573 enum machine_mode mode1 = insn_data[icode].operand[2].mode;
574 enum machine_mode mode2 = insn_data[icode].operand[3].mode;
577 rtx xop0 = op0, xop1 = op1, xop2 = op2;
/* The caller must have verified the pattern exists for MODE.  */
579 gcc_assert (ternary_optab->handlers[(int) mode].insn_code
580 != CODE_FOR_nothing);
582 if (!target || !insn_data[icode].operand[0].predicate (target, mode))
583 temp = gen_reg_rtx (mode);
587 /* In case the insn wants input operands in modes different from
588 those of the actual operands, convert the operands. It would
589 seem that we don't need to convert CONST_INTs, but we do, so
590 that they're properly zero-extended, sign-extended or truncated
593 if (GET_MODE (op0) != mode0 && mode0 != VOIDmode)
594 xop0 = convert_modes (mode0,
595 GET_MODE (op0) != VOIDmode
600 if (GET_MODE (op1) != mode1 && mode1 != VOIDmode)
601 xop1 = convert_modes (mode1,
602 GET_MODE (op1) != VOIDmode
607 if (GET_MODE (op2) != mode2 && mode2 != VOIDmode)
608 xop2 = convert_modes (mode2,
609 GET_MODE (op2) != VOIDmode
614 /* Now, if insn's predicates don't allow our operands, put them into
617 if (!insn_data[icode].operand[1].predicate (xop0, mode0)
618 && mode0 != VOIDmode)
619 xop0 = copy_to_mode_reg (mode0, xop0);
621 if (!insn_data[icode].operand[2].predicate (xop1, mode1)
622 && mode1 != VOIDmode)
623 xop1 = copy_to_mode_reg (mode1, xop1);
625 if (!insn_data[icode].operand[3].predicate (xop2, mode2)
626 && mode2 != VOIDmode)
627 xop2 = copy_to_mode_reg (mode2, xop2);
629 pat = GEN_FCN (icode) (temp, xop0, xop1, xop2);
636 /* Like expand_binop, but return a constant rtx if the result can be
637 calculated at compile time. The arguments and return value are
638 otherwise the same as for expand_binop. */
641 simplify_expand_binop (enum machine_mode mode, optab binoptab,
642 rtx op0, rtx op1, rtx target, int unsignedp,
643 enum optab_methods methods)
645 if (CONSTANT_P (op0) && CONSTANT_P (op1))
647 rtx x = simplify_binary_operation (binoptab->code, mode, op0, op1);
653 return expand_binop (mode, binoptab, op0, op1, target, unsignedp, methods);
656 /* Like simplify_expand_binop, but always put the result in TARGET.
657 Return true if the expansion succeeded. */
660 force_expand_binop (enum machine_mode mode, optab binoptab,
661 rtx op0, rtx op1, rtx target, int unsignedp,
662 enum optab_methods methods)
664 rtx x = simplify_expand_binop (mode, binoptab, op0, op1,
665 target, unsignedp, methods);
669 emit_move_insn (target, x);
673 /* Generate insns for VEC_LSHIFT_EXPR, VEC_RSHIFT_EXPR. */
/* NOTE(review): this extract is line-sampled -- the return type, braces,
   the "break"/"default: gcc_unreachable ()" arms of the switch, the
   "if (target == 0 ..." line, and the emit/return tail are missing
   relative to upstream optabs.c; verify before editing.  */
676 expand_vec_shift_expr (tree vec_shift_expr, rtx target)
678 enum insn_code icode;
679 rtx rtx_op1, rtx_op2;
680 enum machine_mode mode1;
681 enum machine_mode mode2;
682 enum machine_mode mode = TYPE_MODE (TREE_TYPE (vec_shift_expr));
683 tree vec_oprnd = TREE_OPERAND (vec_shift_expr, 0);
684 tree shift_oprnd = TREE_OPERAND (vec_shift_expr, 1);
/* Pick the whole-vector shift optab by direction.  */
688 switch (TREE_CODE (vec_shift_expr))
690 case VEC_RSHIFT_EXPR:
691 shift_optab = vec_shr_optab;
693 case VEC_LSHIFT_EXPR:
694 shift_optab = vec_shl_optab;
700 icode = (int) shift_optab->handlers[(int) mode].insn_code;
701 gcc_assert (icode != CODE_FOR_nothing);
703 mode1 = insn_data[icode].operand[1].mode;
704 mode2 = insn_data[icode].operand[2].mode;
/* Expand both operands and force them into registers when the
   pattern's predicates reject them.  */
706 rtx_op1 = expand_normal (vec_oprnd);
707 if (!(*insn_data[icode].operand[1].predicate) (rtx_op1, mode1)
708 && mode1 != VOIDmode)
709 rtx_op1 = force_reg (mode1, rtx_op1);
711 rtx_op2 = expand_normal (shift_oprnd);
712 if (!(*insn_data[icode].operand[2].predicate) (rtx_op2, mode2)
713 && mode2 != VOIDmode)
714 rtx_op2 = force_reg (mode2, rtx_op2);
717 || ! (*insn_data[icode].operand[0].predicate) (target, mode))
718 target = gen_reg_rtx (mode);
720 /* Emit instruction */
721 pat = GEN_FCN (icode) (target, rtx_op1, rtx_op2);
728 /* This subroutine of expand_doubleword_shift handles the cases in which
729 the effective shift value is >= BITS_PER_WORD. The arguments and return
730 value are the same as for the parent routine, except that SUPERWORD_OP1
731 is the shift count to use when shifting OUTOF_INPUT into INTO_TARGET.
732 INTO_TARGET may be null if the caller has decided to calculate it. */
735 expand_superword_shift (optab binoptab, rtx outof_input, rtx superword_op1,
736 rtx outof_target, rtx into_target,
737 int unsignedp, enum optab_methods methods)
739 if (into_target != 0)
740 if (!force_expand_binop (word_mode, binoptab, outof_input, superword_op1,
741 into_target, unsignedp, methods))
744 if (outof_target != 0)
746 /* For a signed right shift, we must fill OUTOF_TARGET with copies
747 of the sign bit, otherwise we must fill it with zeros. */
748 if (binoptab != ashr_optab)
749 emit_move_insn (outof_target, CONST0_RTX (word_mode));
751 if (!force_expand_binop (word_mode, binoptab,
752 outof_input, GEN_INT (BITS_PER_WORD - 1),
753 outof_target, unsignedp, methods))
759 /* This subroutine of expand_doubleword_shift handles the cases in which
760 the effective shift value is < BITS_PER_WORD. The arguments and return
761 value are the same as for the parent routine. */
/* NOTE(review): this extract is line-sampled -- the return type, braces,
   the declarations of "tmp" and "carries", the trailing arguments of the
   simplify_expand_binop calls, several "return false;" failure paths and
   the final "return true;" are missing relative to upstream optabs.c.  */
764 expand_subword_shift (enum machine_mode op1_mode, optab binoptab,
765 rtx outof_input, rtx into_input, rtx op1,
766 rtx outof_target, rtx into_target,
767 int unsignedp, enum optab_methods methods,
768 unsigned HOST_WIDE_INT shift_mask)
770 optab reverse_unsigned_shift, unsigned_shift;
/* The carry bits move opposite to the main shift direction; the main
   word shift is always done logically.  */
773 reverse_unsigned_shift = (binoptab == ashl_optab ? lshr_optab : ashl_optab);
774 unsigned_shift = (binoptab == ashl_optab ? ashl_optab : lshr_optab);
776 /* The low OP1 bits of INTO_TARGET come from the high bits of OUTOF_INPUT.
777 We therefore need to shift OUTOF_INPUT by (BITS_PER_WORD - OP1) bits in
778 the opposite direction to BINOPTAB. */
779 if (CONSTANT_P (op1) || shift_mask >= BITS_PER_WORD)
781 carries = outof_input;
782 tmp = immed_double_const (BITS_PER_WORD, 0, op1_mode);
783 tmp = simplify_expand_binop (op1_mode, sub_optab, tmp, op1,
788 /* We must avoid shifting by BITS_PER_WORD bits since that is either
789 the same as a zero shift (if shift_mask == BITS_PER_WORD - 1) or
790 has unknown behavior. Do a single shift first, then shift by the
791 remainder. It's OK to use ~OP1 as the remainder if shift counts
792 are truncated to the mode size. */
793 carries = expand_binop (word_mode, reverse_unsigned_shift,
794 outof_input, const1_rtx, 0, unsignedp, methods);
795 if (shift_mask == BITS_PER_WORD - 1)
797 tmp = immed_double_const (-1, -1, op1_mode);
798 tmp = simplify_expand_binop (op1_mode, xor_optab, op1, tmp,
803 tmp = immed_double_const (BITS_PER_WORD - 1, 0, op1_mode);
804 tmp = simplify_expand_binop (op1_mode, sub_optab, tmp, op1,
808 if (tmp == 0 || carries == 0)
810 carries = expand_binop (word_mode, reverse_unsigned_shift,
811 carries, tmp, 0, unsignedp, methods);
815 /* Shift INTO_INPUT logically by OP1. This is the last use of INTO_INPUT
816 so the result can go directly into INTO_TARGET if convenient. */
817 tmp = expand_binop (word_mode, unsigned_shift, into_input, op1,
818 into_target, unsignedp, methods);
822 /* Now OR in the bits carried over from OUTOF_INPUT. */
823 if (!force_expand_binop (word_mode, ior_optab, tmp, carries,
824 into_target, unsignedp, methods))
827 /* Use a standard word_mode shift for the out-of half. */
828 if (outof_target != 0)
829 if (!force_expand_binop (word_mode, binoptab, outof_input, op1,
830 outof_target, unsignedp, methods))
837 #ifdef HAVE_conditional_move
838 /* Try implementing expand_doubleword_shift using conditional moves.
839 The shift is by < BITS_PER_WORD if (CMP_CODE CMP1 CMP2) is true,
840 otherwise it is by >= BITS_PER_WORD. SUBWORD_OP1 and SUPERWORD_OP1
841 are the shift counts to use in the former and latter case. All other
842 arguments are the same as the parent routine. */
/* NOTE(review): this extract is line-sampled -- the return type, braces,
   the "else" arm allocating INTO_SUPERWORD, several "return false;"
   failure paths and the final "return true;" are missing relative to
   upstream optabs.c; verify before editing.  */
845 expand_doubleword_shift_condmove (enum machine_mode op1_mode, optab binoptab,
846 enum rtx_code cmp_code, rtx cmp1, rtx cmp2,
847 rtx outof_input, rtx into_input,
848 rtx subword_op1, rtx superword_op1,
849 rtx outof_target, rtx into_target,
850 int unsignedp, enum optab_methods methods,
851 unsigned HOST_WIDE_INT shift_mask)
853 rtx outof_superword, into_superword;
855 /* Put the superword version of the output into OUTOF_SUPERWORD and
857 outof_superword = outof_target != 0 ? gen_reg_rtx (word_mode) : 0;
858 if (outof_target != 0 && subword_op1 == superword_op1)
860 /* The value INTO_TARGET >> SUBWORD_OP1, which we later store in
861 OUTOF_TARGET, is the same as the value of INTO_SUPERWORD. */
862 into_superword = outof_target;
863 if (!expand_superword_shift (binoptab, outof_input, superword_op1,
864 outof_superword, 0, unsignedp, methods))
869 into_superword = gen_reg_rtx (word_mode);
870 if (!expand_superword_shift (binoptab, outof_input, superword_op1,
871 outof_superword, into_superword,
876 /* Put the subword version directly in OUTOF_TARGET and INTO_TARGET. */
877 if (!expand_subword_shift (op1_mode, binoptab,
878 outof_input, into_input, subword_op1,
879 outof_target, into_target,
880 unsignedp, methods, shift_mask))
883 /* Select between them. Do the INTO half first because INTO_SUPERWORD
884 might be the current value of OUTOF_TARGET. */
885 if (!emit_conditional_move (into_target, cmp_code, cmp1, cmp2, op1_mode,
886 into_target, into_superword, word_mode, false))
889 if (outof_target != 0)
890 if (!emit_conditional_move (outof_target, cmp_code, cmp1, cmp2, op1_mode,
891 outof_target, outof_superword,
899 /* Expand a doubleword shift (ashl, ashr or lshr) using word-mode shifts.
900 OUTOF_INPUT and INTO_INPUT are the two word-sized halves of the first
901 input operand; the shift moves bits in the direction OUTOF_INPUT->
902 INTO_TARGET. OUTOF_TARGET and INTO_TARGET are the equivalent words
903 of the target. OP1 is the shift count and OP1_MODE is its mode.
904 If OP1 is constant, it will have been truncated as appropriate
905 and is known to be nonzero.
907 If SHIFT_MASK is zero, the result of word shifts is undefined when the
908 shift count is outside the range [0, BITS_PER_WORD). This routine must
909 avoid generating such shifts for OP1s in the range [0, BITS_PER_WORD * 2).
911 If SHIFT_MASK is nonzero, all word-mode shift counts are effectively
912 masked by it and shifts in the range [BITS_PER_WORD, SHIFT_MASK) will
913 fill with zeros or sign bits as appropriate.
915 If SHIFT_MASK is BITS_PER_WORD - 1, this routine will synthesize
916 a doubleword shift whose equivalent mask is BITS_PER_WORD * 2 - 1.
917 Doing this preserves semantics required by SHIFT_COUNT_TRUNCATED.
918 In all other cases, shifts by values outside [0, BITS_PER_UNIT * 2)
921 BINOPTAB, UNSIGNEDP and METHODS are as for expand_binop. This function
922 may not use INTO_INPUT after modifying INTO_TARGET, and similarly for
923 OUTOF_INPUT and OUTOF_TARGET. OUTOF_TARGET can be null if the parent
924 function wants to calculate it itself.
926 Return true if the shift could be successfully synthesized. */
/* NOTE(review): this extract is line-sampled -- the return type, braces,
   the shift_mask recursion condition, the cmp_code assignments (EQ/LT),
   several "return false;" failure paths and the final "return true;" are
   missing relative to upstream optabs.c; verify before editing.  */
929 expand_doubleword_shift (enum machine_mode op1_mode, optab binoptab,
930 rtx outof_input, rtx into_input, rtx op1,
931 rtx outof_target, rtx into_target,
932 int unsignedp, enum optab_methods methods,
933 unsigned HOST_WIDE_INT shift_mask)
935 rtx superword_op1, tmp, cmp1, cmp2;
936 rtx subword_label, done_label;
937 enum rtx_code cmp_code;
939 /* See if word-mode shifts by BITS_PER_WORD...BITS_PER_WORD * 2 - 1 will
940 fill the result with sign or zero bits as appropriate. If so, the value
941 of OUTOF_TARGET will always be (SHIFT OUTOF_INPUT OP1). Recursively call
942 this routine to calculate INTO_TARGET (which depends on both OUTOF_INPUT
943 and INTO_INPUT), then emit code to set up OUTOF_TARGET.
945 This isn't worthwhile for constant shifts since the optimizers will
946 cope better with in-range shift counts. */
947 if (shift_mask >= BITS_PER_WORD
949 && !CONSTANT_P (op1))
951 if (!expand_doubleword_shift (op1_mode, binoptab,
952 outof_input, into_input, op1,
954 unsignedp, methods, shift_mask))
956 if (!force_expand_binop (word_mode, binoptab, outof_input, op1,
957 outof_target, unsignedp, methods))
962 /* Set CMP_CODE, CMP1 and CMP2 so that the rtx (CMP_CODE CMP1 CMP2)
963 is true when the effective shift value is less than BITS_PER_WORD.
964 Set SUPERWORD_OP1 to the shift count that should be used to shift
965 OUTOF_INPUT into INTO_TARGET when the condition is false. */
966 tmp = immed_double_const (BITS_PER_WORD, 0, op1_mode);
967 if (!CONSTANT_P (op1) && shift_mask == BITS_PER_WORD - 1)
969 /* Set CMP1 to OP1 & BITS_PER_WORD. The result is zero iff OP1
970 is a subword shift count. */
971 cmp1 = simplify_expand_binop (op1_mode, and_optab, op1, tmp,
973 cmp2 = CONST0_RTX (op1_mode);
979 /* Set CMP1 to OP1 - BITS_PER_WORD. */
980 cmp1 = simplify_expand_binop (op1_mode, sub_optab, op1, tmp,
982 cmp2 = CONST0_RTX (op1_mode);
984 superword_op1 = cmp1;
989 /* If we can compute the condition at compile time, pick the
990 appropriate subroutine. */
991 tmp = simplify_relational_operation (cmp_code, SImode, op1_mode, cmp1, cmp2);
992 if (tmp != 0 && GET_CODE (tmp) == CONST_INT)
994 if (tmp == const0_rtx)
995 return expand_superword_shift (binoptab, outof_input, superword_op1,
996 outof_target, into_target,
999 return expand_subword_shift (op1_mode, binoptab,
1000 outof_input, into_input, op1,
1001 outof_target, into_target,
1002 unsignedp, methods, shift_mask);
1005 #ifdef HAVE_conditional_move
1006 /* Try using conditional moves to generate straight-line code. */
1008 rtx start = get_last_insn ();
1009 if (expand_doubleword_shift_condmove (op1_mode, binoptab,
1010 cmp_code, cmp1, cmp2,
1011 outof_input, into_input,
1013 outof_target, into_target,
1014 unsignedp, methods, shift_mask))
/* On failure, discard the partially emitted condmove sequence.  */
1016 delete_insns_since (start);
1020 /* As a last resort, use branches to select the correct alternative. */
1021 subword_label = gen_label_rtx ();
1022 done_label = gen_label_rtx ();
1025 do_compare_rtx_and_jump (cmp1, cmp2, cmp_code, false, op1_mode,
1026 0, 0, subword_label);
1029 if (!expand_superword_shift (binoptab, outof_input, superword_op1,
1030 outof_target, into_target,
1031 unsignedp, methods))
1034 emit_jump_insn (gen_jump (done_label));
1036 emit_label (subword_label);
1038 if (!expand_subword_shift (op1_mode, binoptab,
1039 outof_input, into_input, op1,
1040 outof_target, into_target,
1041 unsignedp, methods, shift_mask))
1044 emit_label (done_label);
1048 /* Subroutine of expand_binop. Perform a double word multiplication of
1049 operands OP0 and OP1 both of mode MODE, which is exactly twice as wide
1050 as the target's word_mode. This function return NULL_RTX if anything
1051 goes wrong, in which case it may have already emitted instructions
1052 which need to be deleted.
1054 If we want to multiply two two-word values and have normal and widening
1055 multiplies of single-word values, we can do this with three smaller
1056 multiplications. Note that we do not make a REG_NO_CONFLICT block here
1057 because we are not operating on one word at a time.
1059 The multiplication proceeds as follows:
1060 _______________________
1061 [__op0_high_|__op0_low__]
1062 _______________________
1063 * [__op1_high_|__op1_low__]
1064 _______________________________________________
1065 _______________________
1066 (1) [__op0_low__*__op1_low__]
1067 _______________________
1068 (2a) [__op0_low__*__op1_high_]
1069 _______________________
1070 (2b) [__op0_high_*__op1_low__]
1071 _______________________
1072 (3) [__op0_high_*__op1_high_]
1075 This gives a 4-word result. Since we are only interested in the
1076 lower 2 words, partial result (3) and the upper words of (2a) and
1077 (2b) don't need to be calculated. Hence (2a) and (2b) can be
1078 calculated using non-widening multiplication.
1080 (1), however, needs to be calculated with an unsigned widening
1081 multiplication. If this operation is not directly supported we
1082 try using a signed widening multiplication and adjust the result.
1083 This adjustment works as follows:
1085 If both operands are positive then no adjustment is needed.
1087 If the operands have different signs, for example op0_low < 0 and
1088 op1_low >= 0, the instruction treats the most significant bit of
1089 op0_low as a sign bit instead of a bit with significance
1090 2**(BITS_PER_WORD-1), i.e. the instruction multiplies op1_low
1091 with 2**BITS_PER_WORD - op0_low, and two's complements the
1092 result. Conclusion: We need to add op1_low * 2**BITS_PER_WORD to
1095 Similarly, if both operands are negative, we need to add
1096 (op0_low + op1_low) * 2**BITS_PER_WORD.
1098 We use a trick to adjust quickly. We logically shift op0_low right
1099 (op1_low) BITS_PER_WORD-1 steps to get 0 or 1, and add this to
1100 op0_high (op1_high) before it is used to calculate 2b (2a). If no
1101 logical shift exists, we do an arithmetic right shift and subtract
1105 expand_doubleword_mult (enum machine_mode mode, rtx op0, rtx op1, rtx target,
1106 bool umulp, enum optab_methods methods)
1108 int low = (WORDS_BIG_ENDIAN ? 1 : 0);
1109 int high = (WORDS_BIG_ENDIAN ? 0 : 1);
1110 rtx wordm1 = umulp ? NULL_RTX : GEN_INT (BITS_PER_WORD - 1);
1111 rtx product, adjust, product_high, temp;
1113 rtx op0_high = operand_subword_force (op0, high, mode);
1114 rtx op0_low = operand_subword_force (op0, low, mode);
1115 rtx op1_high = operand_subword_force (op1, high, mode);
1116 rtx op1_low = operand_subword_force (op1, low, mode);
1118 /* If we're using an unsigned multiply to directly compute the product
1119 of the low-order words of the operands and perform any required
1120 adjustments of the operands, we begin by trying two more multiplications
1121 and then computing the appropriate sum.
1123 We have checked above that the required addition is provided.
1124 Full-word addition will normally always succeed, especially if
1125 it is provided at all, so we don't worry about its failure. The
1126 multiplication may well fail, however, so we do handle that. */
1130 /* ??? This could be done with emit_store_flag where available. */
1131 temp = expand_binop (word_mode, lshr_optab, op0_low, wordm1,
1132 NULL_RTX, 1, methods);
1134 op0_high = expand_binop (word_mode, add_optab, op0_high, temp,
1135 NULL_RTX, 0, OPTAB_DIRECT);
1138 temp = expand_binop (word_mode, ashr_optab, op0_low, wordm1,
1139 NULL_RTX, 0, methods);
1142 op0_high = expand_binop (word_mode, sub_optab, op0_high, temp,
1143 NULL_RTX, 0, OPTAB_DIRECT);
1150 adjust = expand_binop (word_mode, smul_optab, op0_high, op1_low,
1151 NULL_RTX, 0, OPTAB_DIRECT);
1155 /* OP0_HIGH should now be dead. */
1159 /* ??? This could be done with emit_store_flag where available. */
1160 temp = expand_binop (word_mode, lshr_optab, op1_low, wordm1,
1161 NULL_RTX, 1, methods);
1163 op1_high = expand_binop (word_mode, add_optab, op1_high, temp,
1164 NULL_RTX, 0, OPTAB_DIRECT);
1167 temp = expand_binop (word_mode, ashr_optab, op1_low, wordm1,
1168 NULL_RTX, 0, methods);
1171 op1_high = expand_binop (word_mode, sub_optab, op1_high, temp,
1172 NULL_RTX, 0, OPTAB_DIRECT);
1179 temp = expand_binop (word_mode, smul_optab, op1_high, op0_low,
1180 NULL_RTX, 0, OPTAB_DIRECT);
1184 /* OP1_HIGH should now be dead. */
1186 adjust = expand_binop (word_mode, add_optab, adjust, temp,
1187 adjust, 0, OPTAB_DIRECT);
1189 if (target && !REG_P (target))
1193 product = expand_binop (mode, umul_widen_optab, op0_low, op1_low,
1194 target, 1, OPTAB_DIRECT);
1196 product = expand_binop (mode, smul_widen_optab, op0_low, op1_low,
1197 target, 1, OPTAB_DIRECT);
1202 product_high = operand_subword (product, high, 1, mode);
1203 adjust = expand_binop (word_mode, add_optab, product_high, adjust,
1204 REG_P (product_high) ? product_high : adjust,
1206 emit_move_insn (product_high, adjust);
1210 /* Wrapper around expand_binop which takes an rtx code to specify
1211 the operation to perform, not an optab pointer.  All other
1212 arguments are the same.  */
1214 expand_simple_binop (enum machine_mode mode, enum rtx_code code, rtx op0,
1215 rtx op1, rtx target, int unsignedp,
1216 enum optab_methods methods)
/* Map the rtx code to its optab via the global code_to_optab table
   (declared at the top of this file), then delegate.  */
1218 optab binop = code_to_optab[(int) code];
1221 return expand_binop (mode, binop, op0, op1, target, unsignedp, methods);
1224 /* Return whether OP0 and OP1 should be swapped when expanding a commutative
1225 binop.  Order them according to commutative_operand_precedence and, if
1226 possible, try to put TARGET or a pseudo first.  */
1228 swap_commutative_operands_with_target (rtx target, rtx op0, rtx op1)
1230 int op0_prec = commutative_operand_precedence (op0);
1231 int op1_prec = commutative_operand_precedence (op1);
/* Lower precedence for OP0 means OP1 belongs first: swap.  */
1233 if (op0_prec < op1_prec)
1236 if (op0_prec > op1_prec)
1239 /* With equal precedence, both orders are ok, but it is better if the
1240 first operand is TARGET, or if both TARGET and OP0 are pseudos.  */
1241 if (target == 0 || REG_P (target))
1242 return (REG_P (op1) && !REG_P (op0)) || target == op1;
/* Non-register TARGET: swap only when OP1 is (equal to) the target.  */
1244 return rtx_equal_p (op1, target);
1248 /* Generate code to perform an operation specified by BINOPTAB
1249 on operands OP0 and OP1, with result having machine-mode MODE.
1251 UNSIGNEDP is for the case where we have to widen the operands
1252 to perform the operation.  It says to use zero-extension.
1254 If TARGET is nonzero, the value
1255 is generated there, if it is convenient to do so.
1256 In all cases an rtx is returned for the locus of the value;
1257 this may or may not be TARGET.
   Strategy (visible in the cascade below): try a direct insn; try a
   widening multiply; try open-coding in a wider mode; synthesize
   multiword logical ops, shifts, rotates, add/sub-with-carry and
   doubleword multiplies from word_mode pieces; fall back to a library
   call; finally recurse at a wider mode.  Each failed attempt deletes
   its emitted insns back to a recorded point before trying the next.
   NOTE(review): this excerpt is missing interior lines (braces and
   some statements) — verify against the complete source.  */
1260 expand_binop (enum machine_mode mode, optab binoptab, rtx op0, rtx op1,
1261 rtx target, int unsignedp, enum optab_methods methods)
/* Recursive sub-expansions must not retry library calls.  */
1263 enum optab_methods next_methods
1264 = (methods == OPTAB_LIB || methods == OPTAB_LIB_WIDEN
1265 ? OPTAB_WIDEN : methods);
1266 enum mode_class class;
1267 enum machine_mode wider_mode;
1269 int commutative_op = 0;
1270 int shift_op = (binoptab->code == ASHIFT
1271 || binoptab->code == ASHIFTRT
1272 || binoptab->code == LSHIFTRT
1273 || binoptab->code == ROTATE
1274 || binoptab->code == ROTATERT);
1275 rtx entry_last = get_last_insn ();
1277 bool first_pass_p = true;
1279 class = GET_MODE_CLASS (mode);
1281 /* If subtracting an integer constant, convert this into an addition of
1282 the negated constant.  */
1284 if (binoptab == sub_optab && GET_CODE (op1) == CONST_INT)
1286 op1 = negate_rtx (mode, op1);
1287 binoptab = add_optab;
1290 /* If we are inside an appropriately-short loop and we are optimizing,
1291 force expensive constants into a register.  */
1292 if (CONSTANT_P (op0) && optimize
1293 && rtx_cost (op0, binoptab->code) > COSTS_N_INSNS (1))
1295 if (GET_MODE (op0) != VOIDmode)
1296 op0 = convert_modes (mode, VOIDmode, op0, unsignedp);
1297 op0 = force_reg (mode, op0);
/* Shift counts are never forced into registers here (shift_op).  */
1300 if (CONSTANT_P (op1) && optimize
1301 && ! shift_op && rtx_cost (op1, binoptab->code) > COSTS_N_INSNS (1))
1303 if (GET_MODE (op1) != VOIDmode)
1304 op1 = convert_modes (mode, VOIDmode, op1, unsignedp);
1305 op1 = force_reg (mode, op1);
1308 /* Record where to delete back to if we backtrack.  */
1309 last = get_last_insn ();
1311 /* If operation is commutative,
1312 try to make the first operand a register.
1313 Even better, try to make it the same as the target.
1314 Also try to make the last operand a constant.  */
1315 if (GET_RTX_CLASS (binoptab->code) == RTX_COMM_ARITH
1316 || binoptab == smul_widen_optab
1317 || binoptab == umul_widen_optab
1318 || binoptab == smul_highpart_optab
1319 || binoptab == umul_highpart_optab)
1323 if (swap_commutative_operands_with_target (target, op0, op1))
1333 /* If we can do it with a three-operand insn, do so.  */
1335 if (methods != OPTAB_MUST_WIDEN
1336 && binoptab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
1338 int icode = (int) binoptab->handlers[(int) mode].insn_code;
1339 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
1340 enum machine_mode mode1 = insn_data[icode].operand[2].mode;
1341 enum machine_mode tmp_mode;
1343 rtx xop0 = op0, xop1 = op1;
1348 temp = gen_reg_rtx (mode);
1350 /* If it is a commutative operator and the modes would match
1351 if we would swap the operands, we can save the conversions.  */
1354 if (GET_MODE (op0) != mode0 && GET_MODE (op1) != mode1
1355 && GET_MODE (op0) == mode1 && GET_MODE (op1) == mode0)
1359 tmp = op0; op0 = op1; op1 = tmp;
1360 tmp = xop0; xop0 = xop1; xop1 = tmp;
1364 /* In case the insn wants input operands in modes different from
1365 those of the actual operands, convert the operands.  It would
1366 seem that we don't need to convert CONST_INTs, but we do, so
1367 that they're properly zero-extended, sign-extended or truncated
1370 if (GET_MODE (op0) != mode0 && mode0 != VOIDmode)
1371 xop0 = convert_modes (mode0,
1372 GET_MODE (op0) != VOIDmode
1377 if (GET_MODE (op1) != mode1 && mode1 != VOIDmode)
1378 xop1 = convert_modes (mode1,
1379 GET_MODE (op1) != VOIDmode
1384 /* Now, if insn's predicates don't allow our operands, put them into
1387 if (!insn_data[icode].operand[1].predicate (xop0, mode0)
1388 && mode0 != VOIDmode)
1389 xop0 = copy_to_mode_reg (mode0, xop0);
1391 if (!insn_data[icode].operand[2].predicate (xop1, mode1)
1392 && mode1 != VOIDmode)
1393 xop1 = copy_to_mode_reg (mode1, xop1);
/* Vector pack patterns produce a result in a narrower-element mode
   with twice as many units; pick up that mode from the insn.  */
1395 if (binoptab == vec_pack_trunc_optab
1396 || binoptab == vec_pack_usat_optab
1397 || binoptab == vec_pack_ssat_optab
1398 || binoptab == vec_pack_ufix_trunc_optab
1399 || binoptab == vec_pack_sfix_trunc_optab)
1401 /* The mode of the result is different than the mode of the
1403 tmp_mode = insn_data[icode].operand[0].mode;
1404 if (GET_MODE_NUNITS (tmp_mode) != 2 * GET_MODE_NUNITS (mode))
1410 if (!insn_data[icode].operand[0].predicate (temp, tmp_mode))
1411 temp = gen_reg_rtx (tmp_mode);
1413 pat = GEN_FCN (icode) (temp, xop0, xop1);
1416 /* If PAT is composed of more than one insn, try to add an appropriate
1417 REG_EQUAL note to it.  If we can't because TEMP conflicts with an
1418 operand, call ourselves again, this time without a target.  */
1419 if (INSN_P (pat) && NEXT_INSN (pat) != NULL_RTX
1420 && ! add_equal_note (pat, temp, binoptab->code, xop0, xop1))
1422 delete_insns_since (last);
1423 return expand_binop (mode, binoptab, op0, op1, NULL_RTX,
1424 unsignedp, methods);
/* Direct attempt failed: discard its insns before the next strategy.  */
1431 delete_insns_since (last);
1434 /* If we were trying to rotate by a constant value, and that didn't
1435 work, try rotating the other direction before falling back to
1436 shifts and bitwise-or.  */
1438 && (binoptab == rotl_optab || binoptab == rotr_optab)
1439 && class == MODE_INT
1440 && GET_CODE (op1) == CONST_INT
1442 && (unsigned int) INTVAL (op1) < GET_MODE_BITSIZE (mode))
1444 first_pass_p = false;
1445 op1 = GEN_INT (GET_MODE_BITSIZE (mode) - INTVAL (op1));
1446 binoptab = binoptab == rotl_optab ? rotr_optab : rotl_optab;
1450 /* If this is a multiply, see if we can do a widening operation that
1451 takes operands of this mode and makes a wider mode.  */
1453 if (binoptab == smul_optab
1454 && GET_MODE_WIDER_MODE (mode) != VOIDmode
1455 && (((unsignedp ? umul_widen_optab : smul_widen_optab)
1456 ->handlers[(int) GET_MODE_WIDER_MODE (mode)].insn_code)
1457 != CODE_FOR_nothing))
1459 temp = expand_binop (GET_MODE_WIDER_MODE (mode),
1460 unsignedp ? umul_widen_optab : smul_widen_optab,
1461 op0, op1, NULL_RTX, unsignedp, OPTAB_DIRECT);
/* Keep only the low part of the widened product.  */
1465 if (GET_MODE_CLASS (mode) == MODE_INT
1466 && TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode),
1467 GET_MODE_BITSIZE (GET_MODE (temp))))
1468 return gen_lowpart (mode, temp);
1470 return convert_to_mode (mode, temp, unsignedp);
1474 /* Look for a wider mode of the same class for which we think we
1475 can open-code the operation.  Check for a widening multiply at the
1476 wider mode as well.  */
1478 if (CLASS_HAS_WIDER_MODES_P (class)
1479 && methods != OPTAB_DIRECT && methods != OPTAB_LIB)
1480 for (wider_mode = GET_MODE_WIDER_MODE (mode);
1481 wider_mode != VOIDmode;
1482 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
1484 if (binoptab->handlers[(int) wider_mode].insn_code != CODE_FOR_nothing
1485 || (binoptab == smul_optab
1486 && GET_MODE_WIDER_MODE (wider_mode) != VOIDmode
1487 && (((unsignedp ? umul_widen_optab : smul_widen_optab)
1488 ->handlers[(int) GET_MODE_WIDER_MODE (wider_mode)].insn_code)
1489 != CODE_FOR_nothing)))
1491 rtx xop0 = op0, xop1 = op1;
1494 /* For certain integer operations, we need not actually extend
1495 the narrow operands, as long as we will truncate
1496 the results to the same narrowness.  */
1498 if ((binoptab == ior_optab || binoptab == and_optab
1499 || binoptab == xor_optab
1500 || binoptab == add_optab || binoptab == sub_optab
1501 || binoptab == smul_optab || binoptab == ashl_optab)
1502 && class == MODE_INT
1505 xop0 = widen_operand (xop0, wider_mode, mode, unsignedp, no_extend);
1507 /* The second operand of a shift must always be extended.  */
1508 xop1 = widen_operand (xop1, wider_mode, mode, unsignedp,
1509 no_extend && binoptab != ashl_optab);
1511 temp = expand_binop (wider_mode, binoptab, xop0, xop1, NULL_RTX,
1512 unsignedp, OPTAB_DIRECT);
1515 if (class != MODE_INT
1516 || !TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode),
1517 GET_MODE_BITSIZE (wider_mode)))
1520 target = gen_reg_rtx (mode);
1521 convert_move (target, temp, 0);
1525 return gen_lowpart (mode, temp);
1528 delete_insns_since (last);
1532 /* These can be done a word at a time.  */
1533 if ((binoptab == and_optab || binoptab == ior_optab || binoptab == xor_optab)
1534 && class == MODE_INT
1535 && GET_MODE_SIZE (mode) > UNITS_PER_WORD
1536 && binoptab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing
1542 /* If TARGET is the same as one of the operands, the REG_EQUAL note
1543 won't be accurate, so use a new target.  */
1544 if (target == 0 || target == op0 || target == op1)
1545 target = gen_reg_rtx (mode);
1549 /* Do the actual arithmetic.  */
1550 for (i = 0; i < GET_MODE_BITSIZE (mode) / BITS_PER_WORD; i++)
1552 rtx target_piece = operand_subword (target, i, 1, mode);
1553 rtx x = expand_binop (word_mode, binoptab,
1554 operand_subword_force (op0, i, mode),
1555 operand_subword_force (op1, i, mode),
1556 target_piece, unsignedp, next_methods);
1561 if (target_piece != x)
1562 emit_move_insn (target_piece, x);
1565 insns = get_insns ();
/* Only succeed if every word was expanded.  */
1568 if (i == GET_MODE_BITSIZE (mode) / BITS_PER_WORD)
1570 if (binoptab->code != UNKNOWN)
1572 = gen_rtx_fmt_ee (binoptab->code, mode,
1573 copy_rtx (op0), copy_rtx (op1));
1577 emit_no_conflict_block (insns, target, op0, op1, equiv_value);
1582 /* Synthesize double word shifts from single word shifts.  */
1583 if ((binoptab == lshr_optab || binoptab == ashl_optab
1584 || binoptab == ashr_optab)
1585 && class == MODE_INT
1586 && (GET_CODE (op1) == CONST_INT || !optimize_size)
1587 && GET_MODE_SIZE (mode) == 2 * UNITS_PER_WORD
1588 && binoptab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing
1589 && ashl_optab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing
1590 && lshr_optab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing)
1592 unsigned HOST_WIDE_INT shift_mask, double_shift_mask;
1593 enum machine_mode op1_mode;
1595 double_shift_mask = targetm.shift_truncation_mask (mode);
1596 shift_mask = targetm.shift_truncation_mask (word_mode);
1597 op1_mode = GET_MODE (op1) != VOIDmode ? GET_MODE (op1) : word_mode;
1599 /* Apply the truncation to constant shifts.  */
1600 if (double_shift_mask > 0 && GET_CODE (op1) == CONST_INT)
1601 op1 = GEN_INT (INTVAL (op1) & double_shift_mask);
1603 if (op1 == CONST0_RTX (op1_mode))
1606 /* Make sure that this is a combination that expand_doubleword_shift
1607 can handle.  See the comments there for details.  */
1608 if (double_shift_mask == 0
1609 || (shift_mask == BITS_PER_WORD - 1
1610 && double_shift_mask == BITS_PER_WORD * 2 - 1))
1612 rtx insns, equiv_value;
1613 rtx into_target, outof_target;
1614 rtx into_input, outof_input;
1615 int left_shift, outof_word;
1617 /* If TARGET is the same as one of the operands, the REG_EQUAL note
1618 won't be accurate, so use a new target.  */
1619 if (target == 0 || target == op0 || target == op1)
1620 target = gen_reg_rtx (mode);
1624 /* OUTOF_* is the word we are shifting bits away from, and
1625 INTO_* is the word that we are shifting bits towards, thus
1626 they differ depending on the direction of the shift and
1627 WORDS_BIG_ENDIAN.  */
1629 left_shift = binoptab == ashl_optab;
1630 outof_word = left_shift ^ ! WORDS_BIG_ENDIAN;
1632 outof_target = operand_subword (target, outof_word, 1, mode);
1633 into_target = operand_subword (target, 1 - outof_word, 1, mode);
1635 outof_input = operand_subword_force (op0, outof_word, mode);
1636 into_input = operand_subword_force (op0, 1 - outof_word, mode);
1638 if (expand_doubleword_shift (op1_mode, binoptab,
1639 outof_input, into_input, op1,
1640 outof_target, into_target,
1641 unsignedp, next_methods, shift_mask))
1643 insns = get_insns ();
1646 equiv_value = gen_rtx_fmt_ee (binoptab->code, mode, op0, op1);
1647 emit_no_conflict_block (insns, target, op0, op1, equiv_value);
1654 /* Synthesize double word rotates from single word shifts.  */
1655 if ((binoptab == rotl_optab || binoptab == rotr_optab)
1656 && class == MODE_INT
1657 && GET_CODE (op1) == CONST_INT
1658 && GET_MODE_SIZE (mode) == 2 * UNITS_PER_WORD
1659 && ashl_optab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing
1660 && lshr_optab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing)
1663 rtx into_target, outof_target;
1664 rtx into_input, outof_input;
1666 int shift_count, left_shift, outof_word;
1668 /* If TARGET is the same as one of the operands, the REG_EQUAL note
1669 won't be accurate, so use a new target.  Do this also if target is not
1670 a REG, first because having a register instead may open optimization
1671 opportunities, and second because if target and op0 happen to be MEMs
1672 designating the same location, we would risk clobbering it too early
1673 in the code sequence we generate below.  */
1674 if (target == 0 || target == op0 || target == op1 || ! REG_P (target))
1675 target = gen_reg_rtx (mode);
1679 shift_count = INTVAL (op1);
1681 /* OUTOF_* is the word we are shifting bits away from, and
1682 INTO_* is the word that we are shifting bits towards, thus
1683 they differ depending on the direction of the shift and
1684 WORDS_BIG_ENDIAN.  */
1686 left_shift = (binoptab == rotl_optab);
1687 outof_word = left_shift ^ ! WORDS_BIG_ENDIAN;
1689 outof_target = operand_subword (target, outof_word, 1, mode);
1690 into_target = operand_subword (target, 1 - outof_word, 1, mode);
1692 outof_input = operand_subword_force (op0, outof_word, mode);
1693 into_input = operand_subword_force (op0, 1 - outof_word, mode);
1695 if (shift_count == BITS_PER_WORD)
1697 /* This is just a word swap.  */
1698 emit_move_insn (outof_target, into_input);
1699 emit_move_insn (into_target, outof_input);
/* General case: each result word is the IOR of two opposing
   single-word shifts of the two input words.  */
1704 rtx into_temp1, into_temp2, outof_temp1, outof_temp2;
1705 rtx first_shift_count, second_shift_count;
1706 optab reverse_unsigned_shift, unsigned_shift;
1708 reverse_unsigned_shift = (left_shift ^ (shift_count < BITS_PER_WORD)
1709 ? lshr_optab : ashl_optab);
1711 unsigned_shift = (left_shift ^ (shift_count < BITS_PER_WORD)
1712 ? ashl_optab : lshr_optab);
1714 if (shift_count > BITS_PER_WORD)
1716 first_shift_count = GEN_INT (shift_count - BITS_PER_WORD);
1717 second_shift_count = GEN_INT (2 * BITS_PER_WORD - shift_count);
1721 first_shift_count = GEN_INT (BITS_PER_WORD - shift_count);
1722 second_shift_count = GEN_INT (shift_count);
1725 into_temp1 = expand_binop (word_mode, unsigned_shift,
1726 outof_input, first_shift_count,
1727 NULL_RTX, unsignedp, next_methods);
1728 into_temp2 = expand_binop (word_mode, reverse_unsigned_shift,
1729 into_input, second_shift_count,
1730 NULL_RTX, unsignedp, next_methods);
1732 if (into_temp1 != 0 && into_temp2 != 0)
1733 inter = expand_binop (word_mode, ior_optab, into_temp1, into_temp2,
1734 into_target, unsignedp, next_methods);
1738 if (inter != 0 && inter != into_target)
1739 emit_move_insn (into_target, inter);
1741 outof_temp1 = expand_binop (word_mode, unsigned_shift,
1742 into_input, first_shift_count,
1743 NULL_RTX, unsignedp, next_methods);
1744 outof_temp2 = expand_binop (word_mode, reverse_unsigned_shift,
1745 outof_input, second_shift_count,
1746 NULL_RTX, unsignedp, next_methods);
1748 if (inter != 0 && outof_temp1 != 0 && outof_temp2 != 0)
1749 inter = expand_binop (word_mode, ior_optab,
1750 outof_temp1, outof_temp2,
1751 outof_target, unsignedp, next_methods);
1753 if (inter != 0 && inter != outof_target)
1754 emit_move_insn (outof_target, inter);
1757 insns = get_insns ();
1762 /* One may be tempted to wrap the insns in a REG_NO_CONFLICT
1763 block to help the register allocator a bit.  But a multi-word
1764 rotate will need all the input bits when setting the output
1765 bits, so there clearly is a conflict between the input and
1766 output registers.  So we can't use a no-conflict block here.  */
1772 /* These can be done a word at a time by propagating carries.  */
1773 if ((binoptab == add_optab || binoptab == sub_optab)
1774 && class == MODE_INT
1775 && GET_MODE_SIZE (mode) >= 2 * UNITS_PER_WORD
1776 && binoptab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing
1779 optab otheroptab = binoptab == add_optab ? sub_optab : add_optab;
1780 const unsigned int nwords = GET_MODE_BITSIZE (mode) / BITS_PER_WORD;
1781 rtx carry_in = NULL_RTX, carry_out = NULL_RTX;
1782 rtx xop0, xop1, xtarget;
1784 /* We can handle either a 1 or -1 value for the carry.  If STORE_FLAG
1785 value is one of those, use it.  Otherwise, use 1 since it is the
1786 one easiest to get.  */
1787 #if STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1
1788 int normalizep = STORE_FLAG_VALUE;
1793 /* Prepare the operands.  */
1794 xop0 = force_reg (mode, op0);
1795 xop1 = force_reg (mode, op1);
1797 xtarget = gen_reg_rtx (mode);
1799 if (target == 0 || !REG_P (target))
1802 /* Indicate for flow that the entire target reg is being set.  */
1804 emit_insn (gen_rtx_CLOBBER (VOIDmode, xtarget));
1806 /* Do the actual arithmetic.  */
/* Words are processed from least to most significant so each
   iteration's carry feeds the next.  */
1807 for (i = 0; i < nwords; i++)
1809 int index = (WORDS_BIG_ENDIAN ? nwords - i - 1 : i);
1810 rtx target_piece = operand_subword (xtarget, index, 1, mode);
1811 rtx op0_piece = operand_subword_force (xop0, index, mode);
1812 rtx op1_piece = operand_subword_force (xop1, index, mode);
1815 /* Main add/subtract of the input operands.  */
1816 x = expand_binop (word_mode, binoptab,
1817 op0_piece, op1_piece,
1818 target_piece, unsignedp, next_methods);
1824 /* Store carry from main add/subtract.  */
1825 carry_out = gen_reg_rtx (word_mode);
1826 carry_out = emit_store_flag_force (carry_out,
1827 (binoptab == add_optab
1830 word_mode, 1, normalizep);
1837 /* Add/subtract previous carry to main result.  */
1838 newx = expand_binop (word_mode,
1839 normalizep == 1 ? binoptab : otheroptab,
1841 NULL_RTX, 1, next_methods);
1845 /* Get out carry from adding/subtracting carry in.  */
1846 rtx carry_tmp = gen_reg_rtx (word_mode);
1847 carry_tmp = emit_store_flag_force (carry_tmp,
1848 (binoptab == add_optab
1851 word_mode, 1, normalizep);
1853 /* Logical-ior the two poss. carry together.  */
1854 carry_out = expand_binop (word_mode, ior_optab,
1855 carry_out, carry_tmp,
1856 carry_out, 0, next_methods);
1860 emit_move_insn (target_piece, newx);
1864 if (x != target_piece)
1865 emit_move_insn (target_piece, x);
1868 carry_in = carry_out;
/* Only succeed if every word was expanded.  */
1871 if (i == GET_MODE_BITSIZE (mode) / (unsigned) BITS_PER_WORD)
1873 if (mov_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing
1874 || ! rtx_equal_p (target, xtarget))
1876 rtx temp = emit_move_insn (target, xtarget);
1878 set_unique_reg_note (temp,
1880 gen_rtx_fmt_ee (binoptab->code, mode,
1891 delete_insns_since (last);
1894 /* Attempt to synthesize double word multiplies using a sequence of word
1895 mode multiplications.  We first attempt to generate a sequence using a
1896 more efficient unsigned widening multiply, and if that fails we then
1897 try using a signed widening multiply.  */
1899 if (binoptab == smul_optab
1900 && class == MODE_INT
1901 && GET_MODE_SIZE (mode) == 2 * UNITS_PER_WORD
1902 && smul_optab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing
1903 && add_optab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing)
1905 rtx product = NULL_RTX;
1907 if (umul_widen_optab->handlers[(int) mode].insn_code
1908 != CODE_FOR_nothing)
1910 product = expand_doubleword_mult (mode, op0, op1, target,
1913 delete_insns_since (last);
1916 if (product == NULL_RTX
1917 && smul_widen_optab->handlers[(int) mode].insn_code
1918 != CODE_FOR_nothing)
1920 product = expand_doubleword_mult (mode, op0, op1, target,
1923 delete_insns_since (last);
1926 if (product != NULL_RTX)
1928 if (mov_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
1930 temp = emit_move_insn (target ? target : product, product);
1931 set_unique_reg_note (temp,
1933 gen_rtx_fmt_ee (MULT, mode,
1941 /* It can't be open-coded in this mode.
1942 Use a library call if one is available and caller says that's ok.  */
1944 if (binoptab->handlers[(int) mode].libfunc
1945 && (methods == OPTAB_LIB || methods == OPTAB_LIB_WIDEN))
1949 enum machine_mode op1_mode = mode;
/* Shift counts are passed to the libcall in word_mode.  */
1956 op1_mode = word_mode;
1957 /* Specify unsigned here,
1958 since negative shift counts are meaningless.  */
1959 op1x = convert_to_mode (word_mode, op1, 1);
1962 if (GET_MODE (op0) != VOIDmode
1963 && GET_MODE (op0) != mode)
1964 op0 = convert_to_mode (mode, op0, unsignedp);
1966 /* Pass 1 for NO_QUEUE so we don't lose any increments
1967 if the libcall is cse'd or moved.  */
1968 value = emit_library_call_value (binoptab->handlers[(int) mode].libfunc,
1969 NULL_RTX, LCT_CONST, mode, 2,
1970 op0, mode, op1x, op1_mode);
1972 insns = get_insns ();
1975 target = gen_reg_rtx (mode);
1976 emit_libcall_block (insns, target, value,
1977 gen_rtx_fmt_ee (binoptab->code, mode, op0, op1));
1982 delete_insns_since (last);
1984 /* It can't be done in this mode.  Can we do it in a wider mode?  */
1986 if (! (methods == OPTAB_WIDEN || methods == OPTAB_LIB_WIDEN
1987 || methods == OPTAB_MUST_WIDEN))
1989 /* Caller says, don't even try.  */
1990 delete_insns_since (entry_last);
1994 /* Compute the value of METHODS to pass to recursive calls.
1995 Don't allow widening to be tried recursively.  */
1997 methods = (methods == OPTAB_LIB_WIDEN ? OPTAB_LIB : OPTAB_DIRECT);
1999 /* Look for a wider mode of the same class for which it appears we can do
2002 if (CLASS_HAS_WIDER_MODES_P (class))
2004 for (wider_mode = GET_MODE_WIDER_MODE (mode);
2005 wider_mode != VOIDmode;
2006 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
2008 if ((binoptab->handlers[(int) wider_mode].insn_code
2009 != CODE_FOR_nothing)
2010 || (methods == OPTAB_LIB
2011 && binoptab->handlers[(int) wider_mode].libfunc))
2013 rtx xop0 = op0, xop1 = op1;
2016 /* For certain integer operations, we need not actually extend
2017 the narrow operands, as long as we will truncate
2018 the results to the same narrowness.  */
2020 if ((binoptab == ior_optab || binoptab == and_optab
2021 || binoptab == xor_optab
2022 || binoptab == add_optab || binoptab == sub_optab
2023 || binoptab == smul_optab || binoptab == ashl_optab)
2024 && class == MODE_INT)
2027 xop0 = widen_operand (xop0, wider_mode, mode,
2028 unsignedp, no_extend);
2030 /* The second operand of a shift must always be extended.  */
2031 xop1 = widen_operand (xop1, wider_mode, mode, unsignedp,
2032 no_extend && binoptab != ashl_optab);
2034 temp = expand_binop (wider_mode, binoptab, xop0, xop1, NULL_RTX,
2035 unsignedp, methods);
2038 if (class != MODE_INT
2039 || !TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode),
2040 GET_MODE_BITSIZE (wider_mode)))
2043 target = gen_reg_rtx (mode);
2044 convert_move (target, temp, 0);
2048 return gen_lowpart (mode, temp);
2051 delete_insns_since (last);
/* Everything failed: discard all insns emitted since entry.  */
2056 delete_insns_since (entry_last);
2060 /* Expand a binary operator which has both signed and unsigned forms.
2061 UOPTAB is the optab for unsigned operations, and SOPTAB is for
2064 If we widen unsigned operands, we may use a signed wider operation instead
2065 of an unsigned wider operation, since the result would be the same.
   Escalation order below: direct insn, widening, right-width libcall,
   widened libcall -- stopping as soon as METHODS forbids going further.  */
2068 sign_expand_binop (enum machine_mode mode, optab uoptab, optab soptab,
2069 rtx op0, rtx op1, rtx target, int unsignedp,
2070 enum optab_methods methods)
2073 optab direct_optab = unsignedp ? uoptab : soptab;
2074 struct optab wide_soptab;
2076 /* Do it without widening, if possible.  */
2077 temp = expand_binop (mode, direct_optab, op0, op1, target,
2078 unsignedp, OPTAB_DIRECT);
2079 if (temp || methods == OPTAB_DIRECT)
2082 /* Try widening to a signed int.  Make a fake signed optab that
2083 hides any signed insn for direct use.  */
/* The copy's MODE entries are cleared so expand_binop cannot retry the
   direct insn/libcall we already know failed -- it must widen.  */
2084 wide_soptab = *soptab;
2085 wide_soptab.handlers[(int) mode].insn_code = CODE_FOR_nothing;
2086 wide_soptab.handlers[(int) mode].libfunc = 0;
2088 temp = expand_binop (mode, &wide_soptab, op0, op1, target,
2089 unsignedp, OPTAB_WIDEN);
2091 /* For unsigned operands, try widening to an unsigned int.  */
2092 if (temp == 0 && unsignedp)
2093 temp = expand_binop (mode, uoptab, op0, op1, target,
2094 unsignedp, OPTAB_WIDEN);
2095 if (temp || methods == OPTAB_WIDEN)
2098 /* Use the right width lib call if that exists.  */
2099 temp = expand_binop (mode, direct_optab, op0, op1, target, unsignedp, OPTAB_LIB);
2100 if (temp || methods == OPTAB_LIB)
2103 /* Must widen and use a lib call, use either signed or unsigned.  */
2104 temp = expand_binop (mode, &wide_soptab, op0, op1, target,
2105 unsignedp, methods);
/* Last resort: unsigned optab with the caller's METHODS unchanged.  */
2109 return expand_binop (mode, uoptab, op0, op1, target,
2110 unsignedp, methods);
2114 /* Generate code to perform an operation specified by UNOPPTAB
2115 on operand OP0, with two results to TARG0 and TARG1.
2116 We assume that the order of the operands for the instruction
2117 is TARG0, TARG1, OP0.
2119 Either TARG0 or TARG1 may be zero, but what that means is that
2120 the result is not actually wanted.  We will generate it into
2121 a dummy pseudo-reg and discard it.  They may not both be zero.
2123 Returns 1 if this operation can be performed; 0 if not.  */
2126 expand_twoval_unop (optab unoptab, rtx op0, rtx targ0, rtx targ1,
/* Result mode is taken from whichever target was supplied.  */
2129 enum machine_mode mode = GET_MODE (targ0 ? targ0 : targ1);
2130 enum mode_class class;
2131 enum machine_mode wider_mode;
2132 rtx entry_last = get_last_insn ();
2135 class = GET_MODE_CLASS (mode);
/* Missing targets get dummy pseudos (results discarded).  */
2138 targ0 = gen_reg_rtx (mode);
2140 targ1 = gen_reg_rtx (mode);
2142 /* Record where to go back to if we fail.  */
2143 last = get_last_insn ();
2145 if (unoptab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
2147 int icode = (int) unoptab->handlers[(int) mode].insn_code;
/* operand[2] is the input; operands [0] and [1] are the outputs.  */
2148 enum machine_mode mode0 = insn_data[icode].operand[2].mode;
2152 if (GET_MODE (xop0) != VOIDmode
2153 && GET_MODE (xop0) != mode0)
2154 xop0 = convert_to_mode (mode0, xop0, unsignedp);
2156 /* Now, if insn doesn't accept these operands, put them into pseudos.  */
2157 if (!insn_data[icode].operand[2].predicate (xop0, mode0))
2158 xop0 = copy_to_mode_reg (mode0, xop0);
2160 /* We could handle this, but we should always be called with a pseudo
2161 for our targets and all insns should take them as outputs.  */
2162 gcc_assert (insn_data[icode].operand[0].predicate (targ0, mode));
2163 gcc_assert (insn_data[icode].operand[1].predicate (targ1, mode));
2165 pat = GEN_FCN (icode) (targ0, targ1, xop0);
2172 delete_insns_since (last);
2175 /* It can't be done in this mode.  Can we do it in a wider mode?  */
2177 if (CLASS_HAS_WIDER_MODES_P (class))
2179 for (wider_mode = GET_MODE_WIDER_MODE (mode);
2180 wider_mode != VOIDmode;
2181 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
2183 if (unoptab->handlers[(int) wider_mode].insn_code
2184 != CODE_FOR_nothing)
/* Recurse at the wider mode, then narrow both results back.  */
2186 rtx t0 = gen_reg_rtx (wider_mode);
2187 rtx t1 = gen_reg_rtx (wider_mode);
2188 rtx cop0 = convert_modes (wider_mode, mode, op0, unsignedp);
2190 if (expand_twoval_unop (unoptab, cop0, t0, t1, unsignedp))
2192 convert_move (targ0, t0, unsignedp);
2193 convert_move (targ1, t1, unsignedp);
2197 delete_insns_since (last);
2202 delete_insns_since (entry_last);
2206 /* Generate code to perform an operation specified by BINOPTAB
2207 on operands OP0 and OP1, with two results to TARG1 and TARG2.
2208 We assume that the order of the operands for the instruction
2209 is TARG0, OP0, OP1, TARG1, which would fit a pattern like
2210 [(set TARG0 (operate OP0 OP1)) (set TARG1 (operate ...))].
2212 Either TARG0 or TARG1 may be zero, but what that means is that
2213 the result is not actually wanted. We will generate it into
2214 a dummy pseudo-reg and discard it. They may not both be zero.
2216 Returns 1 if this operation can be performed; 0 if not. */
/* NOTE(review): this dump elides some original lines (gaps in the embedded
   numbering); the code below is kept verbatim.  */
2219 expand_twoval_binop (optab binoptab, rtx op0, rtx op1, rtx targ0, rtx targ1,
2222 enum machine_mode mode = GET_MODE (targ0 ? targ0 : targ1);
2223 enum mode_class class;
2224 enum machine_mode wider_mode;
2225 rtx entry_last = get_last_insn ();
2228 class = GET_MODE_CLASS (mode);
2230 /* If we are inside an appropriately-short loop and we are optimizing,
2231 force expensive constants into a register. */
2232 if (CONSTANT_P (op0) && optimize
2233 && rtx_cost (op0, binoptab->code) > COSTS_N_INSNS (1))
2234 op0 = force_reg (mode, op0);
2236 if (CONSTANT_P (op1) && optimize
2237 && rtx_cost (op1, binoptab->code) > COSTS_N_INSNS (1))
2238 op1 = force_reg (mode, op1);
/* An omitted result is still computed, into a throw-away pseudo.  */
2241 targ0 = gen_reg_rtx (mode);
2243 targ1 = gen_reg_rtx (mode);
2245 /* Record where to go back to if we fail. */
2246 last = get_last_insn ();
/* First choice: a single insn pattern for this operation in MODE.  */
2248 if (binoptab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
2250 int icode = (int) binoptab->handlers[(int) mode].insn_code;
2251 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
2252 enum machine_mode mode1 = insn_data[icode].operand[2].mode;
2254 rtx xop0 = op0, xop1 = op1;
2256 /* In case the insn wants input operands in modes different from
2257 those of the actual operands, convert the operands. It would
2258 seem that we don't need to convert CONST_INTs, but we do, so
2259 that they're properly zero-extended, sign-extended or truncated
2262 if (GET_MODE (op0) != mode0 && mode0 != VOIDmode)
2263 xop0 = convert_modes (mode0,
2264 GET_MODE (op0) != VOIDmode
2269 if (GET_MODE (op1) != mode1 && mode1 != VOIDmode)
2270 xop1 = convert_modes (mode1,
2271 GET_MODE (op1) != VOIDmode
2276 /* Now, if insn doesn't accept these operands, put them into pseudos. */
2277 if (!insn_data[icode].operand[1].predicate (xop0, mode0))
2278 xop0 = copy_to_mode_reg (mode0, xop0);
2280 if (!insn_data[icode].operand[2].predicate (xop1, mode1))
2281 xop1 = copy_to_mode_reg (mode1, xop1);
2283 /* We could handle this, but we should always be called with a pseudo
2284 for our targets and all insns should take them as outputs. */
2285 gcc_assert (insn_data[icode].operand[0].predicate (targ0, mode));
2286 gcc_assert (insn_data[icode].operand[3].predicate (targ1, mode));
2288 pat = GEN_FCN (icode) (targ0, xop0, xop1, targ1);
/* On failure, roll back anything emitted by this attempt.  */
2295 delete_insns_since (last);
2298 /* It can't be done in this mode. Can we do it in a wider mode? */
2300 if (CLASS_HAS_WIDER_MODES_P (class))
2302 for (wider_mode = GET_MODE_WIDER_MODE (mode);
2303 wider_mode != VOIDmode;
2304 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
2306 if (binoptab->handlers[(int) wider_mode].insn_code
2307 != CODE_FOR_nothing)
/* Recurse in WIDER_MODE, then truncate both results back to MODE.  */
2309 rtx t0 = gen_reg_rtx (wider_mode);
2310 rtx t1 = gen_reg_rtx (wider_mode);
2311 rtx cop0 = convert_modes (wider_mode, mode, op0, unsignedp);
2312 rtx cop1 = convert_modes (wider_mode, mode, op1, unsignedp);
2314 if (expand_twoval_binop (binoptab, cop0, cop1,
2317 convert_move (targ0, t0, unsignedp);
2318 convert_move (targ1, t1, unsignedp);
2322 delete_insns_since (last);
/* Complete failure: discard every insn emitted since entry.  */
2327 delete_insns_since (entry_last);
2331 /* Expand the two-valued library call indicated by BINOPTAB, but
2332 preserve only one of the values. If TARG0 is non-NULL, the first
2333 value is placed into TARG0; otherwise the second value is placed
2334 into TARG1. Exactly one of TARG0 and TARG1 must be non-NULL. The
2335 value stored into TARG0 or TARG1 is equivalent to (CODE OP0 OP1).
2336 This routine assumes that the value returned by the library call is
2337 as if the return value was of an integral mode twice as wide as the
2338 mode of OP0. Returns 1 if the call was successful. */
2341 expand_twoval_binop_libfunc (optab binoptab, rtx op0, rtx op1,
2342 rtx targ0, rtx targ1, enum rtx_code code)
2344 enum machine_mode mode;
2345 enum machine_mode libval_mode;
2349 /* Exactly one of TARG0 or TARG1 should be non-NULL. */
2350 gcc_assert (!targ0 != !targ1);
/* The nominal mode of the operation is taken from OP0.  */
2352 mode = GET_MODE (op0);
2353 if (!binoptab->handlers[(int) mode].libfunc)
2356 /* The value returned by the library function will have twice as
2357 many bits as the nominal MODE. */
2358 libval_mode = smallest_mode_for_size (2 * GET_MODE_BITSIZE (mode),
2361 libval = emit_library_call_value (binoptab->handlers[(int) mode].libfunc,
2362 NULL_RTX, LCT_CONST,
2366 /* Get the part of VAL containing the value that we want.  The wanted
2367 half is selected by the subreg byte offset: 0 for TARG0, else the
2368 second MODE-sized chunk for TARG1. */
2367 libval = simplify_gen_subreg (mode, libval, libval_mode,
2368 targ0 ? 0 : GET_MODE_SIZE (mode));
2369 insns = get_insns ();
2371 /* Move the result into the desired location. */
2372 emit_libcall_block (insns, targ0 ? targ0 : targ1, libval,
2373 gen_rtx_fmt_ee (code, mode, op0, op1));
2379 /* Wrapper around expand_unop which takes an rtx code to specify
2380 the operation to perform, not an optab pointer. All other
2381 arguments are the same. */
2383 expand_simple_unop (enum machine_mode mode, enum rtx_code code, rtx op0,
2384 rtx target, int unsignedp)
/* code_to_optab maps an rtx code to the optab that implements it.  */
2386 optab unop = code_to_optab[(int) code];
2389 return expand_unop (mode, unop, op0, target, unsignedp);
2395 (clz:wide (zero_extend:wide x)) - ((width wide) - (width narrow)). */
2397 widen_clz (enum machine_mode mode, rtx op0, rtx target)
2399 enum mode_class class = GET_MODE_CLASS (mode);
2400 if (CLASS_HAS_WIDER_MODES_P (class))
2402 enum machine_mode wider_mode;
/* Search ever-wider modes for one with a clz insn pattern.  */
2403 for (wider_mode = GET_MODE_WIDER_MODE (mode);
2404 wider_mode != VOIDmode;
2405 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
2407 if (clz_optab->handlers[(int) wider_mode].insn_code
2408 != CODE_FOR_nothing)
2410 rtx xop0, temp, last;
2412 last = get_last_insn ();
2415 target = gen_reg_rtx (mode);
/* Zero-extend, count leading zeros in the wide mode, then subtract
   the extra leading zeros introduced by the widening itself.  */
2416 xop0 = widen_operand (op0, wider_mode, mode, true, false);
2417 temp = expand_unop (wider_mode, clz_optab, xop0, NULL_RTX, true);
2419 temp = expand_binop (wider_mode, sub_optab, temp,
2420 GEN_INT (GET_MODE_BITSIZE (wider_mode)
2421 - GET_MODE_BITSIZE (mode)),
2422 target, true, OPTAB_DIRECT);
/* If the subtraction failed, discard the partial expansion.  */
2424 delete_insns_since (last);
2436 (lshiftrt:wide (bswap:wide x) ((width wide) - (width narrow))). */
2438 widen_bswap (enum machine_mode mode, rtx op0, rtx target)
2440 enum mode_class class = GET_MODE_CLASS (mode);
2441 enum machine_mode wider_mode;
2444 if (!CLASS_HAS_WIDER_MODES_P (class))
/* Find the first wider mode that has a bswap insn pattern.  */
2447 for (wider_mode = GET_MODE_WIDER_MODE (mode);
2448 wider_mode != VOIDmode;
2449 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
2450 if (bswap_optab->handlers[wider_mode].insn_code != CODE_FOR_nothing)
2455 last = get_last_insn ();
/* Byte-swap in the wide mode, then shift the interesting bytes down
   into the low part and take the MODE lowpart of the result.  */
2457 x = widen_operand (op0, wider_mode, mode, true, true);
2458 x = expand_unop (wider_mode, bswap_optab, x, NULL_RTX, true);
2461 x = expand_shift (RSHIFT_EXPR, wider_mode, x,
2462 size_int (GET_MODE_BITSIZE (wider_mode)
2463 - GET_MODE_BITSIZE (mode)),
2469 target = gen_reg_rtx (mode);
2470 emit_move_insn (target, gen_lowpart (mode, x));
2473 delete_insns_since (last);
2478 /* Try calculating bswap as two bswaps of two word-sized operands. */
2481 expand_doubleword_bswap (enum machine_mode mode, rtx op, rtx target)
/* Byte-swap each word separately; the words themselves must then be
   stored in swapped order (input word 0 becomes result word 1).  */
2485 t1 = expand_unop (word_mode, bswap_optab,
2486 operand_subword_force (op, 0, mode), NULL_RTX, true);
2487 t0 = expand_unop (word_mode, bswap_optab,
2488 operand_subword_force (op, 1, mode), NULL_RTX, true);
2491 target = gen_reg_rtx (mode);
/* The CLOBBER tells the optimizers the whole multiword TARGET is
   rewritten here, before the per-word stores.  */
2493 emit_insn (gen_rtx_CLOBBER (VOIDmode, target));
2494 emit_move_insn (operand_subword (target, 0, 1, mode), t0);
2495 emit_move_insn (operand_subword (target, 1, 1, mode), t1);
2500 /* Try calculating (parity x) as (and (popcount x) 1), where
2501 popcount can also be done in a wider mode. */
2503 expand_parity (enum machine_mode mode, rtx op0, rtx target)
2505 enum mode_class class = GET_MODE_CLASS (mode);
2506 if (CLASS_HAS_WIDER_MODES_P (class))
2508 enum machine_mode wider_mode;
/* Note: the search starts at MODE itself, not at the next wider mode,
   so a native popcount in MODE is used directly.  */
2509 for (wider_mode = mode; wider_mode != VOIDmode;
2510 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
2512 if (popcount_optab->handlers[(int) wider_mode].insn_code
2513 != CODE_FOR_nothing)
2515 rtx xop0, temp, last;
2517 last = get_last_insn ();
2520 target = gen_reg_rtx (mode);
2521 xop0 = widen_operand (op0, wider_mode, mode, true, false);
2522 temp = expand_unop (wider_mode, popcount_optab, xop0, NULL_RTX,
/* Parity is the low bit of the population count.  */
2525 temp = expand_binop (wider_mode, and_optab, temp, const1_rtx,
2526 target, true, OPTAB_DIRECT);
2528 delete_insns_since (last);
2537 /* Extract the OMODE lowpart from VAL, which has IMODE. Under certain
2538 conditions, VAL may already be a SUBREG against which we cannot generate
2539 a further SUBREG. In this case, we expect forcing the value into a
2540 register will work around the situation. */
2543 lowpart_subreg_maybe_copy (enum machine_mode omode, rtx val,
2544 enum machine_mode imode)
2547 ret = lowpart_subreg (omode, val, imode);
/* Retry with VAL copied into a fresh register; a SUBREG of a plain
   register must succeed, hence the assert.  */
2550 val = force_reg (imode, val);
2551 ret = lowpart_subreg (omode, val, imode);
2552 gcc_assert (ret != NULL);
2557 /* Expand a floating point absolute value or negation operation via a
2558 logical operation on the sign bit. */
2561 expand_absneg_bit (enum rtx_code code, enum machine_mode mode,
2562 rtx op0, rtx target)
2564 const struct real_format *fmt;
2565 int bitpos, word, nwords, i;
2566 enum machine_mode imode;
2567 HOST_WIDE_INT hi, lo;
2570 /* The format has to have a simple sign bit. */
2571 fmt = REAL_MODE_FORMAT (mode);
2575 bitpos = fmt->signbit_rw;
2579 /* Don't create negative zeros if the format doesn't support them. */
2580 if (code == NEG && !fmt->has_signed_zero)
/* Single-word values are handled with one integer operation in IMODE;
   wider values are processed word by word below.  */
2583 if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
2585 imode = int_mode_for_mode (mode);
2586 if (imode == BLKmode)
2595 if (FLOAT_WORDS_BIG_ENDIAN)
2596 word = (GET_MODE_BITSIZE (mode) - bitpos) / BITS_PER_WORD;
2598 word = bitpos / BITS_PER_WORD;
2599 bitpos = bitpos % BITS_PER_WORD;
2600 nwords = (GET_MODE_BITSIZE (mode) + BITS_PER_WORD - 1) / BITS_PER_WORD;
/* Build the (LO,HI) double-word constant holding the sign-bit mask.  */
2603 if (bitpos < HOST_BITS_PER_WIDE_INT)
2606 lo = (HOST_WIDE_INT) 1 << bitpos;
2610 hi = (HOST_WIDE_INT) 1 << (bitpos - HOST_BITS_PER_WIDE_INT);
2616 if (target == 0 || target == op0)
2617 target = gen_reg_rtx (mode);
/* Multiword case: ABS uses AND, NEG uses XOR, on the one word that
   contains the sign bit; all other words are plain copies of OP0.  */
2623 for (i = 0; i < nwords; ++i)
2625 rtx targ_piece = operand_subword (target, i, 1, mode);
2626 rtx op0_piece = operand_subword_force (op0, i, mode);
2630 temp = expand_binop (imode, code == ABS ? and_optab : xor_optab,
2632 immed_double_const (lo, hi, imode),
2633 targ_piece, 1, OPTAB_LIB_WIDEN);
2634 if (temp != targ_piece)
2635 emit_move_insn (targ_piece, temp);
2638 emit_move_insn (targ_piece, op0_piece);
2641 insns = get_insns ();
/* Wrap the per-word stores in a no-conflict block with a REG_EQUAL
   expression (CODE OP0) describing the overall result.  */
2644 temp = gen_rtx_fmt_e (code, mode, copy_rtx (op0));
2645 emit_no_conflict_block (insns, target, op0, NULL_RTX, temp);
/* Single-word case: do the logical op on the integer view of OP0.  */
2649 temp = expand_binop (imode, code == ABS ? and_optab : xor_optab,
2650 gen_lowpart (imode, op0),
2651 immed_double_const (lo, hi, imode),
2652 gen_lowpart (imode, target), 1, OPTAB_LIB_WIDEN);
2653 target = lowpart_subreg_maybe_copy (mode, temp, imode);
2655 set_unique_reg_note (get_last_insn (), REG_EQUAL,
2656 gen_rtx_fmt_e (code, mode, copy_rtx (op0)));
2662 /* Generate code to perform an operation specified by UNOPTAB
2663 on operand OP0, with result having machine-mode MODE.
2665 UNSIGNEDP is for the case where we have to widen the operands
2666 to perform the operation. It says to use zero-extension.
2668 If TARGET is nonzero, the value
2669 is generated there, if it is convenient to do so.
2670 In all cases an rtx is returned for the locus of the value;
2671 this may or may not be TARGET. */
2674 expand_unop (enum machine_mode mode, optab unoptab, rtx op0, rtx target,
2677 enum mode_class class;
2678 enum machine_mode wider_mode;
2680 rtx last = get_last_insn ();
2683 class = GET_MODE_CLASS (mode);
/* Strategy 1: a direct insn pattern for UNOPTAB in MODE itself.  */
2685 if (unoptab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
2687 int icode = (int) unoptab->handlers[(int) mode].insn_code;
2688 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
2694 temp = gen_reg_rtx (mode);
2696 if (GET_MODE (xop0) != VOIDmode
2697 && GET_MODE (xop0) != mode0)
2698 xop0 = convert_to_mode (mode0, xop0, unsignedp);
2700 /* Now, if insn doesn't accept our operand, put it into a pseudo. */
2702 if (!insn_data[icode].operand[1].predicate (xop0, mode0))
2703 xop0 = copy_to_mode_reg (mode0, xop0);
2705 if (!insn_data[icode].operand[0].predicate (temp, mode))
2706 temp = gen_reg_rtx (mode);
2708 pat = GEN_FCN (icode) (temp, xop0);
/* If the expansion produced several insns and we cannot attach a
   REG_EQUAL note, redo the expansion with a fresh target.  */
2711 if (INSN_P (pat) && NEXT_INSN (pat) != NULL_RTX
2712 && ! add_equal_note (pat, temp, unoptab->code, xop0, NULL_RTX))
2714 delete_insns_since (last);
2715 return expand_unop (mode, unoptab, op0, NULL_RTX, unsignedp);
2723 delete_insns_since (last);
2726 /* It can't be done in this mode. Can we open-code it in a wider mode? */
2728 /* Widening clz needs special treatment. */
2729 if (unoptab == clz_optab)
2731 temp = widen_clz (mode, op0, target);
2738 /* Widening (or narrowing) bswap needs special treatment. */
2739 if (unoptab == bswap_optab)
2741 temp = widen_bswap (mode, op0, target);
/* Double-word bswap can be split into two word_mode bswaps.  */
2745 if (GET_MODE_SIZE (mode) == 2 * UNITS_PER_WORD
2746 && unoptab->handlers[word_mode].insn_code != CODE_FOR_nothing)
2748 temp = expand_doubleword_bswap (mode, op0, target);
/* Strategy 2: open-code the operation in a wider mode, then truncate
   the result back to MODE.  */
2756 if (CLASS_HAS_WIDER_MODES_P (class))
2757 for (wider_mode = GET_MODE_WIDER_MODE (mode);
2758 wider_mode != VOIDmode;
2759 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
2761 if (unoptab->handlers[(int) wider_mode].insn_code != CODE_FOR_nothing)
2765 /* For certain operations, we need not actually extend
2766 the narrow operand, as long as we will truncate the
2767 results to the same narrowness. */
2769 xop0 = widen_operand (xop0, wider_mode, mode, unsignedp,
2770 (unoptab == neg_optab
2771 || unoptab == one_cmpl_optab)
2772 && class == MODE_INT);
2774 temp = expand_unop (wider_mode, unoptab, xop0, NULL_RTX,
/* Truncate via convert_move unless a lowpart SUBREG is a no-op.  */
2779 if (class != MODE_INT
2780 || !TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode),
2781 GET_MODE_BITSIZE (wider_mode)))
2784 target = gen_reg_rtx (mode);
2785 convert_move (target, temp, 0);
2789 return gen_lowpart (mode, temp);
2792 delete_insns_since (last);
2796 /* These can be done a word at a time. */
2797 if (unoptab == one_cmpl_optab
2798 && class == MODE_INT
2799 && GET_MODE_SIZE (mode) > UNITS_PER_WORD
2800 && unoptab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing)
2805 if (target == 0 || target == op0)
2806 target = gen_reg_rtx (mode);
2810 /* Do the actual arithmetic. */
2811 for (i = 0; i < GET_MODE_BITSIZE (mode) / BITS_PER_WORD; i++)
2813 rtx target_piece = operand_subword (target, i, 1, mode);
2814 rtx x = expand_unop (word_mode, unoptab,
2815 operand_subword_force (op0, i, mode),
2816 target_piece, unsignedp);
2818 if (target_piece != x)
2819 emit_move_insn (target_piece, x);
2822 insns = get_insns ();
2825 emit_no_conflict_block (insns, target, op0, NULL_RTX,
2826 gen_rtx_fmt_e (unoptab->code, mode,
2831 if (unoptab->code == NEG)
2833 /* Try negating floating point values by flipping the sign bit. */
2834 if (SCALAR_FLOAT_MODE_P (mode))
2836 temp = expand_absneg_bit (NEG, mode, op0, target);
2841 /* If there is no negation pattern, and we have no negative zero,
2842 try subtracting from zero. */
2843 if (!HONOR_SIGNED_ZEROS (mode))
2845 temp = expand_binop (mode, (unoptab == negv_optab
2846 ? subv_optab : sub_optab),
2847 CONST0_RTX (mode), op0, target,
2848 unsignedp, OPTAB_DIRECT);
2854 /* Try calculating parity (x) as popcount (x) % 2. */
2855 if (unoptab == parity_optab)
2857 temp = expand_parity (mode, op0, target);
2863 /* Now try a library call in this mode. */
2864 if (unoptab->handlers[(int) mode].libfunc)
2868 enum machine_mode outmode = mode;
2870 /* All of these functions return small values. Thus we choose to
2871 have them return something that isn't a double-word. */
2872 if (unoptab == ffs_optab || unoptab == clz_optab || unoptab == ctz_optab
2873 || unoptab == popcount_optab || unoptab == parity_optab)
2875 = GET_MODE (hard_libcall_value (TYPE_MODE (integer_type_node)));
2879 /* Pass 1 for NO_QUEUE so we don't lose any increments
2880 if the libcall is cse'd or moved. */
2881 value = emit_library_call_value (unoptab->handlers[(int) mode].libfunc,
2882 NULL_RTX, LCT_CONST, outmode,
2884 insns = get_insns ();
2887 target = gen_reg_rtx (outmode);
2888 emit_libcall_block (insns, target, value,
2889 gen_rtx_fmt_e (unoptab->code, outmode, op0));
2894 /* It can't be done in this mode. Can we do it in a wider mode? */
/* Strategy 4: a wider mode that has either an insn pattern or a
   library function for this operation.  */
2896 if (CLASS_HAS_WIDER_MODES_P (class))
2898 for (wider_mode = GET_MODE_WIDER_MODE (mode);
2899 wider_mode != VOIDmode;
2900 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
2902 if ((unoptab->handlers[(int) wider_mode].insn_code
2903 != CODE_FOR_nothing)
2904 || unoptab->handlers[(int) wider_mode].libfunc)
2908 /* For certain operations, we need not actually extend
2909 the narrow operand, as long as we will truncate the
2910 results to the same narrowness. */
2912 xop0 = widen_operand (xop0, wider_mode, mode, unsignedp,
2913 (unoptab == neg_optab
2914 || unoptab == one_cmpl_optab)
2915 && class == MODE_INT);
2917 temp = expand_unop (wider_mode, unoptab, xop0, NULL_RTX,
2920 /* If we are generating clz using wider mode, adjust the
2922 if (unoptab == clz_optab && temp != 0)
2923 temp = expand_binop (wider_mode, sub_optab, temp,
2924 GEN_INT (GET_MODE_BITSIZE (wider_mode)
2925 - GET_MODE_BITSIZE (mode)),
2926 target, true, OPTAB_DIRECT);
2930 if (class != MODE_INT)
2933 target = gen_reg_rtx (mode);
2934 convert_move (target, temp, 0);
2938 return gen_lowpart (mode, temp);
2941 delete_insns_since (last);
2946 /* One final attempt at implementing negation via subtraction,
2947 this time allowing widening of the operand. */
2948 if (unoptab->code == NEG && !HONOR_SIGNED_ZEROS (mode))
2951 temp = expand_binop (mode,
2952 unoptab == negv_optab ? subv_optab : sub_optab,
2953 CONST0_RTX (mode), op0,
2954 target, unsignedp, OPTAB_LIB_WIDEN);
2962 /* Emit code to compute the absolute value of OP0, with result to
2963 TARGET if convenient. (TARGET may be 0.) The return value says
2964 where the result actually is to be found.
2966 MODE is the mode of the operand; the mode of the result is
2967 different but can be deduced from MODE.
2972 expand_abs_nojump (enum machine_mode mode, rtx op0, rtx target,
2973 int result_unsignedp)
2978 result_unsignedp = 1;
2980 /* First try to do it with a special abs instruction. */
2981 temp = expand_unop (mode, result_unsignedp ? abs_optab : absv_optab,
2986 /* For floating point modes, try clearing the sign bit. */
2987 if (SCALAR_FLOAT_MODE_P (mode))
2989 temp = expand_absneg_bit (ABS, mode, op0, target);
2994 /* If we have a MAX insn, we can do this as MAX (x, -x). */
2995 if (smax_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing
2996 && !HONOR_SIGNED_ZEROS (mode))
2998 rtx last = get_last_insn ();
3000 temp = expand_unop (mode, neg_optab, op0, NULL_RTX, 0);
3002 temp = expand_binop (mode, smax_optab, op0, temp, target, 0,
3008 delete_insns_since (last);
3011 /* If this machine has expensive jumps, we can do integer absolute
3012 value of X as (((signed) x >> (W-1)) ^ x) - ((signed) x >> (W-1)),
3013 where W is the width of MODE. */
3015 if (GET_MODE_CLASS (mode) == MODE_INT && BRANCH_COST >= 2)
/* EXTENDED is all-ones when X is negative, zero otherwise.  */
3017 rtx extended = expand_shift (RSHIFT_EXPR, mode, op0,
3018 size_int (GET_MODE_BITSIZE (mode) - 1),
3021 temp = expand_binop (mode, xor_optab, extended, op0, target, 0,
3024 temp = expand_binop (mode, result_unsignedp ? sub_optab : subv_optab,
3025 temp, extended, target, 0, OPTAB_LIB_WIDEN);
3035 expand_abs (enum machine_mode mode, rtx op0, rtx target,
3036 int result_unsignedp, int safe)
3041 result_unsignedp = 1;
/* First try the branch-free expansions.  */
3043 temp = expand_abs_nojump (mode, op0, target, result_unsignedp);
3047 /* If that does not win, use conditional jump and negate. */
3049 /* It is safe to use the target if it is the same
3050 as the source if this is also a pseudo register */
3051 if (op0 == target && REG_P (op0)
3052 && REGNO (op0) >= FIRST_PSEUDO_REGISTER)
3055 op1 = gen_label_rtx ();
/* Pick a safe target: a fresh pseudo unless the caller's target is a
   usable non-volatile register of the right mode.  */
3056 if (target == 0 || ! safe
3057 || GET_MODE (target) != mode
3058 || (MEM_P (target) && MEM_VOLATILE_P (target))
3060 && REGNO (target) < FIRST_PSEUDO_REGISTER))
3061 target = gen_reg_rtx (mode);
3063 emit_move_insn (target, op0);
/* Skip the negation when OP0 >= 0 (branch to label OP1).  */
3066 do_compare_rtx_and_jump (target, CONST0_RTX (mode), GE, 0, mode,
3067 NULL_RTX, NULL_RTX, op1);
3069 op0 = expand_unop (mode, result_unsignedp ? neg_optab : negv_optab,
3072 emit_move_insn (target, op0);
3078 /* A subroutine of expand_copysign, perform the copysign operation using the
3079 abs and neg primitives advertised to exist on the target. The assumption
3080 is that we have a split register file, and leaving op0 in fp registers,
3081 and not playing with subregs so much, will help the register allocator. */
3084 expand_copysign_absneg (enum machine_mode mode, rtx op0, rtx op1, rtx target,
3085 int bitpos, bool op0_is_abs)
3087 enum machine_mode imode;
3088 HOST_WIDE_INT hi, lo;
/* Get the absolute value of OP0 into TARGET.  */
3097 op0 = expand_unop (mode, abs_optab, op0, target, 0);
3104 if (target == NULL_RTX)
3105 target = copy_to_reg (op0);
3107 emit_move_insn (target, op0);
/* Locate the integer mode/word of OP1 that holds the sign bit.  */
3110 if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
3112 imode = int_mode_for_mode (mode);
3113 if (imode == BLKmode)
3115 op1 = gen_lowpart (imode, op1);
3120 if (FLOAT_WORDS_BIG_ENDIAN)
3121 word = (GET_MODE_BITSIZE (mode) - bitpos) / BITS_PER_WORD;
3123 word = bitpos / BITS_PER_WORD;
3124 bitpos = bitpos % BITS_PER_WORD;
3125 op1 = operand_subword_force (op1, word, mode);
/* (LO,HI) is the sign-bit mask within the selected word.  */
3128 if (bitpos < HOST_BITS_PER_WIDE_INT)
3131 lo = (HOST_WIDE_INT) 1 << bitpos;
3135 hi = (HOST_WIDE_INT) 1 << (bitpos - HOST_BITS_PER_WIDE_INT);
/* Test OP1's sign bit: if clear, |OP0| already in TARGET is the
   answer; otherwise negate into TARGET.  */
3139 op1 = expand_binop (imode, and_optab, op1,
3140 immed_double_const (lo, hi, imode),
3141 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3143 label = gen_label_rtx ();
3144 emit_cmp_and_jump_insns (op1, const0_rtx, EQ, NULL_RTX, imode, 1, label);
3146 if (GET_CODE (op0) == CONST_DOUBLE)
3147 op0 = simplify_unary_operation (NEG, mode, op0, mode);
3149 op0 = expand_unop (mode, neg_optab, op0, target, 0);
3151 emit_move_insn (target, op0);
3159 /* A subroutine of expand_copysign, perform the entire copysign operation
3160 with integer bitmasks. BITPOS is the position of the sign bit; OP0_IS_ABS
3161 is true if op0 is known to have its sign bit clear. */
3164 expand_copysign_bit (enum machine_mode mode, rtx op0, rtx op1, rtx target,
3165 int bitpos, bool op0_is_abs)
3167 enum machine_mode imode;
3168 HOST_WIDE_INT hi, lo;
3169 int word, nwords, i;
3172 if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
3174 imode = int_mode_for_mode (mode);
3175 if (imode == BLKmode)
3184 if (FLOAT_WORDS_BIG_ENDIAN)
3185 word = (GET_MODE_BITSIZE (mode) - bitpos) / BITS_PER_WORD;
3187 word = bitpos / BITS_PER_WORD;
3188 bitpos = bitpos % BITS_PER_WORD;
3189 nwords = (GET_MODE_BITSIZE (mode) + BITS_PER_WORD - 1) / BITS_PER_WORD;
/* (LO,HI) is the sign-bit mask within its containing word.  */
3192 if (bitpos < HOST_BITS_PER_WIDE_INT)
3195 lo = (HOST_WIDE_INT) 1 << bitpos;
3199 hi = (HOST_WIDE_INT) 1 << (bitpos - HOST_BITS_PER_WIDE_INT);
3203 if (target == 0 || target == op0 || target == op1)
3204 target = gen_reg_rtx (mode);
/* Multiword: the word holding the sign bit gets
   (OP0 & ~mask) | (OP1 & mask); the other words are copied from OP0.  */
3210 for (i = 0; i < nwords; ++i)
3212 rtx targ_piece = operand_subword (target, i, 1, mode);
3213 rtx op0_piece = operand_subword_force (op0, i, mode);
3218 op0_piece = expand_binop (imode, and_optab, op0_piece,
3219 immed_double_const (~lo, ~hi, imode),
3220 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3222 op1 = expand_binop (imode, and_optab,
3223 operand_subword_force (op1, i, mode),
3224 immed_double_const (lo, hi, imode),
3225 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3227 temp = expand_binop (imode, ior_optab, op0_piece, op1,
3228 targ_piece, 1, OPTAB_LIB_WIDEN);
3229 if (temp != targ_piece)
3230 emit_move_insn (targ_piece, temp);
3233 emit_move_insn (targ_piece, op0_piece);
3236 insns = get_insns ();
3239 emit_no_conflict_block (insns, target, op0, op1, NULL_RTX);
/* Single-word: the same mask arithmetic on the whole value.  */
3243 op1 = expand_binop (imode, and_optab, gen_lowpart (imode, op1),
3244 immed_double_const (lo, hi, imode),
3245 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3247 op0 = gen_lowpart (imode, op0);
3249 op0 = expand_binop (imode, and_optab, op0,
3250 immed_double_const (~lo, ~hi, imode),
3251 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3253 temp = expand_binop (imode, ior_optab, op0, op1,
3254 gen_lowpart (imode, target), 1, OPTAB_LIB_WIDEN);
3255 target = lowpart_subreg_maybe_copy (mode, temp, imode);
3261 /* Expand the C99 copysign operation. OP0 and OP1 must be the same
3262 scalar floating point mode. Return NULL if we do not know how to
3263 expand the operation inline. */
3266 expand_copysign (rtx op0, rtx op1, rtx target)
3268 enum machine_mode mode = GET_MODE (op0);
3269 const struct real_format *fmt;
3273 gcc_assert (SCALAR_FLOAT_MODE_P (mode));
3274 gcc_assert (GET_MODE (op1) == mode);
3276 /* First try to do it with a special instruction. */
3277 temp = expand_binop (mode, copysign_optab, op0, op1,
3278 target, 0, OPTAB_DIRECT);
3282 fmt = REAL_MODE_FORMAT (mode);
3283 if (fmt == NULL || !fmt->has_signed_zero)
/* A negative constant OP0 can be pre-folded to its absolute value.  */
3287 if (GET_CODE (op0) == CONST_DOUBLE)
3289 if (real_isneg (CONST_DOUBLE_REAL_VALUE (op0)))
3290 op0 = simplify_unary_operation (ABS, mode, op0, mode);
/* Prefer the abs/neg strategy when the sign bit is readable and the
   needed primitives (or a constant OP0) are available.  */
3294 if (fmt->signbit_ro >= 0
3295 && (GET_CODE (op0) == CONST_DOUBLE
3296 || (neg_optab->handlers[mode].insn_code != CODE_FOR_nothing
3297 && abs_optab->handlers[mode].insn_code != CODE_FOR_nothing)))
3299 temp = expand_copysign_absneg (mode, op0, op1, target,
3300 fmt->signbit_ro, op0_is_abs);
/* Otherwise manipulate the sign bit with integer masks, if its
   writable position is known.  */
3305 if (fmt->signbit_rw < 0)
3307 return expand_copysign_bit (mode, op0, op1, target,
3308 fmt->signbit_rw, op0_is_abs);
3311 /* Generate an instruction whose insn-code is INSN_CODE,
3312 with two operands: an output TARGET and an input OP0.
3313 TARGET *must* be nonzero, and the output is always stored there.
3314 CODE is an rtx code such that (CODE OP0) is an rtx that describes
3315 the value that is stored into TARGET. */
3318 emit_unop_insn (int icode, rtx target, rtx op0, enum rtx_code code)
3321 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
3326 /* Now, if insn does not accept our operands, put them into pseudos. */
3328 if (!insn_data[icode].operand[1].predicate (op0, mode0))
3329 op0 = copy_to_mode_reg (mode0, op0);
3331 if (!insn_data[icode].operand[0].predicate (temp, GET_MODE (temp)))
3332 temp = gen_reg_rtx (GET_MODE (temp));
3334 pat = GEN_FCN (icode) (temp, op0);
/* Attach a REG_EQUAL note describing (CODE OP0) when the pattern
   expanded to more than one insn and CODE is known.  */
3336 if (INSN_P (pat) && NEXT_INSN (pat) != NULL_RTX && code != UNKNOWN)
3337 add_equal_note (pat, temp, code, op0, NULL_RTX);
/* Copy from the (possibly substituted) output pseudo to TARGET.  */
3342 emit_move_insn (target, temp);
/* State threaded through no_conflict_move_test via note_stores:
   TARGET is the (multiword) destination being built, FIRST is the
   first insn of the candidate block, INSN is the insn currently
   being examined.  */
3345 struct no_conflict_data
3347 rtx target, first, insn;
3351 /* Called via note_stores by emit_no_conflict_block and emit_libcall_block.
3352 Set P->must_stay if the currently examined clobber / store has to stay
3353 in the list of insns that constitute the actual no_conflict block /
3356 no_conflict_move_test (rtx dest, rtx set, void *p0)
3358 struct no_conflict_data *p= p0;
3360 /* If this insn directly contributes to setting the target, it must stay. */
3361 if (reg_overlap_mentioned_p (p->target, dest))
3362 p->must_stay = true;
3363 /* If we haven't committed to keeping any other insns in the list yet,
3364 there is nothing more to check. */
3365 else if (p->insn == p->first)
3367 /* If this insn sets / clobbers a register that feeds one of the insns
3368 already in the list, this insn has to stay too. */
3369 else if (reg_overlap_mentioned_p (dest, PATTERN (p->first))
3370 || (CALL_P (p->first) && (find_reg_fusage (p->first, USE, dest)))
3371 || reg_used_between_p (dest, p->first, p->insn)
3372 /* Likewise if this insn depends on a register set by a previous
3373 insn in the list, or if it sets a result (presumably a hard
3374 register) that is set or clobbered by a previous insn.
3375 N.B. the modified_*_p (SET_DEST...) tests applied to a MEM
3376 SET_DEST perform the former check on the address, and the latter
3377 check on the MEM. */
3378 || (GET_CODE (set) == SET
3379 && (modified_in_p (SET_SRC (set), p->first)
3380 || modified_in_p (SET_DEST (set), p->first)
3381 || modified_between_p (SET_SRC (set), p->first, p->insn)
3382 || modified_between_p (SET_DEST (set), p->first, p->insn))))
3383 p->must_stay = true;
3386 /* Encapsulate the block starting at FIRST and ending with LAST, which is
3387 logically equivalent to EQUIV, so it gets manipulated as a unit if it
3388 is possible to do so. */
3391 maybe_encapsulate_block (rtx first, rtx last, rtx equiv)
/* Skip encapsulation when non-call exceptions could make EQUIV trap.  */
3393 if (!flag_non_call_exceptions || !may_trap_p (equiv))
3395 /* We can't attach the REG_LIBCALL and REG_RETVAL notes when the
3396 encapsulated region would not be in one basic block, i.e. when
3397 there is a control_flow_insn_p insn between FIRST and LAST. */
3398 bool attach_libcall_retval_notes = true;
3399 rtx insn, next = NEXT_INSN (last);
3401 for (insn = first; insn != next; insn = NEXT_INSN (insn))
3402 if (control_flow_insn_p (insn))
3404 attach_libcall_retval_notes = false;
3408 if (attach_libcall_retval_notes)
/* Mark the region: REG_LIBCALL on FIRST points to LAST, REG_RETVAL on
   LAST points back to FIRST, and every insn in between gets a
   REG_LIBCALL_ID note.  */
3410 REG_NOTES (first) = gen_rtx_INSN_LIST (REG_LIBCALL, last,
3412 REG_NOTES (last) = gen_rtx_INSN_LIST (REG_RETVAL, first,
3414 next = NEXT_INSN (last);
3415 for (insn = first; insn != next; insn = NEXT_INSN (insn))
3416 REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_LIBCALL_ID,
3417 GEN_INT (libcall_id),
3424 /* Emit code to perform a series of operations on a multi-word quantity, one
3427 Such a block is preceded by a CLOBBER of the output, consists of multiple
3428 insns, each setting one word of the output, and followed by a SET copying
3429 the output to itself.
3431 Each of the insns setting words of the output receives a REG_NO_CONFLICT
3432 note indicating that it doesn't conflict with the (also multi-word)
3433 inputs. The entire block is surrounded by REG_LIBCALL and REG_RETVAL
3436 INSNS is a block of code generated to perform the operation, not including
3437 the CLOBBER and final copy. All insns that compute intermediate values
3438 are first emitted, followed by the block as described above.
3440 TARGET, OP0, and OP1 are the output and inputs of the operations,
3441 respectively. OP1 may be zero for a unary operation.
3443 EQUIV, if nonzero, is an expression to be placed into a REG_EQUAL note
3446 If TARGET is not a register, INSNS is simply emitted with no special
3447 processing. Likewise if anything in INSNS is not an INSN or if
3448 there is a libcall block inside INSNS.
3450 The final insn emitted is returned. */
3453 emit_no_conflict_block (rtx insns, rtx target, rtx op0, rtx op1, rtx equiv)
3455 rtx prev, next, first, last, insn;
/* Fall back to a plain emit when no-conflict bookkeeping is unsafe.  */
3457 if (!REG_P (target) || reload_in_progress)
3458 return emit_insn (insns);
3460 for (insn = insns; insn; insn = NEXT_INSN (insn))
3461 if (!NONJUMP_INSN_P (insn)
3462 || find_reg_note (insn, REG_LIBCALL, NULL_RTX))
3463 return emit_insn (insns);
3465 /* First emit all insns that do not store into words of the output and remove
3466 these from the list. */
3467 for (insn = insns; insn; insn = next)
3470 struct no_conflict_data data;
3472 next = NEXT_INSN (insn);
3474 /* Some ports (cris) create libcall regions on their own. We must
3475 avoid any potential nesting of LIBCALLs. */
3476 if ((note = find_reg_note (insn, REG_LIBCALL, NULL)) != NULL)
3477 remove_note (insn, note);
3478 if ((note = find_reg_note (insn, REG_RETVAL, NULL)) != NULL)
3479 remove_note (insn, note);
3480 if ((note = find_reg_note (insn, REG_LIBCALL_ID, NULL)) != NULL)
3481 remove_note (insn, note);
3483 data.target = target;
3487 note_stores (PATTERN (insn), no_conflict_move_test, &data);
3488 if (! data.must_stay)
/* Unlink INSN from the pending chain; it is emitted ahead of the
   no-conflict block.  */
3490 if (PREV_INSN (insn))
3491 NEXT_INSN (PREV_INSN (insn)) = next;
3496 PREV_INSN (next) = PREV_INSN (insn);
3502 prev = get_last_insn ();
3504 /* Now write the CLOBBER of the output, followed by the setting of each
3505 of the words, followed by the final copy. */
3506 if (target != op0 && target != op1)
3507 emit_insn (gen_rtx_CLOBBER (VOIDmode, target));
3509 for (insn = insns; insn; insn = next)
3511 next = NEXT_INSN (insn);
/* Tag each word-store with REG_NO_CONFLICT notes for the inputs.  */
3514 if (op1 && REG_P (op1))
3515 REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_NO_CONFLICT, op1,
3518 if (op0 && REG_P (op0))
3519 REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_NO_CONFLICT, op0,
/* The final TARGET = TARGET copy carries the REG_EQUAL note, if the
   target machine can move this mode at all.  */
3523 if (mov_optab->handlers[(int) GET_MODE (target)].insn_code
3524 != CODE_FOR_nothing)
3526 last = emit_move_insn (target, target);
3528 set_unique_reg_note (last, REG_EQUAL, equiv);
3532 last = get_last_insn ();
3534 /* Remove any existing REG_EQUAL note from "last", or else it will
3535 be mistaken for a note referring to the full contents of the
3536 alleged libcall value when found together with the REG_RETVAL
3537 note added below. An existing note can come from an insn
3538 expansion at "last". */
3539 remove_note (last, find_reg_note (last, REG_EQUAL, NULL_RTX));
3543 first = get_insns ();
3545 first = NEXT_INSN (prev);
3547 maybe_encapsulate_block (first, last, equiv);
3552 /* Emit code to make a call to a constant function or a library call.
3554 INSNS is a list containing all insns emitted in the call.
3555 These insns leave the result in RESULT. Our block is to copy RESULT
3556 to TARGET, which is logically equivalent to EQUIV.
3558 We first emit any insns that set a pseudo on the assumption that these are
3559 loading constants into registers; doing so allows them to be safely cse'ed
3560 between blocks. Then we emit all the other insns in the block, followed by
3561 an insn to move RESULT to TARGET. This last insn will have a REG_EQUAL
3562 note with an operand of EQUIV.
3564 Moving assignments to pseudos outside of the block is done to improve
3565 the generated code, but is not required to generate correct code,
3566 hence being unable to move an assignment is not grounds for not making
3567 a libcall block. There are two reasons why it is safe to leave these
3568 insns inside the block: First, we know that these pseudos cannot be
3569 used in generated RTL outside the block since they are created for
3570 temporary purposes within the block. Second, CSE will not record the
3571 values of anything set inside a libcall block, so we know they must
3572 be dead at the end of the block.
3574 Except for the first group of insns (the ones setting pseudos), the
3575 block is delimited by REG_RETVAL and REG_LIBCALL notes. */
/* Emit the insn sequence INSNS as a libcall block that computes RESULT,
   then copy RESULT into TARGET; EQUIV describes the computed value for
   the REG_EQUAL note on the final move (full contract in the block
   comment above).
   NOTE(review): gaps in the embedded line numbering show that some
   lines (braces, a few statements) are missing from this listing.  */
3577 emit_libcall_block (rtx insns, rtx target, rtx result, rtx equiv)
3579 rtx final_dest = target;
3580 rtx prev, next, first, last, insn;
3582 /* If this is a reg with REG_USERVAR_P set, then it could possibly turn
3583 into a MEM later. Protect the libcall block from this change. */
3584 if (! REG_P (target) || REG_USERVAR_P (target))
3585 target = gen_reg_rtx (GET_MODE (target));
3587 /* If we're using non-call exceptions, a libcall corresponding to an
3588 operation that may trap may also trap. */
3589 if (flag_non_call_exceptions && may_trap_p (equiv))
3591 for (insn = insns; insn; insn = NEXT_INSN (insn))
3594 rtx note = find_reg_note (insn, REG_EH_REGION, NULL_RTX);
/* Drop EH-region notes that claim the insn cannot throw; the libcall
   itself may trap here.  */
3596 if (note != 0 && INTVAL (XEXP (note, 0)) <= 0)
3597 remove_note (insn, note);
3601 /* look for any CALL_INSNs in this sequence, and attach a REG_EH_REGION
3602 reg note to indicate that this call cannot throw or execute a nonlocal
3603 goto (unless there is already a REG_EH_REGION note, in which case
3605 for (insn = insns; insn; insn = NEXT_INSN (insn))
3608 rtx note = find_reg_note (insn, REG_EH_REGION, NULL_RTX);
3611 XEXP (note, 0) = constm1_rtx;
3613 REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_EH_REGION, constm1_rtx,
3617 /* First emit all insns that set pseudos. Remove them from the list as
3618 we go. Avoid insns that set pseudos which were referenced in previous
3619 insns. These can be generated by move_by_pieces, for example,
3620 to update an address. Similarly, avoid insns that reference things
3621 set in previous insns. */
3623 for (insn = insns; insn; insn = next)
3625 rtx set = single_set (insn);
3628 /* Some ports (cris) create libcall regions of their own. We must
3629 avoid any potential nesting of LIBCALLs. */
3630 if ((note = find_reg_note (insn, REG_LIBCALL, NULL)) != NULL)
3631 remove_note (insn, note);
3632 if ((note = find_reg_note (insn, REG_RETVAL, NULL)) != NULL)
3633 remove_note (insn, note);
3634 if ((note = find_reg_note (insn, REG_LIBCALL_ID, NULL)) != NULL)
3635 remove_note (insn, note);
3637 next = NEXT_INSN (insn);
/* Only insns whose single SET stores a pseudo register are candidates
   for being hoisted out of the libcall block.  */
3639 if (set != 0 && REG_P (SET_DEST (set))
3640 && REGNO (SET_DEST (set)) >= FIRST_PSEUDO_REGISTER
3642 struct no_conflict_data data;
3644 data.target = const0_rtx;
3648 note_stores (PATTERN (insn), no_conflict_move_test, &data);
3649 if (! data.must_stay)
/* Unlink INSN from the list; it is emitted ahead of the block.  */
3651 if (PREV_INSN (insn))
3652 NEXT_INSN (PREV_INSN (insn)) = next;
3657 PREV_INSN (next) = PREV_INSN (insn);
3663 /* Some ports use a loop to copy large arguments onto the stack.
3664 Don't move anything outside such a loop. */
3669 prev = get_last_insn ();
3671 /* Write the remaining insns followed by the final copy. */
3673 for (insn = insns; insn; insn = next)
3675 next = NEXT_INSN (insn);
3680 last = emit_move_insn (target, result);
/* Only attach a REG_EQUAL note when a plain move insn exists for this
   mode; otherwise emit_move_insn may have produced a multi-insn
   sequence and the note would be misplaced.  */
3681 if (mov_optab->handlers[(int) GET_MODE (target)].insn_code
3682 != CODE_FOR_nothing)
3683 set_unique_reg_note (last, REG_EQUAL, copy_rtx (equiv));
3686 /* Remove any existing REG_EQUAL note from "last", or else it will
3687 be mistaken for a note referring to the full contents of the
3688 libcall value when found together with the REG_RETVAL note added
3689 below. An existing note can come from an insn expansion at
3691 remove_note (last, find_reg_note (last, REG_EQUAL, NULL_RTX));
3694 if (final_dest != target)
3695 emit_move_insn (final_dest, target);
3698 first = get_insns ();
3700 first = NEXT_INSN (prev);
3702 maybe_encapsulate_block (first, last, equiv);
3705 /* Nonzero if we can perform a comparison of mode MODE straightforwardly.
3706 PURPOSE describes how this comparison will be used. CODE is the rtx
3707 comparison code we will be using.
3709 ??? Actually, CODE is slightly weaker than that. A target is still
3710 required to implement all of the normal bcc operations, but not
3711 required to implement all (or any) of the unordered bcc operations. */
3714 can_compare_p (enum rtx_code code, enum machine_mode mode,
3715 enum can_compare_purpose purpose)
/* Scan MODE and each successively wider mode (the do/while below) for
   an insn that can carry out the comparison for PURPOSE.  */
3719 if (cmp_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
3721 if (purpose == ccp_jump)
3722 return bcc_gen_fctn[(int) code] != NULL;
3723 else if (purpose == ccp_store_flag)
3724 return setcc_gen_code[(int) code] != CODE_FOR_nothing;
3726 /* There's only one cmov entry point, and it's allowed to fail. */
3729 if (purpose == ccp_jump
3730 && cbranch_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
3732 if (purpose == ccp_cmov
3733 && cmov_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
3735 if (purpose == ccp_store_flag
3736 && cstore_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
/* No luck in this mode; a comparison can also be done after widening.  */
3738 mode = GET_MODE_WIDER_MODE (mode);
3740 while (mode != VOIDmode);
3745 /* This function is called when we are going to emit a compare instruction that
3746 compares the values found in *PX and *PY, using the rtl operator COMPARISON.
3748 *PMODE is the mode of the inputs (in case they are const_int).
3749 *PUNSIGNEDP nonzero says that the operands are unsigned;
3750 this matters if they need to be widened.
3752 If they have mode BLKmode, then SIZE specifies the size of both operands.
3754 This function performs all the setup necessary so that the caller only has
3755 to emit a single comparison insn. This setup can involve doing a BLKmode
3756 comparison or emitting a library call to perform the comparison if no insn
3757 is available to handle it.
3758 The values which are passed in through pointers can be modified; the caller
3759 should perform the comparison on the modified values. Constant
3760 comparisons must have already been folded. */
3763 prepare_cmp_insn (rtx *px, rtx *py, enum rtx_code *pcomparison, rtx size,
3764 enum machine_mode *pmode, int *punsignedp,
3765 enum can_compare_purpose purpose)
3767 enum machine_mode mode = *pmode;
3768 rtx x = *px, y = *py;
3769 int unsignedp = *punsignedp;
3771 /* If we are inside an appropriately-short loop and we are optimizing,
3772 force expensive constants into a register. */
3773 if (CONSTANT_P (x) && optimize
3774 && rtx_cost (x, COMPARE) > COSTS_N_INSNS (1))
3775 x = force_reg (mode, x);
3777 if (CONSTANT_P (y) && optimize
3778 && rtx_cost (y, COMPARE) > COSTS_N_INSNS (1))
3779 y = force_reg (mode, y);
3782 /* Make sure we have a canonical comparison. The RTL
3783 documentation states that canonical comparisons are required only
3784 for targets which have cc0. */
3785 gcc_assert (!CONSTANT_P (x) || CONSTANT_P (y));
3788 /* Don't let both operands fail to indicate the mode. */
3789 if (GET_MODE (x) == VOIDmode && GET_MODE (y) == VOIDmode)
3790 x = force_reg (mode, x);
3792 /* Handle all BLKmode compares. */
3794 if (mode == BLKmode)
3796 enum machine_mode cmp_mode, result_mode;
3797 enum insn_code cmp_code;
/* OPALIGN is the common alignment of the two memory operands, in
   bytes, passed to the block-compare pattern.  */
3802 = GEN_INT (MIN (MEM_ALIGN (x), MEM_ALIGN (y)) / BITS_PER_UNIT);
3806 /* Try to use a memory block compare insn - either cmpstr
3807 or cmpmem will do. */
3808 for (cmp_mode = GET_CLASS_NARROWEST_MODE (MODE_INT);
3809 cmp_mode != VOIDmode;
3810 cmp_mode = GET_MODE_WIDER_MODE (cmp_mode))
3812 cmp_code = cmpmem_optab[cmp_mode];
3813 if (cmp_code == CODE_FOR_nothing)
3814 cmp_code = cmpstr_optab[cmp_mode];
3815 if (cmp_code == CODE_FOR_nothing)
3816 cmp_code = cmpstrn_optab[cmp_mode];
3817 if (cmp_code == CODE_FOR_nothing)
3820 /* Must make sure the size fits the insn's mode. */
3821 if ((GET_CODE (size) == CONST_INT
3822 && INTVAL (size) >= (1 << GET_MODE_BITSIZE (cmp_mode)))
3823 || (GET_MODE_BITSIZE (GET_MODE (size))
3824 > GET_MODE_BITSIZE (cmp_mode)))
3827 result_mode = insn_data[cmp_code].operand[0].mode;
3828 result = gen_reg_rtx (result_mode);
3829 size = convert_to_mode (cmp_mode, size, 1);
3830 emit_insn (GEN_FCN (cmp_code) (result, x, y, size, opalign));
3834 *pmode = result_mode;
3838 /* Otherwise call a library function, memcmp. */
3839 libfunc = memcmp_libfunc;
3840 length_type = sizetype;
3841 result_mode = TYPE_MODE (integer_type_node);
3842 cmp_mode = TYPE_MODE (length_type);
3843 size = convert_to_mode (TYPE_MODE (length_type), size,
3844 TYPE_UNSIGNED (length_type));
3846 result = emit_library_call_value (libfunc, 0, LCT_PURE_MAKE_BLOCK,
3853 *pmode = result_mode;
3857 /* Don't allow operands to the compare to trap, as that can put the
3858 compare and branch in different basic blocks. */
3859 if (flag_non_call_exceptions)
3862 x = force_reg (mode, x);
3864 y = force_reg (mode, y);
3869 if (can_compare_p (*pcomparison, mode, purpose))
3872 /* Handle a lib call just for the mode we are using. */
3874 if (cmp_optab->handlers[(int) mode].libfunc && !SCALAR_FLOAT_MODE_P (mode))
3876 rtx libfunc = cmp_optab->handlers[(int) mode].libfunc;
3879 /* If we want unsigned, and this mode has a distinct unsigned
3880 comparison routine, use that. */
3881 if (unsignedp && ucmp_optab->handlers[(int) mode].libfunc)
3882 libfunc = ucmp_optab->handlers[(int) mode].libfunc;
3884 result = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST_MAKE_BLOCK,
3885 word_mode, 2, x, mode, y, mode);
3887 /* There are two kinds of comparison routines. Biased routines
3888 return 0/1/2, and unbiased routines return -1/0/1. Other parts
3889 of gcc expect that the comparison operation is equivalent
3890 to the modified comparison. For signed comparisons compare the
3891 result against 1 in the biased case, and zero in the unbiased
3892 case. For unsigned comparisons always compare against 1 after
3893 biasing the unbiased result by adding 1. This gives us a way to
3899 if (!TARGET_LIB_INT_CMP_BIASED)
3902 *px = plus_constant (result, 1);
/* Fall through to the float path only for scalar float modes;
   everything else was handled above.  */
3909 gcc_assert (SCALAR_FLOAT_MODE_P (mode));
3910 prepare_float_lib_cmp (px, py, pcomparison, pmode, punsignedp);
3913 /* Before emitting an insn with code ICODE, make sure that X, which is going
3914 to be used for operand OPNUM of the insn, is converted from mode MODE to
3915 WIDER_MODE (UNSIGNEDP determines whether it is an unsigned conversion), and
3916 that it is accepted by the operand predicate. Return the new value. */
3919 prepare_operand (int icode, rtx x, int opnum, enum machine_mode mode,
3920 enum machine_mode wider_mode, int unsignedp)
3922 if (mode != wider_mode)
3923 x = convert_modes (wider_mode, mode, x, unsignedp);
/* If the insn's predicate rejects X as operand OPNUM, copy it into a
   fresh register of the operand's required mode.  */
3925 if (!insn_data[icode].operand[opnum].predicate
3926 (x, insn_data[icode].operand[opnum].mode))
3930 x = copy_to_mode_reg (insn_data[icode].operand[opnum].mode, x);
3936 /* Subroutine of emit_cmp_and_jump_insns; this function is called when we know
3937 we can do the comparison.
3938 The arguments are the same as for emit_cmp_and_jump_insns; but LABEL may
3939 be NULL_RTX which indicates that only a comparison is to be generated. */
3942 emit_cmp_and_jump_insn_1 (rtx x, rtx y, enum machine_mode mode,
3943 enum rtx_code comparison, int unsignedp, rtx label)
3945 rtx test = gen_rtx_fmt_ee (comparison, mode, x, y);
3946 enum mode_class class = GET_MODE_CLASS (mode);
3947 enum machine_mode wider_mode = mode;
3949 /* Try combined insns first. */
/* The loop below (partly missing from this listing) retries in ever
   wider modes of the same class until something matches.  */
3952 enum insn_code icode;
3953 PUT_MODE (test, wider_mode);
/* Preferred: a single compare-and-branch (cbranch) insn.  */
3957 icode = cbranch_optab->handlers[(int) wider_mode].insn_code;
3959 if (icode != CODE_FOR_nothing
3960 && insn_data[icode].operand[0].predicate (test, wider_mode))
3962 x = prepare_operand (icode, x, 1, mode, wider_mode, unsignedp);
3963 y = prepare_operand (icode, y, 2, mode, wider_mode, unsignedp);
3964 emit_jump_insn (GEN_FCN (icode) (test, x, y, label));
3969 /* Handle some compares against zero. */
3970 icode = (int) tst_optab->handlers[(int) wider_mode].insn_code;
3971 if (y == CONST0_RTX (mode) && icode != CODE_FOR_nothing)
3973 x = prepare_operand (icode, x, 0, mode, wider_mode, unsignedp);
3974 emit_insn (GEN_FCN (icode) (x));
3976 emit_jump_insn (bcc_gen_fctn[(int) comparison] (label));
3980 /* Handle compares for which there is a directly suitable insn. */
3982 icode = (int) cmp_optab->handlers[(int) wider_mode].insn_code;
3983 if (icode != CODE_FOR_nothing)
3985 x = prepare_operand (icode, x, 0, mode, wider_mode, unsignedp);
3986 y = prepare_operand (icode, y, 1, mode, wider_mode, unsignedp);
3987 emit_insn (GEN_FCN (icode) (x, y));
3989 emit_jump_insn (bcc_gen_fctn[(int) comparison] (label));
3993 if (!CLASS_HAS_WIDER_MODES_P (class))
3996 wider_mode = GET_MODE_WIDER_MODE (wider_mode);
3998 while (wider_mode != VOIDmode);
4003 /* Generate code to compare X with Y so that the condition codes are
4004 set and to jump to LABEL if the condition is true. If X is a
4005 constant and Y is not a constant, then the comparison is swapped to
4006 ensure that the comparison RTL has the canonical form.
4008 UNSIGNEDP nonzero says that X and Y are unsigned; this matters if they
4009 need to be widened by emit_cmp_insn. UNSIGNEDP is also used to select
4010 the proper branch condition code.
4012 If X and Y have mode BLKmode, then SIZE specifies the size of both X and Y.
4014 MODE is the mode of the inputs (in case they are const_int).
4016 COMPARISON is the rtl operator to compare with (EQ, NE, GT, etc.). It will
4017 be passed unchanged to emit_cmp_insn, then potentially converted into an
4018 unsigned variant based on UNSIGNEDP to select a proper jump instruction. */
4021 emit_cmp_and_jump_insns (rtx x, rtx y, enum rtx_code comparison, rtx size,
4022 enum machine_mode mode, int unsignedp, rtx label)
4024 rtx op0 = x, op1 = y;
4026 /* Swap operands and condition to ensure canonical RTL. */
4027 if (swap_commutative_operands_p (x, y))
4029 /* If we're not emitting a branch, this means some caller
4034 comparison = swap_condition (comparison);
4038 /* If OP0 is still a constant, then both X and Y must be constants.
4039 Force X into a register to create canonical RTL. */
4040 if (CONSTANT_P (op0))
4041 op0 = force_reg (mode, op0);
/* NOTE(review): the guard line for this statement is missing from the
   listing; presumably it is conditional on UNSIGNEDP — confirm against
   the full source.  */
4045 comparison = unsigned_condition (comparison);
4047 prepare_cmp_insn (&op0, &op1, &comparison, size, &mode, &unsignedp,
4049 emit_cmp_and_jump_insn_1 (op0, op1, mode, comparison, unsignedp, label);
4052 /* Like emit_cmp_and_jump_insns, but generate only the comparison. */
4055 emit_cmp_insn (rtx x, rtx y, enum rtx_code comparison, rtx size,
4056 enum machine_mode mode, int unsignedp)
/* Passing a null (0) label asks for the compare without the branch.  */
4058 emit_cmp_and_jump_insns (x, y, comparison, size, mode, unsignedp, 0);
4061 /* Emit a library call comparison between floating point X and Y.
4062 COMPARISON is the rtl operator to compare with (EQ, NE, GT, etc.). */
4065 prepare_float_lib_cmp (rtx *px, rtx *py, enum rtx_code *pcomparison,
4066 enum machine_mode *pmode, int *punsignedp)
4068 enum rtx_code comparison = *pcomparison;
4069 enum rtx_code swapped = swap_condition (comparison);
4070 enum rtx_code reversed = reverse_condition_maybe_unordered (comparison);
4073 enum machine_mode orig_mode = GET_MODE (x);
4074 enum machine_mode mode;
4075 rtx value, target, insns, equiv;
4077 bool reversed_p = false;
/* Search widening modes for a libfunc implementing COMPARISON directly,
   its operand-swapped form, or its reversed form.  */
4079 for (mode = orig_mode;
4081 mode = GET_MODE_WIDER_MODE (mode))
4083 if ((libfunc = code_to_optab[comparison]->handlers[mode].libfunc))
4086 if ((libfunc = code_to_optab[swapped]->handlers[mode].libfunc))
4089 tmp = x; x = y; y = tmp;
4090 comparison = swapped;
4094 if ((libfunc = code_to_optab[reversed]->handlers[mode].libfunc)
4095 && FLOAT_LIB_COMPARE_RETURNS_BOOL (mode, reversed))
4097 comparison = reversed;
4103 gcc_assert (mode != VOIDmode);
4105 if (mode != orig_mode)
4107 x = convert_to_mode (mode, x, 0);
4108 y = convert_to_mode (mode, y, 0);
4111 /* Attach a REG_EQUAL note describing the semantics of the libcall to
4112 the RTL. This allows the RTL optimizers to delete the libcall if the
4113 condition can be determined at compile-time. */
4114 if (comparison == UNORDERED)
/* UNORDERED is equivalent to (x != x) || (y != y) under IEEE NaN rules.  */
4116 rtx temp = simplify_gen_relational (NE, word_mode, mode, x, x);
4117 equiv = simplify_gen_relational (NE, word_mode, mode, y, y);
4118 equiv = simplify_gen_ternary (IF_THEN_ELSE, word_mode, word_mode,
4119 temp, const_true_rtx, equiv);
4123 equiv = simplify_gen_relational (comparison, word_mode, mode, x, y);
4124 if (! FLOAT_LIB_COMPARE_RETURNS_BOOL (mode, comparison))
4126 rtx true_rtx, false_rtx;
/* The switch arms below (labels missing from this listing) pick the
   pair of constants describing the libfunc's return convention.  */
4131 true_rtx = const0_rtx;
4132 false_rtx = const_true_rtx;
4136 true_rtx = const_true_rtx;
4137 false_rtx = const0_rtx;
4141 true_rtx = const1_rtx;
4142 false_rtx = const0_rtx;
4146 true_rtx = const0_rtx;
4147 false_rtx = constm1_rtx;
4151 true_rtx = constm1_rtx;
4152 false_rtx = const0_rtx;
4156 true_rtx = const0_rtx;
4157 false_rtx = const1_rtx;
4163 equiv = simplify_gen_ternary (IF_THEN_ELSE, word_mode, word_mode,
4164 equiv, true_rtx, false_rtx);
4169 value = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
4170 word_mode, 2, x, mode, y, mode);
4171 insns = get_insns ();
4174 target = gen_reg_rtx (word_mode);
4175 emit_libcall_block (insns, target, value, equiv);
4177 if (comparison == UNORDERED
4178 || FLOAT_LIB_COMPARE_RETURNS_BOOL (mode, comparison))
4179 comparison = reversed_p ? EQ : NE;
4184 *pcomparison = comparison;
4188 /* Generate code to indirectly jump to a location given in the rtx LOC. */
4191 emit_indirect_jump (rtx loc)
/* Copy LOC into a Pmode register when the indirect_jump pattern's
   operand predicate rejects it as-is.  */
4193 if (!insn_data[(int) CODE_FOR_indirect_jump].operand[0].predicate
4195 loc = copy_to_mode_reg (Pmode, loc);
4197 emit_jump_insn (gen_indirect_jump (loc));
4201 #ifdef HAVE_conditional_move
4203 /* Emit a conditional move instruction if the machine supports one for that
4204 condition and machine mode.
4206 OP0 and OP1 are the operands that should be compared using CODE. CMODE is
4207 the mode to use should they be constants. If it is VOIDmode, they cannot
4210 OP2 should be stored in TARGET if the comparison is true, otherwise OP3
4211 should be stored there. MODE is the mode to use should they be constants.
4212 If it is VOIDmode, they cannot both be constants.
4214 The result is either TARGET (perhaps modified) or NULL_RTX if the operation
4215 is not supported. */
4218 emit_conditional_move (rtx target, enum rtx_code code, rtx op0, rtx op1,
4219 enum machine_mode cmode, rtx op2, rtx op3,
4220 enum machine_mode mode, int unsignedp)
4222 rtx tem, subtarget, comparison, insn;
4223 enum insn_code icode;
4224 enum rtx_code reversed;
4226 /* If one operand is constant, make it the second one. Only do this
4227 if the other operand is not constant as well. */
4229 if (swap_commutative_operands_p (op0, op1))
4234 code = swap_condition (code);
4237 /* get_condition will prefer to generate LT and GT even if the old
4238 comparison was against zero, so undo that canonicalization here since
4239 comparisons against zero are cheaper. */
4240 if (code == LT && op1 == const1_rtx)
4241 code = LE, op1 = const0_rtx;
4242 else if (code == GT && op1 == constm1_rtx)
4243 code = GE, op1 = const0_rtx;
4245 if (cmode == VOIDmode)
4246 cmode = GET_MODE (op0);
/* If swapping OP2/OP3 is profitable and CODE is reversible, swap the
   arms (reversal logic partly missing from this listing).  */
4248 if (swap_commutative_operands_p (op2, op3)
4249 && ((reversed = reversed_comparison_code_parts (code, op0, op1, NULL))
4258 if (mode == VOIDmode)
4259 mode = GET_MODE (op2);
4261 icode = movcc_gen_code[mode];
4263 if (icode == CODE_FOR_nothing)
4267 target = gen_reg_rtx (mode);
4271 /* If the insn doesn't accept these operands, put them in pseudos. */
4273 if (!insn_data[icode].operand[0].predicate
4274 (subtarget, insn_data[icode].operand[0].mode))
4275 subtarget = gen_reg_rtx (insn_data[icode].operand[0].mode);
4277 if (!insn_data[icode].operand[2].predicate
4278 (op2, insn_data[icode].operand[2].mode))
4279 op2 = copy_to_mode_reg (insn_data[icode].operand[2].mode, op2);
4281 if (!insn_data[icode].operand[3].predicate
4282 (op3, insn_data[icode].operand[3].mode))
4283 op3 = copy_to_mode_reg (insn_data[icode].operand[3].mode, op3);
4285 /* Everything should now be in the suitable form, so emit the compare insn
4286 and then the conditional move. */
4289 = compare_from_rtx (op0, op1, code, unsignedp, cmode, NULL_RTX);
4291 /* ??? Watch for const0_rtx (nop) and const_true_rtx (unconditional)? */
4292 /* We can get const0_rtx or const_true_rtx in some circumstances. Just
4293 return NULL and let the caller figure out how best to deal with this
4295 if (GET_CODE (comparison) != code)
4298 insn = GEN_FCN (icode) (subtarget, comparison, op2, op3);
4300 /* If that failed, then give up. */
4306 if (subtarget != target)
4307 convert_move (target, subtarget, 0);
4312 /* Return nonzero if a conditional move of mode MODE is supported.
4314 This function is for combine so it can tell whether an insn that looks
4315 like a conditional move is actually supported by the hardware. If we
4316 guess wrong we lose a bit on optimization, but that's it. */
4317 /* ??? sparc64 supports conditionally moving integers values based on fp
4318 comparisons, and vice versa. How do we handle them? */
4321 can_conditionally_move_p (enum machine_mode mode)
/* Supported exactly when a movcc pattern exists for MODE.  */
4323 if (movcc_gen_code[mode] != CODE_FOR_nothing)
4329 #endif /* HAVE_conditional_move */
4331 /* Emit a conditional addition instruction if the machine supports one for that
4332 condition and machine mode.
4334 OP0 and OP1 are the operands that should be compared using CODE. CMODE is
4335 the mode to use should they be constants. If it is VOIDmode, they cannot
4338 OP2 should be stored in TARGET if the comparison is true, otherwise OP2+OP3
4339 should be stored there. MODE is the mode to use should they be constants.
4340 If it is VOIDmode, they cannot both be constants.
4342 The result is either TARGET (perhaps modified) or NULL_RTX if the operation
4343 is not supported. */
4346 emit_conditional_add (rtx target, enum rtx_code code, rtx op0, rtx op1,
4347 enum machine_mode cmode, rtx op2, rtx op3,
4348 enum machine_mode mode, int unsignedp)
4350 rtx tem, subtarget, comparison, insn;
4351 enum insn_code icode;
4352 enum rtx_code reversed;
4354 /* If one operand is constant, make it the second one. Only do this
4355 if the other operand is not constant as well. */
4357 if (swap_commutative_operands_p (op0, op1))
4362 code = swap_condition (code);
4365 /* get_condition will prefer to generate LT and GT even if the old
4366 comparison was against zero, so undo that canonicalization here since
4367 comparisons against zero are cheaper. */
4368 if (code == LT && op1 == const1_rtx)
4369 code = LE, op1 = const0_rtx;
4370 else if (code == GT && op1 == constm1_rtx)
4371 code = GE, op1 = const0_rtx;
4373 if (cmode == VOIDmode)
4374 cmode = GET_MODE (op0);
4376 if (swap_commutative_operands_p (op2, op3)
4377 && ((reversed = reversed_comparison_code_parts (code, op0, op1, NULL))
4386 if (mode == VOIDmode)
4387 mode = GET_MODE (op2);
/* Unlike emit_conditional_move, this uses the addcc optab.  */
4389 icode = addcc_optab->handlers[(int) mode].insn_code;
4391 if (icode == CODE_FOR_nothing)
4395 target = gen_reg_rtx (mode);
4397 /* If the insn doesn't accept these operands, put them in pseudos. */
4399 if (!insn_data[icode].operand[0].predicate
4400 (target, insn_data[icode].operand[0].mode))
4401 subtarget = gen_reg_rtx (insn_data[icode].operand[0].mode);
4405 if (!insn_data[icode].operand[2].predicate
4406 (op2, insn_data[icode].operand[2].mode))
4407 op2 = copy_to_mode_reg (insn_data[icode].operand[2].mode, op2);
4409 if (!insn_data[icode].operand[3].predicate
4410 (op3, insn_data[icode].operand[3].mode))
4411 op3 = copy_to_mode_reg (insn_data[icode].operand[3].mode, op3);
4413 /* Everything should now be in the suitable form, so emit the compare insn
4414 and then the conditional move. */
4417 = compare_from_rtx (op0, op1, code, unsignedp, cmode, NULL_RTX);
4419 /* ??? Watch for const0_rtx (nop) and const_true_rtx (unconditional)? */
4420 /* We can get const0_rtx or const_true_rtx in some circumstances. Just
4421 return NULL and let the caller figure out how best to deal with this
4423 if (GET_CODE (comparison) != code)
4426 insn = GEN_FCN (icode) (subtarget, comparison, op2, op3);
4428 /* If that failed, then give up. */
4434 if (subtarget != target)
4435 convert_move (target, subtarget, 0);
4440 /* These functions attempt to generate an insn body, rather than
4441 emitting the insn, but if the gen function already emits them, we
4442 make no attempt to turn them back into naked patterns. */
4444 /* Generate and return an insn body to add Y to X. */
4447 gen_add2_insn (rtx x, rtx y)
4449 int icode = (int) add_optab->handlers[(int) GET_MODE (x)].insn_code;
/* The caller is responsible for checking availability (have_add2_insn);
   here the operands are merely asserted to satisfy the predicates.  */
4451 gcc_assert (insn_data[icode].operand[0].predicate
4452 (x, insn_data[icode].operand[0].mode));
4453 gcc_assert (insn_data[icode].operand[1].predicate
4454 (x, insn_data[icode].operand[1].mode));
4455 gcc_assert (insn_data[icode].operand[2].predicate
4456 (y, insn_data[icode].operand[2].mode));
4458 return GEN_FCN (icode) (x, x, y);
4461 /* Generate and return an insn body to add r1 and c,
4462 storing the result in r0. */
4464 gen_add3_insn (rtx r0, rtx r1, rtx c)
4466 int icode = (int) add_optab->handlers[(int) GET_MODE (r0)].insn_code;
/* Unlike gen_add2_insn, this variant fails softly (returns null via the
   missing branch) when no suitable pattern or operand form exists.  */
4468 if (icode == CODE_FOR_nothing
4469 || !(insn_data[icode].operand[0].predicate
4470 (r0, insn_data[icode].operand[0].mode))
4471 || !(insn_data[icode].operand[1].predicate
4472 (r1, insn_data[icode].operand[1].mode))
4473 || !(insn_data[icode].operand[2].predicate
4474 (c, insn_data[icode].operand[2].mode)))
4477 return GEN_FCN (icode) (r0, r1, c);
/* Report whether an add2 insn exists that accepts X (as both source and
   destination) and Y in X's mode.  */
4481 have_add2_insn (rtx x, rtx y)
4485 gcc_assert (GET_MODE (x) != VOIDmode);
4487 icode = (int) add_optab->handlers[(int) GET_MODE (x)].insn_code;
4489 if (icode == CODE_FOR_nothing)
4492 if (!(insn_data[icode].operand[0].predicate
4493 (x, insn_data[icode].operand[0].mode))
4494 || !(insn_data[icode].operand[1].predicate
4495 (x, insn_data[icode].operand[1].mode))
4496 || !(insn_data[icode].operand[2].predicate
4497 (y, insn_data[icode].operand[2].mode)))
4503 /* Generate and return an insn body to subtract Y from X. */
4506 gen_sub2_insn (rtx x, rtx y)
4508 int icode = (int) sub_optab->handlers[(int) GET_MODE (x)].insn_code;
/* Mirrors gen_add2_insn: availability must be established by the
   caller (have_sub2_insn); predicates are only asserted here.  */
4510 gcc_assert (insn_data[icode].operand[0].predicate
4511 (x, insn_data[icode].operand[0].mode));
4512 gcc_assert (insn_data[icode].operand[1].predicate
4513 (x, insn_data[icode].operand[1].mode));
4514 gcc_assert (insn_data[icode].operand[2].predicate
4515 (y, insn_data[icode].operand[2].mode));
4517 return GEN_FCN (icode) (x, x, y);
4520 /* Generate and return an insn body to subtract r1 and c,
4521 storing the result in r0. */
4523 gen_sub3_insn (rtx r0, rtx r1, rtx c)
4525 int icode = (int) sub_optab->handlers[(int) GET_MODE (r0)].insn_code;
/* Fails softly when no pattern or acceptable operand form exists,
   mirroring gen_add3_insn.  */
4527 if (icode == CODE_FOR_nothing
4528 || !(insn_data[icode].operand[0].predicate
4529 (r0, insn_data[icode].operand[0].mode))
4530 || !(insn_data[icode].operand[1].predicate
4531 (r1, insn_data[icode].operand[1].mode))
4532 || !(insn_data[icode].operand[2].predicate
4533 (c, insn_data[icode].operand[2].mode)))
4536 return GEN_FCN (icode) (r0, r1, c);
/* Report whether a sub2 insn exists that accepts X (as both source and
   destination) and Y in X's mode.  */
4540 have_sub2_insn (rtx x, rtx y)
4544 gcc_assert (GET_MODE (x) != VOIDmode);
4546 icode = (int) sub_optab->handlers[(int) GET_MODE (x)].insn_code;
4548 if (icode == CODE_FOR_nothing)
4551 if (!(insn_data[icode].operand[0].predicate
4552 (x, insn_data[icode].operand[0].mode))
4553 || !(insn_data[icode].operand[1].predicate
4554 (x, insn_data[icode].operand[1].mode))
4555 || !(insn_data[icode].operand[2].predicate
4556 (y, insn_data[icode].operand[2].mode)))
4562 /* Generate the body of an instruction to copy Y into X.
4563 It may be a list of insns, if one insn isn't enough. */
4566 gen_move_insn (rtx x, rtx y)
/* Emits into a temporary sequence (start/end lines missing from this
   listing) and returns the collected insns.  */
4571 emit_move_insn_1 (x, y);
4577 /* Return the insn code used to extend FROM_MODE to TO_MODE.
4578 UNSIGNEDP specifies zero-extension instead of sign-extension. If
4579 no such operation exists, CODE_FOR_nothing will be returned. */
4582 can_extend_p (enum machine_mode to_mode, enum machine_mode from_mode,
/* Targets with a ptr_extend pattern handle pointer extension specially.  */
4586 #ifdef HAVE_ptr_extend
4588 return CODE_FOR_ptr_extend;
4591 tab = unsignedp ? zext_optab : sext_optab;
4592 return tab->handlers[to_mode][from_mode].insn_code;
4595 /* Generate the body of an insn to extend Y (with mode MFROM)
4596 into X (with mode MTO). Do zero-extension if UNSIGNEDP is nonzero. */
4599 gen_extend_insn (rtx x, rtx y, enum machine_mode mto,
4600 enum machine_mode mfrom, int unsignedp)
/* Caller must ensure the extension exists (can_extend_p); no check here.  */
4602 enum insn_code icode = can_extend_p (mto, mfrom, unsignedp);
4603 return GEN_FCN (icode) (x, y);
4606 /* can_fix_p and can_float_p say whether the target machine
4607 can directly convert a given fixed point type to
4608 a given floating point type, or vice versa.
4609 The returned value is the CODE_FOR_... value to use,
4610 or CODE_FOR_nothing if these modes cannot be directly converted.
4612 *TRUNCP_PTR is set to 1 if it is necessary to output
4613 an explicit FTRUNC insn before the fix insn; otherwise 0. */
4615 static enum insn_code
4616 can_fix_p (enum machine_mode fixmode, enum machine_mode fltmode,
4617 int unsignedp, int *truncp_ptr)
4620 enum insn_code icode;
/* Prefer a fix-and-truncate pattern, which needs no separate FTRUNC.  */
4622 tab = unsignedp ? ufixtrunc_optab : sfixtrunc_optab;
4623 icode = tab->handlers[fixmode][fltmode].insn_code;
4624 if (icode != CODE_FOR_nothing)
4630 /* FIXME: This requires a port to define both FIX and FTRUNC pattern
4631 for this to work. We need to rework the fix* and ftrunc* patterns
4632 and documentation. */
4633 tab = unsignedp ? ufix_optab : sfix_optab;
4634 icode = tab->handlers[fixmode][fltmode].insn_code;
4635 if (icode != CODE_FOR_nothing
4636 && ftrunc_optab->handlers[fltmode].insn_code != CODE_FOR_nothing)
4643 return CODE_FOR_nothing;
/* Counterpart of can_fix_p: return the insn code for converting
   FIXMODE (integer) to FLTMODE (floating), or CODE_FOR_nothing.  */
4646 static enum insn_code
4647 can_float_p (enum machine_mode fltmode, enum machine_mode fixmode,
4652 tab = unsignedp ? ufloat_optab : sfloat_optab;
4653 return tab->handlers[fltmode][fixmode].insn_code;
4656 /* Generate code to convert FROM to floating point
4657 and store in TO. FROM must be fixed point and not VOIDmode.
4658 UNSIGNEDP nonzero means regard FROM as unsigned.
4659 Normally this is done by correcting the final value
4660 if it is negative. */
4663 expand_float (rtx to, rtx from, int unsignedp)
4665 enum insn_code icode;
4667 enum machine_mode fmode, imode;
4668 bool can_do_signed = false;
4670 /* Crash now, because we won't be able to decide which mode to use. */
4671 gcc_assert (GET_MODE (from) != VOIDmode);
4673 /* Look for an insn to do the conversion. Do it in the specified
4674 modes if possible; otherwise convert either input, output or both to
4675 wider mode. If the integer mode is wider than the mode of FROM,
4676 we can do the conversion signed even if the input is unsigned. */
4678 for (fmode = GET_MODE (to); fmode != VOIDmode;
4679 fmode = GET_MODE_WIDER_MODE (fmode))
4680 for (imode = GET_MODE (from); imode != VOIDmode;
4681 imode = GET_MODE_WIDER_MODE (imode))
4683 int doing_unsigned = unsignedp;
/* Skip wider float modes whose significand cannot represent every
   value of the source integer mode; using one would lose precision.  */
4685 if (fmode != GET_MODE (to)
4686 && significand_size (fmode) < GET_MODE_BITSIZE (GET_MODE (from)))
4689 icode = can_float_p (fmode, imode, unsignedp);
4690 if (icode == CODE_FOR_nothing && unsignedp)
/* No unsigned conversion insn.  Remember whether a signed one exists;
   when IMODE is wider than FROM's mode the zero-extended value is
   nonnegative in IMODE, so the signed insn is directly usable.  */
4692 enum insn_code scode = can_float_p (fmode, imode, 0);
4693 if (scode != CODE_FOR_nothing)
4694 can_do_signed = true;
4695 if (imode != GET_MODE (from))
4696 icode = scode, doing_unsigned = 0;
4699 if (icode != CODE_FOR_nothing)
4701 if (imode != GET_MODE (from))
4702 from = convert_to_mode (imode, from, unsignedp);
4704 if (fmode != GET_MODE (to))
4705 target = gen_reg_rtx (fmode);
4707 emit_unop_insn (icode, target, from,
4708 doing_unsigned ? UNSIGNED_FLOAT : FLOAT);
4711 convert_move (to, target, 0);
4716 /* Unsigned integer, and no way to convert directly. For binary
4717 floating point modes, convert as signed, then conditionally adjust
4719 if (unsignedp && can_do_signed && !DECIMAL_FLOAT_MODE_P (GET_MODE (to)))
4721 rtx label = gen_label_rtx ();
4723 REAL_VALUE_TYPE offset;
4725 /* Look for a usable floating mode FMODE wider than the source and at
4726 least as wide as the target. Using FMODE will avoid rounding woes
4727 with unsigned values greater than the signed maximum value. */
4729 for (fmode = GET_MODE (to); fmode != VOIDmode;
4730 fmode = GET_MODE_WIDER_MODE (fmode))
4731 if (GET_MODE_BITSIZE (GET_MODE (from)) < GET_MODE_BITSIZE (fmode)
4732 && can_float_p (fmode, GET_MODE (from), 0) != CODE_FOR_nothing)
4735 if (fmode == VOIDmode)
4737 /* There is no such mode. Pretend the target is wide enough. */
4738 fmode = GET_MODE (to);
4740 /* Avoid double-rounding when TO is narrower than FROM. */
4741 if ((significand_size (fmode) + 1)
4742 < GET_MODE_BITSIZE (GET_MODE (from)))
4745 rtx neglabel = gen_label_rtx ();
4747 /* Don't use TARGET if it isn't a register, is a hard register,
4748 or is the wrong mode. */
4750 || REGNO (target) < FIRST_PSEUDO_REGISTER
4751 || GET_MODE (target) != fmode)
4752 target = gen_reg_rtx (fmode);
4754 imode = GET_MODE (from);
4755 do_pending_stack_adjust ();
4757 /* Test whether the sign bit is set. */
4758 emit_cmp_and_jump_insns (from, const0_rtx, LT, NULL_RTX, imode,
4761 /* The sign bit is not set. Convert as signed. */
4762 expand_float (target, from, 0);
4763 emit_jump_insn (gen_jump (label));
4766 /* The sign bit is set.
4767 Convert to a usable (positive signed) value by shifting right
4768 one bit, while remembering if a nonzero bit was shifted
4769 out; i.e., compute (from & 1) | (from >> 1). */
4771 emit_label (neglabel);
4772 temp = expand_binop (imode, and_optab, from, const1_rtx,
4773 NULL_RTX, 1, OPTAB_LIB_WIDEN);
4774 temp1 = expand_shift (RSHIFT_EXPR, imode, from, integer_one_node,
4776 temp = expand_binop (imode, ior_optab, temp, temp1, temp, 1,
4778 expand_float (target, temp, 0);
/* OR-ing back the shifted-out low bit keeps the rounding of the final
   doubled result correct (sticky-bit trick).  */
4780 /* Multiply by 2 to undo the shift above. */
4781 temp = expand_binop (fmode, add_optab, target, target,
4782 target, 0, OPTAB_LIB_WIDEN);
4784 emit_move_insn (target, temp);
4786 do_pending_stack_adjust ();
4792 /* If we are about to do some arithmetic to correct for an
4793 unsigned operand, do it in a pseudo-register. */
4795 if (GET_MODE (to) != fmode
4796 || !REG_P (to) || REGNO (to) < FIRST_PSEUDO_REGISTER)
4797 target = gen_reg_rtx (fmode);
4799 /* Convert as signed integer to floating. */
4800 expand_float (target, from, 0);
4802 /* If FROM is negative (and therefore TO is negative),
4803 correct its value by 2**bitwidth. */
4805 do_pending_stack_adjust ();
4806 emit_cmp_and_jump_insns (from, const0_rtx, GE, NULL_RTX, GET_MODE (from),
4810 real_2expN (&offset, GET_MODE_BITSIZE (GET_MODE (from)));
4811 temp = expand_binop (fmode, add_optab, target,
4812 CONST_DOUBLE_FROM_REAL_VALUE (offset, fmode),
4813 target, 0, OPTAB_LIB_WIDEN);
4815 emit_move_insn (target, temp);
4817 do_pending_stack_adjust ();
4822 /* No hardware instruction available; call a library routine. */
4827 convert_optab tab = unsignedp ? ufloat_optab : sfloat_optab;
/* Library conversions are only provided for SImode and wider, so
   widen a narrower source first.  */
4829 if (GET_MODE_SIZE (GET_MODE (from)) < GET_MODE_SIZE (SImode))
4830 from = convert_to_mode (SImode, from, unsignedp);
4832 libfunc = tab->handlers[GET_MODE (to)][GET_MODE (from)].libfunc;
4833 gcc_assert (libfunc);
4837 value = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
4838 GET_MODE (to), 1, from,
4840 insns = get_insns ();
4843 emit_libcall_block (insns, target, value,
4844 gen_rtx_FLOAT (GET_MODE (to), from));
4849 /* Copy result to requested destination
4850 if we have been computing in a temp location. */
4854 if (GET_MODE (target) == GET_MODE (to))
4855 emit_move_insn (to, target);
4857 convert_move (to, target, 0);
4861 /* Generate code to convert FROM to fixed point and store in TO. FROM
4862 must be floating point. */
4865 expand_fix (rtx to, rtx from, int unsignedp)
4867 enum insn_code icode;
4869 enum machine_mode fmode, imode;
4872 /* We first try to find a pair of modes, one real and one integer, at
4873 least as wide as FROM and TO, respectively, in which we can open-code
4874 this conversion. If the integer mode is wider than the mode of TO,
4875 we can do the conversion either signed or unsigned. */
4877 for (fmode = GET_MODE (from); fmode != VOIDmode;
4878 fmode = GET_MODE_WIDER_MODE (fmode))
4879 for (imode = GET_MODE (to); imode != VOIDmode;
4880 imode = GET_MODE_WIDER_MODE (imode))
4882 int doing_unsigned = unsignedp;
4884 icode = can_fix_p (imode, fmode, unsignedp, &must_trunc);
/* With no unsigned fix insn, a signed fix into a wider IMODE is
   equally correct, since every TO-mode unsigned value fits in it.  */
4885 if (icode == CODE_FOR_nothing && imode != GET_MODE (to) && unsignedp)
4886 icode = can_fix_p (imode, fmode, 0, &must_trunc), doing_unsigned = 0;
4888 if (icode != CODE_FOR_nothing)
4890 if (fmode != GET_MODE (from))
4891 from = convert_to_mode (fmode, from, 0);
4895 rtx temp = gen_reg_rtx (GET_MODE (from));
4896 from = expand_unop (GET_MODE (from), ftrunc_optab, from,
4900 if (imode != GET_MODE (to))
4901 target = gen_reg_rtx (imode);
4903 emit_unop_insn (icode, target, from,
4904 doing_unsigned ? UNSIGNED_FIX : FIX);
4906 convert_move (to, target, unsignedp);
4911 /* For an unsigned conversion, there is one more way to do it.
4912 If we have a signed conversion, we generate code that compares
4913 the real value to the largest representable positive number. If it
4914 is smaller, the conversion is done normally. Otherwise, subtract
4915 one plus the highest signed number, convert, and add it back.
4917 We only need to check all real modes, since we know we didn't find
4918 anything with a wider integer mode.
4920 This code used to extend FP value into mode wider than the destination.
4921 This is not needed. Consider, for instance conversion from SFmode
4924 The hot path through the code is dealing with inputs smaller than 2^63
4925 and doing just the conversion, so there is no bits to lose.
4927 In the other path we know the value is positive in the range 2^63..2^64-1
4928 inclusive. (as for other input overflow happens and result is undefined)
4929 So we know that the most important bit set in mantissa corresponds to
4930 2^63. The subtraction of 2^63 should not generate any rounding as it
4931 simply clears out that bit. The rest is trivial. */
4933 if (unsignedp && GET_MODE_BITSIZE (GET_MODE (to)) <= HOST_BITS_PER_WIDE_INT)
4934 for (fmode = GET_MODE (from); fmode != VOIDmode;
4935 fmode = GET_MODE_WIDER_MODE (fmode))
4936 if (CODE_FOR_nothing != can_fix_p (GET_MODE (to), fmode, 0,
4940 REAL_VALUE_TYPE offset;
4941 rtx limit, lab1, lab2, insn;
/* LIMIT is 2**(N-1): the smallest input whose signed fix would
   overflow.  Inputs below it take the fast signed path.  */
4943 bitsize = GET_MODE_BITSIZE (GET_MODE (to));
4944 real_2expN (&offset, bitsize - 1);
4945 limit = CONST_DOUBLE_FROM_REAL_VALUE (offset, fmode);
4946 lab1 = gen_label_rtx ();
4947 lab2 = gen_label_rtx ();
4949 if (fmode != GET_MODE (from))
4950 from = convert_to_mode (fmode, from, 0);
4952 /* See if we need to do the subtraction. */
4953 do_pending_stack_adjust ();
4954 emit_cmp_and_jump_insns (from, limit, GE, NULL_RTX, GET_MODE (from),
4957 /* If not, do the signed "fix" and branch around fixup code. */
4958 expand_fix (to, from, 0);
4959 emit_jump_insn (gen_jump (lab2));
4962 /* Otherwise, subtract 2**(N-1), convert to signed number,
4963 then add 2**(N-1). Do the addition using XOR since this
4964 will often generate better code. */
4966 target = expand_binop (GET_MODE (from), sub_optab, from, limit,
4967 NULL_RTX, 0, OPTAB_LIB_WIDEN);
4968 expand_fix (to, target, 0);
4969 target = expand_binop (GET_MODE (to), xor_optab, to,
4971 ((HOST_WIDE_INT) 1 << (bitsize - 1),
4973 to, 1, OPTAB_LIB_WIDEN);
4976 emit_move_insn (to, target);
4980 if (mov_optab->handlers[(int) GET_MODE (to)].insn_code
4981 != CODE_FOR_nothing)
4983 /* Make a place for a REG_NOTE and add it. */
4984 insn = emit_move_insn (to, to);
4985 set_unique_reg_note (insn,
4987 gen_rtx_fmt_e (UNSIGNED_FIX,
4995 /* We can't do it with an insn, so use a library call. But first ensure
4996 that the mode of TO is at least as wide as SImode, since those are the
4997 only library calls we know about. */
4999 if (GET_MODE_SIZE (GET_MODE (to)) < GET_MODE_SIZE (SImode))
5001 target = gen_reg_rtx (SImode);
/* Fix into SImode first; the common tail below narrows into TO.  */
5003 expand_fix (target, from, unsignedp);
5011 convert_optab tab = unsignedp ? ufix_optab : sfix_optab;
5012 libfunc = tab->handlers[GET_MODE (to)][GET_MODE (from)].libfunc;
5013 gcc_assert (libfunc);
5017 value = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
5018 GET_MODE (to), 1, from,
5020 insns = get_insns ();
5023 emit_libcall_block (insns, target, value,
5024 gen_rtx_fmt_e (unsignedp ? UNSIGNED_FIX : FIX,
5025 GET_MODE (to), from));
5030 if (GET_MODE (to) == GET_MODE (target))
5031 emit_move_insn (to, target);
5033 convert_move (to, target, 0);
5037 /* Generate code to convert FROM to fixed point and store in TO. FROM
5038 must be floating point, TO must be signed. Use the conversion optab
5039 TAB to do the conversion. */
5042 expand_sfix_optab (rtx to, rtx from, convert_optab tab)
5044 enum insn_code icode;
5046 enum machine_mode fmode, imode;
5048 /* We first try to find a pair of modes, one real and one integer, at
5049 least as wide as FROM and TO, respectively, in which we can open-code
5050 this conversion. If the integer mode is wider than the mode of TO,
5051 we can do the conversion either signed or unsigned. */
5053 for (fmode = GET_MODE (from); fmode != VOIDmode;
5054 fmode = GET_MODE_WIDER_MODE (fmode))
5055 for (imode = GET_MODE (to); imode != VOIDmode;
5056 imode = GET_MODE_WIDER_MODE (imode))
5058 icode = tab->handlers[imode][fmode].insn_code;
5059 if (icode != CODE_FOR_nothing)
5061 if (fmode != GET_MODE (from))
5062 from = convert_to_mode (fmode, from, 0);
5064 if (imode != GET_MODE (to))
5065 target = gen_reg_rtx (imode);
/* UNKNOWN: these conversions (e.g. lrint/lround style) have no single
   rtx code, so no equivalent-expression note is attached.  */
5067 emit_unop_insn (icode, target, from, UNKNOWN);
5069 convert_move (to, target, 0);
5077 /* Report whether we have an instruction to perform the operation
5078 specified by CODE on operands of mode MODE. */
5080 have_insn_for (enum rtx_code code, enum machine_mode mode)
/* True iff CODE maps to an optab at all and that optab has a real
   insn (not CODE_FOR_nothing) for MODE.  */
5082 return (code_to_optab[(int) code] != 0
5083 && (code_to_optab[(int) code]->handlers[(int) mode].insn_code
5084 != CODE_FOR_nothing));
5087 /* Create a blank optab. */
/* Allocate a new optab in GC-managed memory with every per-mode
   handler cleared: no insn (CODE_FOR_nothing) and no libfunc.  */
5092 optab op = ggc_alloc (sizeof (struct optab));
5093 for (i = 0; i < NUM_MACHINE_MODES; i++)
5095 op->handlers[i].insn_code = CODE_FOR_nothing;
5096 op->handlers[i].libfunc = 0;
/* Create a blank conversion optab: like new_optab, but the handler
   table is two-dimensional, indexed by (to-mode, from-mode).  */
5102 static convert_optab
5103 new_convert_optab (void)
5106 convert_optab op = ggc_alloc (sizeof (struct convert_optab));
5107 for (i = 0; i < NUM_MACHINE_MODES; i++)
5108 for (j = 0; j < NUM_MACHINE_MODES; j++)
5110 op->handlers[i][j].insn_code = CODE_FOR_nothing;
5111 op->handlers[i][j].libfunc = 0;
5116 /* Same, but fill in its code as CODE, and write it into the
5117 code_to_optab table. */
5119 init_optab (enum rtx_code code)
5121 optab op = new_optab ();
/* Register the optab so have_insn_for can locate it by rtx code.  */
5123 code_to_optab[(int) code] = op;
5127 /* Same, but fill in its code as CODE, and do _not_ write it into
5128 the code_to_optab table. */
/* Used by the *v variants (addv_optab etc., see init_optabs), which
   share rtx codes with the plain optabs and therefore must not
   overwrite the code_to_optab entry.  */
5130 init_optabv (enum rtx_code code)
5132 optab op = new_optab ();
5137 /* Conversion optabs never go in the code_to_optab table. */
/* NOTE(review): CODE is presumably stored in the optab itself on an
   elided line — confirm against the full source.  */
5138 static inline convert_optab
5139 init_convert_optab (enum rtx_code code)
5141 convert_optab op = new_convert_optab ();
5146 /* Initialize the libfunc fields of an entire group of entries in some
5147 optab. Each entry is set equal to a string consisting of a leading
5148 pair of underscores followed by a generic operation name followed by
5149 a mode name (downshifted to lowercase) followed by a single character
5150 representing the number of operands for the given operation (which is
5151 usually one of the characters '2', '3', or '4').
5153 OPTABLE is the table in which libfunc fields are to be initialized.
5154 FIRST_MODE is the first machine mode index in the given optab to
5156 LAST_MODE is the last machine mode index in the given optab to
5158 OPNAME is the generic (string) name of the operation.
5159 SUFFIX is the character which specifies the number of operands for
5160 the given generic operation.
5164 init_libfuncs (optab optable, int first_mode, int last_mode,
5165 const char *opname, int suffix)
5168 unsigned opname_len = strlen (opname);
5170 for (mode = first_mode; (int) mode <= (int) last_mode;
5171 mode = (enum machine_mode) ((int) mode + 1))
5173 const char *mname = GET_MODE_NAME (mode);
5174 unsigned mname_len = strlen (mname);
/* Buffer sized for "__" + OPNAME + lowercased mode name + SUFFIX
   + terminating NUL.  */
5175 char *libfunc_name = alloca (2 + opname_len + mname_len + 1 + 1);
5182 for (q = opname; *q; )
5184 for (q = mname; *q; q++)
5185 *p++ = TOLOWER (*q);
/* Intern the assembled name in GC memory and create its SYMBOL_REF.  */
5189 optable->handlers[(int) mode].libfunc
5190 = init_one_libfunc (ggc_alloc_string (libfunc_name, p - libfunc_name));
5194 /* Initialize the libfunc fields of an entire group of entries in some
5195 optab which correspond to all integer mode operations. The parameters
5196 have the same meaning as similarly named ones for the `init_libfuncs'
5197 routine. (See above). */
5200 init_integral_libfuncs (optab optable, const char *opname, int suffix)
/* Cover modes from word_mode up to the wider of double-word and
   long long, since libgcc provides routines for those widths.  */
5202 int maxsize = 2*BITS_PER_WORD;
5203 if (maxsize < LONG_LONG_TYPE_SIZE)
5204 maxsize = LONG_LONG_TYPE_SIZE;
5205 init_libfuncs (optable, word_mode,
5206 mode_for_size (maxsize, MODE_INT, 0),
5210 /* Initialize the libfunc fields of an entire group of entries in some
5211 optab which correspond to all real mode operations. The parameters
5212 have the same meaning as similarly named ones for the `init_libfuncs'
5213 routine. (See above). */
5216 init_floating_libfuncs (optab optable, const char *opname, int suffix)
5218 char *dec_opname = alloca (sizeof (DECIMAL_PREFIX) + strlen (opname));
5220 /* For BID support, change the name to have either a bid_ or dpd_ prefix
5221 depending on the low level floating format used. */
5222 memcpy (dec_opname, DECIMAL_PREFIX, sizeof (DECIMAL_PREFIX) - 1);
5223 strcpy (dec_opname + sizeof (DECIMAL_PREFIX) - 1, opname);
/* Binary float modes use the plain name; decimal float modes use the
   DECIMAL_PREFIX-decorated one built above.  */
5225 init_libfuncs (optable, MIN_MODE_FLOAT, MAX_MODE_FLOAT, opname, suffix);
5226 init_libfuncs (optable, MIN_MODE_DECIMAL_FLOAT, MAX_MODE_DECIMAL_FLOAT,
5227 dec_opname, suffix);
5230 /* Initialize the libfunc fields of an entire group of entries of an
5231 inter-mode-class conversion optab. The string formation rules are
5232 similar to the ones for init_libfuncs, above, but instead of having
5233 a mode name and an operand count these functions have two mode names
5234 and no operand count. */
5236 init_interclass_conv_libfuncs (convert_optab tab, const char *opname,
5237 enum mode_class from_class,
5238 enum mode_class to_class)
5240 enum machine_mode first_from_mode = GET_CLASS_NARROWEST_MODE (from_class);
5241 enum machine_mode first_to_mode = GET_CLASS_NARROWEST_MODE (to_class);
5242 size_t opname_len = strlen (opname);
5243 size_t max_mname_len = 0;
5245 enum machine_mode fmode, tmode;
5246 const char *fname, *tname;
5248 char *libfunc_name, *suffix;
5249 char *nondec_name, *dec_name, *nondec_suffix, *dec_suffix;
5252 /* If this is a decimal conversion, add the current BID vs. DPD prefix that
5253 depends on which underlying decimal floating point format is used. */
5254 const size_t dec_len = sizeof (DECIMAL_PREFIX) - 1;
/* First pass: find the longest mode name in either class, so the
   alloca'd name buffers below are large enough for any pair.  */
5256 for (fmode = first_from_mode;
5258 fmode = GET_MODE_WIDER_MODE (fmode))
5259 max_mname_len = MAX (max_mname_len, strlen (GET_MODE_NAME (fmode)));
5261 for (tmode = first_to_mode;
5263 tmode = GET_MODE_WIDER_MODE (tmode))
5264 max_mname_len = MAX (max_mname_len, strlen (GET_MODE_NAME (tmode)));
/* Two name templates: "__<op>..." for binary-float pairs and
   "__<DECIMAL_PREFIX><op>..." for pairs involving a decimal mode.
   The *_suffix pointers mark where the two mode names get appended.  */
5266 nondec_name = alloca (2 + opname_len + 2*max_mname_len + 1 + 1);
5267 nondec_name[0] = '_';
5268 nondec_name[1] = '_';
5269 memcpy (&nondec_name[2], opname, opname_len);
5270 nondec_suffix = nondec_name + opname_len + 2;
5272 dec_name = alloca (2 + dec_len + opname_len + 2*max_mname_len + 1 + 1);
5275 memcpy (&dec_name[2], DECIMAL_PREFIX, dec_len);
5276 memcpy (&dec_name[2+dec_len], opname, opname_len);
5277 dec_suffix = dec_name + dec_len + opname_len + 2;
5279 for (fmode = first_from_mode; fmode != VOIDmode;
5280 fmode = GET_MODE_WIDER_MODE (fmode))
5281 for (tmode = first_to_mode; tmode != VOIDmode;
5282 tmode = GET_MODE_WIDER_MODE (tmode))
5284 fname = GET_MODE_NAME (fmode);
5285 tname = GET_MODE_NAME (tmode);
5287 if (DECIMAL_FLOAT_MODE_P(fmode) || DECIMAL_FLOAT_MODE_P(tmode))
5289 libfunc_name = dec_name;
5290 suffix = dec_suffix;
5294 libfunc_name = nondec_name;
5295 suffix = nondec_suffix;
/* Append from-mode name then to-mode name (lowercased on elided
   lines) to the chosen template.  */
5299 for (q = fname; *q; p++, q++)
5301 for (q = tname; *q; p++, q++)
5306 tab->handlers[tmode][fmode].libfunc
5307 = init_one_libfunc (ggc_alloc_string (libfunc_name,
5312 /* Initialize the libfunc fields of an entire group of entries of an
5313 intra-mode-class conversion optab. The string formation rules are
5314 similar to the ones for init_libfunc, above. WIDENING says whether
5315 the optab goes from narrow to wide modes or vice versa. These functions
5316 have two mode names _and_ an operand count. */
5318 init_intraclass_conv_libfuncs (convert_optab tab, const char *opname,
5319 enum mode_class class, bool widening)
5321 enum machine_mode first_mode = GET_CLASS_NARROWEST_MODE (class);
5322 size_t opname_len = strlen (opname);
5323 size_t max_mname_len = 0;
5325 enum machine_mode nmode, wmode;
5326 const char *nname, *wname;
5328 char *nondec_name, *dec_name, *nondec_suffix, *dec_suffix;
5329 char *libfunc_name, *suffix;
5332 /* If this is a decimal conversion, add the current BID vs. DPD prefix that
5333 depends on which underlying decimal floating point format is used. */
5334 const size_t dec_len = sizeof (DECIMAL_PREFIX) - 1;
/* Size the name buffers for the longest mode name in the class.  */
5336 for (nmode = first_mode; nmode != VOIDmode;
5337 nmode = GET_MODE_WIDER_MODE (nmode))
5338 max_mname_len = MAX (max_mname_len, strlen (GET_MODE_NAME (nmode)));
5340 nondec_name = alloca (2 + opname_len + 2*max_mname_len + 1 + 1);
5341 nondec_name[0] = '_';
5342 nondec_name[1] = '_';
5343 memcpy (&nondec_name[2], opname, opname_len);
5344 nondec_suffix = nondec_name + opname_len + 2;
5346 dec_name = alloca (2 + dec_len + opname_len + 2*max_mname_len + 1 + 1);
5349 memcpy (&dec_name[2], DECIMAL_PREFIX, dec_len);
5350 memcpy (&dec_name[2 + dec_len], opname, opname_len);
5351 dec_suffix = dec_name + dec_len + opname_len + 2;
/* Enumerate every (narrow, strictly-wider) mode pair in the class.  */
5353 for (nmode = first_mode; nmode != VOIDmode;
5354 nmode = GET_MODE_WIDER_MODE (nmode))
5355 for (wmode = GET_MODE_WIDER_MODE (nmode); wmode != VOIDmode;
5356 wmode = GET_MODE_WIDER_MODE (wmode))
5358 nname = GET_MODE_NAME (nmode);
5359 wname = GET_MODE_NAME (wmode);
5361 if (DECIMAL_FLOAT_MODE_P(nmode) || DECIMAL_FLOAT_MODE_P(wmode))
5363 libfunc_name = dec_name;
5364 suffix = dec_suffix;
5368 libfunc_name = nondec_name;
5369 suffix = nondec_suffix;
/* The source-mode name comes first in the function name, so the
   order of the two appended names flips with WIDENING.  */
5373 for (q = widening ? nname : wname; *q; p++, q++)
5375 for (q = widening ? wname : nname; *q; p++, q++)
5381 tab->handlers[widening ? wmode : nmode]
5382 [widening ? nmode : wmode].libfunc
5383 = init_one_libfunc (ggc_alloc_string (libfunc_name,
/* Return a SYMBOL_REF rtx for the library function called NAME,
   carrying the flags the target assigns via encode_section_info.  */
5390 init_one_libfunc (const char *name)
5394 /* Create a FUNCTION_DECL that can be passed to
5395 targetm.encode_section_info. */
5396 /* ??? We don't have any type information except for this is
5397 a function. Pretend this is "int foo()". */
5398 tree decl = build_decl (FUNCTION_DECL, get_identifier (name),
5399 build_function_type (integer_type_node, NULL_TREE));
5400 DECL_ARTIFICIAL (decl) = 1;
5401 DECL_EXTERNAL (decl) = 1;
5402 TREE_PUBLIC (decl) = 1;
5404 symbol = XEXP (DECL_RTL (decl), 0);
5406 /* Zap the nonsensical SYMBOL_REF_DECL for this. What we're left with
5407 are the flags assigned by targetm.encode_section_info. */
5408 SET_SYMBOL_REF_DECL (symbol, 0);
5413 /* Call this to reset the function entry for one optab (OPTABLE) in mode
5414 MODE to NAME, which should be either 0 or a string constant. */
5416 set_optab_libfunc (optab optable, enum machine_mode mode, const char *name)
/* NOTE(review): the `if (name)` test is on an elided line; the two
   assignments below are its then/else arms — a null NAME clears
   the entry.  */
5419 optable->handlers[mode].libfunc = init_one_libfunc (name);
5421 optable->handlers[mode].libfunc = 0;
5424 /* Call this to reset the function entry for one conversion optab
5425 (OPTABLE) from mode FMODE to mode TMODE to NAME, which should be
5426 either 0 or a string constant. */
5428 set_conv_libfunc (convert_optab optable, enum machine_mode tmode,
5429 enum machine_mode fmode, const char *name)
/* NOTE(review): as in set_optab_libfunc, the guarding `if (name)` is
   elided; a null NAME clears the (tmode, fmode) entry.  */
5432 optable->handlers[tmode][fmode].libfunc = init_one_libfunc (name);
5434 optable->handlers[tmode][fmode].libfunc = 0;
5437 /* Call this once to initialize the contents of the optabs
5438 appropriately for the current target machine. */
5444 enum machine_mode int_mode;
5446 /* Start by initializing all tables to contain CODE_FOR_nothing. */
5448 for (i = 0; i < NUM_RTX_CODE; i++)
5449 setcc_gen_code[i] = CODE_FOR_nothing;
5451 #ifdef HAVE_conditional_move
5452 for (i = 0; i < NUM_MACHINE_MODES; i++)
5453 movcc_gen_code[i] = CODE_FOR_nothing;
5456 for (i = 0; i < NUM_MACHINE_MODES; i++)
5458 vcond_gen_code[i] = CODE_FOR_nothing;
5459 vcondu_gen_code[i] = CODE_FOR_nothing;
5462 add_optab = init_optab (PLUS);
5463 addv_optab = init_optabv (PLUS);
5464 sub_optab = init_optab (MINUS);
5465 subv_optab = init_optabv (MINUS);
5466 smul_optab = init_optab (MULT);
5467 smulv_optab = init_optabv (MULT);
5468 smul_highpart_optab = init_optab (UNKNOWN);
5469 umul_highpart_optab = init_optab (UNKNOWN);
5470 smul_widen_optab = init_optab (UNKNOWN);
5471 umul_widen_optab = init_optab (UNKNOWN);
5472 usmul_widen_optab = init_optab (UNKNOWN);
5473 smadd_widen_optab = init_optab (UNKNOWN);
5474 umadd_widen_optab = init_optab (UNKNOWN);
5475 smsub_widen_optab = init_optab (UNKNOWN);
5476 umsub_widen_optab = init_optab (UNKNOWN);
5477 sdiv_optab = init_optab (DIV);
5478 sdivv_optab = init_optabv (DIV);
5479 sdivmod_optab = init_optab (UNKNOWN);
5480 udiv_optab = init_optab (UDIV);
5481 udivmod_optab = init_optab (UNKNOWN);
5482 smod_optab = init_optab (MOD);
5483 umod_optab = init_optab (UMOD);
5484 fmod_optab = init_optab (UNKNOWN);
5485 remainder_optab = init_optab (UNKNOWN);
5486 ftrunc_optab = init_optab (UNKNOWN);
5487 and_optab = init_optab (AND);
5488 ior_optab = init_optab (IOR);
5489 xor_optab = init_optab (XOR);
5490 ashl_optab = init_optab (ASHIFT);
5491 ashr_optab = init_optab (ASHIFTRT);
5492 lshr_optab = init_optab (LSHIFTRT);
5493 rotl_optab = init_optab (ROTATE);
5494 rotr_optab = init_optab (ROTATERT);
5495 smin_optab = init_optab (SMIN);
5496 smax_optab = init_optab (SMAX);
5497 umin_optab = init_optab (UMIN);
5498 umax_optab = init_optab (UMAX);
5499 pow_optab = init_optab (UNKNOWN);
5500 atan2_optab = init_optab (UNKNOWN);
5502 /* These three have codes assigned exclusively for the sake of
5504 mov_optab = init_optab (SET);
5505 movstrict_optab = init_optab (STRICT_LOW_PART);
5506 cmp_optab = init_optab (COMPARE);
5508 storent_optab = init_optab (UNKNOWN);
5510 ucmp_optab = init_optab (UNKNOWN);
5511 tst_optab = init_optab (UNKNOWN);
5513 eq_optab = init_optab (EQ);
5514 ne_optab = init_optab (NE);
5515 gt_optab = init_optab (GT);
5516 ge_optab = init_optab (GE);
5517 lt_optab = init_optab (LT);
5518 le_optab = init_optab (LE);
5519 unord_optab = init_optab (UNORDERED);
5521 neg_optab = init_optab (NEG);
5522 negv_optab = init_optabv (NEG);
5523 abs_optab = init_optab (ABS);
5524 absv_optab = init_optabv (ABS);
5525 addcc_optab = init_optab (UNKNOWN);
5526 one_cmpl_optab = init_optab (NOT);
5527 bswap_optab = init_optab (BSWAP);
5528 ffs_optab = init_optab (FFS);
5529 clz_optab = init_optab (CLZ);
5530 ctz_optab = init_optab (CTZ);
5531 popcount_optab = init_optab (POPCOUNT);
5532 parity_optab = init_optab (PARITY);
5533 sqrt_optab = init_optab (SQRT);
5534 floor_optab = init_optab (UNKNOWN);
5535 ceil_optab = init_optab (UNKNOWN);
5536 round_optab = init_optab (UNKNOWN);
5537 btrunc_optab = init_optab (UNKNOWN);
5538 nearbyint_optab = init_optab (UNKNOWN);
5539 rint_optab = init_optab (UNKNOWN);
5540 sincos_optab = init_optab (UNKNOWN);
5541 sin_optab = init_optab (UNKNOWN);
5542 asin_optab = init_optab (UNKNOWN);
5543 cos_optab = init_optab (UNKNOWN);
5544 acos_optab = init_optab (UNKNOWN);
5545 exp_optab = init_optab (UNKNOWN);
5546 exp10_optab = init_optab (UNKNOWN);
5547 exp2_optab = init_optab (UNKNOWN);
5548 expm1_optab = init_optab (UNKNOWN);
5549 ldexp_optab = init_optab (UNKNOWN);
5550 scalb_optab = init_optab (UNKNOWN);
5551 logb_optab = init_optab (UNKNOWN);
5552 ilogb_optab = init_optab (UNKNOWN);
5553 log_optab = init_optab (UNKNOWN);
5554 log10_optab = init_optab (UNKNOWN);
5555 log2_optab = init_optab (UNKNOWN);
5556 log1p_optab = init_optab (UNKNOWN);
5557 tan_optab = init_optab (UNKNOWN);
5558 atan_optab = init_optab (UNKNOWN);
5559 copysign_optab = init_optab (UNKNOWN);
5561 isinf_optab = init_optab (UNKNOWN);
5563 strlen_optab = init_optab (UNKNOWN);
5564 cbranch_optab = init_optab (UNKNOWN);
5565 cmov_optab = init_optab (UNKNOWN);
5566 cstore_optab = init_optab (UNKNOWN);
5567 push_optab = init_optab (UNKNOWN);
5569 reduc_smax_optab = init_optab (UNKNOWN);
5570 reduc_umax_optab = init_optab (UNKNOWN);
5571 reduc_smin_optab = init_optab (UNKNOWN);
5572 reduc_umin_optab = init_optab (UNKNOWN);
5573 reduc_splus_optab = init_optab (UNKNOWN);
5574 reduc_uplus_optab = init_optab (UNKNOWN);
5576 ssum_widen_optab = init_optab (UNKNOWN);
5577 usum_widen_optab = init_optab (UNKNOWN);
5578 sdot_prod_optab = init_optab (UNKNOWN);
5579 udot_prod_optab = init_optab (UNKNOWN);
5581 vec_extract_optab = init_optab (UNKNOWN);
5582 vec_extract_even_optab = init_optab (UNKNOWN);
5583 vec_extract_odd_optab = init_optab (UNKNOWN);
5584 vec_interleave_high_optab = init_optab (UNKNOWN);
5585 vec_interleave_low_optab = init_optab (UNKNOWN);
5586 vec_set_optab = init_optab (UNKNOWN);
5587 vec_init_optab = init_optab (UNKNOWN);
5588 vec_shl_optab = init_optab (UNKNOWN);
5589 vec_shr_optab = init_optab (UNKNOWN);
5590 vec_realign_load_optab = init_optab (UNKNOWN);
5591 movmisalign_optab = init_optab (UNKNOWN);
5592 vec_widen_umult_hi_optab = init_optab (UNKNOWN);
5593 vec_widen_umult_lo_optab = init_optab (UNKNOWN);
5594 vec_widen_smult_hi_optab = init_optab (UNKNOWN);
5595 vec_widen_smult_lo_optab = init_optab (UNKNOWN);
5596 vec_unpacks_hi_optab = init_optab (UNKNOWN);
5597 vec_unpacks_lo_optab = init_optab (UNKNOWN);
5598 vec_unpacku_hi_optab = init_optab (UNKNOWN);
5599 vec_unpacku_lo_optab = init_optab (UNKNOWN);
5600 vec_unpacks_float_hi_optab = init_optab (UNKNOWN);
5601 vec_unpacks_float_lo_optab = init_optab (UNKNOWN);
5602 vec_unpacku_float_hi_optab = init_optab (UNKNOWN);
5603 vec_unpacku_float_lo_optab = init_optab (UNKNOWN);
5604 vec_pack_trunc_optab = init_optab (UNKNOWN);
5605 vec_pack_usat_optab = init_optab (UNKNOWN);
5606 vec_pack_ssat_optab = init_optab (UNKNOWN);
5607 vec_pack_ufix_trunc_optab = init_optab (UNKNOWN);
5608 vec_pack_sfix_trunc_optab = init_optab (UNKNOWN);
5610 powi_optab = init_optab (UNKNOWN);
5613 sext_optab = init_convert_optab (SIGN_EXTEND);
5614 zext_optab = init_convert_optab (ZERO_EXTEND);
5615 trunc_optab = init_convert_optab (TRUNCATE);
5616 sfix_optab = init_convert_optab (FIX);
5617 ufix_optab = init_convert_optab (UNSIGNED_FIX);
5618 sfixtrunc_optab = init_convert_optab (UNKNOWN);
5619 ufixtrunc_optab = init_convert_optab (UNKNOWN);
5620 sfloat_optab = init_convert_optab (FLOAT);
5621 ufloat_optab = init_convert_optab (UNSIGNED_FLOAT);
5622 lrint_optab = init_convert_optab (UNKNOWN);
5623 lround_optab = init_convert_optab (UNKNOWN);
5624 lfloor_optab = init_convert_optab (UNKNOWN);
5625 lceil_optab = init_convert_optab (UNKNOWN);
5627 for (i = 0; i < NUM_MACHINE_MODES; i++)
5629 movmem_optab[i] = CODE_FOR_nothing;
5630 cmpstr_optab[i] = CODE_FOR_nothing;
5631 cmpstrn_optab[i] = CODE_FOR_nothing;
5632 cmpmem_optab[i] = CODE_FOR_nothing;
5633 setmem_optab[i] = CODE_FOR_nothing;
5635 sync_add_optab[i] = CODE_FOR_nothing;
5636 sync_sub_optab[i] = CODE_FOR_nothing;
5637 sync_ior_optab[i] = CODE_FOR_nothing;
5638 sync_and_optab[i] = CODE_FOR_nothing;
5639 sync_xor_optab[i] = CODE_FOR_nothing;
5640 sync_nand_optab[i] = CODE_FOR_nothing;
5641 sync_old_add_optab[i] = CODE_FOR_nothing;
5642 sync_old_sub_optab[i] = CODE_FOR_nothing;
5643 sync_old_ior_optab[i] = CODE_FOR_nothing;
5644 sync_old_and_optab[i] = CODE_FOR_nothing;
5645 sync_old_xor_optab[i] = CODE_FOR_nothing;
5646 sync_old_nand_optab[i] = CODE_FOR_nothing;
5647 sync_new_add_optab[i] = CODE_FOR_nothing;
5648 sync_new_sub_optab[i] = CODE_FOR_nothing;
5649 sync_new_ior_optab[i] = CODE_FOR_nothing;
5650 sync_new_and_optab[i] = CODE_FOR_nothing;
5651 sync_new_xor_optab[i] = CODE_FOR_nothing;
5652 sync_new_nand_optab[i] = CODE_FOR_nothing;
5653 sync_compare_and_swap[i] = CODE_FOR_nothing;
5654 sync_compare_and_swap_cc[i] = CODE_FOR_nothing;
5655 sync_lock_test_and_set[i] = CODE_FOR_nothing;
5656 sync_lock_release[i] = CODE_FOR_nothing;
5658 reload_in_optab[i] = reload_out_optab[i] = CODE_FOR_nothing;
5661 /* Fill in the optabs with the insns we support. */
5664 /* The ffs function operates on `int'. Fall back on it if we do not
5665 have a libgcc2 function for that width. */
5666 int_mode = mode_for_size (INT_TYPE_SIZE, MODE_INT, 0);
5667 ffs_optab->handlers[(int) int_mode].libfunc = init_one_libfunc ("ffs");
5669 /* Initialize the optabs with the names of the library functions. */
5670 init_integral_libfuncs (add_optab, "add", '3');
5671 init_floating_libfuncs (add_optab, "add", '3');
5672 init_integral_libfuncs (addv_optab, "addv", '3');
5673 init_floating_libfuncs (addv_optab, "add", '3');
5674 init_integral_libfuncs (sub_optab, "sub", '3');
5675 init_floating_libfuncs (sub_optab, "sub", '3');
5676 init_integral_libfuncs (subv_optab, "subv", '3');
5677 init_floating_libfuncs (subv_optab, "sub", '3');
5678 init_integral_libfuncs (smul_optab, "mul", '3');
5679 init_floating_libfuncs (smul_optab, "mul", '3');
5680 init_integral_libfuncs (smulv_optab, "mulv", '3');
5681 init_floating_libfuncs (smulv_optab, "mul", '3');
5682 init_integral_libfuncs (sdiv_optab, "div", '3');
5683 init_floating_libfuncs (sdiv_optab, "div", '3');
5684 init_integral_libfuncs (sdivv_optab, "divv", '3');
5685 init_integral_libfuncs (udiv_optab, "udiv", '3');
5686 init_integral_libfuncs (sdivmod_optab, "divmod", '4');
5687 init_integral_libfuncs (udivmod_optab, "udivmod", '4');
5688 init_integral_libfuncs (smod_optab, "mod", '3');
5689 init_integral_libfuncs (umod_optab, "umod", '3');
5690 init_floating_libfuncs (ftrunc_optab, "ftrunc", '2');
5691 init_integral_libfuncs (and_optab, "and", '3');
5692 init_integral_libfuncs (ior_optab, "ior", '3');
5693 init_integral_libfuncs (xor_optab, "xor", '3');
5694 init_integral_libfuncs (ashl_optab, "ashl", '3');
5695 init_integral_libfuncs (ashr_optab, "ashr", '3');
5696 init_integral_libfuncs (lshr_optab, "lshr", '3');
5697 init_integral_libfuncs (smin_optab, "min", '3');
5698 init_floating_libfuncs (smin_optab, "min", '3');
5699 init_integral_libfuncs (smax_optab, "max", '3');
5700 init_floating_libfuncs (smax_optab, "max", '3');
5701 init_integral_libfuncs (umin_optab, "umin", '3');
5702 init_integral_libfuncs (umax_optab, "umax", '3');
5703 init_integral_libfuncs (neg_optab, "neg", '2');
5704 init_floating_libfuncs (neg_optab, "neg", '2');
5705 init_integral_libfuncs (negv_optab, "negv", '2');
5706 init_floating_libfuncs (negv_optab, "neg", '2');
5707 init_integral_libfuncs (one_cmpl_optab, "one_cmpl", '2');
5708 init_integral_libfuncs (ffs_optab, "ffs", '2');
5709 init_integral_libfuncs (clz_optab, "clz", '2');
5710 init_integral_libfuncs (ctz_optab, "ctz", '2');
5711 init_integral_libfuncs (popcount_optab, "popcount", '2');
5712 init_integral_libfuncs (parity_optab, "parity", '2');
5714 /* Comparison libcalls for integers MUST come in pairs,
5716 init_integral_libfuncs (cmp_optab, "cmp", '2');
5717 init_integral_libfuncs (ucmp_optab, "ucmp", '2');
5718 init_floating_libfuncs (cmp_optab, "cmp", '2');
5720 /* EQ etc are floating point only. */
5721 init_floating_libfuncs (eq_optab, "eq", '2');
5722 init_floating_libfuncs (ne_optab, "ne", '2');
5723 init_floating_libfuncs (gt_optab, "gt", '2');
5724 init_floating_libfuncs (ge_optab, "ge", '2');
5725 init_floating_libfuncs (lt_optab, "lt", '2');
5726 init_floating_libfuncs (le_optab, "le", '2');
5727 init_floating_libfuncs (unord_optab, "unord", '2');
5729 init_floating_libfuncs (powi_optab, "powi", '2');
5732 init_interclass_conv_libfuncs (sfloat_optab, "float",
5733 MODE_INT, MODE_FLOAT);
5734 init_interclass_conv_libfuncs (sfloat_optab, "float",
5735 MODE_INT, MODE_DECIMAL_FLOAT);
5736 init_interclass_conv_libfuncs (ufloat_optab, "floatun",
5737 MODE_INT, MODE_FLOAT);
5738 init_interclass_conv_libfuncs (ufloat_optab, "floatun",
5739 MODE_INT, MODE_DECIMAL_FLOAT);
5740 init_interclass_conv_libfuncs (sfix_optab, "fix",
5741 MODE_FLOAT, MODE_INT);
5742 init_interclass_conv_libfuncs (sfix_optab, "fix",
5743 MODE_DECIMAL_FLOAT, MODE_INT);
5744 init_interclass_conv_libfuncs (ufix_optab, "fixuns",
5745 MODE_FLOAT, MODE_INT);
5746 init_interclass_conv_libfuncs (ufix_optab, "fixuns",
5747 MODE_DECIMAL_FLOAT, MODE_INT);
5748 init_interclass_conv_libfuncs (ufloat_optab, "floatuns",
5749 MODE_INT, MODE_DECIMAL_FLOAT);
5750 init_interclass_conv_libfuncs (lrint_optab, "lrint",
5751 MODE_INT, MODE_FLOAT);
5752 init_interclass_conv_libfuncs (lround_optab, "lround",
5753 MODE_INT, MODE_FLOAT);
5754 init_interclass_conv_libfuncs (lfloor_optab, "lfloor",
5755 MODE_INT, MODE_FLOAT);
5756 init_interclass_conv_libfuncs (lceil_optab, "lceil",
5757 MODE_INT, MODE_FLOAT);
5759 /* sext_optab is also used for FLOAT_EXTEND. */
5760 init_intraclass_conv_libfuncs (sext_optab, "extend", MODE_FLOAT, true);
5761 init_intraclass_conv_libfuncs (sext_optab, "extend", MODE_DECIMAL_FLOAT, true);
5762 init_interclass_conv_libfuncs (sext_optab, "extend", MODE_FLOAT, MODE_DECIMAL_FLOAT);
5763 init_interclass_conv_libfuncs (sext_optab, "extend", MODE_DECIMAL_FLOAT, MODE_FLOAT);
5764 init_intraclass_conv_libfuncs (trunc_optab, "trunc", MODE_FLOAT, false);
5765 init_intraclass_conv_libfuncs (trunc_optab, "trunc", MODE_DECIMAL_FLOAT, false);
5766 init_interclass_conv_libfuncs (trunc_optab, "trunc", MODE_FLOAT, MODE_DECIMAL_FLOAT);
5767 init_interclass_conv_libfuncs (trunc_optab, "trunc", MODE_DECIMAL_FLOAT, MODE_FLOAT);
5769 /* Explicitly initialize the bswap libfuncs since we need them to be
5770 valid for things other than word_mode. */
5771 set_optab_libfunc (bswap_optab, SImode, "__bswapsi2");
5772 set_optab_libfunc (bswap_optab, DImode, "__bswapdi2");
5774 /* Use cabs for double complex abs, since systems generally have cabs.
5775 Don't define any libcall for float complex, so that cabs will be used. */
5776 if (complex_double_type_node)
5777 abs_optab->handlers[TYPE_MODE (complex_double_type_node)].libfunc
5778 = init_one_libfunc ("cabs");
5780 abort_libfunc = init_one_libfunc ("abort");
5781 memcpy_libfunc = init_one_libfunc ("memcpy");
5782 memmove_libfunc = init_one_libfunc ("memmove");
5783 memcmp_libfunc = init_one_libfunc ("memcmp");
5784 memset_libfunc = init_one_libfunc ("memset");
5785 setbits_libfunc = init_one_libfunc ("__setbits");
5787 #ifndef DONT_USE_BUILTIN_SETJMP
5788 setjmp_libfunc = init_one_libfunc ("__builtin_setjmp");
5789 longjmp_libfunc = init_one_libfunc ("__builtin_longjmp");
5791 setjmp_libfunc = init_one_libfunc ("setjmp");
5792 longjmp_libfunc = init_one_libfunc ("longjmp");
5794 unwind_sjlj_register_libfunc = init_one_libfunc ("_Unwind_SjLj_Register");
5795 unwind_sjlj_unregister_libfunc
5796 = init_one_libfunc ("_Unwind_SjLj_Unregister");
5798 /* For function entry/exit instrumentation. */
5799 profile_function_entry_libfunc
5800 = init_one_libfunc ("__cyg_profile_func_enter");
5801 profile_function_exit_libfunc
5802 = init_one_libfunc ("__cyg_profile_func_exit");
5804 gcov_flush_libfunc = init_one_libfunc ("__gcov_flush");
5806 if (HAVE_conditional_trap)
5807 trap_rtx = gen_rtx_fmt_ee (EQ, VOIDmode, NULL_RTX, NULL_RTX);
5809 /* Allow the target to add more libcalls or rename some, etc. */
5810 targetm.init_libfuncs ();
5815 /* Print information about the current contents of the optabs on
/* NOTE(review): this chunk has elided lines (gaps in the embedded
   numbering) — the return type, opening brace, and the declarations of
   the loop counters and the optab pointer `o` are not visible here.
   Code below is kept byte-identical.  */
5819 debug_optab_libfuncs (void)
/* Walk every (optab, mode) pair and print any registered libcall
   name to stderr, in the form "<rtx-name>\t<mode>:\t<symbol>".  */
5825 /* Dump the arithmetic optabs. */
5826 for (i = 0; i != (int) OTI_MAX; i++)
5827 for (j = 0; j < NUM_MACHINE_MODES; ++j)
5830 struct optab_handlers *h;
5833 h = &o->handlers[j];
/* A registered libfunc must be a SYMBOL_REF; anything else means the
   table was corrupted.  */
5836 gcc_assert (GET_CODE (h->libfunc) == SYMBOL_REF);
5837 fprintf (stderr, "%s\t%s:\t%s\n",
5838 GET_RTX_NAME (o->code),
5840 XSTR (h->libfunc, 0));
/* Conversion optabs are indexed by both source and destination mode,
   hence the extra inner loop over k.  */
5844 /* Dump the conversion optabs. */
5845 for (i = 0; i < (int) COI_MAX; ++i)
5846 for (j = 0; j < NUM_MACHINE_MODES; ++j)
5847 for (k = 0; k < NUM_MACHINE_MODES; ++k)
5850 struct optab_handlers *h;
5852 o = &convert_optab_table[i];
5853 h = &o->handlers[j][k];
5856 gcc_assert (GET_CODE (h->libfunc) == SYMBOL_REF);
5857 fprintf (stderr, "%s\t%s\t%s:\t%s\n",
5858 GET_RTX_NAME (o->code),
5861 XSTR (h->libfunc, 0));
5869 /* Generate insns to trap with code TCODE if OP1 and OP2 satisfy condition
5870 CODE. Return 0 on failure. */
/* NOTE(review): interior lines are elided in this chunk — the return
   type, early `return 0;` statements, sequence start/end calls, and the
   final return are not visible.  Code kept byte-identical.  */
5873 gen_cond_trap (enum rtx_code code ATTRIBUTE_UNUSED, rtx op1,
5874 rtx op2 ATTRIBUTE_UNUSED, rtx tcode ATTRIBUTE_UNUSED)
5876 enum machine_mode mode = GET_MODE (op1);
5877 enum insn_code icode;
/* Bail out when the target has no conditional-trap pattern at all.  */
5880 if (!HAVE_conditional_trap)
/* A VOIDmode operand gives us no mode to compare in.  */
5883 if (mode == VOIDmode)
/* We need a compare pattern for MODE to set up the trap condition.  */
5886 icode = cmp_optab->handlers[(int) mode].insn_code;
5887 if (icode == CODE_FOR_nothing)
/* Legitimize both operands for the compare pattern's predicates.  */
5891 op1 = prepare_operand (icode, op1, 0, mode, mode, 0);
5892 op2 = prepare_operand (icode, op2, 1, mode, mode, 0);
5898 emit_insn (GEN_FCN (icode) (op1, op2));
/* trap_rtx is a shared EQ skeleton (built in init_optabs); install the
   caller's comparison code into it before emitting the trap.  */
5900 PUT_CODE (trap_rtx, code);
5901 gcc_assert (HAVE_conditional_trap);
5902 insn = gen_conditional_trap (trap_rtx, tcode);
5906 insn = get_insns ();
5913 /* Return rtx code for TCODE. Use UNSIGNEDP to select signed
5914 or unsigned operation code. */
/* NOTE(review): only fragments of the switch over TCODE survive in this
   chunk — the case labels, `break`s, remaining cases (EQ/NE, UNORDERED,
   etc.) and the final return are elided.  Code kept byte-identical.  */
5916 static enum rtx_code
5917 get_rtx_code (enum tree_code tcode, bool unsignedp)
/* For ordering comparisons, pick the unsigned RTX variant when the
   operands are unsigned.  */
5929 code = unsignedp ? LTU : LT;
5932 code = unsignedp ? LEU : LE;
5935 code = unsignedp ? GTU : GT;
5938 code = unsignedp ? GEU : GE;
5941 case UNORDERED_EXPR:
5972 /* Return comparison rtx for COND. Use UNSIGNEDP to select signed or
5973 unsigned operators. Do not generate compare instruction. */
/* NOTE(review): the return type line and the declarations of t_op0/t_op1
   are elided from this chunk.  Code kept byte-identical.  */
5976 vector_compare_rtx (tree cond, bool unsignedp, enum insn_code icode)
5978 enum rtx_code rcode;
5980 rtx rtx_op0, rtx_op1;
5982 /* This is unlikely. While generating VEC_COND_EXPR, auto vectorizer
5983 ensures that condition is a relational operation. */
5984 gcc_assert (COMPARISON_CLASS_P (cond));
5986 rcode = get_rtx_code (TREE_CODE (cond), unsignedp);
5987 t_op0 = TREE_OPERAND (cond, 0);
5988 t_op1 = TREE_OPERAND (cond, 1);
5990 /* Expand operands. */
5991 rtx_op0 = expand_expr (t_op0, NULL_RTX, TYPE_MODE (TREE_TYPE (t_op0)),
5993 rtx_op1 = expand_expr (t_op1, NULL_RTX, TYPE_MODE (TREE_TYPE (t_op1)),
/* Operands 4 and 5 of the vcond pattern are the two comparison inputs;
   force each into a register if the pattern's predicate rejects it.
   VOIDmode operands (constants) are left alone.  */
5996 if (!insn_data[icode].operand[4].predicate (rtx_op0, GET_MODE (rtx_op0))
5997 && GET_MODE (rtx_op0) != VOIDmode)
5998 rtx_op0 = force_reg (GET_MODE (rtx_op0), rtx_op0);
6000 if (!insn_data[icode].operand[5].predicate (rtx_op1, GET_MODE (rtx_op1))
6001 && GET_MODE (rtx_op1) != VOIDmode)
6002 rtx_op1 = force_reg (GET_MODE (rtx_op1), rtx_op1);
/* Build the bare comparison rtx; no compare insn is emitted here.  */
6004 return gen_rtx_fmt_ee (rcode, VOIDmode, rtx_op0, rtx_op1);
6007 /* Return insn code for VEC_COND_EXPR EXPR. */
/* Selects between the unsigned (vcondu) and signed (vcond) pattern
   tables by the signedness of EXPR's type.  NOTE(review): the `else`
   line and the final return of ICODE are elided from this chunk.  */
6009 static inline enum insn_code
6010 get_vcond_icode (tree expr, enum machine_mode mode)
6012 enum insn_code icode = CODE_FOR_nothing;
6014 if (TYPE_UNSIGNED (TREE_TYPE (expr)))
6015 icode = vcondu_gen_code[mode];
6017 icode = vcond_gen_code[mode];
6021 /* Return TRUE iff, appropriate vector insns are available
6022 for vector cond expr expr in VMODE mode. */
/* NOTE(review): the return type, braces, and the `return false;` /
   `return true;` statements are elided from this chunk.  */
6025 expand_vec_cond_expr_p (tree expr, enum machine_mode vmode)
6027 if (get_vcond_icode (expr, vmode) == CODE_FOR_nothing)
6032 /* Generate insns for VEC_COND_EXPR. */
/* Expands a VEC_COND_EXPR (cond ? op1 : op2, elementwise on vectors)
   via the target's vcond/vcondu pattern.  NOTE(review): the return
   type, braces, `return 0;` on failure, and the final `return target;`
   are elided from this chunk.  Code kept byte-identical.  */
6035 expand_vec_cond_expr (tree vec_cond_expr, rtx target)
6037 enum insn_code icode;
6038 rtx comparison, rtx_op1, rtx_op2, cc_op0, cc_op1;
6039 enum machine_mode mode = TYPE_MODE (TREE_TYPE (vec_cond_expr));
6040 bool unsignedp = TYPE_UNSIGNED (TREE_TYPE (vec_cond_expr));
6042 icode = get_vcond_icode (vec_cond_expr, mode);
6043 if (icode == CODE_FOR_nothing)
/* TARGET is optional; allocate a fresh pseudo if absent or rejected by
   the pattern's operand-0 predicate.  */
6046 if (!target || !insn_data[icode].operand[0].predicate (target, mode))
6047 target = gen_reg_rtx (mode);
6049 /* Get comparison rtx. First expand both cond expr operands. */
6050 comparison = vector_compare_rtx (TREE_OPERAND (vec_cond_expr, 0),
6052 cc_op0 = XEXP (comparison, 0);
6053 cc_op1 = XEXP (comparison, 1);
6054 /* Expand both operands and force them in reg, if required. */
6055 rtx_op1 = expand_normal (TREE_OPERAND (vec_cond_expr, 1));
6056 if (!insn_data[icode].operand[1].predicate (rtx_op1, mode)
6057 && mode != VOIDmode)
6058 rtx_op1 = force_reg (mode, rtx_op1);
6060 rtx_op2 = expand_normal (TREE_OPERAND (vec_cond_expr, 2));
6061 if (!insn_data[icode].operand[2].predicate (rtx_op2, mode)
6062 && mode != VOIDmode)
6063 rtx_op2 = force_reg (mode, rtx_op2);
6065 /* Emit instruction! */
/* vcond patterns take 6 operands: dest, the two value operands, the
   comparison rtx, and its two inputs.  */
6066 emit_insn (GEN_FCN (icode) (target, rtx_op1, rtx_op2,
6067 comparison, cc_op0, cc_op1));
6073 /* This is an internal subroutine of the other compare_and_swap expanders.
6074 MEM, OLD_VAL and NEW_VAL are as you'd expect for a compare-and-swap
6075 operation. TARGET is an optional place to store the value result of
6076 the operation. ICODE is the particular instruction to expand. Return
6077 the result of the operation. */
/* NOTE(review): the return type, the declaration of `insn`, the NULL
   return on pattern failure, and the trailing emit/return are elided
   from this chunk.  Code kept byte-identical.  */
6080 expand_val_compare_and_swap_1 (rtx mem, rtx old_val, rtx new_val,
6081 rtx target, enum insn_code icode)
6083 enum machine_mode mode = GET_MODE (mem);
6086 if (!target || !insn_data[icode].operand[0].predicate (target, mode))
6087 target = gen_reg_rtx (mode);
/* Widen/narrow OLD_VAL to the memory's mode, then satisfy the
   pattern's operand-2 predicate.  VOIDmode means a constant, which
   convert_modes cannot infer a source mode for.  */
6089 if (GET_MODE (old_val) != VOIDmode && GET_MODE (old_val) != mode)
6090 old_val = convert_modes (mode, GET_MODE (old_val), old_val, 1);
6091 if (!insn_data[icode].operand[2].predicate (old_val, mode))
6092 old_val = force_reg (mode, old_val);
/* Same treatment for NEW_VAL against operand 3.  */
6094 if (GET_MODE (new_val) != VOIDmode && GET_MODE (new_val) != mode)
6095 new_val = convert_modes (mode, GET_MODE (new_val), new_val, 1);
6096 if (!insn_data[icode].operand[3].predicate (new_val, mode))
6097 new_val = force_reg (mode, new_val);
6099 insn = GEN_FCN (icode) (target, mem, old_val, new_val);
6100 if (insn == NULL_RTX)
6107 /* Expand a compare-and-swap operation and return its value. */
/* Thin public wrapper: look up the target's compare-and-swap pattern
   for MEM's mode and delegate to expand_val_compare_and_swap_1.
   NOTE(review): the return type, braces, and the NULL return when no
   pattern exists are elided from this chunk.  */
6110 expand_val_compare_and_swap (rtx mem, rtx old_val, rtx new_val, rtx target)
6112 enum machine_mode mode = GET_MODE (mem);
6113 enum insn_code icode = sync_compare_and_swap[mode];
6115 if (icode == CODE_FOR_nothing)
6118 return expand_val_compare_and_swap_1 (mem, old_val, new_val, target, icode);
6121 /* Expand a compare-and-swap operation and store true into the result if
6122 the operation was successful and false otherwise. Return the result.
6123 Unlike other routines, TARGET is not optional. */
/* NOTE(review): this chunk elides the return type, the switch header
   around ICODE, several case/break/return lines, and the trailing
   `return target;`.  Code kept byte-identical.  */
6126 expand_bool_compare_and_swap (rtx mem, rtx old_val, rtx new_val, rtx target)
6128 enum machine_mode mode = GET_MODE (mem);
6129 enum insn_code icode;
6130 rtx subtarget, label0, label1;
6132 /* If the target supports a compare-and-swap pattern that simultaneously
6133 sets some flag for success, then use it. Otherwise use the regular
6134 compare-and-swap and follow that immediately with a compare insn. */
6135 icode = sync_compare_and_swap_cc[mode];
6139 subtarget = expand_val_compare_and_swap_1 (mem, old_val, new_val,
6141 if (subtarget != NULL_RTX)
6145 case CODE_FOR_nothing:
6146 icode = sync_compare_and_swap[mode];
6147 if (icode == CODE_FOR_nothing)
6150 /* Ensure that if old_val == mem, that we're not comparing
6151 against an old value. */
6152 if (MEM_P (old_val))
6153 old_val = force_reg (mode, old_val);
6155 subtarget = expand_val_compare_and_swap_1 (mem, old_val, new_val,
6157 if (subtarget == NULL_RTX)
/* Compare the value the CAS returned against OLD_VAL: equality means
   the swap succeeded.  */
6160 emit_cmp_insn (subtarget, old_val, EQ, const0_rtx, mode, true);
6163 /* If the target has a sane STORE_FLAG_VALUE, then go ahead and use a
6164 setcc instruction from the beginning. We don't work too hard here,
6165 but it's nice to not be stupid about initial code gen either. */
6166 if (STORE_FLAG_VALUE == 1)
6168 icode = setcc_gen_code[EQ];
6169 if (icode != CODE_FOR_nothing)
6171 enum machine_mode cmode = insn_data[icode].operand[0].mode;
/* The setcc pattern may demand a different mode than TARGET's;
   materialize into SUBTARGET and convert afterwards if needed.  */
6175 if (!insn_data[icode].operand[0].predicate (target, cmode))
6176 subtarget = gen_reg_rtx (cmode);
6178 insn = GEN_FCN (icode) (subtarget);
6182 if (GET_MODE (target) != GET_MODE (subtarget))
6184 convert_move (target, subtarget, 1);
6192 /* Without an appropriate setcc instruction, use a set of branches to
6193 get 1 and 0 stored into target. Presumably if the target has a
6194 STORE_FLAG_VALUE that isn't 1, then this will get cleaned up by ifcvt. */
6196 label0 = gen_label_rtx ();
6197 label1 = gen_label_rtx ();
/* if-equal jump to label0 (success path, store 1); fall through stores
   0 and skips over it via label1.  */
6199 emit_jump_insn (bcc_gen_fctn[EQ] (label0));
6200 emit_move_insn (target, const0_rtx);
6201 emit_jump_insn (gen_jump (label1));
6203 emit_label (label0);
6204 emit_move_insn (target, const1_rtx);
6205 emit_label (label1);
6210 /* This is a helper function for the other atomic operations. This function
6211 emits a loop that contains SEQ that iterates until a compare-and-swap
6212 operation at the end succeeds. MEM is the memory to be modified. SEQ is
6213 a set of instructions that takes a value from OLD_REG as an input and
6214 produces a value in NEW_REG as an output. Before SEQ, OLD_REG will be
6215 set to the current contents of MEM. After SEQ, a compare-and-swap will
6216 attempt to update MEM with NEW_REG. The function returns true when the
6217 loop was generated successfully. */
/* NOTE(review): this chunk elides the return type, the emit of SEQ and
   the label, the switch header, several case/break/false-return lines,
   and the trailing `return true;`.  Code kept byte-identical.  */
6220 expand_compare_and_swap_loop (rtx mem, rtx old_reg, rtx new_reg, rtx seq)
6222 enum machine_mode mode = GET_MODE (mem);
6223 enum insn_code icode;
6224 rtx label, cmp_reg, subtarget;
6226 /* The loop we want to generate looks like
6232 cmp_reg = compare-and-swap(mem, old_reg, new_reg)
6233 if (cmp_reg != old_reg)
6236 Note that we only do the plain load from memory once. Subsequent
6237 iterations use the value loaded by the compare-and-swap pattern. */
6239 label = gen_label_rtx ();
6240 cmp_reg = gen_reg_rtx (mode);
/* Initial plain load; after the first iteration CMP_REG carries the
   value returned by the CAS itself.  */
6242 emit_move_insn (cmp_reg, mem);
6244 emit_move_insn (old_reg, cmp_reg);
6248 /* If the target supports a compare-and-swap pattern that simultaneously
6249 sets some flag for success, then use it. Otherwise use the regular
6250 compare-and-swap and follow that immediately with a compare insn. */
6251 icode = sync_compare_and_swap_cc[mode];
6255 subtarget = expand_val_compare_and_swap_1 (mem, old_reg, new_reg,
6257 if (subtarget != NULL_RTX)
/* The _cc variant was asked to deposit its result in CMP_REG.  */
6259 gcc_assert (subtarget == cmp_reg);
6264 case CODE_FOR_nothing:
6265 icode = sync_compare_and_swap[mode];
6266 if (icode == CODE_FOR_nothing)
6269 subtarget = expand_val_compare_and_swap_1 (mem, old_reg, new_reg,
6271 if (subtarget == NULL_RTX)
6273 if (subtarget != cmp_reg)
6274 emit_move_insn (cmp_reg, subtarget);
6276 emit_cmp_insn (cmp_reg, old_reg, EQ, const0_rtx, mode, true);
6279 /* ??? Mark this jump predicted not taken? */
/* Loop back while the CAS keeps failing (observed value != expected).  */
6280 emit_jump_insn (bcc_gen_fctn[NE] (label));
6285 /* This function generates the atomic operation MEM CODE= VAL. In this
6286 case, we do not care about any resulting value. Returns NULL if we
6287 cannot generate the operation. */
/* NOTE(review): this chunk elides the return type, the switch header
   over CODE, most case/break labels, the NULL returns, and sequence
   start/end bookkeeping.  Code kept byte-identical.  */
6290 expand_sync_operation (rtx mem, rtx val, enum rtx_code code)
6292 enum machine_mode mode = GET_MODE (mem);
6293 enum insn_code icode;
6296 /* Look to see if the target supports the operation directly. */
6300 icode = sync_add_optab[mode];
6303 icode = sync_ior_optab[mode];
6306 icode = sync_xor_optab[mode];
6309 icode = sync_and_optab[mode];
6312 icode = sync_nand_optab[mode];
6316 icode = sync_sub_optab[mode];
6317 if (icode == CODE_FOR_nothing)
/* No sync_sub pattern: fall back to sync_add with a negated VAL.  */
6319 icode = sync_add_optab[mode];
6320 if (icode != CODE_FOR_nothing)
6322 val = expand_simple_unop (mode, NEG, val, NULL_RTX, 1);
6332 /* Generate the direct operation, if present. */
6333 if (icode != CODE_FOR_nothing)
6335 if (GET_MODE (val) != VOIDmode && GET_MODE (val) != mode)
6336 val = convert_modes (mode, GET_MODE (val), val, 1);
6337 if (!insn_data[icode].operand[1].predicate (val, mode))
6338 val = force_reg (mode, val);
6340 insn = GEN_FCN (icode) (mem, val);
6348 /* Failing that, generate a compare-and-swap loop in which we perform the
6349 operation with normal arithmetic instructions. */
6350 if (sync_compare_and_swap[mode] != CODE_FOR_nothing)
6352 rtx t0 = gen_reg_rtx (mode), t1;
/* NAND is modeled as NOT followed by the binop (conditional lines
   selecting this path are elided here).  */
6359 t1 = expand_simple_unop (mode, NOT, t1, NULL_RTX, true);
6362 t1 = expand_simple_binop (mode, code, t1, val, NULL_RTX,
6363 true, OPTAB_LIB_WIDEN);
6365 insn = get_insns ();
6368 if (t1 != NULL && expand_compare_and_swap_loop (mem, t0, t1, insn))
6375 /* This function generates the atomic operation MEM CODE= VAL. In this
6376 case, we do care about the resulting value: if AFTER is true then
6377 return the value MEM holds after the operation, if AFTER is false
6378 then return the value MEM holds before the operation. TARGET is an
6379 optional place for the result value to be stored. */
/* NOTE(review): this chunk elides the return type, the switch over
   CODE, case/break labels, the icode selection between OLD_CODE and
   NEW_CODE, the `compensate` flag logic, NULL returns, and sequence
   bookkeeping.  Code kept byte-identical.  */
6382 expand_sync_fetch_operation (rtx mem, rtx val, enum rtx_code code,
6383 bool after, rtx target)
6385 enum machine_mode mode = GET_MODE (mem);
6386 enum insn_code old_code, new_code, icode;
6390 /* Look to see if the target supports the operation directly. */
/* For each rtx code, fetch both the fetch-old (sync_old_*) and
   fetch-new (sync_new_*) pattern candidates.  */
6394 old_code = sync_old_add_optab[mode];
6395 new_code = sync_new_add_optab[mode];
6398 old_code = sync_old_ior_optab[mode];
6399 new_code = sync_new_ior_optab[mode];
6402 old_code = sync_old_xor_optab[mode];
6403 new_code = sync_new_xor_optab[mode];
6406 old_code = sync_old_and_optab[mode];
6407 new_code = sync_new_and_optab[mode];
6410 old_code = sync_old_nand_optab[mode];
6411 new_code = sync_new_nand_optab[mode];
6415 old_code = sync_old_sub_optab[mode];
6416 new_code = sync_new_sub_optab[mode];
6417 if (old_code == CODE_FOR_nothing && new_code == CODE_FOR_nothing)
/* No subtract patterns: try the add patterns with a negated VAL.  */
6419 old_code = sync_old_add_optab[mode];
6420 new_code = sync_new_add_optab[mode];
6421 if (old_code != CODE_FOR_nothing || new_code != CODE_FOR_nothing)
6423 val = expand_simple_unop (mode, NEG, val, NULL_RTX, 1);
6433 /* If the target does supports the proper new/old operation, great. But
6434 if we only support the opposite old/new operation, check to see if we
6435 can compensate. In the case in which the old value is supported, then
6436 we can always perform the operation again with normal arithmetic. In
6437 the case in which the new value is supported, then we can only handle
6438 this in the case the operation is reversible. */
6443 if (icode == CODE_FOR_nothing)
6446 if (icode != CODE_FOR_nothing)
/* Un-doing a new-value result only works for invertible ops.  */
6453 if (icode == CODE_FOR_nothing
6454 && (code == PLUS || code == MINUS || code == XOR))
6457 if (icode != CODE_FOR_nothing)
6462 /* If we found something supported, great. */
6463 if (icode != CODE_FOR_nothing)
6465 if (!target || !insn_data[icode].operand[0].predicate (target, mode))
6466 target = gen_reg_rtx (mode);
6468 if (GET_MODE (val) != VOIDmode && GET_MODE (val) != mode)
6469 val = convert_modes (mode, GET_MODE (val), val, 1);
6470 if (!insn_data[icode].operand[2].predicate (val, mode))
6471 val = force_reg (mode, val);
6473 insn = GEN_FCN (icode) (target, mem, val);
6478 /* If we need to compensate for using an operation with the
6479 wrong return value, do so now. */
6486 else if (code == MINUS)
/* NAND compensation: apply NOT then the binop once more to transform
   an old-value result into the new value (or vice versa).  */
6491 target = expand_simple_unop (mode, NOT, target, NULL_RTX, true);
6492 target = expand_simple_binop (mode, code, target, val, NULL_RTX,
6493 true, OPTAB_LIB_WIDEN);
6500 /* Failing that, generate a compare-and-swap loop in which we perform the
6501 operation with normal arithmetic instructions. */
6502 if (sync_compare_and_swap[mode] != CODE_FOR_nothing)
6504 rtx t0 = gen_reg_rtx (mode), t1;
6506 if (!target || !register_operand (target, mode))
6507 target = gen_reg_rtx (mode);
/* !AFTER case (guard elided): the fetched pre-op value is the result.  */
6512 emit_move_insn (target, t0);
6516 t1 = expand_simple_unop (mode, NOT, t1, NULL_RTX, true);
6519 t1 = expand_simple_binop (mode, code, t1, val, NULL_RTX,
6520 true, OPTAB_LIB_WIDEN);
/* AFTER case (guard elided): the computed post-op value is the result.  */
6522 emit_move_insn (target, t1);
6524 insn = get_insns ();
6527 if (t1 != NULL && expand_compare_and_swap_loop (mem, t0, t1, insn))
6534 /* This function expands a test-and-set operation. Ideally we atomically
6535 store VAL in MEM and return the previous value in MEM. Some targets
6536 may not support this operation and only support VAL with the constant 1;
6537 in this case while the return value will be 0/1, but the exact value
6538 stored in MEM is target defined. TARGET is an option place to stick
6539 the return value. */
/* NOTE(review): the return type, the declaration of `insn`, the emit /
   return statements after GEN_FCN, and the final NULL return are elided
   from this chunk.  Code kept byte-identical.  */
6542 expand_sync_lock_test_and_set (rtx mem, rtx val, rtx target)
6544 enum machine_mode mode = GET_MODE (mem);
6545 enum insn_code icode;
6548 /* If the target supports the test-and-set directly, great. */
6549 icode = sync_lock_test_and_set[mode];
6550 if (icode != CODE_FOR_nothing)
6552 if (!target || !insn_data[icode].operand[0].predicate (target, mode))
6553 target = gen_reg_rtx (mode);
/* Convert VAL to the memory mode, then satisfy operand 2's predicate;
   VOIDmode (constant) values are passed through unconverted.  */
6555 if (GET_MODE (val) != VOIDmode && GET_MODE (val) != mode)
6556 val = convert_modes (mode, GET_MODE (val), val, 1);
6557 if (!insn_data[icode].operand[2].predicate (val, mode))
6558 val = force_reg (mode, val);
6560 insn = GEN_FCN (icode) (target, mem, val);
6568 /* Otherwise, use a compare-and-swap loop for the exchange. */
/* The "seq" for the loop is empty (NULL_RTX): the CAS loop itself
   performs the exchange, with TARGET holding the fetched old value.  */
6569 if (sync_compare_and_swap[mode] != CODE_FOR_nothing)
6571 if (!target || !register_operand (target, mode))
6572 target = gen_reg_rtx (mode);
6573 if (GET_MODE (val) != VOIDmode && GET_MODE (val) != mode)
6574 val = convert_modes (mode, GET_MODE (val), val, 1);
6575 if (expand_compare_and_swap_loop (mem, target, val, NULL_RTX))
6582 #include "gt-optabs.h"