1 /* Expand the basic unary and binary arithmetic operations, for GNU compiler.
2 Copyright (C) 1987, 1988, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
3 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007
4 Free Software Foundation, Inc.
6 This file is part of GCC.
8 GCC is free software; you can redistribute it and/or modify it under
9 the terms of the GNU General Public License as published by the Free
10 Software Foundation; either version 2, or (at your option) any later
13 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
14 WARRANTY; without even the implied warranty of MERCHANTABILITY or
15 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING. If not, write to the Free
20 Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
26 #include "coretypes.h"
30 /* Include insn-config.h before expr.h so that HAVE_conditional_move
31 is properly defined. */
32 #include "insn-config.h"
46 #include "basic-block.h"
49 /* Each optab contains info on how this target machine
50 can perform a particular operation
51 for all sizes and kinds of operands.
53 The operation to be performed is often specified
54 by passing one of these optabs as an argument.
56 See expr.h for documentation of these optabs. */
58 optab optab_table[OTI_MAX];
60 rtx libfunc_table[LTI_MAX];
62 /* Tables of patterns for converting one mode to another. */
63 convert_optab convert_optab_table[COI_MAX];
65 /* Contains the optab used for each rtx code. */
66 optab code_to_optab[NUM_RTX_CODE + 1];
68 /* Indexed by the rtx-code for a conditional (eg. EQ, LT,...)
69 gives the gen_function to make a branch to test that condition. */
71 rtxfun bcc_gen_fctn[NUM_RTX_CODE];
73 /* Indexed by the rtx-code for a conditional (eg. EQ, LT,...)
74 gives the insn code to make a store-condition insn
75 to test that condition. */
77 enum insn_code setcc_gen_code[NUM_RTX_CODE];
79 #ifdef HAVE_conditional_move
80 /* Indexed by the machine mode, gives the insn code to make a conditional
81 move insn. This is not indexed by the rtx-code like bcc_gen_fctn and
82 setcc_gen_code to cut down on the number of named patterns. Consider a day
83 when a lot more rtx codes are conditional (eg: for the ARM). */
85 enum insn_code movcc_gen_code[NUM_MACHINE_MODES];
88 /* Indexed by the machine mode, gives the insn code for vector conditional
91 enum insn_code vcond_gen_code[NUM_MACHINE_MODES];
92 enum insn_code vcondu_gen_code[NUM_MACHINE_MODES];
94 /* The insn generating function can not take an rtx_code argument.
95 TRAP_RTX is used as an rtx argument. Its code is replaced with
96 the code to be used in the trap insn and all other fields are ignored. */
97 static GTY(()) rtx trap_rtx;
99 static int add_equal_note (rtx, rtx, enum rtx_code, rtx, rtx);
100 static rtx widen_operand (rtx, enum machine_mode, enum machine_mode, int,
102 static void prepare_cmp_insn (rtx *, rtx *, enum rtx_code *, rtx,
103 enum machine_mode *, int *,
104 enum can_compare_purpose);
105 static enum insn_code can_fix_p (enum machine_mode, enum machine_mode, int,
107 static enum insn_code can_float_p (enum machine_mode, enum machine_mode, int);
108 static optab new_optab (void);
109 static convert_optab new_convert_optab (void);
110 static inline optab init_optab (enum rtx_code);
111 static inline optab init_optabv (enum rtx_code);
112 static inline convert_optab init_convert_optab (enum rtx_code);
113 static void init_libfuncs (optab, int, int, const char *, int);
114 static void init_integral_libfuncs (optab, const char *, int);
115 static void init_floating_libfuncs (optab, const char *, int);
116 static void init_interclass_conv_libfuncs (convert_optab, const char *,
117 enum mode_class, enum mode_class);
118 static void init_intraclass_conv_libfuncs (convert_optab, const char *,
119 enum mode_class, bool);
120 static void emit_cmp_and_jump_insn_1 (rtx, rtx, enum machine_mode,
121 enum rtx_code, int, rtx);
122 static void prepare_float_lib_cmp (rtx *, rtx *, enum rtx_code *,
123 enum machine_mode *, int *);
124 static rtx widen_clz (enum machine_mode, rtx, rtx);
125 static rtx expand_parity (enum machine_mode, rtx, rtx);
126 static enum rtx_code get_rtx_code (enum tree_code, bool);
127 static rtx vector_compare_rtx (tree, bool, enum insn_code);
129 /* Current libcall id. It doesn't matter what these are, as long
130 as they are unique to each libcall that is emitted. */
131 static HOST_WIDE_INT libcall_id = 0;
133 #ifndef HAVE_conditional_trap
134 #define HAVE_conditional_trap 0
135 #define gen_conditional_trap(a,b) (gcc_unreachable (), NULL_RTX)
138 /* Prefixes for the current version of decimal floating point (BID vs. DPD) */
139 #if ENABLE_DECIMAL_BID_FORMAT
140 #define DECIMAL_PREFIX "bid_"
142 #define DECIMAL_PREFIX "dpd_"
146 /* Add a REG_EQUAL note to the last insn in INSNS. TARGET is being set to
147 the result of operation CODE applied to OP0 (and OP1 if it is a binary
150 If the last insn does not set TARGET, don't do anything, but return 1.
152 If a previous insn sets TARGET and TARGET is one of OP0 or OP1,
153 don't add the REG_EQUAL note but return 0. Our caller can then try
154 again, ensuring that TARGET is not one of the operands. */
157 add_equal_note (rtx insns, rtx target, enum rtx_code code, rtx op0, rtx op1)
159 rtx last_insn, insn, set;
162 gcc_assert (insns && INSN_P (insns) && NEXT_INSN (insns));
164 if (GET_RTX_CLASS (code) != RTX_COMM_ARITH
165 && GET_RTX_CLASS (code) != RTX_BIN_ARITH
166 && GET_RTX_CLASS (code) != RTX_COMM_COMPARE
167 && GET_RTX_CLASS (code) != RTX_COMPARE
168 && GET_RTX_CLASS (code) != RTX_UNARY)
171 if (GET_CODE (target) == ZERO_EXTRACT)
174 for (last_insn = insns;
175 NEXT_INSN (last_insn) != NULL_RTX;
176 last_insn = NEXT_INSN (last_insn))
179 set = single_set (last_insn);
183 if (! rtx_equal_p (SET_DEST (set), target)
184 /* For a STRICT_LOW_PART, the REG_NOTE applies to what is inside it. */
185 && (GET_CODE (SET_DEST (set)) != STRICT_LOW_PART
186 || ! rtx_equal_p (XEXP (SET_DEST (set), 0), target)))
189 /* If TARGET is in OP0 or OP1, check if anything in SEQ sets TARGET
190 besides the last insn. */
191 if (reg_overlap_mentioned_p (target, op0)
192 || (op1 && reg_overlap_mentioned_p (target, op1)))
194 insn = PREV_INSN (last_insn);
195 while (insn != NULL_RTX)
197 if (reg_set_p (target, insn))
200 insn = PREV_INSN (insn);
204 if (GET_RTX_CLASS (code) == RTX_UNARY)
205 note = gen_rtx_fmt_e (code, GET_MODE (target), copy_rtx (op0));
207 note = gen_rtx_fmt_ee (code, GET_MODE (target), copy_rtx (op0), copy_rtx (op1));
209 set_unique_reg_note (last_insn, REG_EQUAL, note);
214 /* Widen OP to MODE and return the rtx for the widened operand. UNSIGNEDP
215 says whether OP is signed or unsigned. NO_EXTEND is nonzero if we need
216 not actually do a sign-extend or zero-extend, but can leave the
217 higher-order bits of the result rtx undefined, for example, in the case
218 of logical operations, but not right shifts. */
221 widen_operand (rtx op, enum machine_mode mode, enum machine_mode oldmode,
222 int unsignedp, int no_extend)
226 /* If we don't have to extend and this is a constant, return it. */
227 if (no_extend && GET_MODE (op) == VOIDmode)
230 /* If we must extend do so. If OP is a SUBREG for a promoted object, also
231 extend since it will be more efficient to do so unless the signedness of
232 a promoted object differs from our extension. */
234 || (GET_CODE (op) == SUBREG && SUBREG_PROMOTED_VAR_P (op)
235 && SUBREG_PROMOTED_UNSIGNED_P (op) == unsignedp))
236 return convert_modes (mode, oldmode, op, unsignedp);
238 /* If MODE is no wider than a single word, we return a paradoxical
240 if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
241 return gen_rtx_SUBREG (mode, force_reg (GET_MODE (op), op), 0);
243 /* Otherwise, get an object of MODE, clobber it, and set the low-order
246 result = gen_reg_rtx (mode);
247 emit_insn (gen_rtx_CLOBBER (VOIDmode, result));
248 emit_move_insn (gen_lowpart (GET_MODE (op), result), op);
252 /* Return the optab used for computing the operation given by
253 the tree code, CODE. This function is not always usable (for
254 example, it cannot give complete results for multiplication
255 or division) but probably ought to be relied on more widely
256 throughout the expander. */
258 optab_for_tree_code (enum tree_code code, tree type)
270 return one_cmpl_optab;
279 return TYPE_UNSIGNED (type) ? umod_optab : smod_optab;
287 return TYPE_UNSIGNED (type) ? udiv_optab : sdiv_optab;
293 return TYPE_UNSIGNED (type) ? lshr_optab : ashr_optab;
302 return TYPE_UNSIGNED (type) ? umax_optab : smax_optab;
305 return TYPE_UNSIGNED (type) ? umin_optab : smin_optab;
307 case REALIGN_LOAD_EXPR:
308 return vec_realign_load_optab;
311 return TYPE_UNSIGNED (type) ? usum_widen_optab : ssum_widen_optab;
314 return TYPE_UNSIGNED (type) ? udot_prod_optab : sdot_prod_optab;
317 return TYPE_UNSIGNED (type) ? reduc_umax_optab : reduc_smax_optab;
320 return TYPE_UNSIGNED (type) ? reduc_umin_optab : reduc_smin_optab;
322 case REDUC_PLUS_EXPR:
323 return TYPE_UNSIGNED (type) ? reduc_uplus_optab : reduc_splus_optab;
325 case VEC_LSHIFT_EXPR:
326 return vec_shl_optab;
328 case VEC_RSHIFT_EXPR:
329 return vec_shr_optab;
331 case VEC_WIDEN_MULT_HI_EXPR:
332 return TYPE_UNSIGNED (type) ?
333 vec_widen_umult_hi_optab : vec_widen_smult_hi_optab;
335 case VEC_WIDEN_MULT_LO_EXPR:
336 return TYPE_UNSIGNED (type) ?
337 vec_widen_umult_lo_optab : vec_widen_smult_lo_optab;
339 case VEC_UNPACK_HI_EXPR:
340 return TYPE_UNSIGNED (type) ?
341 vec_unpacku_hi_optab : vec_unpacks_hi_optab;
343 case VEC_UNPACK_LO_EXPR:
344 return TYPE_UNSIGNED (type) ?
345 vec_unpacku_lo_optab : vec_unpacks_lo_optab;
347 case VEC_UNPACK_FLOAT_HI_EXPR:
348 /* The signedness is determined from input operand. */
349 return TYPE_UNSIGNED (type) ?
350 vec_unpacku_float_hi_optab : vec_unpacks_float_hi_optab;
352 case VEC_UNPACK_FLOAT_LO_EXPR:
353 /* The signedness is determined from input operand. */
354 return TYPE_UNSIGNED (type) ?
355 vec_unpacku_float_lo_optab : vec_unpacks_float_lo_optab;
357 case VEC_PACK_TRUNC_EXPR:
358 return vec_pack_trunc_optab;
360 case VEC_PACK_SAT_EXPR:
361 return TYPE_UNSIGNED (type) ? vec_pack_usat_optab : vec_pack_ssat_optab;
363 case VEC_PACK_FIX_TRUNC_EXPR:
364 /* The signedness is determined from output operand. */
365 return TYPE_UNSIGNED (type) ?
366 vec_pack_ufix_trunc_optab : vec_pack_sfix_trunc_optab;
372 trapv = INTEGRAL_TYPE_P (type) && TYPE_OVERFLOW_TRAPS (type);
375 case POINTER_PLUS_EXPR:
377 return trapv ? addv_optab : add_optab;
380 return trapv ? subv_optab : sub_optab;
383 return trapv ? smulv_optab : smul_optab;
386 return trapv ? negv_optab : neg_optab;
389 return trapv ? absv_optab : abs_optab;
391 case VEC_EXTRACT_EVEN_EXPR:
392 return vec_extract_even_optab;
394 case VEC_EXTRACT_ODD_EXPR:
395 return vec_extract_odd_optab;
397 case VEC_INTERLEAVE_HIGH_EXPR:
398 return vec_interleave_high_optab;
400 case VEC_INTERLEAVE_LOW_EXPR:
401 return vec_interleave_low_optab;
409 /* Expand vector widening operations.
411 There are two different classes of operations handled here:
412 1) Operations whose result is wider than all the arguments to the operation.
413 Examples: VEC_UNPACK_HI/LO_EXPR, VEC_WIDEN_MULT_HI/LO_EXPR
414 In this case OP0 and optionally OP1 would be initialized,
415 but WIDE_OP wouldn't (not relevant for this case).
416 2) Operations whose result is of the same size as the last argument to the
417 operation, but wider than all the other arguments to the operation.
418 Examples: WIDEN_SUM_EXPR, VEC_DOT_PROD_EXPR.
419 In the case WIDE_OP, OP0 and optionally OP1 would be initialized.
421 E.g, when called to expand the following operations, this is how
422 the arguments will be initialized:
424 widening-sum 2 oprnd0 - oprnd1
425 widening-dot-product 3 oprnd0 oprnd1 oprnd2
426 widening-mult 2 oprnd0 oprnd1 -
427 type-promotion (vec-unpack) 1 oprnd0 - - */
430 expand_widen_pattern_expr (tree exp, rtx op0, rtx op1, rtx wide_op, rtx target,
433 tree oprnd0, oprnd1, oprnd2;
434 enum machine_mode wmode = 0, tmode0, tmode1 = 0;
435 optab widen_pattern_optab;
437 enum machine_mode xmode0, xmode1 = 0, wxmode = 0;
440 rtx xop0, xop1, wxop;
441 int nops = TREE_OPERAND_LENGTH (exp);
443 oprnd0 = TREE_OPERAND (exp, 0);
444 tmode0 = TYPE_MODE (TREE_TYPE (oprnd0));
445 widen_pattern_optab =
446 optab_for_tree_code (TREE_CODE (exp), TREE_TYPE (oprnd0));
447 icode = (int) widen_pattern_optab->handlers[(int) tmode0].insn_code;
448 gcc_assert (icode != CODE_FOR_nothing);
449 xmode0 = insn_data[icode].operand[1].mode;
453 oprnd1 = TREE_OPERAND (exp, 1);
454 tmode1 = TYPE_MODE (TREE_TYPE (oprnd1));
455 xmode1 = insn_data[icode].operand[2].mode;
458 /* The last operand is of a wider mode than the rest of the operands. */
466 gcc_assert (tmode1 == tmode0);
468 oprnd2 = TREE_OPERAND (exp, 2);
469 wmode = TYPE_MODE (TREE_TYPE (oprnd2));
470 wxmode = insn_data[icode].operand[3].mode;
474 wmode = wxmode = insn_data[icode].operand[0].mode;
477 || ! (*insn_data[icode].operand[0].predicate) (target, wmode))
478 temp = gen_reg_rtx (wmode);
486 /* In case the insn wants input operands in modes different from
487 those of the actual operands, convert the operands. It would
488 seem that we don't need to convert CONST_INTs, but we do, so
489 that they're properly zero-extended, sign-extended or truncated
492 if (GET_MODE (op0) != xmode0 && xmode0 != VOIDmode)
493 xop0 = convert_modes (xmode0,
494 GET_MODE (op0) != VOIDmode
500 if (GET_MODE (op1) != xmode1 && xmode1 != VOIDmode)
501 xop1 = convert_modes (xmode1,
502 GET_MODE (op1) != VOIDmode
508 if (GET_MODE (wide_op) != wxmode && wxmode != VOIDmode)
509 wxop = convert_modes (wxmode,
510 GET_MODE (wide_op) != VOIDmode
515 /* Now, if insn's predicates don't allow our operands, put them into
518 if (! (*insn_data[icode].operand[1].predicate) (xop0, xmode0)
519 && xmode0 != VOIDmode)
520 xop0 = copy_to_mode_reg (xmode0, xop0);
524 if (! (*insn_data[icode].operand[2].predicate) (xop1, xmode1)
525 && xmode1 != VOIDmode)
526 xop1 = copy_to_mode_reg (xmode1, xop1);
530 if (! (*insn_data[icode].operand[3].predicate) (wxop, wxmode)
531 && wxmode != VOIDmode)
532 wxop = copy_to_mode_reg (wxmode, wxop);
534 pat = GEN_FCN (icode) (temp, xop0, xop1, wxop);
537 pat = GEN_FCN (icode) (temp, xop0, xop1);
543 if (! (*insn_data[icode].operand[2].predicate) (wxop, wxmode)
544 && wxmode != VOIDmode)
545 wxop = copy_to_mode_reg (wxmode, wxop);
547 pat = GEN_FCN (icode) (temp, xop0, wxop);
550 pat = GEN_FCN (icode) (temp, xop0);
557 /* Generate code to perform an operation specified by TERNARY_OPTAB
558 on operands OP0, OP1 and OP2, with result having machine-mode MODE.
560 UNSIGNEDP is for the case where we have to widen the operands
561 to perform the operation. It says to use zero-extension.
563 If TARGET is nonzero, the value
564 is generated there, if it is convenient to do so.
565 In all cases an rtx is returned for the locus of the value;
566 this may or may not be TARGET. */
569 expand_ternary_op (enum machine_mode mode, optab ternary_optab, rtx op0,
570 rtx op1, rtx op2, rtx target, int unsignedp)
572 int icode = (int) ternary_optab->handlers[(int) mode].insn_code;
573 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
574 enum machine_mode mode1 = insn_data[icode].operand[2].mode;
575 enum machine_mode mode2 = insn_data[icode].operand[3].mode;
578 rtx xop0 = op0, xop1 = op1, xop2 = op2;
580 gcc_assert (ternary_optab->handlers[(int) mode].insn_code
581 != CODE_FOR_nothing);
583 if (!target || !insn_data[icode].operand[0].predicate (target, mode))
584 temp = gen_reg_rtx (mode);
588 /* In case the insn wants input operands in modes different from
589 those of the actual operands, convert the operands. It would
590 seem that we don't need to convert CONST_INTs, but we do, so
591 that they're properly zero-extended, sign-extended or truncated
594 if (GET_MODE (op0) != mode0 && mode0 != VOIDmode)
595 xop0 = convert_modes (mode0,
596 GET_MODE (op0) != VOIDmode
601 if (GET_MODE (op1) != mode1 && mode1 != VOIDmode)
602 xop1 = convert_modes (mode1,
603 GET_MODE (op1) != VOIDmode
608 if (GET_MODE (op2) != mode2 && mode2 != VOIDmode)
609 xop2 = convert_modes (mode2,
610 GET_MODE (op2) != VOIDmode
615 /* Now, if insn's predicates don't allow our operands, put them into
618 if (!insn_data[icode].operand[1].predicate (xop0, mode0)
619 && mode0 != VOIDmode)
620 xop0 = copy_to_mode_reg (mode0, xop0);
622 if (!insn_data[icode].operand[2].predicate (xop1, mode1)
623 && mode1 != VOIDmode)
624 xop1 = copy_to_mode_reg (mode1, xop1);
626 if (!insn_data[icode].operand[3].predicate (xop2, mode2)
627 && mode2 != VOIDmode)
628 xop2 = copy_to_mode_reg (mode2, xop2);
630 pat = GEN_FCN (icode) (temp, xop0, xop1, xop2);
637 /* Like expand_binop, but return a constant rtx if the result can be
638 calculated at compile time. The arguments and return value are
639 otherwise the same as for expand_binop. */
642 simplify_expand_binop (enum machine_mode mode, optab binoptab,
643 rtx op0, rtx op1, rtx target, int unsignedp,
644 enum optab_methods methods)
646 if (CONSTANT_P (op0) && CONSTANT_P (op1))
648 rtx x = simplify_binary_operation (binoptab->code, mode, op0, op1);
654 return expand_binop (mode, binoptab, op0, op1, target, unsignedp, methods);
657 /* Like simplify_expand_binop, but always put the result in TARGET.
658 Return true if the expansion succeeded. */
661 force_expand_binop (enum machine_mode mode, optab binoptab,
662 rtx op0, rtx op1, rtx target, int unsignedp,
663 enum optab_methods methods)
665 rtx x = simplify_expand_binop (mode, binoptab, op0, op1,
666 target, unsignedp, methods);
670 emit_move_insn (target, x);
674 /* Generate insns for VEC_LSHIFT_EXPR, VEC_RSHIFT_EXPR. */
677 expand_vec_shift_expr (tree vec_shift_expr, rtx target)
679 enum insn_code icode;
680 rtx rtx_op1, rtx_op2;
681 enum machine_mode mode1;
682 enum machine_mode mode2;
683 enum machine_mode mode = TYPE_MODE (TREE_TYPE (vec_shift_expr));
684 tree vec_oprnd = TREE_OPERAND (vec_shift_expr, 0);
685 tree shift_oprnd = TREE_OPERAND (vec_shift_expr, 1);
689 switch (TREE_CODE (vec_shift_expr))
691 case VEC_RSHIFT_EXPR:
692 shift_optab = vec_shr_optab;
694 case VEC_LSHIFT_EXPR:
695 shift_optab = vec_shl_optab;
701 icode = (int) shift_optab->handlers[(int) mode].insn_code;
702 gcc_assert (icode != CODE_FOR_nothing);
704 mode1 = insn_data[icode].operand[1].mode;
705 mode2 = insn_data[icode].operand[2].mode;
707 rtx_op1 = expand_normal (vec_oprnd);
708 if (!(*insn_data[icode].operand[1].predicate) (rtx_op1, mode1)
709 && mode1 != VOIDmode)
710 rtx_op1 = force_reg (mode1, rtx_op1);
712 rtx_op2 = expand_normal (shift_oprnd);
713 if (!(*insn_data[icode].operand[2].predicate) (rtx_op2, mode2)
714 && mode2 != VOIDmode)
715 rtx_op2 = force_reg (mode2, rtx_op2);
718 || ! (*insn_data[icode].operand[0].predicate) (target, mode))
719 target = gen_reg_rtx (mode);
721 /* Emit instruction */
722 pat = GEN_FCN (icode) (target, rtx_op1, rtx_op2);
729 /* This subroutine of expand_doubleword_shift handles the cases in which
730 the effective shift value is >= BITS_PER_WORD. The arguments and return
731 value are the same as for the parent routine, except that SUPERWORD_OP1
732 is the shift count to use when shifting OUTOF_INPUT into INTO_TARGET.
733 INTO_TARGET may be null if the caller has decided to calculate it. */
736 expand_superword_shift (optab binoptab, rtx outof_input, rtx superword_op1,
737 rtx outof_target, rtx into_target,
738 int unsignedp, enum optab_methods methods)
740 if (into_target != 0)
741 if (!force_expand_binop (word_mode, binoptab, outof_input, superword_op1,
742 into_target, unsignedp, methods))
745 if (outof_target != 0)
747 /* For a signed right shift, we must fill OUTOF_TARGET with copies
748 of the sign bit, otherwise we must fill it with zeros. */
749 if (binoptab != ashr_optab)
750 emit_move_insn (outof_target, CONST0_RTX (word_mode));
752 if (!force_expand_binop (word_mode, binoptab,
753 outof_input, GEN_INT (BITS_PER_WORD - 1),
754 outof_target, unsignedp, methods))
760 /* This subroutine of expand_doubleword_shift handles the cases in which
761 the effective shift value is < BITS_PER_WORD. The arguments and return
762 value are the same as for the parent routine. */
765 expand_subword_shift (enum machine_mode op1_mode, optab binoptab,
766 rtx outof_input, rtx into_input, rtx op1,
767 rtx outof_target, rtx into_target,
768 int unsignedp, enum optab_methods methods,
769 unsigned HOST_WIDE_INT shift_mask)
771 optab reverse_unsigned_shift, unsigned_shift;
774 reverse_unsigned_shift = (binoptab == ashl_optab ? lshr_optab : ashl_optab);
775 unsigned_shift = (binoptab == ashl_optab ? ashl_optab : lshr_optab);
777 /* The low OP1 bits of INTO_TARGET come from the high bits of OUTOF_INPUT.
778 We therefore need to shift OUTOF_INPUT by (BITS_PER_WORD - OP1) bits in
779 the opposite direction to BINOPTAB. */
780 if (CONSTANT_P (op1) || shift_mask >= BITS_PER_WORD)
782 carries = outof_input;
783 tmp = immed_double_const (BITS_PER_WORD, 0, op1_mode);
784 tmp = simplify_expand_binop (op1_mode, sub_optab, tmp, op1,
789 /* We must avoid shifting by BITS_PER_WORD bits since that is either
790 the same as a zero shift (if shift_mask == BITS_PER_WORD - 1) or
791 has unknown behavior. Do a single shift first, then shift by the
792 remainder. It's OK to use ~OP1 as the remainder if shift counts
793 are truncated to the mode size. */
794 carries = expand_binop (word_mode, reverse_unsigned_shift,
795 outof_input, const1_rtx, 0, unsignedp, methods);
796 if (shift_mask == BITS_PER_WORD - 1)
798 tmp = immed_double_const (-1, -1, op1_mode);
799 tmp = simplify_expand_binop (op1_mode, xor_optab, op1, tmp,
804 tmp = immed_double_const (BITS_PER_WORD - 1, 0, op1_mode);
805 tmp = simplify_expand_binop (op1_mode, sub_optab, tmp, op1,
809 if (tmp == 0 || carries == 0)
811 carries = expand_binop (word_mode, reverse_unsigned_shift,
812 carries, tmp, 0, unsignedp, methods);
816 /* Shift INTO_INPUT logically by OP1. This is the last use of INTO_INPUT
817 so the result can go directly into INTO_TARGET if convenient. */
818 tmp = expand_binop (word_mode, unsigned_shift, into_input, op1,
819 into_target, unsignedp, methods);
823 /* Now OR in the bits carried over from OUTOF_INPUT. */
824 if (!force_expand_binop (word_mode, ior_optab, tmp, carries,
825 into_target, unsignedp, methods))
828 /* Use a standard word_mode shift for the out-of half. */
829 if (outof_target != 0)
830 if (!force_expand_binop (word_mode, binoptab, outof_input, op1,
831 outof_target, unsignedp, methods))
838 #ifdef HAVE_conditional_move
839 /* Try implementing expand_doubleword_shift using conditional moves.
840 The shift is by < BITS_PER_WORD if (CMP_CODE CMP1 CMP2) is true,
841 otherwise it is by >= BITS_PER_WORD. SUBWORD_OP1 and SUPERWORD_OP1
842 are the shift counts to use in the former and latter case. All other
843 arguments are the same as the parent routine. */
846 expand_doubleword_shift_condmove (enum machine_mode op1_mode, optab binoptab,
847 enum rtx_code cmp_code, rtx cmp1, rtx cmp2,
848 rtx outof_input, rtx into_input,
849 rtx subword_op1, rtx superword_op1,
850 rtx outof_target, rtx into_target,
851 int unsignedp, enum optab_methods methods,
852 unsigned HOST_WIDE_INT shift_mask)
854 rtx outof_superword, into_superword;
856 /* Put the superword version of the output into OUTOF_SUPERWORD and
858 outof_superword = outof_target != 0 ? gen_reg_rtx (word_mode) : 0;
859 if (outof_target != 0 && subword_op1 == superword_op1)
861 /* The value INTO_TARGET >> SUBWORD_OP1, which we later store in
862 OUTOF_TARGET, is the same as the value of INTO_SUPERWORD. */
863 into_superword = outof_target;
864 if (!expand_superword_shift (binoptab, outof_input, superword_op1,
865 outof_superword, 0, unsignedp, methods))
870 into_superword = gen_reg_rtx (word_mode);
871 if (!expand_superword_shift (binoptab, outof_input, superword_op1,
872 outof_superword, into_superword,
877 /* Put the subword version directly in OUTOF_TARGET and INTO_TARGET. */
878 if (!expand_subword_shift (op1_mode, binoptab,
879 outof_input, into_input, subword_op1,
880 outof_target, into_target,
881 unsignedp, methods, shift_mask))
884 /* Select between them. Do the INTO half first because INTO_SUPERWORD
885 might be the current value of OUTOF_TARGET. */
886 if (!emit_conditional_move (into_target, cmp_code, cmp1, cmp2, op1_mode,
887 into_target, into_superword, word_mode, false))
890 if (outof_target != 0)
891 if (!emit_conditional_move (outof_target, cmp_code, cmp1, cmp2, op1_mode,
892 outof_target, outof_superword,
900 /* Expand a doubleword shift (ashl, ashr or lshr) using word-mode shifts.
901 OUTOF_INPUT and INTO_INPUT are the two word-sized halves of the first
902 input operand; the shift moves bits in the direction OUTOF_INPUT->
903 INTO_TARGET. OUTOF_TARGET and INTO_TARGET are the equivalent words
904 of the target. OP1 is the shift count and OP1_MODE is its mode.
905 If OP1 is constant, it will have been truncated as appropriate
906 and is known to be nonzero.
908 If SHIFT_MASK is zero, the result of word shifts is undefined when the
909 shift count is outside the range [0, BITS_PER_WORD). This routine must
910 avoid generating such shifts for OP1s in the range [0, BITS_PER_WORD * 2).
912 If SHIFT_MASK is nonzero, all word-mode shift counts are effectively
913 masked by it and shifts in the range [BITS_PER_WORD, SHIFT_MASK) will
914 fill with zeros or sign bits as appropriate.
916 If SHIFT_MASK is BITS_PER_WORD - 1, this routine will synthesize
917 a doubleword shift whose equivalent mask is BITS_PER_WORD * 2 - 1.
918 Doing this preserves semantics required by SHIFT_COUNT_TRUNCATED.
919 In all other cases, shifts by values outside [0, BITS_PER_UNIT * 2)
922 BINOPTAB, UNSIGNEDP and METHODS are as for expand_binop. This function
923 may not use INTO_INPUT after modifying INTO_TARGET, and similarly for
924 OUTOF_INPUT and OUTOF_TARGET. OUTOF_TARGET can be null if the parent
925 function wants to calculate it itself.
927 Return true if the shift could be successfully synthesized. */
930 expand_doubleword_shift (enum machine_mode op1_mode, optab binoptab,
931 rtx outof_input, rtx into_input, rtx op1,
932 rtx outof_target, rtx into_target,
933 int unsignedp, enum optab_methods methods,
934 unsigned HOST_WIDE_INT shift_mask)
936 rtx superword_op1, tmp, cmp1, cmp2;
937 rtx subword_label, done_label;
938 enum rtx_code cmp_code;
940 /* See if word-mode shifts by BITS_PER_WORD...BITS_PER_WORD * 2 - 1 will
941 fill the result with sign or zero bits as appropriate. If so, the value
942 of OUTOF_TARGET will always be (SHIFT OUTOF_INPUT OP1). Recursively call
943 this routine to calculate INTO_TARGET (which depends on both OUTOF_INPUT
944 and INTO_INPUT), then emit code to set up OUTOF_TARGET.
946 This isn't worthwhile for constant shifts since the optimizers will
947 cope better with in-range shift counts. */
948 if (shift_mask >= BITS_PER_WORD
950 && !CONSTANT_P (op1))
952 if (!expand_doubleword_shift (op1_mode, binoptab,
953 outof_input, into_input, op1,
955 unsignedp, methods, shift_mask))
957 if (!force_expand_binop (word_mode, binoptab, outof_input, op1,
958 outof_target, unsignedp, methods))
963 /* Set CMP_CODE, CMP1 and CMP2 so that the rtx (CMP_CODE CMP1 CMP2)
964 is true when the effective shift value is less than BITS_PER_WORD.
965 Set SUPERWORD_OP1 to the shift count that should be used to shift
966 OUTOF_INPUT into INTO_TARGET when the condition is false. */
967 tmp = immed_double_const (BITS_PER_WORD, 0, op1_mode);
968 if (!CONSTANT_P (op1) && shift_mask == BITS_PER_WORD - 1)
970 /* Set CMP1 to OP1 & BITS_PER_WORD. The result is zero iff OP1
971 is a subword shift count. */
972 cmp1 = simplify_expand_binop (op1_mode, and_optab, op1, tmp,
974 cmp2 = CONST0_RTX (op1_mode);
980 /* Set CMP1 to OP1 - BITS_PER_WORD. */
981 cmp1 = simplify_expand_binop (op1_mode, sub_optab, op1, tmp,
983 cmp2 = CONST0_RTX (op1_mode);
985 superword_op1 = cmp1;
990 /* If we can compute the condition at compile time, pick the
991 appropriate subroutine. */
992 tmp = simplify_relational_operation (cmp_code, SImode, op1_mode, cmp1, cmp2);
993 if (tmp != 0 && GET_CODE (tmp) == CONST_INT)
995 if (tmp == const0_rtx)
996 return expand_superword_shift (binoptab, outof_input, superword_op1,
997 outof_target, into_target,
1000 return expand_subword_shift (op1_mode, binoptab,
1001 outof_input, into_input, op1,
1002 outof_target, into_target,
1003 unsignedp, methods, shift_mask);
1006 #ifdef HAVE_conditional_move
1007 /* Try using conditional moves to generate straight-line code. */
1009 rtx start = get_last_insn ();
1010 if (expand_doubleword_shift_condmove (op1_mode, binoptab,
1011 cmp_code, cmp1, cmp2,
1012 outof_input, into_input,
1014 outof_target, into_target,
1015 unsignedp, methods, shift_mask))
1017 delete_insns_since (start);
1021 /* As a last resort, use branches to select the correct alternative. */
1022 subword_label = gen_label_rtx ();
1023 done_label = gen_label_rtx ();
1026 do_compare_rtx_and_jump (cmp1, cmp2, cmp_code, false, op1_mode,
1027 0, 0, subword_label);
1030 if (!expand_superword_shift (binoptab, outof_input, superword_op1,
1031 outof_target, into_target,
1032 unsignedp, methods))
1035 emit_jump_insn (gen_jump (done_label));
1037 emit_label (subword_label);
1039 if (!expand_subword_shift (op1_mode, binoptab,
1040 outof_input, into_input, op1,
1041 outof_target, into_target,
1042 unsignedp, methods, shift_mask))
1045 emit_label (done_label);
1049 /* Subroutine of expand_binop. Perform a double word multiplication of
1050 operands OP0 and OP1 both of mode MODE, which is exactly twice as wide
1051 as the target's word_mode. This function return NULL_RTX if anything
1052 goes wrong, in which case it may have already emitted instructions
1053 which need to be deleted.
1055 If we want to multiply two two-word values and have normal and widening
1056 multiplies of single-word values, we can do this with three smaller
1057 multiplications. Note that we do not make a REG_NO_CONFLICT block here
1058 because we are not operating on one word at a time.
1060 The multiplication proceeds as follows:
1061 _______________________
1062 [__op0_high_|__op0_low__]
1063 _______________________
1064 * [__op1_high_|__op1_low__]
1065 _______________________________________________
1066 _______________________
1067 (1) [__op0_low__*__op1_low__]
1068 _______________________
1069 (2a) [__op0_low__*__op1_high_]
1070 _______________________
1071 (2b) [__op0_high_*__op1_low__]
1072 _______________________
1073 (3) [__op0_high_*__op1_high_]
1076 This gives a 4-word result. Since we are only interested in the
1077 lower 2 words, partial result (3) and the upper words of (2a) and
1078 (2b) don't need to be calculated. Hence (2a) and (2b) can be
1079 calculated using non-widening multiplication.
1081 (1), however, needs to be calculated with an unsigned widening
1082 multiplication. If this operation is not directly supported we
1083 try using a signed widening multiplication and adjust the result.
1084 This adjustment works as follows:
1086 If both operands are positive then no adjustment is needed.
1088 If the operands have different signs, for example op0_low < 0 and
1089 op1_low >= 0, the instruction treats the most significant bit of
1090 op0_low as a sign bit instead of a bit with significance
1091 2**(BITS_PER_WORD-1), i.e. the instruction multiplies op1_low
1092 with 2**BITS_PER_WORD - op0_low, and two's complements the
1093 result. Conclusion: We need to add op1_low * 2**BITS_PER_WORD to
1096 Similarly, if both operands are negative, we need to add
1097 (op0_low + op1_low) * 2**BITS_PER_WORD.
1099 We use a trick to adjust quickly. We logically shift op0_low right
1100 (op1_low) BITS_PER_WORD-1 steps to get 0 or 1, and add this to
1101 op0_high (op1_high) before it is used to calculate 2b (2a). If no
1102 logical shift exists, we do an arithmetic right shift and subtract
1106 expand_doubleword_mult (enum machine_mode mode, rtx op0, rtx op1, rtx target,
1107 bool umulp, enum optab_methods methods)
1109 int low = (WORDS_BIG_ENDIAN ? 1 : 0);
1110 int high = (WORDS_BIG_ENDIAN ? 0 : 1);
1111 rtx wordm1 = umulp ? NULL_RTX : GEN_INT (BITS_PER_WORD - 1);
1112 rtx product, adjust, product_high, temp;
1114 rtx op0_high = operand_subword_force (op0, high, mode);
1115 rtx op0_low = operand_subword_force (op0, low, mode);
1116 rtx op1_high = operand_subword_force (op1, high, mode);
1117 rtx op1_low = operand_subword_force (op1, low, mode);
1119 /* If we're using an unsigned multiply to directly compute the product
1120 of the low-order words of the operands and perform any required
1121 adjustments of the operands, we begin by trying two more multiplications
1122 and then computing the appropriate sum.
1124 We have checked above that the required addition is provided.
1125 Full-word addition will normally always succeed, especially if
1126 it is provided at all, so we don't worry about its failure. The
1127 multiplication may well fail, however, so we do handle that. */
1131 /* ??? This could be done with emit_store_flag where available. */
1132 temp = expand_binop (word_mode, lshr_optab, op0_low, wordm1,
1133 NULL_RTX, 1, methods);
1135 op0_high = expand_binop (word_mode, add_optab, op0_high, temp,
1136 NULL_RTX, 0, OPTAB_DIRECT);
1139 temp = expand_binop (word_mode, ashr_optab, op0_low, wordm1,
1140 NULL_RTX, 0, methods);
1143 op0_high = expand_binop (word_mode, sub_optab, op0_high, temp,
1144 NULL_RTX, 0, OPTAB_DIRECT);
1151 adjust = expand_binop (word_mode, smul_optab, op0_high, op1_low,
1152 NULL_RTX, 0, OPTAB_DIRECT);
1156 /* OP0_HIGH should now be dead. */
1160 /* ??? This could be done with emit_store_flag where available. */
1161 temp = expand_binop (word_mode, lshr_optab, op1_low, wordm1,
1162 NULL_RTX, 1, methods);
1164 op1_high = expand_binop (word_mode, add_optab, op1_high, temp,
1165 NULL_RTX, 0, OPTAB_DIRECT);
1168 temp = expand_binop (word_mode, ashr_optab, op1_low, wordm1,
1169 NULL_RTX, 0, methods);
1172 op1_high = expand_binop (word_mode, sub_optab, op1_high, temp,
1173 NULL_RTX, 0, OPTAB_DIRECT);
1180 temp = expand_binop (word_mode, smul_optab, op1_high, op0_low,
1181 NULL_RTX, 0, OPTAB_DIRECT);
1185 /* OP1_HIGH should now be dead. */
1187 adjust = expand_binop (word_mode, add_optab, adjust, temp,
1188 adjust, 0, OPTAB_DIRECT);
1190 if (target && !REG_P (target))
1194 product = expand_binop (mode, umul_widen_optab, op0_low, op1_low,
1195 target, 1, OPTAB_DIRECT);
1197 product = expand_binop (mode, smul_widen_optab, op0_low, op1_low,
1198 target, 1, OPTAB_DIRECT);
1203 product_high = operand_subword (product, high, 1, mode);
1204 adjust = expand_binop (word_mode, add_optab, product_high, adjust,
1205 REG_P (product_high) ? product_high : adjust,
1207 emit_move_insn (product_high, adjust);
1211 /* Wrapper around expand_binop which takes an rtx code to specify
1212 the operation to perform, not an optab pointer. All other
1213 arguments are the same. */
1215 expand_simple_binop (enum machine_mode mode, enum rtx_code code, rtx op0,
1216 rtx op1, rtx target, int unsignedp,
1217 enum optab_methods methods)
1219 optab binop = code_to_optab[(int) code];
1222 return expand_binop (mode, binop, op0, op1, target, unsignedp, methods);
1225 /* Return whether OP0 and OP1 should be swapped when expanding a commutative
1226 binop. Order them according to commutative_operand_precedence and, if
1227 possible, try to put TARGET or a pseudo first. */
1229 swap_commutative_operands_with_target (rtx target, rtx op0, rtx op1)
1231 int op0_prec = commutative_operand_precedence (op0);
1232 int op1_prec = commutative_operand_precedence (op1);
1234 if (op0_prec < op1_prec)
1237 if (op0_prec > op1_prec)
1240 /* With equal precedence, both orders are ok, but it is better if the
1241 first operand is TARGET, or if both TARGET and OP0 are pseudos. */
1242 if (target == 0 || REG_P (target))
1243 return (REG_P (op1) && !REG_P (op0)) || target == op1;
1245 return rtx_equal_p (op1, target);
1249 /* Helper function for expand_binop: handle the case where there
1250 is an insn that directly implements the indicated operation.
1251 Returns null if this is not possible. */
1253 expand_binop_directly (enum machine_mode mode, optab binoptab,
1255 rtx target, int unsignedp, enum optab_methods methods,
1256 int commutative_op, rtx last)
1258 int icode = (int) binoptab->handlers[(int) mode].insn_code;
1259 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
1260 enum machine_mode mode1 = insn_data[icode].operand[2].mode;
1261 enum machine_mode tmp_mode;
1263 rtx xop0 = op0, xop1 = op1;
1269 temp = gen_reg_rtx (mode);
1271 /* If it is a commutative operator and the modes would match
1272 if we would swap the operands, we can save the conversions. */
1275 if (GET_MODE (op0) != mode0 && GET_MODE (op1) != mode1
1276 && GET_MODE (op0) == mode1 && GET_MODE (op1) == mode0)
1280 tmp = op0; op0 = op1; op1 = tmp;
1281 tmp = xop0; xop0 = xop1; xop1 = tmp;
1285 /* In case the insn wants input operands in modes different from
1286 those of the actual operands, convert the operands. It would
1287 seem that we don't need to convert CONST_INTs, but we do, so
1288 that they're properly zero-extended, sign-extended or truncated
1291 if (GET_MODE (op0) != mode0 && mode0 != VOIDmode)
1292 xop0 = convert_modes (mode0,
1293 GET_MODE (op0) != VOIDmode
1298 if (GET_MODE (op1) != mode1 && mode1 != VOIDmode)
1299 xop1 = convert_modes (mode1,
1300 GET_MODE (op1) != VOIDmode
1305 /* Now, if insn's predicates don't allow our operands, put them into
1308 if (!insn_data[icode].operand[1].predicate (xop0, mode0)
1309 && mode0 != VOIDmode)
1310 xop0 = copy_to_mode_reg (mode0, xop0);
1312 if (!insn_data[icode].operand[2].predicate (xop1, mode1)
1313 && mode1 != VOIDmode)
1314 xop1 = copy_to_mode_reg (mode1, xop1);
1316 if (binoptab == vec_pack_trunc_optab
1317 || binoptab == vec_pack_usat_optab
1318 || binoptab == vec_pack_ssat_optab
1319 || binoptab == vec_pack_ufix_trunc_optab
1320 || binoptab == vec_pack_sfix_trunc_optab)
1322 /* The mode of the result is different then the mode of the
1324 tmp_mode = insn_data[icode].operand[0].mode;
1325 if (GET_MODE_NUNITS (tmp_mode) != 2 * GET_MODE_NUNITS (mode))
1331 if (!insn_data[icode].operand[0].predicate (temp, tmp_mode))
1332 temp = gen_reg_rtx (tmp_mode);
1334 pat = GEN_FCN (icode) (temp, xop0, xop1);
1337 /* If PAT is composed of more than one insn, try to add an appropriate
1338 REG_EQUAL note to it. If we can't because TEMP conflicts with an
1339 operand, call expand_binop again, this time without a target. */
1340 if (INSN_P (pat) && NEXT_INSN (pat) != NULL_RTX
1341 && ! add_equal_note (pat, temp, binoptab->code, xop0, xop1))
1343 delete_insns_since (last);
1344 return expand_binop (mode, binoptab, op0, op1, NULL_RTX,
1345 unsignedp, methods);
1352 delete_insns_since (last);
1356 /* Generate code to perform an operation specified by BINOPTAB
1357 on operands OP0 and OP1, with result having machine-mode MODE.
1359 UNSIGNEDP is for the case where we have to widen the operands
1360 to perform the operation. It says to use zero-extension.
1362 If TARGET is nonzero, the value
1363 is generated there, if it is convenient to do so.
1364 In all cases an rtx is returned for the locus of the value;
1365 this may or may not be TARGET. */
1368 expand_binop (enum machine_mode mode, optab binoptab, rtx op0, rtx op1,
1369 rtx target, int unsignedp, enum optab_methods methods)
1371 enum optab_methods next_methods
1372 = (methods == OPTAB_LIB || methods == OPTAB_LIB_WIDEN
1373 ? OPTAB_WIDEN : methods);
1374 enum mode_class class;
1375 enum machine_mode wider_mode;
1377 int commutative_op = 0;
1378 int shift_op = (binoptab->code == ASHIFT
1379 || binoptab->code == ASHIFTRT
1380 || binoptab->code == LSHIFTRT
1381 || binoptab->code == ROTATE
1382 || binoptab->code == ROTATERT);
1383 rtx entry_last = get_last_insn ();
1386 class = GET_MODE_CLASS (mode);
1388 /* If subtracting an integer constant, convert this into an addition of
1389 the negated constant. */
1391 if (binoptab == sub_optab && GET_CODE (op1) == CONST_INT)
1393 op1 = negate_rtx (mode, op1);
1394 binoptab = add_optab;
1397 /* If we are inside an appropriately-short loop and we are optimizing,
1398 force expensive constants into a register. */
1399 if (CONSTANT_P (op0) && optimize
1400 && rtx_cost (op0, binoptab->code) > COSTS_N_INSNS (1))
1402 if (GET_MODE (op0) != VOIDmode)
1403 op0 = convert_modes (mode, VOIDmode, op0, unsignedp);
1404 op0 = force_reg (mode, op0);
1407 if (CONSTANT_P (op1) && optimize
1408 && ! shift_op && rtx_cost (op1, binoptab->code) > COSTS_N_INSNS (1))
1410 if (GET_MODE (op1) != VOIDmode)
1411 op1 = convert_modes (mode, VOIDmode, op1, unsignedp);
1412 op1 = force_reg (mode, op1);
1415 /* Record where to delete back to if we backtrack. */
1416 last = get_last_insn ();
1418 /* If operation is commutative,
1419 try to make the first operand a register.
1420 Even better, try to make it the same as the target.
1421 Also try to make the last operand a constant. */
1422 if (GET_RTX_CLASS (binoptab->code) == RTX_COMM_ARITH
1423 || binoptab == smul_widen_optab
1424 || binoptab == umul_widen_optab
1425 || binoptab == smul_highpart_optab
1426 || binoptab == umul_highpart_optab)
1430 if (swap_commutative_operands_with_target (target, op0, op1))
1438 /* If we can do it with a three-operand insn, do so. */
1440 if (methods != OPTAB_MUST_WIDEN
1441 && binoptab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
1443 temp = expand_binop_directly (mode, binoptab, op0, op1, target,
1444 unsignedp, methods, commutative_op, last);
1449 /* If we were trying to rotate, and that didn't work, try rotating
1450 the other direction before falling back to shifts and bitwise-or. */
1451 if (((binoptab == rotl_optab
1452 && rotr_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
1453 || (binoptab == rotr_optab
1454 && rotl_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing))
1455 && class == MODE_INT)
1457 optab otheroptab = (binoptab == rotl_optab ? rotr_optab : rotl_optab);
1459 unsigned int bits = GET_MODE_BITSIZE (mode);
1461 if (GET_CODE (op1) == CONST_INT)
1462 newop1 = GEN_INT (bits - INTVAL (op1));
1463 else if (targetm.shift_truncation_mask (mode) == bits - 1)
1464 newop1 = negate_rtx (mode, op1);
1466 newop1 = expand_binop (mode, sub_optab,
1467 GEN_INT (bits), op1,
1468 NULL_RTX, unsignedp, OPTAB_DIRECT);
1470 temp = expand_binop_directly (mode, otheroptab, op0, newop1,
1471 target, unsignedp, methods,
1472 commutative_op, last);
1477 /* If this is a multiply, see if we can do a widening operation that
1478 takes operands of this mode and makes a wider mode. */
1480 if (binoptab == smul_optab
1481 && GET_MODE_WIDER_MODE (mode) != VOIDmode
1482 && (((unsignedp ? umul_widen_optab : smul_widen_optab)
1483 ->handlers[(int) GET_MODE_WIDER_MODE (mode)].insn_code)
1484 != CODE_FOR_nothing))
1486 temp = expand_binop (GET_MODE_WIDER_MODE (mode),
1487 unsignedp ? umul_widen_optab : smul_widen_optab,
1488 op0, op1, NULL_RTX, unsignedp, OPTAB_DIRECT);
1492 if (GET_MODE_CLASS (mode) == MODE_INT
1493 && TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode),
1494 GET_MODE_BITSIZE (GET_MODE (temp))))
1495 return gen_lowpart (mode, temp);
1497 return convert_to_mode (mode, temp, unsignedp);
1501 /* Look for a wider mode of the same class for which we think we
1502 can open-code the operation. Check for a widening multiply at the
1503 wider mode as well. */
1505 if (CLASS_HAS_WIDER_MODES_P (class)
1506 && methods != OPTAB_DIRECT && methods != OPTAB_LIB)
1507 for (wider_mode = GET_MODE_WIDER_MODE (mode);
1508 wider_mode != VOIDmode;
1509 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
1511 if (binoptab->handlers[(int) wider_mode].insn_code != CODE_FOR_nothing
1512 || (binoptab == smul_optab
1513 && GET_MODE_WIDER_MODE (wider_mode) != VOIDmode
1514 && (((unsignedp ? umul_widen_optab : smul_widen_optab)
1515 ->handlers[(int) GET_MODE_WIDER_MODE (wider_mode)].insn_code)
1516 != CODE_FOR_nothing)))
1518 rtx xop0 = op0, xop1 = op1;
1521 /* For certain integer operations, we need not actually extend
1522 the narrow operands, as long as we will truncate
1523 the results to the same narrowness. */
1525 if ((binoptab == ior_optab || binoptab == and_optab
1526 || binoptab == xor_optab
1527 || binoptab == add_optab || binoptab == sub_optab
1528 || binoptab == smul_optab || binoptab == ashl_optab)
1529 && class == MODE_INT)
1532 xop0 = widen_operand (xop0, wider_mode, mode, unsignedp, no_extend);
1534 /* The second operand of a shift must always be extended. */
1535 xop1 = widen_operand (xop1, wider_mode, mode, unsignedp,
1536 no_extend && binoptab != ashl_optab);
1538 temp = expand_binop (wider_mode, binoptab, xop0, xop1, NULL_RTX,
1539 unsignedp, OPTAB_DIRECT);
1542 if (class != MODE_INT
1543 || !TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode),
1544 GET_MODE_BITSIZE (wider_mode)))
1547 target = gen_reg_rtx (mode);
1548 convert_move (target, temp, 0);
1552 return gen_lowpart (mode, temp);
1555 delete_insns_since (last);
1559 /* These can be done a word at a time. */
1560 if ((binoptab == and_optab || binoptab == ior_optab || binoptab == xor_optab)
1561 && class == MODE_INT
1562 && GET_MODE_SIZE (mode) > UNITS_PER_WORD
1563 && binoptab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing)
1569 /* If TARGET is the same as one of the operands, the REG_EQUAL note
1570 won't be accurate, so use a new target. */
1571 if (target == 0 || target == op0 || target == op1)
1572 target = gen_reg_rtx (mode);
1576 /* Do the actual arithmetic. */
1577 for (i = 0; i < GET_MODE_BITSIZE (mode) / BITS_PER_WORD; i++)
1579 rtx target_piece = operand_subword (target, i, 1, mode);
1580 rtx x = expand_binop (word_mode, binoptab,
1581 operand_subword_force (op0, i, mode),
1582 operand_subword_force (op1, i, mode),
1583 target_piece, unsignedp, next_methods);
1588 if (target_piece != x)
1589 emit_move_insn (target_piece, x);
1592 insns = get_insns ();
1595 if (i == GET_MODE_BITSIZE (mode) / BITS_PER_WORD)
1597 if (binoptab->code != UNKNOWN)
1599 = gen_rtx_fmt_ee (binoptab->code, mode,
1600 copy_rtx (op0), copy_rtx (op1));
1604 emit_no_conflict_block (insns, target, op0, op1, equiv_value);
1609 /* Synthesize double word shifts from single word shifts. */
1610 if ((binoptab == lshr_optab || binoptab == ashl_optab
1611 || binoptab == ashr_optab)
1612 && class == MODE_INT
1613 && (GET_CODE (op1) == CONST_INT || !optimize_size)
1614 && GET_MODE_SIZE (mode) == 2 * UNITS_PER_WORD
1615 && binoptab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing
1616 && ashl_optab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing
1617 && lshr_optab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing)
1619 unsigned HOST_WIDE_INT shift_mask, double_shift_mask;
1620 enum machine_mode op1_mode;
1622 double_shift_mask = targetm.shift_truncation_mask (mode);
1623 shift_mask = targetm.shift_truncation_mask (word_mode);
1624 op1_mode = GET_MODE (op1) != VOIDmode ? GET_MODE (op1) : word_mode;
1626 /* Apply the truncation to constant shifts. */
1627 if (double_shift_mask > 0 && GET_CODE (op1) == CONST_INT)
1628 op1 = GEN_INT (INTVAL (op1) & double_shift_mask);
1630 if (op1 == CONST0_RTX (op1_mode))
1633 /* Make sure that this is a combination that expand_doubleword_shift
1634 can handle. See the comments there for details. */
1635 if (double_shift_mask == 0
1636 || (shift_mask == BITS_PER_WORD - 1
1637 && double_shift_mask == BITS_PER_WORD * 2 - 1))
1639 rtx insns, equiv_value;
1640 rtx into_target, outof_target;
1641 rtx into_input, outof_input;
1642 int left_shift, outof_word;
1644 /* If TARGET is the same as one of the operands, the REG_EQUAL note
1645 won't be accurate, so use a new target. */
1646 if (target == 0 || target == op0 || target == op1)
1647 target = gen_reg_rtx (mode);
1651 /* OUTOF_* is the word we are shifting bits away from, and
1652 INTO_* is the word that we are shifting bits towards, thus
1653 they differ depending on the direction of the shift and
1654 WORDS_BIG_ENDIAN. */
1656 left_shift = binoptab == ashl_optab;
1657 outof_word = left_shift ^ ! WORDS_BIG_ENDIAN;
1659 outof_target = operand_subword (target, outof_word, 1, mode);
1660 into_target = operand_subword (target, 1 - outof_word, 1, mode);
1662 outof_input = operand_subword_force (op0, outof_word, mode);
1663 into_input = operand_subword_force (op0, 1 - outof_word, mode);
1665 if (expand_doubleword_shift (op1_mode, binoptab,
1666 outof_input, into_input, op1,
1667 outof_target, into_target,
1668 unsignedp, next_methods, shift_mask))
1670 insns = get_insns ();
1673 equiv_value = gen_rtx_fmt_ee (binoptab->code, mode, op0, op1);
1674 emit_no_conflict_block (insns, target, op0, op1, equiv_value);
1681 /* Synthesize double word rotates from single word shifts. */
1682 if ((binoptab == rotl_optab || binoptab == rotr_optab)
1683 && class == MODE_INT
1684 && GET_CODE (op1) == CONST_INT
1685 && GET_MODE_SIZE (mode) == 2 * UNITS_PER_WORD
1686 && ashl_optab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing
1687 && lshr_optab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing)
1690 rtx into_target, outof_target;
1691 rtx into_input, outof_input;
1693 int shift_count, left_shift, outof_word;
1695 /* If TARGET is the same as one of the operands, the REG_EQUAL note
1696 won't be accurate, so use a new target. Do this also if target is not
1697 a REG, first because having a register instead may open optimization
1698 opportunities, and second because if target and op0 happen to be MEMs
1699 designating the same location, we would risk clobbering it too early
1700 in the code sequence we generate below. */
1701 if (target == 0 || target == op0 || target == op1 || ! REG_P (target))
1702 target = gen_reg_rtx (mode);
1706 shift_count = INTVAL (op1);
1708 /* OUTOF_* is the word we are shifting bits away from, and
1709 INTO_* is the word that we are shifting bits towards, thus
1710 they differ depending on the direction of the shift and
1711 WORDS_BIG_ENDIAN. */
1713 left_shift = (binoptab == rotl_optab);
1714 outof_word = left_shift ^ ! WORDS_BIG_ENDIAN;
1716 outof_target = operand_subword (target, outof_word, 1, mode);
1717 into_target = operand_subword (target, 1 - outof_word, 1, mode);
1719 outof_input = operand_subword_force (op0, outof_word, mode);
1720 into_input = operand_subword_force (op0, 1 - outof_word, mode);
1722 if (shift_count == BITS_PER_WORD)
1724 /* This is just a word swap. */
1725 emit_move_insn (outof_target, into_input);
1726 emit_move_insn (into_target, outof_input);
1731 rtx into_temp1, into_temp2, outof_temp1, outof_temp2;
1732 rtx first_shift_count, second_shift_count;
1733 optab reverse_unsigned_shift, unsigned_shift;
1735 reverse_unsigned_shift = (left_shift ^ (shift_count < BITS_PER_WORD)
1736 ? lshr_optab : ashl_optab);
1738 unsigned_shift = (left_shift ^ (shift_count < BITS_PER_WORD)
1739 ? ashl_optab : lshr_optab);
1741 if (shift_count > BITS_PER_WORD)
1743 first_shift_count = GEN_INT (shift_count - BITS_PER_WORD);
1744 second_shift_count = GEN_INT (2 * BITS_PER_WORD - shift_count);
1748 first_shift_count = GEN_INT (BITS_PER_WORD - shift_count);
1749 second_shift_count = GEN_INT (shift_count);
1752 into_temp1 = expand_binop (word_mode, unsigned_shift,
1753 outof_input, first_shift_count,
1754 NULL_RTX, unsignedp, next_methods);
1755 into_temp2 = expand_binop (word_mode, reverse_unsigned_shift,
1756 into_input, second_shift_count,
1757 NULL_RTX, unsignedp, next_methods);
1759 if (into_temp1 != 0 && into_temp2 != 0)
1760 inter = expand_binop (word_mode, ior_optab, into_temp1, into_temp2,
1761 into_target, unsignedp, next_methods);
1765 if (inter != 0 && inter != into_target)
1766 emit_move_insn (into_target, inter);
1768 outof_temp1 = expand_binop (word_mode, unsigned_shift,
1769 into_input, first_shift_count,
1770 NULL_RTX, unsignedp, next_methods);
1771 outof_temp2 = expand_binop (word_mode, reverse_unsigned_shift,
1772 outof_input, second_shift_count,
1773 NULL_RTX, unsignedp, next_methods);
1775 if (inter != 0 && outof_temp1 != 0 && outof_temp2 != 0)
1776 inter = expand_binop (word_mode, ior_optab,
1777 outof_temp1, outof_temp2,
1778 outof_target, unsignedp, next_methods);
1780 if (inter != 0 && inter != outof_target)
1781 emit_move_insn (outof_target, inter);
1784 insns = get_insns ();
1789 /* One may be tempted to wrap the insns in a REG_NO_CONFLICT
1790 block to help the register allocator a bit. But a multi-word
1791 rotate will need all the input bits when setting the output
1792 bits, so there clearly is a conflict between the input and
1793 output registers. So we can't use a no-conflict block here. */
1799 /* These can be done a word at a time by propagating carries. */
1800 if ((binoptab == add_optab || binoptab == sub_optab)
1801 && class == MODE_INT
1802 && GET_MODE_SIZE (mode) >= 2 * UNITS_PER_WORD
1803 && binoptab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing)
1806 optab otheroptab = binoptab == add_optab ? sub_optab : add_optab;
1807 const unsigned int nwords = GET_MODE_BITSIZE (mode) / BITS_PER_WORD;
1808 rtx carry_in = NULL_RTX, carry_out = NULL_RTX;
1809 rtx xop0, xop1, xtarget;
1811 /* We can handle either a 1 or -1 value for the carry. If STORE_FLAG
1812 value is one of those, use it. Otherwise, use 1 since it is the
1813 one easiest to get. */
1814 #if STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1
1815 int normalizep = STORE_FLAG_VALUE;
1820 /* Prepare the operands. */
1821 xop0 = force_reg (mode, op0);
1822 xop1 = force_reg (mode, op1);
1824 xtarget = gen_reg_rtx (mode);
1826 if (target == 0 || !REG_P (target))
1829 /* Indicate for flow that the entire target reg is being set. */
1831 emit_insn (gen_rtx_CLOBBER (VOIDmode, xtarget));
1833 /* Do the actual arithmetic. */
1834 for (i = 0; i < nwords; i++)
1836 int index = (WORDS_BIG_ENDIAN ? nwords - i - 1 : i);
1837 rtx target_piece = operand_subword (xtarget, index, 1, mode);
1838 rtx op0_piece = operand_subword_force (xop0, index, mode);
1839 rtx op1_piece = operand_subword_force (xop1, index, mode);
1842 /* Main add/subtract of the input operands. */
1843 x = expand_binop (word_mode, binoptab,
1844 op0_piece, op1_piece,
1845 target_piece, unsignedp, next_methods);
1851 /* Store carry from main add/subtract. */
1852 carry_out = gen_reg_rtx (word_mode);
1853 carry_out = emit_store_flag_force (carry_out,
1854 (binoptab == add_optab
1857 word_mode, 1, normalizep);
1864 /* Add/subtract previous carry to main result. */
1865 newx = expand_binop (word_mode,
1866 normalizep == 1 ? binoptab : otheroptab,
1868 NULL_RTX, 1, next_methods);
1872 /* Get out carry from adding/subtracting carry in. */
1873 rtx carry_tmp = gen_reg_rtx (word_mode);
1874 carry_tmp = emit_store_flag_force (carry_tmp,
1875 (binoptab == add_optab
1878 word_mode, 1, normalizep);
1880 /* Logical-ior the two poss. carry together. */
1881 carry_out = expand_binop (word_mode, ior_optab,
1882 carry_out, carry_tmp,
1883 carry_out, 0, next_methods);
1887 emit_move_insn (target_piece, newx);
1891 if (x != target_piece)
1892 emit_move_insn (target_piece, x);
1895 carry_in = carry_out;
1898 if (i == GET_MODE_BITSIZE (mode) / (unsigned) BITS_PER_WORD)
1900 if (mov_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing
1901 || ! rtx_equal_p (target, xtarget))
1903 rtx temp = emit_move_insn (target, xtarget);
1905 set_unique_reg_note (temp,
1907 gen_rtx_fmt_ee (binoptab->code, mode,
1918 delete_insns_since (last);
1921 /* Attempt to synthesize double word multiplies using a sequence of word
1922 mode multiplications. We first attempt to generate a sequence using a
1923 more efficient unsigned widening multiply, and if that fails we then
1924 try using a signed widening multiply. */
1926 if (binoptab == smul_optab
1927 && class == MODE_INT
1928 && GET_MODE_SIZE (mode) == 2 * UNITS_PER_WORD
1929 && smul_optab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing
1930 && add_optab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing)
1932 rtx product = NULL_RTX;
1934 if (umul_widen_optab->handlers[(int) mode].insn_code
1935 != CODE_FOR_nothing)
1937 product = expand_doubleword_mult (mode, op0, op1, target,
1940 delete_insns_since (last);
1943 if (product == NULL_RTX
1944 && smul_widen_optab->handlers[(int) mode].insn_code
1945 != CODE_FOR_nothing)
1947 product = expand_doubleword_mult (mode, op0, op1, target,
1950 delete_insns_since (last);
1953 if (product != NULL_RTX)
1955 if (mov_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
1957 temp = emit_move_insn (target ? target : product, product);
1958 set_unique_reg_note (temp,
1960 gen_rtx_fmt_ee (MULT, mode,
1968 /* It can't be open-coded in this mode.
1969 Use a library call if one is available and caller says that's ok. */
1971 if (binoptab->handlers[(int) mode].libfunc
1972 && (methods == OPTAB_LIB || methods == OPTAB_LIB_WIDEN))
1976 enum machine_mode op1_mode = mode;
1983 op1_mode = targetm.libgcc_shift_count_mode ();
1984 /* Specify unsigned here,
1985 since negative shift counts are meaningless. */
1986 op1x = convert_to_mode (op1_mode, op1, 1);
1989 if (GET_MODE (op0) != VOIDmode
1990 && GET_MODE (op0) != mode)
1991 op0 = convert_to_mode (mode, op0, unsignedp);
1993 /* Pass 1 for NO_QUEUE so we don't lose any increments
1994 if the libcall is cse'd or moved. */
1995 value = emit_library_call_value (binoptab->handlers[(int) mode].libfunc,
1996 NULL_RTX, LCT_CONST, mode, 2,
1997 op0, mode, op1x, op1_mode);
1999 insns = get_insns ();
2002 target = gen_reg_rtx (mode);
2003 emit_libcall_block (insns, target, value,
2004 gen_rtx_fmt_ee (binoptab->code, mode, op0, op1));
2009 delete_insns_since (last);
2011 /* It can't be done in this mode. Can we do it in a wider mode? */
2013 if (! (methods == OPTAB_WIDEN || methods == OPTAB_LIB_WIDEN
2014 || methods == OPTAB_MUST_WIDEN))
2016 /* Caller says, don't even try. */
2017 delete_insns_since (entry_last);
2021 /* Compute the value of METHODS to pass to recursive calls.
2022 Don't allow widening to be tried recursively. */
2024 methods = (methods == OPTAB_LIB_WIDEN ? OPTAB_LIB : OPTAB_DIRECT);
2026 /* Look for a wider mode of the same class for which it appears we can do
2029 if (CLASS_HAS_WIDER_MODES_P (class))
2031 for (wider_mode = GET_MODE_WIDER_MODE (mode);
2032 wider_mode != VOIDmode;
2033 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
2035 if ((binoptab->handlers[(int) wider_mode].insn_code
2036 != CODE_FOR_nothing)
2037 || (methods == OPTAB_LIB
2038 && binoptab->handlers[(int) wider_mode].libfunc))
2040 rtx xop0 = op0, xop1 = op1;
2043 /* For certain integer operations, we need not actually extend
2044 the narrow operands, as long as we will truncate
2045 the results to the same narrowness. */
2047 if ((binoptab == ior_optab || binoptab == and_optab
2048 || binoptab == xor_optab
2049 || binoptab == add_optab || binoptab == sub_optab
2050 || binoptab == smul_optab || binoptab == ashl_optab)
2051 && class == MODE_INT)
2054 xop0 = widen_operand (xop0, wider_mode, mode,
2055 unsignedp, no_extend);
2057 /* The second operand of a shift must always be extended. */
2058 xop1 = widen_operand (xop1, wider_mode, mode, unsignedp,
2059 no_extend && binoptab != ashl_optab);
2061 temp = expand_binop (wider_mode, binoptab, xop0, xop1, NULL_RTX,
2062 unsignedp, methods);
2065 if (class != MODE_INT
2066 || !TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode),
2067 GET_MODE_BITSIZE (wider_mode)))
2070 target = gen_reg_rtx (mode);
2071 convert_move (target, temp, 0);
2075 return gen_lowpart (mode, temp);
2078 delete_insns_since (last);
2083 delete_insns_since (entry_last);
2087 /* Expand a binary operator which has both signed and unsigned forms.
2088 UOPTAB is the optab for unsigned operations, and SOPTAB is for
2091 If we widen unsigned operands, we may use a signed wider operation instead
2092 of an unsigned wider operation, since the result would be the same. */
2095 sign_expand_binop (enum machine_mode mode, optab uoptab, optab soptab,
2096 rtx op0, rtx op1, rtx target, int unsignedp,
2097 enum optab_methods methods)
2100 optab direct_optab = unsignedp ? uoptab : soptab;
2101 struct optab wide_soptab;
2103 /* Do it without widening, if possible. */
2104 temp = expand_binop (mode, direct_optab, op0, op1, target,
2105 unsignedp, OPTAB_DIRECT);
2106 if (temp || methods == OPTAB_DIRECT)
2109 /* Try widening to a signed int. Make a fake signed optab that
2110 hides any signed insn for direct use. */
2111 wide_soptab = *soptab;
2112 wide_soptab.handlers[(int) mode].insn_code = CODE_FOR_nothing;
2113 wide_soptab.handlers[(int) mode].libfunc = 0;
2115 temp = expand_binop (mode, &wide_soptab, op0, op1, target,
2116 unsignedp, OPTAB_WIDEN);
2118 /* For unsigned operands, try widening to an unsigned int. */
2119 if (temp == 0 && unsignedp)
2120 temp = expand_binop (mode, uoptab, op0, op1, target,
2121 unsignedp, OPTAB_WIDEN);
2122 if (temp || methods == OPTAB_WIDEN)
2125 /* Use the right width lib call if that exists. */
2126 temp = expand_binop (mode, direct_optab, op0, op1, target, unsignedp, OPTAB_LIB);
2127 if (temp || methods == OPTAB_LIB)
2130 /* Must widen and use a lib call, use either signed or unsigned. */
2131 temp = expand_binop (mode, &wide_soptab, op0, op1, target,
2132 unsignedp, methods);
2136 return expand_binop (mode, uoptab, op0, op1, target,
2137 unsignedp, methods);
2141 /* Generate code to perform an operation specified by UNOPPTAB
2142 on operand OP0, with two results to TARG0 and TARG1.
2143 We assume that the order of the operands for the instruction
2144 is TARG0, TARG1, OP0.
2146 Either TARG0 or TARG1 may be zero, but what that means is that
2147 the result is not actually wanted. We will generate it into
2148 a dummy pseudo-reg and discard it. They may not both be zero.
2150 Returns 1 if this operation can be performed; 0 if not. */
2153 expand_twoval_unop (optab unoptab, rtx op0, rtx targ0, rtx targ1,
2156 enum machine_mode mode = GET_MODE (targ0 ? targ0 : targ1);
2157 enum mode_class class;
2158 enum machine_mode wider_mode;
2159 rtx entry_last = get_last_insn ();
2162 class = GET_MODE_CLASS (mode);
2165 targ0 = gen_reg_rtx (mode);
2167 targ1 = gen_reg_rtx (mode);
2169 /* Record where to go back to if we fail. */
2170 last = get_last_insn ();
2172 if (unoptab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
2174 int icode = (int) unoptab->handlers[(int) mode].insn_code;
2175 enum machine_mode mode0 = insn_data[icode].operand[2].mode;
2179 if (GET_MODE (xop0) != VOIDmode
2180 && GET_MODE (xop0) != mode0)
2181 xop0 = convert_to_mode (mode0, xop0, unsignedp);
2183 /* Now, if insn doesn't accept these operands, put them into pseudos. */
2184 if (!insn_data[icode].operand[2].predicate (xop0, mode0))
2185 xop0 = copy_to_mode_reg (mode0, xop0);
2187 /* We could handle this, but we should always be called with a pseudo
2188 for our targets and all insns should take them as outputs. */
2189 gcc_assert (insn_data[icode].operand[0].predicate (targ0, mode));
2190 gcc_assert (insn_data[icode].operand[1].predicate (targ1, mode));
2192 pat = GEN_FCN (icode) (targ0, targ1, xop0);
2199 delete_insns_since (last);
2202 /* It can't be done in this mode. Can we do it in a wider mode? */
2204 if (CLASS_HAS_WIDER_MODES_P (class))
2206 for (wider_mode = GET_MODE_WIDER_MODE (mode);
2207 wider_mode != VOIDmode;
2208 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
2210 if (unoptab->handlers[(int) wider_mode].insn_code
2211 != CODE_FOR_nothing)
2213 rtx t0 = gen_reg_rtx (wider_mode);
2214 rtx t1 = gen_reg_rtx (wider_mode);
2215 rtx cop0 = convert_modes (wider_mode, mode, op0, unsignedp);
2217 if (expand_twoval_unop (unoptab, cop0, t0, t1, unsignedp))
2219 convert_move (targ0, t0, unsignedp);
2220 convert_move (targ1, t1, unsignedp);
2224 delete_insns_since (last);
2229 delete_insns_since (entry_last);
2233 /* Generate code to perform an operation specified by BINOPTAB
2234 on operands OP0 and OP1, with two results to TARG1 and TARG2.
2235 We assume that the order of the operands for the instruction
2236 is TARG0, OP0, OP1, TARG1, which would fit a pattern like
2237 [(set TARG0 (operate OP0 OP1)) (set TARG1 (operate ...))].
2239 Either TARG0 or TARG1 may be zero, but what that means is that
2240 the result is not actually wanted. We will generate it into
2241 a dummy pseudo-reg and discard it. They may not both be zero.
2243 Returns 1 if this operation can be performed; 0 if not. */
2246 expand_twoval_binop (optab binoptab, rtx op0, rtx op1, rtx targ0, rtx targ1,
2249 enum machine_mode mode = GET_MODE (targ0 ? targ0 : targ1);
2250 enum mode_class class;
2251 enum machine_mode wider_mode;
2252 rtx entry_last = get_last_insn ();
2255 class = GET_MODE_CLASS (mode);
2257 /* If we are inside an appropriately-short loop and we are optimizing,
2258 force expensive constants into a register. */
2259 if (CONSTANT_P (op0) && optimize
2260 && rtx_cost (op0, binoptab->code) > COSTS_N_INSNS (1))
2261 op0 = force_reg (mode, op0);
2263 if (CONSTANT_P (op1) && optimize
2264 && rtx_cost (op1, binoptab->code) > COSTS_N_INSNS (1))
2265 op1 = force_reg (mode, op1);
2268 targ0 = gen_reg_rtx (mode);
2270 targ1 = gen_reg_rtx (mode);
2272 /* Record where to go back to if we fail. */
2273 last = get_last_insn ();
2275 if (binoptab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
2277 int icode = (int) binoptab->handlers[(int) mode].insn_code;
2278 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
2279 enum machine_mode mode1 = insn_data[icode].operand[2].mode;
2281 rtx xop0 = op0, xop1 = op1;
2283 /* In case the insn wants input operands in modes different from
2284 those of the actual operands, convert the operands. It would
2285 seem that we don't need to convert CONST_INTs, but we do, so
2286 that they're properly zero-extended, sign-extended or truncated
2289 if (GET_MODE (op0) != mode0 && mode0 != VOIDmode)
2290 xop0 = convert_modes (mode0,
2291 GET_MODE (op0) != VOIDmode
2296 if (GET_MODE (op1) != mode1 && mode1 != VOIDmode)
2297 xop1 = convert_modes (mode1,
2298 GET_MODE (op1) != VOIDmode
2303 /* Now, if insn doesn't accept these operands, put them into pseudos. */
2304 if (!insn_data[icode].operand[1].predicate (xop0, mode0))
2305 xop0 = copy_to_mode_reg (mode0, xop0);
2307 if (!insn_data[icode].operand[2].predicate (xop1, mode1))
2308 xop1 = copy_to_mode_reg (mode1, xop1);
2310 /* We could handle this, but we should always be called with a pseudo
2311 for our targets and all insns should take them as outputs. */
2312 gcc_assert (insn_data[icode].operand[0].predicate (targ0, mode));
2313 gcc_assert (insn_data[icode].operand[3].predicate (targ1, mode));
2315 pat = GEN_FCN (icode) (targ0, xop0, xop1, targ1);
2322 delete_insns_since (last);
2325 /* It can't be done in this mode. Can we do it in a wider mode? */
2327 if (CLASS_HAS_WIDER_MODES_P (class))
2329 for (wider_mode = GET_MODE_WIDER_MODE (mode);
2330 wider_mode != VOIDmode;
2331 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
2333 if (binoptab->handlers[(int) wider_mode].insn_code
2334 != CODE_FOR_nothing)
2336 rtx t0 = gen_reg_rtx (wider_mode);
2337 rtx t1 = gen_reg_rtx (wider_mode);
2338 rtx cop0 = convert_modes (wider_mode, mode, op0, unsignedp);
2339 rtx cop1 = convert_modes (wider_mode, mode, op1, unsignedp);
2341 if (expand_twoval_binop (binoptab, cop0, cop1,
2344 convert_move (targ0, t0, unsignedp);
2345 convert_move (targ1, t1, unsignedp);
2349 delete_insns_since (last);
2354 delete_insns_since (entry_last);
2358 /* Expand the two-valued library call indicated by BINOPTAB, but
2359 preserve only one of the values. If TARG0 is non-NULL, the first
2360 value is placed into TARG0; otherwise the second value is placed
2361 into TARG1. Exactly one of TARG0 and TARG1 must be non-NULL. The
2362 value stored into TARG0 or TARG1 is equivalent to (CODE OP0 OP1).
2363 This routine assumes that the value returned by the library call is
2364 as if the return value was of an integral mode twice as wide as the
2365 mode of OP0. Returns 1 if the call was successful. */
2368 expand_twoval_binop_libfunc (optab binoptab, rtx op0, rtx op1,
2369 rtx targ0, rtx targ1, enum rtx_code code)
2371 enum machine_mode mode;
2372 enum machine_mode libval_mode;
2376 /* Exactly one of TARG0 or TARG1 should be non-NULL. */
2377 gcc_assert (!targ0 != !targ1);
2379 mode = GET_MODE (op0);
2380 if (!binoptab->handlers[(int) mode].libfunc)
2383 /* The value returned by the library function will have twice as
2384 many bits as the nominal MODE. */
2385 libval_mode = smallest_mode_for_size (2 * GET_MODE_BITSIZE (mode),
2388 libval = emit_library_call_value (binoptab->handlers[(int) mode].libfunc,
2389 NULL_RTX, LCT_CONST,
2393 /* Get the part of VAL containing the value that we want. */
2394 libval = simplify_gen_subreg (mode, libval, libval_mode,
2395 targ0 ? 0 : GET_MODE_SIZE (mode));
2396 insns = get_insns ();
2398 /* Move the into the desired location. */
2399 emit_libcall_block (insns, targ0 ? targ0 : targ1, libval,
2400 gen_rtx_fmt_ee (code, mode, op0, op1));
2406 /* Wrapper around expand_unop which takes an rtx code to specify
2407 the operation to perform, not an optab pointer. All other
2408 arguments are the same. */
2410 expand_simple_unop (enum machine_mode mode, enum rtx_code code, rtx op0,
2411 rtx target, int unsignedp)
2413 optab unop = code_to_optab[(int) code];
2416 return expand_unop (mode, unop, op0, target, unsignedp);
2422 (clz:wide (zero_extend:wide x)) - ((width wide) - (width narrow)). */
2424 widen_clz (enum machine_mode mode, rtx op0, rtx target)
2426 enum mode_class class = GET_MODE_CLASS (mode);
2427 if (CLASS_HAS_WIDER_MODES_P (class))
2429 enum machine_mode wider_mode;
2430 for (wider_mode = GET_MODE_WIDER_MODE (mode);
2431 wider_mode != VOIDmode;
2432 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
2434 if (clz_optab->handlers[(int) wider_mode].insn_code
2435 != CODE_FOR_nothing)
2437 rtx xop0, temp, last;
2439 last = get_last_insn ();
2442 target = gen_reg_rtx (mode);
2443 xop0 = widen_operand (op0, wider_mode, mode, true, false);
2444 temp = expand_unop (wider_mode, clz_optab, xop0, NULL_RTX, true);
2446 temp = expand_binop (wider_mode, sub_optab, temp,
2447 GEN_INT (GET_MODE_BITSIZE (wider_mode)
2448 - GET_MODE_BITSIZE (mode)),
2449 target, true, OPTAB_DIRECT);
2451 delete_insns_since (last);
2463 (lshiftrt:wide (bswap:wide x) ((width wide) - (width narrow))). */
2465 widen_bswap (enum machine_mode mode, rtx op0, rtx target)
2467 enum mode_class class = GET_MODE_CLASS (mode);
2468 enum machine_mode wider_mode;
2471 if (!CLASS_HAS_WIDER_MODES_P (class))
2474 for (wider_mode = GET_MODE_WIDER_MODE (mode);
2475 wider_mode != VOIDmode;
2476 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
2477 if (bswap_optab->handlers[wider_mode].insn_code != CODE_FOR_nothing)
2482 last = get_last_insn ();
2484 x = widen_operand (op0, wider_mode, mode, true, true);
2485 x = expand_unop (wider_mode, bswap_optab, x, NULL_RTX, true);
2488 x = expand_shift (RSHIFT_EXPR, wider_mode, x,
2489 size_int (GET_MODE_BITSIZE (wider_mode)
2490 - GET_MODE_BITSIZE (mode)),
2496 target = gen_reg_rtx (mode);
2497 emit_move_insn (target, gen_lowpart (mode, x));
2500 delete_insns_since (last);
2505 /* Try calculating bswap as two bswaps of two word-sized operands. */
2508 expand_doubleword_bswap (enum machine_mode mode, rtx op, rtx target)
2512 t1 = expand_unop (word_mode, bswap_optab,
2513 operand_subword_force (op, 0, mode), NULL_RTX, true);
2514 t0 = expand_unop (word_mode, bswap_optab,
2515 operand_subword_force (op, 1, mode), NULL_RTX, true);
2518 target = gen_reg_rtx (mode);
2520 emit_insn (gen_rtx_CLOBBER (VOIDmode, target));
2521 emit_move_insn (operand_subword (target, 0, 1, mode), t0);
2522 emit_move_insn (operand_subword (target, 1, 1, mode), t1);
2527 /* Try calculating (parity x) as (and (popcount x) 1), where
2528 popcount can also be done in a wider mode. */
2530 expand_parity (enum machine_mode mode, rtx op0, rtx target)
2532 enum mode_class class = GET_MODE_CLASS (mode);
2533 if (CLASS_HAS_WIDER_MODES_P (class))
2535 enum machine_mode wider_mode;
2536 for (wider_mode = mode; wider_mode != VOIDmode;
2537 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
2539 if (popcount_optab->handlers[(int) wider_mode].insn_code
2540 != CODE_FOR_nothing)
2542 rtx xop0, temp, last;
2544 last = get_last_insn ();
2547 target = gen_reg_rtx (mode);
2548 xop0 = widen_operand (op0, wider_mode, mode, true, false);
2549 temp = expand_unop (wider_mode, popcount_optab, xop0, NULL_RTX,
2552 temp = expand_binop (wider_mode, and_optab, temp, const1_rtx,
2553 target, true, OPTAB_DIRECT);
2555 delete_insns_since (last);
2564 /* Extract the OMODE lowpart from VAL, which has IMODE. Under certain
2565 conditions, VAL may already be a SUBREG against which we cannot generate
2566 a further SUBREG. In this case, we expect forcing the value into a
2567 register will work around the situation. */
2570 lowpart_subreg_maybe_copy (enum machine_mode omode, rtx val,
2571 enum machine_mode imode)
2574 ret = lowpart_subreg (omode, val, imode);
2577 val = force_reg (imode, val);
2578 ret = lowpart_subreg (omode, val, imode);
2579 gcc_assert (ret != NULL);
2584 /* Expand a floating point absolute value or negation operation via a
2585 logical operation on the sign bit. */
2588 expand_absneg_bit (enum rtx_code code, enum machine_mode mode,
2589 rtx op0, rtx target)
2591 const struct real_format *fmt;
2592 int bitpos, word, nwords, i;
2593 enum machine_mode imode;
2594 HOST_WIDE_INT hi, lo;
2597 /* The format has to have a simple sign bit. */
2598 fmt = REAL_MODE_FORMAT (mode);
2602 bitpos = fmt->signbit_rw;
2606 /* Don't create negative zeros if the format doesn't support them. */
2607 if (code == NEG && !fmt->has_signed_zero)
2610 if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
2612 imode = int_mode_for_mode (mode);
2613 if (imode == BLKmode)
2622 if (FLOAT_WORDS_BIG_ENDIAN)
2623 word = (GET_MODE_BITSIZE (mode) - bitpos) / BITS_PER_WORD;
2625 word = bitpos / BITS_PER_WORD;
2626 bitpos = bitpos % BITS_PER_WORD;
2627 nwords = (GET_MODE_BITSIZE (mode) + BITS_PER_WORD - 1) / BITS_PER_WORD;
2630 if (bitpos < HOST_BITS_PER_WIDE_INT)
2633 lo = (HOST_WIDE_INT) 1 << bitpos;
2637 hi = (HOST_WIDE_INT) 1 << (bitpos - HOST_BITS_PER_WIDE_INT);
2643 if (target == 0 || target == op0)
2644 target = gen_reg_rtx (mode);
2650 for (i = 0; i < nwords; ++i)
2652 rtx targ_piece = operand_subword (target, i, 1, mode);
2653 rtx op0_piece = operand_subword_force (op0, i, mode);
2657 temp = expand_binop (imode, code == ABS ? and_optab : xor_optab,
2659 immed_double_const (lo, hi, imode),
2660 targ_piece, 1, OPTAB_LIB_WIDEN);
2661 if (temp != targ_piece)
2662 emit_move_insn (targ_piece, temp);
2665 emit_move_insn (targ_piece, op0_piece);
2668 insns = get_insns ();
2671 temp = gen_rtx_fmt_e (code, mode, copy_rtx (op0));
2672 emit_no_conflict_block (insns, target, op0, NULL_RTX, temp);
2676 temp = expand_binop (imode, code == ABS ? and_optab : xor_optab,
2677 gen_lowpart (imode, op0),
2678 immed_double_const (lo, hi, imode),
2679 gen_lowpart (imode, target), 1, OPTAB_LIB_WIDEN);
2680 target = lowpart_subreg_maybe_copy (mode, temp, imode);
2682 set_unique_reg_note (get_last_insn (), REG_EQUAL,
2683 gen_rtx_fmt_e (code, mode, copy_rtx (op0)));
2689 /* Generate code to perform an operation specified by UNOPTAB
2690 on operand OP0, with result having machine-mode MODE.
2692 UNSIGNEDP is for the case where we have to widen the operands
2693 to perform the operation. It says to use zero-extension.
2695 If TARGET is nonzero, the value
2696 is generated there, if it is convenient to do so.
2697 In all cases an rtx is returned for the locus of the value;
2698 this may or may not be TARGET. */
2701 expand_unop (enum machine_mode mode, optab unoptab, rtx op0, rtx target,
2704 enum mode_class class;
2705 enum machine_mode wider_mode;
2707 rtx last = get_last_insn ();
2710 class = GET_MODE_CLASS (mode);
2712 if (unoptab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
2714 int icode = (int) unoptab->handlers[(int) mode].insn_code;
2715 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
2721 temp = gen_reg_rtx (mode);
2723 if (GET_MODE (xop0) != VOIDmode
2724 && GET_MODE (xop0) != mode0)
2725 xop0 = convert_to_mode (mode0, xop0, unsignedp);
2727 /* Now, if insn doesn't accept our operand, put it into a pseudo. */
2729 if (!insn_data[icode].operand[1].predicate (xop0, mode0))
2730 xop0 = copy_to_mode_reg (mode0, xop0);
2732 if (!insn_data[icode].operand[0].predicate (temp, mode))
2733 temp = gen_reg_rtx (mode);
2735 pat = GEN_FCN (icode) (temp, xop0);
2738 if (INSN_P (pat) && NEXT_INSN (pat) != NULL_RTX
2739 && ! add_equal_note (pat, temp, unoptab->code, xop0, NULL_RTX))
2741 delete_insns_since (last);
2742 return expand_unop (mode, unoptab, op0, NULL_RTX, unsignedp);
2750 delete_insns_since (last);
2753 /* It can't be done in this mode. Can we open-code it in a wider mode? */
2755 /* Widening clz needs special treatment. */
2756 if (unoptab == clz_optab)
2758 temp = widen_clz (mode, op0, target);
2765 /* Widening (or narrowing) bswap needs special treatment. */
2766 if (unoptab == bswap_optab)
2768 temp = widen_bswap (mode, op0, target);
2772 if (GET_MODE_SIZE (mode) == 2 * UNITS_PER_WORD
2773 && unoptab->handlers[word_mode].insn_code != CODE_FOR_nothing)
2775 temp = expand_doubleword_bswap (mode, op0, target);
2783 if (CLASS_HAS_WIDER_MODES_P (class))
2784 for (wider_mode = GET_MODE_WIDER_MODE (mode);
2785 wider_mode != VOIDmode;
2786 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
2788 if (unoptab->handlers[(int) wider_mode].insn_code != CODE_FOR_nothing)
2792 /* For certain operations, we need not actually extend
2793 the narrow operand, as long as we will truncate the
2794 results to the same narrowness. */
2796 xop0 = widen_operand (xop0, wider_mode, mode, unsignedp,
2797 (unoptab == neg_optab
2798 || unoptab == one_cmpl_optab)
2799 && class == MODE_INT);
2801 temp = expand_unop (wider_mode, unoptab, xop0, NULL_RTX,
2806 if (class != MODE_INT
2807 || !TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode),
2808 GET_MODE_BITSIZE (wider_mode)))
2811 target = gen_reg_rtx (mode);
2812 convert_move (target, temp, 0);
2816 return gen_lowpart (mode, temp);
2819 delete_insns_since (last);
2823 /* These can be done a word at a time. */
2824 if (unoptab == one_cmpl_optab
2825 && class == MODE_INT
2826 && GET_MODE_SIZE (mode) > UNITS_PER_WORD
2827 && unoptab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing)
2832 if (target == 0 || target == op0)
2833 target = gen_reg_rtx (mode);
2837 /* Do the actual arithmetic. */
2838 for (i = 0; i < GET_MODE_BITSIZE (mode) / BITS_PER_WORD; i++)
2840 rtx target_piece = operand_subword (target, i, 1, mode);
2841 rtx x = expand_unop (word_mode, unoptab,
2842 operand_subword_force (op0, i, mode),
2843 target_piece, unsignedp);
2845 if (target_piece != x)
2846 emit_move_insn (target_piece, x);
2849 insns = get_insns ();
2852 emit_no_conflict_block (insns, target, op0, NULL_RTX,
2853 gen_rtx_fmt_e (unoptab->code, mode,
2858 if (unoptab->code == NEG)
2860 /* Try negating floating point values by flipping the sign bit. */
2861 if (SCALAR_FLOAT_MODE_P (mode))
2863 temp = expand_absneg_bit (NEG, mode, op0, target);
2868 /* If there is no negation pattern, and we have no negative zero,
2869 try subtracting from zero. */
2870 if (!HONOR_SIGNED_ZEROS (mode))
2872 temp = expand_binop (mode, (unoptab == negv_optab
2873 ? subv_optab : sub_optab),
2874 CONST0_RTX (mode), op0, target,
2875 unsignedp, OPTAB_DIRECT);
2881 /* Try calculating parity (x) as popcount (x) % 2. */
2882 if (unoptab == parity_optab)
2884 temp = expand_parity (mode, op0, target);
2890 /* Now try a library call in this mode. */
2891 if (unoptab->handlers[(int) mode].libfunc)
2895 enum machine_mode outmode = mode;
2897 /* All of these functions return small values. Thus we choose to
2898 have them return something that isn't a double-word. */
2899 if (unoptab == ffs_optab || unoptab == clz_optab || unoptab == ctz_optab
2900 || unoptab == popcount_optab || unoptab == parity_optab)
2902 = GET_MODE (hard_libcall_value (TYPE_MODE (integer_type_node)));
2906 /* Pass 1 for NO_QUEUE so we don't lose any increments
2907 if the libcall is cse'd or moved. */
2908 value = emit_library_call_value (unoptab->handlers[(int) mode].libfunc,
2909 NULL_RTX, LCT_CONST, outmode,
2911 insns = get_insns ();
2914 target = gen_reg_rtx (outmode);
2915 emit_libcall_block (insns, target, value,
2916 gen_rtx_fmt_e (unoptab->code, outmode, op0));
2921 /* It can't be done in this mode. Can we do it in a wider mode? */
2923 if (CLASS_HAS_WIDER_MODES_P (class))
2925 for (wider_mode = GET_MODE_WIDER_MODE (mode);
2926 wider_mode != VOIDmode;
2927 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
2929 if ((unoptab->handlers[(int) wider_mode].insn_code
2930 != CODE_FOR_nothing)
2931 || unoptab->handlers[(int) wider_mode].libfunc)
2935 /* For certain operations, we need not actually extend
2936 the narrow operand, as long as we will truncate the
2937 results to the same narrowness. */
2939 xop0 = widen_operand (xop0, wider_mode, mode, unsignedp,
2940 (unoptab == neg_optab
2941 || unoptab == one_cmpl_optab)
2942 && class == MODE_INT);
2944 temp = expand_unop (wider_mode, unoptab, xop0, NULL_RTX,
2947 /* If we are generating clz using wider mode, adjust the
2949 if (unoptab == clz_optab && temp != 0)
2950 temp = expand_binop (wider_mode, sub_optab, temp,
2951 GEN_INT (GET_MODE_BITSIZE (wider_mode)
2952 - GET_MODE_BITSIZE (mode)),
2953 target, true, OPTAB_DIRECT);
2957 if (class != MODE_INT)
2960 target = gen_reg_rtx (mode);
2961 convert_move (target, temp, 0);
2965 return gen_lowpart (mode, temp);
2968 delete_insns_since (last);
2973 /* One final attempt at implementing negation via subtraction,
2974 this time allowing widening of the operand. */
2975 if (unoptab->code == NEG && !HONOR_SIGNED_ZEROS (mode))
2978 temp = expand_binop (mode,
2979 unoptab == negv_optab ? subv_optab : sub_optab,
2980 CONST0_RTX (mode), op0,
2981 target, unsignedp, OPTAB_LIB_WIDEN);
2989 /* Emit code to compute the absolute value of OP0, with result to
2990 TARGET if convenient. (TARGET may be 0.) The return value says
2991 where the result actually is to be found.
2993 MODE is the mode of the operand; the mode of the result is
2994 different but can be deduced from MODE.
2999 expand_abs_nojump (enum machine_mode mode, rtx op0, rtx target,
3000 int result_unsignedp)
3005 result_unsignedp = 1;
3007 /* First try to do it with a special abs instruction. */
3008 temp = expand_unop (mode, result_unsignedp ? abs_optab : absv_optab,
3013 /* For floating point modes, try clearing the sign bit. */
3014 if (SCALAR_FLOAT_MODE_P (mode))
3016 temp = expand_absneg_bit (ABS, mode, op0, target);
3021 /* If we have a MAX insn, we can do this as MAX (x, -x). */
3022 if (smax_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing
3023 && !HONOR_SIGNED_ZEROS (mode))
3025 rtx last = get_last_insn ();
3027 temp = expand_unop (mode, neg_optab, op0, NULL_RTX, 0);
3029 temp = expand_binop (mode, smax_optab, op0, temp, target, 0,
3035 delete_insns_since (last);
3038 /* If this machine has expensive jumps, we can do integer absolute
3039 value of X as (((signed) x >> (W-1)) ^ x) - ((signed) x >> (W-1)),
3040 where W is the width of MODE. */
3042 if (GET_MODE_CLASS (mode) == MODE_INT && BRANCH_COST >= 2)
3044 rtx extended = expand_shift (RSHIFT_EXPR, mode, op0,
3045 size_int (GET_MODE_BITSIZE (mode) - 1),
3048 temp = expand_binop (mode, xor_optab, extended, op0, target, 0,
3051 temp = expand_binop (mode, result_unsignedp ? sub_optab : subv_optab,
3052 temp, extended, target, 0, OPTAB_LIB_WIDEN);
3062 expand_abs (enum machine_mode mode, rtx op0, rtx target,
3063 int result_unsignedp, int safe)
3068 result_unsignedp = 1;
3070 temp = expand_abs_nojump (mode, op0, target, result_unsignedp);
3074 /* If that does not win, use conditional jump and negate. */
3076 /* It is safe to use the target if it is the same
3077 as the source if this is also a pseudo register */
3078 if (op0 == target && REG_P (op0)
3079 && REGNO (op0) >= FIRST_PSEUDO_REGISTER)
3082 op1 = gen_label_rtx ();
3083 if (target == 0 || ! safe
3084 || GET_MODE (target) != mode
3085 || (MEM_P (target) && MEM_VOLATILE_P (target))
3087 && REGNO (target) < FIRST_PSEUDO_REGISTER))
3088 target = gen_reg_rtx (mode);
3090 emit_move_insn (target, op0);
3093 do_compare_rtx_and_jump (target, CONST0_RTX (mode), GE, 0, mode,
3094 NULL_RTX, NULL_RTX, op1);
3096 op0 = expand_unop (mode, result_unsignedp ? neg_optab : negv_optab,
3099 emit_move_insn (target, op0);
3105 /* A subroutine of expand_copysign, perform the copysign operation using the
3106 abs and neg primitives advertised to exist on the target. The assumption
3107 is that we have a split register file, and leaving op0 in fp registers,
3108 and not playing with subregs so much, will help the register allocator. */
3111 expand_copysign_absneg (enum machine_mode mode, rtx op0, rtx op1, rtx target,
3112 int bitpos, bool op0_is_abs)
3114 enum machine_mode imode;
3121 /* Check if the back end provides an insn that handles signbit for the
3123 icode = (int) signbit_optab->handlers [(int) mode].insn_code;
3124 if (icode != CODE_FOR_nothing)
3126 imode = insn_data[icode].operand[0].mode;
3127 sign = gen_reg_rtx (imode);
3128 emit_unop_insn (icode, sign, op1, UNKNOWN);
3132 HOST_WIDE_INT hi, lo;
3134 if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
3136 imode = int_mode_for_mode (mode);
3137 if (imode == BLKmode)
3139 op1 = gen_lowpart (imode, op1);
3146 if (FLOAT_WORDS_BIG_ENDIAN)
3147 word = (GET_MODE_BITSIZE (mode) - bitpos) / BITS_PER_WORD;
3149 word = bitpos / BITS_PER_WORD;
3150 bitpos = bitpos % BITS_PER_WORD;
3151 op1 = operand_subword_force (op1, word, mode);
3154 if (bitpos < HOST_BITS_PER_WIDE_INT)
3157 lo = (HOST_WIDE_INT) 1 << bitpos;
3161 hi = (HOST_WIDE_INT) 1 << (bitpos - HOST_BITS_PER_WIDE_INT);
3165 sign = gen_reg_rtx (imode);
3166 sign = expand_binop (imode, and_optab, op1,
3167 immed_double_const (lo, hi, imode),
3168 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3173 op0 = expand_unop (mode, abs_optab, op0, target, 0);
3180 if (target == NULL_RTX)
3181 target = copy_to_reg (op0);
3183 emit_move_insn (target, op0);
3186 label = gen_label_rtx ();
3187 emit_cmp_and_jump_insns (sign, const0_rtx, EQ, NULL_RTX, imode, 1, label);
3189 if (GET_CODE (op0) == CONST_DOUBLE)
3190 op0 = simplify_unary_operation (NEG, mode, op0, mode);
3192 op0 = expand_unop (mode, neg_optab, op0, target, 0);
3194 emit_move_insn (target, op0);
3202 /* A subroutine of expand_copysign, perform the entire copysign operation
3203 with integer bitmasks. BITPOS is the position of the sign bit; OP0_IS_ABS
3204 is true if op0 is known to have its sign bit clear. */
3207 expand_copysign_bit (enum machine_mode mode, rtx op0, rtx op1, rtx target,
3208 int bitpos, bool op0_is_abs)
3210 enum machine_mode imode;
3211 HOST_WIDE_INT hi, lo;
3212 int word, nwords, i;
3215 if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
3217 imode = int_mode_for_mode (mode);
3218 if (imode == BLKmode)
3227 if (FLOAT_WORDS_BIG_ENDIAN)
3228 word = (GET_MODE_BITSIZE (mode) - bitpos) / BITS_PER_WORD;
3230 word = bitpos / BITS_PER_WORD;
3231 bitpos = bitpos % BITS_PER_WORD;
3232 nwords = (GET_MODE_BITSIZE (mode) + BITS_PER_WORD - 1) / BITS_PER_WORD;
3235 if (bitpos < HOST_BITS_PER_WIDE_INT)
3238 lo = (HOST_WIDE_INT) 1 << bitpos;
3242 hi = (HOST_WIDE_INT) 1 << (bitpos - HOST_BITS_PER_WIDE_INT);
3246 if (target == 0 || target == op0 || target == op1)
3247 target = gen_reg_rtx (mode);
3253 for (i = 0; i < nwords; ++i)
3255 rtx targ_piece = operand_subword (target, i, 1, mode);
3256 rtx op0_piece = operand_subword_force (op0, i, mode);
3261 op0_piece = expand_binop (imode, and_optab, op0_piece,
3262 immed_double_const (~lo, ~hi, imode),
3263 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3265 op1 = expand_binop (imode, and_optab,
3266 operand_subword_force (op1, i, mode),
3267 immed_double_const (lo, hi, imode),
3268 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3270 temp = expand_binop (imode, ior_optab, op0_piece, op1,
3271 targ_piece, 1, OPTAB_LIB_WIDEN);
3272 if (temp != targ_piece)
3273 emit_move_insn (targ_piece, temp);
3276 emit_move_insn (targ_piece, op0_piece);
3279 insns = get_insns ();
3282 emit_no_conflict_block (insns, target, op0, op1, NULL_RTX);
3286 op1 = expand_binop (imode, and_optab, gen_lowpart (imode, op1),
3287 immed_double_const (lo, hi, imode),
3288 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3290 op0 = gen_lowpart (imode, op0);
3292 op0 = expand_binop (imode, and_optab, op0,
3293 immed_double_const (~lo, ~hi, imode),
3294 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3296 temp = expand_binop (imode, ior_optab, op0, op1,
3297 gen_lowpart (imode, target), 1, OPTAB_LIB_WIDEN);
3298 target = lowpart_subreg_maybe_copy (mode, temp, imode);
3304 /* Expand the C99 copysign operation. OP0 and OP1 must be the same
3305 scalar floating point mode. Return NULL if we do not know how to
3306 expand the operation inline. */
3309 expand_copysign (rtx op0, rtx op1, rtx target)
3311 enum machine_mode mode = GET_MODE (op0);
3312 const struct real_format *fmt;
3316 gcc_assert (SCALAR_FLOAT_MODE_P (mode));
3317 gcc_assert (GET_MODE (op1) == mode);
3319 /* First try to do it with a special instruction. */
3320 temp = expand_binop (mode, copysign_optab, op0, op1,
3321 target, 0, OPTAB_DIRECT);
3325 fmt = REAL_MODE_FORMAT (mode);
3326 if (fmt == NULL || !fmt->has_signed_zero)
3330 if (GET_CODE (op0) == CONST_DOUBLE)
3332 if (real_isneg (CONST_DOUBLE_REAL_VALUE (op0)))
3333 op0 = simplify_unary_operation (ABS, mode, op0, mode);
3337 if (fmt->signbit_ro >= 0
3338 && (GET_CODE (op0) == CONST_DOUBLE
3339 || (neg_optab->handlers[mode].insn_code != CODE_FOR_nothing
3340 && abs_optab->handlers[mode].insn_code != CODE_FOR_nothing)))
3342 temp = expand_copysign_absneg (mode, op0, op1, target,
3343 fmt->signbit_ro, op0_is_abs);
3348 if (fmt->signbit_rw < 0)
3350 return expand_copysign_bit (mode, op0, op1, target,
3351 fmt->signbit_rw, op0_is_abs);
3354 /* Generate an instruction whose insn-code is INSN_CODE,
3355 with two operands: an output TARGET and an input OP0.
3356 TARGET *must* be nonzero, and the output is always stored there.
3357 CODE is an rtx code such that (CODE OP0) is an rtx that describes
3358 the value that is stored into TARGET. */
3361 emit_unop_insn (int icode, rtx target, rtx op0, enum rtx_code code)
3364 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
3369 /* Now, if insn does not accept our operands, put them into pseudos. */
3371 if (!insn_data[icode].operand[1].predicate (op0, mode0))
3372 op0 = copy_to_mode_reg (mode0, op0);
3374 if (!insn_data[icode].operand[0].predicate (temp, GET_MODE (temp)))
3375 temp = gen_reg_rtx (GET_MODE (temp));
3377 pat = GEN_FCN (icode) (temp, op0);
3379 if (INSN_P (pat) && NEXT_INSN (pat) != NULL_RTX && code != UNKNOWN)
3380 add_equal_note (pat, temp, code, op0, NULL_RTX);
3385 emit_move_insn (target, temp);
3388 struct no_conflict_data
3390 rtx target, first, insn;
3394 /* Called via note_stores by emit_no_conflict_block and emit_libcall_block.
3395 Set P->must_stay if the currently examined clobber / store has to stay
3396 in the list of insns that constitute the actual no_conflict block /
3399 no_conflict_move_test (rtx dest, rtx set, void *p0)
3401 struct no_conflict_data *p= p0;
3403 /* If this inns directly contributes to setting the target, it must stay. */
3404 if (reg_overlap_mentioned_p (p->target, dest))
3405 p->must_stay = true;
3406 /* If we haven't committed to keeping any other insns in the list yet,
3407 there is nothing more to check. */
3408 else if (p->insn == p->first)
3410 /* If this insn sets / clobbers a register that feeds one of the insns
3411 already in the list, this insn has to stay too. */
3412 else if (reg_overlap_mentioned_p (dest, PATTERN (p->first))
3413 || (CALL_P (p->first) && (find_reg_fusage (p->first, USE, dest)))
3414 || reg_used_between_p (dest, p->first, p->insn)
3415 /* Likewise if this insn depends on a register set by a previous
3416 insn in the list, or if it sets a result (presumably a hard
3417 register) that is set or clobbered by a previous insn.
3418 N.B. the modified_*_p (SET_DEST...) tests applied to a MEM
3419 SET_DEST perform the former check on the address, and the latter
3420 check on the MEM. */
3421 || (GET_CODE (set) == SET
3422 && (modified_in_p (SET_SRC (set), p->first)
3423 || modified_in_p (SET_DEST (set), p->first)
3424 || modified_between_p (SET_SRC (set), p->first, p->insn)
3425 || modified_between_p (SET_DEST (set), p->first, p->insn))))
3426 p->must_stay = true;
3429 /* Encapsulate the block starting at FIRST and ending with LAST, which is
3430 logically equivalent to EQUIV, so it gets manipulated as a unit if it
3431 is possible to do so. */
3434 maybe_encapsulate_block (rtx first, rtx last, rtx equiv)
3436 if (!flag_non_call_exceptions || !may_trap_p (equiv))
3438 /* We can't attach the REG_LIBCALL and REG_RETVAL notes when the
3439 encapsulated region would not be in one basic block, i.e. when
3440 there is a control_flow_insn_p insn between FIRST and LAST. */
3441 bool attach_libcall_retval_notes = true;
3442 rtx insn, next = NEXT_INSN (last);
3444 for (insn = first; insn != next; insn = NEXT_INSN (insn))
3445 if (control_flow_insn_p (insn))
3447 attach_libcall_retval_notes = false;
3451 if (attach_libcall_retval_notes)
3453 REG_NOTES (first) = gen_rtx_INSN_LIST (REG_LIBCALL, last,
3455 REG_NOTES (last) = gen_rtx_INSN_LIST (REG_RETVAL, first,
3457 next = NEXT_INSN (last);
3458 for (insn = first; insn != next; insn = NEXT_INSN (insn))
3459 REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_LIBCALL_ID,
3460 GEN_INT (libcall_id),
3467 /* Emit code to perform a series of operations on a multi-word quantity, one
3470 Such a block is preceded by a CLOBBER of the output, consists of multiple
3471 insns, each setting one word of the output, and followed by a SET copying
3472 the output to itself.
3474 Each of the insns setting words of the output receives a REG_NO_CONFLICT
3475 note indicating that it doesn't conflict with the (also multi-word)
3476 inputs. The entire block is surrounded by REG_LIBCALL and REG_RETVAL
3479 INSNS is a block of code generated to perform the operation, not including
3480 the CLOBBER and final copy. All insns that compute intermediate values
3481 are first emitted, followed by the block as described above.
3483 TARGET, OP0, and OP1 are the output and inputs of the operations,
3484 respectively. OP1 may be zero for a unary operation.
3486 EQUIV, if nonzero, is an expression to be placed into a REG_EQUAL note
3489 If TARGET is not a register, INSNS is simply emitted with no special
3490 processing. Likewise if anything in INSNS is not an INSN or if
3491 there is a libcall block inside INSNS.
3493 The final insn emitted is returned. */
3496 emit_no_conflict_block (rtx insns, rtx target, rtx op0, rtx op1, rtx equiv)
3498 rtx prev, next, first, last, insn;
3500 if (!REG_P (target) || reload_in_progress)
3501 return emit_insn (insns);
3503 for (insn = insns; insn; insn = NEXT_INSN (insn))
3504 if (!NONJUMP_INSN_P (insn)
3505 || find_reg_note (insn, REG_LIBCALL, NULL_RTX))
3506 return emit_insn (insns);
3508 /* First emit all insns that do not store into words of the output and remove
3509 these from the list. */
3510 for (insn = insns; insn; insn = next)
3513 struct no_conflict_data data;
3515 next = NEXT_INSN (insn);
3517 /* Some ports (cris) create a libcall regions at their own. We must
3518 avoid any potential nesting of LIBCALLs. */
3519 if ((note = find_reg_note (insn, REG_LIBCALL, NULL)) != NULL)
3520 remove_note (insn, note);
3521 if ((note = find_reg_note (insn, REG_RETVAL, NULL)) != NULL)
3522 remove_note (insn, note);
3523 if ((note = find_reg_note (insn, REG_LIBCALL_ID, NULL)) != NULL)
3524 remove_note (insn, note);
3526 data.target = target;
3530 note_stores (PATTERN (insn), no_conflict_move_test, &data);
3531 if (! data.must_stay)
3533 if (PREV_INSN (insn))
3534 NEXT_INSN (PREV_INSN (insn)) = next;
3539 PREV_INSN (next) = PREV_INSN (insn);
3545 prev = get_last_insn ();
3547 /* Now write the CLOBBER of the output, followed by the setting of each
3548 of the words, followed by the final copy. */
3549 if (target != op0 && target != op1)
3550 emit_insn (gen_rtx_CLOBBER (VOIDmode, target));
3552 for (insn = insns; insn; insn = next)
3554 next = NEXT_INSN (insn);
3557 if (op1 && REG_P (op1))
3558 REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_NO_CONFLICT, op1,
3561 if (op0 && REG_P (op0))
3562 REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_NO_CONFLICT, op0,
3566 if (mov_optab->handlers[(int) GET_MODE (target)].insn_code
3567 != CODE_FOR_nothing)
3569 last = emit_move_insn (target, target);
3571 set_unique_reg_note (last, REG_EQUAL, equiv);
3575 last = get_last_insn ();
3577 /* Remove any existing REG_EQUAL note from "last", or else it will
3578 be mistaken for a note referring to the full contents of the
3579 alleged libcall value when found together with the REG_RETVAL
3580 note added below. An existing note can come from an insn
3581 expansion at "last". */
3582 remove_note (last, find_reg_note (last, REG_EQUAL, NULL_RTX));
3586 first = get_insns ();
3588 first = NEXT_INSN (prev);
3590 maybe_encapsulate_block (first, last, equiv);
3595 /* Emit code to make a call to a constant function or a library call.
3597 INSNS is a list containing all insns emitted in the call.
3598 These insns leave the result in RESULT. Our block is to copy RESULT
3599 to TARGET, which is logically equivalent to EQUIV.
3601 We first emit any insns that set a pseudo on the assumption that these are
3602 loading constants into registers; doing so allows them to be safely cse'ed
3603 between blocks. Then we emit all the other insns in the block, followed by
3604 an insn to move RESULT to TARGET. This last insn will have a REQ_EQUAL
3605 note with an operand of EQUIV.
3607 Moving assignments to pseudos outside of the block is done to improve
3608 the generated code, but is not required to generate correct code,
3609 hence being unable to move an assignment is not grounds for not making
3610 a libcall block. There are two reasons why it is safe to leave these
3611 insns inside the block: First, we know that these pseudos cannot be
3612 used in generated RTL outside the block since they are created for
3613 temporary purposes within the block. Second, CSE will not record the
3614 values of anything set inside a libcall block, so we know they must
3615 be dead at the end of the block.
3617 Except for the first group of insns (the ones setting pseudos), the
3618 block is delimited by REG_RETVAL and REG_LIBCALL notes. */
3620 emit_libcall_block (rtx insns, rtx target, rtx result, rtx equiv)
3622 rtx final_dest = target;
3623 rtx prev, next, first, last, insn;
3625 /* If this is a reg with REG_USERVAR_P set, then it could possibly turn
3626 into a MEM later. Protect the libcall block from this change. */
3627 if (! REG_P (target) || REG_USERVAR_P (target))
3628 target = gen_reg_rtx (GET_MODE (target));
3630 /* If we're using non-call exceptions, a libcall corresponding to an
3631 operation that may trap may also trap. */
3632 if (flag_non_call_exceptions && may_trap_p (equiv))
3634 for (insn = insns; insn; insn = NEXT_INSN (insn))
3637 rtx note = find_reg_note (insn, REG_EH_REGION, NULL_RTX);
3639 if (note != 0 && INTVAL (XEXP (note, 0)) <= 0)
3640 remove_note (insn, note);
3644 /* look for any CALL_INSNs in this sequence, and attach a REG_EH_REGION
3645 reg note to indicate that this call cannot throw or execute a nonlocal
3646 goto (unless there is already a REG_EH_REGION note, in which case
3648 for (insn = insns; insn; insn = NEXT_INSN (insn))
3651 rtx note = find_reg_note (insn, REG_EH_REGION, NULL_RTX);
3654 XEXP (note, 0) = constm1_rtx;
3656 REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_EH_REGION, constm1_rtx,
3660 /* First emit all insns that set pseudos. Remove them from the list as
3661 we go. Avoid insns that set pseudos which were referenced in previous
3662 insns. These can be generated by move_by_pieces, for example,
3663 to update an address. Similarly, avoid insns that reference things
3664 set in previous insns. */
3666 for (insn = insns; insn; insn = next)
3668 rtx set = single_set (insn);
3671 /* Some ports (cris) create a libcall regions at their own. We must
3672 avoid any potential nesting of LIBCALLs. */
3673 if ((note = find_reg_note (insn, REG_LIBCALL, NULL)) != NULL)
3674 remove_note (insn, note);
3675 if ((note = find_reg_note (insn, REG_RETVAL, NULL)) != NULL)
3676 remove_note (insn, note);
3677 if ((note = find_reg_note (insn, REG_LIBCALL_ID, NULL)) != NULL)
3678 remove_note (insn, note);
3680 next = NEXT_INSN (insn);
3682 if (set != 0 && REG_P (SET_DEST (set))
3683 && REGNO (SET_DEST (set)) >= FIRST_PSEUDO_REGISTER)
3685 struct no_conflict_data data;
3687 data.target = const0_rtx;
3691 note_stores (PATTERN (insn), no_conflict_move_test, &data);
3692 if (! data.must_stay)
3694 if (PREV_INSN (insn))
3695 NEXT_INSN (PREV_INSN (insn)) = next;
3700 PREV_INSN (next) = PREV_INSN (insn);
3706 /* Some ports use a loop to copy large arguments onto the stack.
3707 Don't move anything outside such a loop. */
3712 prev = get_last_insn ();
3714 /* Write the remaining insns followed by the final copy. */
3716 for (insn = insns; insn; insn = next)
3718 next = NEXT_INSN (insn);
3723 last = emit_move_insn (target, result);
3724 if (mov_optab->handlers[(int) GET_MODE (target)].insn_code
3725 != CODE_FOR_nothing)
3726 set_unique_reg_note (last, REG_EQUAL, copy_rtx (equiv));
3729 /* Remove any existing REG_EQUAL note from "last", or else it will
3730 be mistaken for a note referring to the full contents of the
3731 libcall value when found together with the REG_RETVAL note added
3732 below. An existing note can come from an insn expansion at
3734 remove_note (last, find_reg_note (last, REG_EQUAL, NULL_RTX));
3737 if (final_dest != target)
3738 emit_move_insn (final_dest, target);
3741 first = get_insns ();
3743 first = NEXT_INSN (prev);
3745 maybe_encapsulate_block (first, last, equiv);
3748 /* Nonzero if we can perform a comparison of mode MODE straightforwardly.
3749 PURPOSE describes how this comparison will be used. CODE is the rtx
3750 comparison code we will be using.
3752 ??? Actually, CODE is slightly weaker than that. A target is still
3753 required to implement all of the normal bcc operations, but not
3754 required to implement all (or any) of the unordered bcc operations. */
3757 can_compare_p (enum rtx_code code, enum machine_mode mode,
3758 enum can_compare_purpose purpose)
3762 if (cmp_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
3764 if (purpose == ccp_jump)
3765 return bcc_gen_fctn[(int) code] != NULL;
3766 else if (purpose == ccp_store_flag)
3767 return setcc_gen_code[(int) code] != CODE_FOR_nothing;
3769 /* There's only one cmov entry point, and it's allowed to fail. */
3772 if (purpose == ccp_jump
3773 && cbranch_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
3775 if (purpose == ccp_cmov
3776 && cmov_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
3778 if (purpose == ccp_store_flag
3779 && cstore_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
3781 mode = GET_MODE_WIDER_MODE (mode);
3783 while (mode != VOIDmode);
3788 /* This function is called when we are going to emit a compare instruction that
3789 compares the values found in *PX and *PY, using the rtl operator COMPARISON.
3791 *PMODE is the mode of the inputs (in case they are const_int).
3792 *PUNSIGNEDP nonzero says that the operands are unsigned;
3793 this matters if they need to be widened.
3795 If they have mode BLKmode, then SIZE specifies the size of both operands.
3797 This function performs all the setup necessary so that the caller only has
3798 to emit a single comparison insn. This setup can involve doing a BLKmode
3799 comparison or emitting a library call to perform the comparison if no insn
3800 is available to handle it.
3801 The values which are passed in through pointers can be modified; the caller
3802 should perform the comparison on the modified values. Constant
3803 comparisons must have already been folded. */
3806 prepare_cmp_insn (rtx *px, rtx *py, enum rtx_code *pcomparison, rtx size,
3807 enum machine_mode *pmode, int *punsignedp,
3808 enum can_compare_purpose purpose)
3810 enum machine_mode mode = *pmode;
3811 rtx x = *px, y = *py;
3812 int unsignedp = *punsignedp;
3814 /* If we are inside an appropriately-short loop and we are optimizing,
3815 force expensive constants into a register. */
3816 if (CONSTANT_P (x) && optimize
3817 && rtx_cost (x, COMPARE) > COSTS_N_INSNS (1))
3818 x = force_reg (mode, x);
3820 if (CONSTANT_P (y) && optimize
3821 && rtx_cost (y, COMPARE) > COSTS_N_INSNS (1))
3822 y = force_reg (mode, y);
3825 /* Make sure if we have a canonical comparison. The RTL
3826 documentation states that canonical comparisons are required only
3827 for targets which have cc0. */
3828 gcc_assert (!CONSTANT_P (x) || CONSTANT_P (y));
3831 /* Don't let both operands fail to indicate the mode. */
3832 if (GET_MODE (x) == VOIDmode && GET_MODE (y) == VOIDmode)
3833 x = force_reg (mode, x);
3835 /* Handle all BLKmode compares. */
3837 if (mode == BLKmode)
3839 enum machine_mode cmp_mode, result_mode;
3840 enum insn_code cmp_code;
3845 = GEN_INT (MIN (MEM_ALIGN (x), MEM_ALIGN (y)) / BITS_PER_UNIT);
3849 /* Try to use a memory block compare insn - either cmpstr
3850 or cmpmem will do. */
3851 for (cmp_mode = GET_CLASS_NARROWEST_MODE (MODE_INT);
3852 cmp_mode != VOIDmode;
3853 cmp_mode = GET_MODE_WIDER_MODE (cmp_mode))
3855 cmp_code = cmpmem_optab[cmp_mode];
3856 if (cmp_code == CODE_FOR_nothing)
3857 cmp_code = cmpstr_optab[cmp_mode];
3858 if (cmp_code == CODE_FOR_nothing)
3859 cmp_code = cmpstrn_optab[cmp_mode];
3860 if (cmp_code == CODE_FOR_nothing)
3863 /* Must make sure the size fits the insn's mode. */
3864 if ((GET_CODE (size) == CONST_INT
3865 && INTVAL (size) >= (1 << GET_MODE_BITSIZE (cmp_mode)))
3866 || (GET_MODE_BITSIZE (GET_MODE (size))
3867 > GET_MODE_BITSIZE (cmp_mode)))
3870 result_mode = insn_data[cmp_code].operand[0].mode;
3871 result = gen_reg_rtx (result_mode);
3872 size = convert_to_mode (cmp_mode, size, 1);
3873 emit_insn (GEN_FCN (cmp_code) (result, x, y, size, opalign));
3877 *pmode = result_mode;
3881 /* Otherwise call a library function, memcmp. */
3882 libfunc = memcmp_libfunc;
3883 length_type = sizetype;
3884 result_mode = TYPE_MODE (integer_type_node);
3885 cmp_mode = TYPE_MODE (length_type);
3886 size = convert_to_mode (TYPE_MODE (length_type), size,
3887 TYPE_UNSIGNED (length_type));
3889 result = emit_library_call_value (libfunc, 0, LCT_PURE_MAKE_BLOCK,
3896 *pmode = result_mode;
3900 /* Don't allow operands to the compare to trap, as that can put the
3901 compare and branch in different basic blocks. */
3902 if (flag_non_call_exceptions)
3905 x = force_reg (mode, x);
3907 y = force_reg (mode, y);
3912 if (can_compare_p (*pcomparison, mode, purpose))
3915 /* Handle a lib call just for the mode we are using. */
3917 if (cmp_optab->handlers[(int) mode].libfunc && !SCALAR_FLOAT_MODE_P (mode))
3919 rtx libfunc = cmp_optab->handlers[(int) mode].libfunc;
3922 /* If we want unsigned, and this mode has a distinct unsigned
3923 comparison routine, use that. */
3924 if (unsignedp && ucmp_optab->handlers[(int) mode].libfunc)
3925 libfunc = ucmp_optab->handlers[(int) mode].libfunc;
3927 result = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST_MAKE_BLOCK,
3928 targetm.libgcc_cmp_return_mode (),
3929 2, x, mode, y, mode);
3931 /* There are two kinds of comparison routines. Biased routines
3932 return 0/1/2, and unbiased routines return -1/0/1. Other parts
3933 of gcc expect that the comparison operation is equivalent
3934 to the modified comparison. For signed comparisons compare the
3935 result against 1 in the biased case, and zero in the unbiased
3936 case. For unsigned comparisons always compare against 1 after
3937 biasing the unbiased result by adding 1. This gives us a way to
3943 if (!TARGET_LIB_INT_CMP_BIASED)
3946 *px = plus_constant (result, 1);
3953 gcc_assert (SCALAR_FLOAT_MODE_P (mode));
3954 prepare_float_lib_cmp (px, py, pcomparison, pmode, punsignedp);
3957 /* Before emitting an insn with code ICODE, make sure that X, which is going
3958 to be used for operand OPNUM of the insn, is converted from mode MODE to
3959 WIDER_MODE (UNSIGNEDP determines whether it is an unsigned conversion), and
3960 that it is accepted by the operand predicate. Return the new value. */
3963 prepare_operand (int icode, rtx x, int opnum, enum machine_mode mode,
3964 enum machine_mode wider_mode, int unsignedp)
3966 if (mode != wider_mode)
3967 x = convert_modes (wider_mode, mode, x, unsignedp);
3969 if (!insn_data[icode].operand[opnum].predicate
3970 (x, insn_data[icode].operand[opnum].mode))
3972 if (reload_completed)
3974 x = copy_to_mode_reg (insn_data[icode].operand[opnum].mode, x);
3980 /* Subroutine of emit_cmp_and_jump_insns; this function is called when we know
3981 we can do the comparison.
3982 The arguments are the same as for emit_cmp_and_jump_insns; but LABEL may
3983 be NULL_RTX which indicates that only a comparison is to be generated. */
3986 emit_cmp_and_jump_insn_1 (rtx x, rtx y, enum machine_mode mode,
3987 enum rtx_code comparison, int unsignedp, rtx label)
3989 rtx test = gen_rtx_fmt_ee (comparison, mode, x, y);
3990 enum mode_class class = GET_MODE_CLASS (mode);
3991 enum machine_mode wider_mode = mode;
3993 /* Try combined insns first. */
3996 enum insn_code icode;
3997 PUT_MODE (test, wider_mode);
4001 icode = cbranch_optab->handlers[(int) wider_mode].insn_code;
4003 if (icode != CODE_FOR_nothing
4004 && insn_data[icode].operand[0].predicate (test, wider_mode))
4006 x = prepare_operand (icode, x, 1, mode, wider_mode, unsignedp);
4007 y = prepare_operand (icode, y, 2, mode, wider_mode, unsignedp);
4008 emit_jump_insn (GEN_FCN (icode) (test, x, y, label));
4013 /* Handle some compares against zero. */
4014 icode = (int) tst_optab->handlers[(int) wider_mode].insn_code;
4015 if (y == CONST0_RTX (mode) && icode != CODE_FOR_nothing)
4017 x = prepare_operand (icode, x, 0, mode, wider_mode, unsignedp);
4018 emit_insn (GEN_FCN (icode) (x));
4020 emit_jump_insn (bcc_gen_fctn[(int) comparison] (label));
4024 /* Handle compares for which there is a directly suitable insn. */
4026 icode = (int) cmp_optab->handlers[(int) wider_mode].insn_code;
4027 if (icode != CODE_FOR_nothing)
4029 x = prepare_operand (icode, x, 0, mode, wider_mode, unsignedp);
4030 y = prepare_operand (icode, y, 1, mode, wider_mode, unsignedp);
4031 emit_insn (GEN_FCN (icode) (x, y));
4033 emit_jump_insn (bcc_gen_fctn[(int) comparison] (label));
4037 if (!CLASS_HAS_WIDER_MODES_P (class))
4040 wider_mode = GET_MODE_WIDER_MODE (wider_mode);
4042 while (wider_mode != VOIDmode);
4047 /* Generate code to compare X with Y so that the condition codes are
4048 set and to jump to LABEL if the condition is true. If X is a
4049 constant and Y is not a constant, then the comparison is swapped to
4050 ensure that the comparison RTL has the canonical form.
4052 UNSIGNEDP nonzero says that X and Y are unsigned; this matters if they
4053 need to be widened by emit_cmp_insn. UNSIGNEDP is also used to select
4054 the proper branch condition code.
4056 If X and Y have mode BLKmode, then SIZE specifies the size of both X and Y.
4058 MODE is the mode of the inputs (in case they are const_int).
4060 COMPARISON is the rtl operator to compare with (EQ, NE, GT, etc.). It will
4061 be passed unchanged to emit_cmp_insn, then potentially converted into an
4062 unsigned variant based on UNSIGNEDP to select a proper jump instruction. */
4065 emit_cmp_and_jump_insns (rtx x, rtx y, enum rtx_code comparison, rtx size,
4066 enum machine_mode mode, int unsignedp, rtx label)
4068 rtx op0 = x, op1 = y;
4070 /* Swap operands and condition to ensure canonical RTL. */
4071 if (swap_commutative_operands_p (x, y))
4073 /* If we're not emitting a branch, this means some caller
4078 comparison = swap_condition (comparison);
4082 /* If OP0 is still a constant, then both X and Y must be constants.
4083 Force X into a register to create canonical RTL. */
4084 if (CONSTANT_P (op0))
4085 op0 = force_reg (mode, op0);
4089 comparison = unsigned_condition (comparison);
4091 prepare_cmp_insn (&op0, &op1, &comparison, size, &mode, &unsignedp,
4093 emit_cmp_and_jump_insn_1 (op0, op1, mode, comparison, unsignedp, label);
4096 /* Like emit_cmp_and_jump_insns, but generate only the comparison. */
4099 emit_cmp_insn (rtx x, rtx y, enum rtx_code comparison, rtx size,
4100 enum machine_mode mode, int unsignedp)
4102 emit_cmp_and_jump_insns (x, y, comparison, size, mode, unsignedp, 0);
4105 /* Emit a library call comparison between floating point X and Y.
4106 COMPARISON is the rtl operator to compare with (EQ, NE, GT, etc.). */
4109 prepare_float_lib_cmp (rtx *px, rtx *py, enum rtx_code *pcomparison,
4110 enum machine_mode *pmode, int *punsignedp)
4112 enum rtx_code comparison = *pcomparison;
4113 enum rtx_code swapped = swap_condition (comparison);
4114 enum rtx_code reversed = reverse_condition_maybe_unordered (comparison);
4117 enum machine_mode orig_mode = GET_MODE (x);
4118 enum machine_mode mode;
4119 rtx value, target, insns, equiv;
4121 bool reversed_p = false;
4123 for (mode = orig_mode;
4125 mode = GET_MODE_WIDER_MODE (mode))
4127 if ((libfunc = code_to_optab[comparison]->handlers[mode].libfunc))
4130 if ((libfunc = code_to_optab[swapped]->handlers[mode].libfunc))
4133 tmp = x; x = y; y = tmp;
4134 comparison = swapped;
4138 if ((libfunc = code_to_optab[reversed]->handlers[mode].libfunc)
4139 && FLOAT_LIB_COMPARE_RETURNS_BOOL (mode, reversed))
4141 comparison = reversed;
4147 gcc_assert (mode != VOIDmode);
4149 if (mode != orig_mode)
4151 x = convert_to_mode (mode, x, 0);
4152 y = convert_to_mode (mode, y, 0);
4155 /* Attach a REG_EQUAL note describing the semantics of the libcall to
4156 the RTL. The allows the RTL optimizers to delete the libcall if the
4157 condition can be determined at compile-time. */
4158 if (comparison == UNORDERED)
4160 rtx temp = simplify_gen_relational (NE, word_mode, mode, x, x);
4161 equiv = simplify_gen_relational (NE, word_mode, mode, y, y);
4162 equiv = simplify_gen_ternary (IF_THEN_ELSE, word_mode, word_mode,
4163 temp, const_true_rtx, equiv);
4167 equiv = simplify_gen_relational (comparison, word_mode, mode, x, y);
4168 if (! FLOAT_LIB_COMPARE_RETURNS_BOOL (mode, comparison))
4170 rtx true_rtx, false_rtx;
4175 true_rtx = const0_rtx;
4176 false_rtx = const_true_rtx;
4180 true_rtx = const_true_rtx;
4181 false_rtx = const0_rtx;
4185 true_rtx = const1_rtx;
4186 false_rtx = const0_rtx;
4190 true_rtx = const0_rtx;
4191 false_rtx = constm1_rtx;
4195 true_rtx = constm1_rtx;
4196 false_rtx = const0_rtx;
4200 true_rtx = const0_rtx;
4201 false_rtx = const1_rtx;
4207 equiv = simplify_gen_ternary (IF_THEN_ELSE, word_mode, word_mode,
4208 equiv, true_rtx, false_rtx);
4213 value = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
4214 word_mode, 2, x, mode, y, mode);
4215 insns = get_insns ();
4218 target = gen_reg_rtx (word_mode);
4219 emit_libcall_block (insns, target, value, equiv);
4221 if (comparison == UNORDERED
4222 || FLOAT_LIB_COMPARE_RETURNS_BOOL (mode, comparison))
4223 comparison = reversed_p ? EQ : NE;
4228 *pcomparison = comparison;
4232 /* Generate code to indirectly jump to a location given in the rtx LOC. */
4235 emit_indirect_jump (rtx loc)
4237 if (!insn_data[(int) CODE_FOR_indirect_jump].operand[0].predicate
4239 loc = copy_to_mode_reg (Pmode, loc);
4241 emit_jump_insn (gen_indirect_jump (loc));
4245 #ifdef HAVE_conditional_move
4247 /* Emit a conditional move instruction if the machine supports one for that
4248 condition and machine mode.
4250 OP0 and OP1 are the operands that should be compared using CODE. CMODE is
4251 the mode to use should they be constants. If it is VOIDmode, they cannot
4254 OP2 should be stored in TARGET if the comparison is true, otherwise OP3
4255 should be stored there. MODE is the mode to use should they be constants.
4256 If it is VOIDmode, they cannot both be constants.
4258 The result is either TARGET (perhaps modified) or NULL_RTX if the operation
4259 is not supported. */
4262 emit_conditional_move (rtx target, enum rtx_code code, rtx op0, rtx op1,
4263 enum machine_mode cmode, rtx op2, rtx op3,
4264 enum machine_mode mode, int unsignedp)
4266 rtx tem, subtarget, comparison, insn;
4267 enum insn_code icode;
4268 enum rtx_code reversed;
4270 /* If one operand is constant, make it the second one. Only do this
4271 if the other operand is not constant as well. */
4273 if (swap_commutative_operands_p (op0, op1))
4278 code = swap_condition (code);
4281 /* get_condition will prefer to generate LT and GT even if the old
4282 comparison was against zero, so undo that canonicalization here since
4283 comparisons against zero are cheaper. */
4284 if (code == LT && op1 == const1_rtx)
4285 code = LE, op1 = const0_rtx;
4286 else if (code == GT && op1 == constm1_rtx)
4287 code = GE, op1 = const0_rtx;
4289 if (cmode == VOIDmode)
4290 cmode = GET_MODE (op0);
4292 if (swap_commutative_operands_p (op2, op3)
4293 && ((reversed = reversed_comparison_code_parts (code, op0, op1, NULL))
4302 if (mode == VOIDmode)
4303 mode = GET_MODE (op2);
4305 icode = movcc_gen_code[mode];
4307 if (icode == CODE_FOR_nothing)
4311 target = gen_reg_rtx (mode);
4315 /* If the insn doesn't accept these operands, put them in pseudos. */
4317 if (!insn_data[icode].operand[0].predicate
4318 (subtarget, insn_data[icode].operand[0].mode))
4319 subtarget = gen_reg_rtx (insn_data[icode].operand[0].mode);
4321 if (!insn_data[icode].operand[2].predicate
4322 (op2, insn_data[icode].operand[2].mode))
4323 op2 = copy_to_mode_reg (insn_data[icode].operand[2].mode, op2);
4325 if (!insn_data[icode].operand[3].predicate
4326 (op3, insn_data[icode].operand[3].mode))
4327 op3 = copy_to_mode_reg (insn_data[icode].operand[3].mode, op3);
4329 /* Everything should now be in the suitable form, so emit the compare insn
4330 and then the conditional move. */
4333 = compare_from_rtx (op0, op1, code, unsignedp, cmode, NULL_RTX);
4335 /* ??? Watch for const0_rtx (nop) and const_true_rtx (unconditional)? */
4336 /* We can get const0_rtx or const_true_rtx in some circumstances. Just
4337 return NULL and let the caller figure out how best to deal with this
4339 if (GET_CODE (comparison) != code)
4342 insn = GEN_FCN (icode) (subtarget, comparison, op2, op3);
4344 /* If that failed, then give up. */
4350 if (subtarget != target)
4351 convert_move (target, subtarget, 0);
4356 /* Return nonzero if a conditional move of mode MODE is supported.
4358 This function is for combine so it can tell whether an insn that looks
4359 like a conditional move is actually supported by the hardware. If we
4360 guess wrong we lose a bit on optimization, but that's it. */
4361 /* ??? sparc64 supports conditionally moving integers values based on fp
4362 comparisons, and vice versa. How do we handle them? */
4365 can_conditionally_move_p (enum machine_mode mode)
4367 if (movcc_gen_code[mode] != CODE_FOR_nothing)
4373 #endif /* HAVE_conditional_move */
4375 /* Emit a conditional addition instruction if the machine supports one for that
4376 condition and machine mode.
4378 OP0 and OP1 are the operands that should be compared using CODE. CMODE is
4379 the mode to use should they be constants. If it is VOIDmode, they cannot
4382 OP2 should be stored in TARGET if the comparison is true, otherwise OP2+OP3
4383 should be stored there. MODE is the mode to use should they be constants.
4384 If it is VOIDmode, they cannot both be constants.
4386 The result is either TARGET (perhaps modified) or NULL_RTX if the operation
4387 is not supported. */
4390 emit_conditional_add (rtx target, enum rtx_code code, rtx op0, rtx op1,
4391 enum machine_mode cmode, rtx op2, rtx op3,
4392 enum machine_mode mode, int unsignedp)
4394 rtx tem, subtarget, comparison, insn;
4395 enum insn_code icode;
4396 enum rtx_code reversed;
4398 /* If one operand is constant, make it the second one. Only do this
4399 if the other operand is not constant as well. */
4401 if (swap_commutative_operands_p (op0, op1))
4406 code = swap_condition (code);
4409 /* get_condition will prefer to generate LT and GT even if the old
4410 comparison was against zero, so undo that canonicalization here since
4411 comparisons against zero are cheaper. */
4412 if (code == LT && op1 == const1_rtx)
4413 code = LE, op1 = const0_rtx;
4414 else if (code == GT && op1 == constm1_rtx)
4415 code = GE, op1 = const0_rtx;
4417 if (cmode == VOIDmode)
4418 cmode = GET_MODE (op0);
4420 if (swap_commutative_operands_p (op2, op3)
4421 && ((reversed = reversed_comparison_code_parts (code, op0, op1, NULL))
4430 if (mode == VOIDmode)
4431 mode = GET_MODE (op2);
4433 icode = addcc_optab->handlers[(int) mode].insn_code;
4435 if (icode == CODE_FOR_nothing)
4439 target = gen_reg_rtx (mode);
4441 /* If the insn doesn't accept these operands, put them in pseudos. */
4443 if (!insn_data[icode].operand[0].predicate
4444 (target, insn_data[icode].operand[0].mode))
4445 subtarget = gen_reg_rtx (insn_data[icode].operand[0].mode);
4449 if (!insn_data[icode].operand[2].predicate
4450 (op2, insn_data[icode].operand[2].mode))
4451 op2 = copy_to_mode_reg (insn_data[icode].operand[2].mode, op2);
4453 if (!insn_data[icode].operand[3].predicate
4454 (op3, insn_data[icode].operand[3].mode))
4455 op3 = copy_to_mode_reg (insn_data[icode].operand[3].mode, op3);
4457 /* Everything should now be in the suitable form, so emit the compare insn
4458 and then the conditional move. */
4461 = compare_from_rtx (op0, op1, code, unsignedp, cmode, NULL_RTX);
4463 /* ??? Watch for const0_rtx (nop) and const_true_rtx (unconditional)? */
4464 /* We can get const0_rtx or const_true_rtx in some circumstances. Just
4465 return NULL and let the caller figure out how best to deal with this
4467 if (GET_CODE (comparison) != code)
4470 insn = GEN_FCN (icode) (subtarget, comparison, op2, op3);
4472 /* If that failed, then give up. */
4478 if (subtarget != target)
4479 convert_move (target, subtarget, 0);
4484 /* These functions attempt to generate an insn body, rather than
4485 emitting the insn, but if the gen function already emits them, we
4486 make no attempt to turn them back into naked patterns. */
4488 /* Generate and return an insn body to add Y to X. */
4491 gen_add2_insn (rtx x, rtx y)
4493 int icode = (int) add_optab->handlers[(int) GET_MODE (x)].insn_code;
4495 gcc_assert (insn_data[icode].operand[0].predicate
4496 (x, insn_data[icode].operand[0].mode));
4497 gcc_assert (insn_data[icode].operand[1].predicate
4498 (x, insn_data[icode].operand[1].mode));
4499 gcc_assert (insn_data[icode].operand[2].predicate
4500 (y, insn_data[icode].operand[2].mode));
4502 return GEN_FCN (icode) (x, x, y);
4505 /* Generate and return an insn body to add r1 and c,
4506 storing the result in r0. */
4508 gen_add3_insn (rtx r0, rtx r1, rtx c)
4510 int icode = (int) add_optab->handlers[(int) GET_MODE (r0)].insn_code;
4512 if (icode == CODE_FOR_nothing
4513 || !(insn_data[icode].operand[0].predicate
4514 (r0, insn_data[icode].operand[0].mode))
4515 || !(insn_data[icode].operand[1].predicate
4516 (r1, insn_data[icode].operand[1].mode))
4517 || !(insn_data[icode].operand[2].predicate
4518 (c, insn_data[icode].operand[2].mode)))
4521 return GEN_FCN (icode) (r0, r1, c);
4525 have_add2_insn (rtx x, rtx y)
4529 gcc_assert (GET_MODE (x) != VOIDmode);
4531 icode = (int) add_optab->handlers[(int) GET_MODE (x)].insn_code;
4533 if (icode == CODE_FOR_nothing)
4536 if (!(insn_data[icode].operand[0].predicate
4537 (x, insn_data[icode].operand[0].mode))
4538 || !(insn_data[icode].operand[1].predicate
4539 (x, insn_data[icode].operand[1].mode))
4540 || !(insn_data[icode].operand[2].predicate
4541 (y, insn_data[icode].operand[2].mode)))
4547 /* Generate and return an insn body to subtract Y from X. */
4550 gen_sub2_insn (rtx x, rtx y)
4552 int icode = (int) sub_optab->handlers[(int) GET_MODE (x)].insn_code;
4554 gcc_assert (insn_data[icode].operand[0].predicate
4555 (x, insn_data[icode].operand[0].mode));
4556 gcc_assert (insn_data[icode].operand[1].predicate
4557 (x, insn_data[icode].operand[1].mode));
4558 gcc_assert (insn_data[icode].operand[2].predicate
4559 (y, insn_data[icode].operand[2].mode));
4561 return GEN_FCN (icode) (x, x, y);
4564 /* Generate and return an insn body to subtract r1 and c,
4565 storing the result in r0. */
4567 gen_sub3_insn (rtx r0, rtx r1, rtx c)
4569 int icode = (int) sub_optab->handlers[(int) GET_MODE (r0)].insn_code;
4571 if (icode == CODE_FOR_nothing
4572 || !(insn_data[icode].operand[0].predicate
4573 (r0, insn_data[icode].operand[0].mode))
4574 || !(insn_data[icode].operand[1].predicate
4575 (r1, insn_data[icode].operand[1].mode))
4576 || !(insn_data[icode].operand[2].predicate
4577 (c, insn_data[icode].operand[2].mode)))
4580 return GEN_FCN (icode) (r0, r1, c);
4584 have_sub2_insn (rtx x, rtx y)
4588 gcc_assert (GET_MODE (x) != VOIDmode);
4590 icode = (int) sub_optab->handlers[(int) GET_MODE (x)].insn_code;
4592 if (icode == CODE_FOR_nothing)
4595 if (!(insn_data[icode].operand[0].predicate
4596 (x, insn_data[icode].operand[0].mode))
4597 || !(insn_data[icode].operand[1].predicate
4598 (x, insn_data[icode].operand[1].mode))
4599 || !(insn_data[icode].operand[2].predicate
4600 (y, insn_data[icode].operand[2].mode)))
4606 /* Generate the body of an instruction to copy Y into X.
4607 It may be a list of insns, if one insn isn't enough. */
4610 gen_move_insn (rtx x, rtx y)
4615 emit_move_insn_1 (x, y);
4621 /* Return the insn code used to extend FROM_MODE to TO_MODE.
4622 UNSIGNEDP specifies zero-extension instead of sign-extension. If
4623 no such operation exists, CODE_FOR_nothing will be returned. */
4626 can_extend_p (enum machine_mode to_mode, enum machine_mode from_mode,
4630 #ifdef HAVE_ptr_extend
4632 return CODE_FOR_ptr_extend;
4635 tab = unsignedp ? zext_optab : sext_optab;
4636 return tab->handlers[to_mode][from_mode].insn_code;
4639 /* Generate the body of an insn to extend Y (with mode MFROM)
4640 into X (with mode MTO). Do zero-extension if UNSIGNEDP is nonzero. */
4643 gen_extend_insn (rtx x, rtx y, enum machine_mode mto,
4644 enum machine_mode mfrom, int unsignedp)
4646 enum insn_code icode = can_extend_p (mto, mfrom, unsignedp);
4647 return GEN_FCN (icode) (x, y);
4650 /* can_fix_p and can_float_p say whether the target machine
4651 can directly convert a given fixed point type to
4652 a given floating point type, or vice versa.
4653 The returned value is the CODE_FOR_... value to use,
4654 or CODE_FOR_nothing if these modes cannot be directly converted.
4656 *TRUNCP_PTR is set to 1 if it is necessary to output
4657 an explicit FTRUNC insn before the fix insn; otherwise 0. */
4659 static enum insn_code
4660 can_fix_p (enum machine_mode fixmode, enum machine_mode fltmode,
4661 int unsignedp, int *truncp_ptr)
4664 enum insn_code icode;
4666 tab = unsignedp ? ufixtrunc_optab : sfixtrunc_optab;
4667 icode = tab->handlers[fixmode][fltmode].insn_code;
4668 if (icode != CODE_FOR_nothing)
4674 /* FIXME: This requires a port to define both FIX and FTRUNC pattern
4675 for this to work. We need to rework the fix* and ftrunc* patterns
4676 and documentation. */
4677 tab = unsignedp ? ufix_optab : sfix_optab;
4678 icode = tab->handlers[fixmode][fltmode].insn_code;
4679 if (icode != CODE_FOR_nothing
4680 && ftrunc_optab->handlers[fltmode].insn_code != CODE_FOR_nothing)
4687 return CODE_FOR_nothing;
4690 static enum insn_code
4691 can_float_p (enum machine_mode fltmode, enum machine_mode fixmode,
4696 tab = unsignedp ? ufloat_optab : sfloat_optab;
4697 return tab->handlers[fltmode][fixmode].insn_code;
4700 /* Generate code to convert FROM to floating point
4701 and store in TO. FROM must be fixed point and not VOIDmode.
4702 UNSIGNEDP nonzero means regard FROM as unsigned.
4703 Normally this is done by correcting the final value
4704 if it is negative. */
4707 expand_float (rtx to, rtx from, int unsignedp)
4709 enum insn_code icode;
4711 enum machine_mode fmode, imode;
4712 bool can_do_signed = false;
4714 /* Crash now, because we won't be able to decide which mode to use. */
4715 gcc_assert (GET_MODE (from) != VOIDmode);
4717 /* Look for an insn to do the conversion. Do it in the specified
4718 modes if possible; otherwise convert either input, output or both to
4719 wider mode. If the integer mode is wider than the mode of FROM,
4720 we can do the conversion signed even if the input is unsigned. */
4722 for (fmode = GET_MODE (to); fmode != VOIDmode;
4723 fmode = GET_MODE_WIDER_MODE (fmode))
4724 for (imode = GET_MODE (from); imode != VOIDmode;
4725 imode = GET_MODE_WIDER_MODE (imode))
4727 int doing_unsigned = unsignedp;
4729 if (fmode != GET_MODE (to)
4730 && significand_size (fmode) < GET_MODE_BITSIZE (GET_MODE (from)))
4733 icode = can_float_p (fmode, imode, unsignedp);
4734 if (icode == CODE_FOR_nothing && unsignedp)
4736 enum insn_code scode = can_float_p (fmode, imode, 0);
4737 if (scode != CODE_FOR_nothing)
4738 can_do_signed = true;
4739 if (imode != GET_MODE (from))
4740 icode = scode, doing_unsigned = 0;
4743 if (icode != CODE_FOR_nothing)
4745 if (imode != GET_MODE (from))
4746 from = convert_to_mode (imode, from, unsignedp);
4748 if (fmode != GET_MODE (to))
4749 target = gen_reg_rtx (fmode);
4751 emit_unop_insn (icode, target, from,
4752 doing_unsigned ? UNSIGNED_FLOAT : FLOAT);
4755 convert_move (to, target, 0);
4760 /* Unsigned integer, and no way to convert directly. For binary
4761 floating point modes, convert as signed, then conditionally adjust
4763 if (unsignedp && can_do_signed && !DECIMAL_FLOAT_MODE_P (GET_MODE (to)))
4765 rtx label = gen_label_rtx ();
4767 REAL_VALUE_TYPE offset;
4769 /* Look for a usable floating mode FMODE wider than the source and at
4770 least as wide as the target. Using FMODE will avoid rounding woes
4771 with unsigned values greater than the signed maximum value. */
4773 for (fmode = GET_MODE (to); fmode != VOIDmode;
4774 fmode = GET_MODE_WIDER_MODE (fmode))
4775 if (GET_MODE_BITSIZE (GET_MODE (from)) < GET_MODE_BITSIZE (fmode)
4776 && can_float_p (fmode, GET_MODE (from), 0) != CODE_FOR_nothing)
4779 if (fmode == VOIDmode)
4781 /* There is no such mode. Pretend the target is wide enough. */
4782 fmode = GET_MODE (to);
4784 /* Avoid double-rounding when TO is narrower than FROM. */
4785 if ((significand_size (fmode) + 1)
4786 < GET_MODE_BITSIZE (GET_MODE (from)))
4789 rtx neglabel = gen_label_rtx ();
4791 /* Don't use TARGET if it isn't a register, is a hard register,
4792 or is the wrong mode. */
4794 || REGNO (target) < FIRST_PSEUDO_REGISTER
4795 || GET_MODE (target) != fmode)
4796 target = gen_reg_rtx (fmode);
4798 imode = GET_MODE (from);
4799 do_pending_stack_adjust ();
4801 /* Test whether the sign bit is set. */
4802 emit_cmp_and_jump_insns (from, const0_rtx, LT, NULL_RTX, imode,
4805 /* The sign bit is not set. Convert as signed. */
4806 expand_float (target, from, 0);
4807 emit_jump_insn (gen_jump (label));
4810 /* The sign bit is set.
4811 Convert to a usable (positive signed) value by shifting right
4812 one bit, while remembering if a nonzero bit was shifted
4813 out; i.e., compute (from & 1) | (from >> 1). */
4815 emit_label (neglabel);
4816 temp = expand_binop (imode, and_optab, from, const1_rtx,
4817 NULL_RTX, 1, OPTAB_LIB_WIDEN);
4818 temp1 = expand_shift (RSHIFT_EXPR, imode, from, integer_one_node,
4820 temp = expand_binop (imode, ior_optab, temp, temp1, temp, 1,
4822 expand_float (target, temp, 0);
4824 /* Multiply by 2 to undo the shift above. */
4825 temp = expand_binop (fmode, add_optab, target, target,
4826 target, 0, OPTAB_LIB_WIDEN);
4828 emit_move_insn (target, temp);
4830 do_pending_stack_adjust ();
4836 /* If we are about to do some arithmetic to correct for an
4837 unsigned operand, do it in a pseudo-register. */
4839 if (GET_MODE (to) != fmode
4840 || !REG_P (to) || REGNO (to) < FIRST_PSEUDO_REGISTER)
4841 target = gen_reg_rtx (fmode);
4843 /* Convert as signed integer to floating. */
4844 expand_float (target, from, 0);
4846 /* If FROM is negative (and therefore TO is negative),
4847 correct its value by 2**bitwidth. */
4849 do_pending_stack_adjust ();
4850 emit_cmp_and_jump_insns (from, const0_rtx, GE, NULL_RTX, GET_MODE (from),
4854 real_2expN (&offset, GET_MODE_BITSIZE (GET_MODE (from)));
4855 temp = expand_binop (fmode, add_optab, target,
4856 CONST_DOUBLE_FROM_REAL_VALUE (offset, fmode),
4857 target, 0, OPTAB_LIB_WIDEN);
4859 emit_move_insn (target, temp);
4861 do_pending_stack_adjust ();
4866 /* No hardware instruction available; call a library routine. */
4871 convert_optab tab = unsignedp ? ufloat_optab : sfloat_optab;
4873 if (GET_MODE_SIZE (GET_MODE (from)) < GET_MODE_SIZE (SImode))
4874 from = convert_to_mode (SImode, from, unsignedp);
4876 libfunc = tab->handlers[GET_MODE (to)][GET_MODE (from)].libfunc;
4877 gcc_assert (libfunc);
4881 value = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
4882 GET_MODE (to), 1, from,
4884 insns = get_insns ();
4887 emit_libcall_block (insns, target, value,
4888 gen_rtx_FLOAT (GET_MODE (to), from));
4893 /* Copy result to requested destination
4894 if we have been computing in a temp location. */
4898 if (GET_MODE (target) == GET_MODE (to))
4899 emit_move_insn (to, target);
4901 convert_move (to, target, 0);
4905 /* Generate code to convert FROM to fixed point and store in TO. FROM
4906 must be floating point. */
4909 expand_fix (rtx to, rtx from, int unsignedp)
4911 enum insn_code icode;
4913 enum machine_mode fmode, imode;
4916 /* We first try to find a pair of modes, one real and one integer, at
4917 least as wide as FROM and TO, respectively, in which we can open-code
4918 this conversion. If the integer mode is wider than the mode of TO,
4919 we can do the conversion either signed or unsigned. */
4921 for (fmode = GET_MODE (from); fmode != VOIDmode;
4922 fmode = GET_MODE_WIDER_MODE (fmode))
4923 for (imode = GET_MODE (to); imode != VOIDmode;
4924 imode = GET_MODE_WIDER_MODE (imode))
4926 int doing_unsigned = unsignedp;
4928 icode = can_fix_p (imode, fmode, unsignedp, &must_trunc);
4929 if (icode == CODE_FOR_nothing && imode != GET_MODE (to) && unsignedp)
4930 icode = can_fix_p (imode, fmode, 0, &must_trunc), doing_unsigned = 0;
4932 if (icode != CODE_FOR_nothing)
4934 if (fmode != GET_MODE (from))
4935 from = convert_to_mode (fmode, from, 0);
4939 rtx temp = gen_reg_rtx (GET_MODE (from));
4940 from = expand_unop (GET_MODE (from), ftrunc_optab, from,
4944 if (imode != GET_MODE (to))
4945 target = gen_reg_rtx (imode);
4947 emit_unop_insn (icode, target, from,
4948 doing_unsigned ? UNSIGNED_FIX : FIX);
4950 convert_move (to, target, unsignedp);
4955 /* For an unsigned conversion, there is one more way to do it.
4956 If we have a signed conversion, we generate code that compares
4957 the real value to the largest representable positive number. If if
4958 is smaller, the conversion is done normally. Otherwise, subtract
4959 one plus the highest signed number, convert, and add it back.
4961 We only need to check all real modes, since we know we didn't find
4962 anything with a wider integer mode.
4964 This code used to extend FP value into mode wider than the destination.
4965 This is not needed. Consider, for instance conversion from SFmode
4968 The hot path through the code is dealing with inputs smaller than 2^63
4969 and doing just the conversion, so there is no bits to lose.
4971 In the other path we know the value is positive in the range 2^63..2^64-1
4972 inclusive. (as for other imput overflow happens and result is undefined)
4973 So we know that the most important bit set in mantissa corresponds to
4974 2^63. The subtraction of 2^63 should not generate any rounding as it
4975 simply clears out that bit. The rest is trivial. */
4977 if (unsignedp && GET_MODE_BITSIZE (GET_MODE (to)) <= HOST_BITS_PER_WIDE_INT)
4978 for (fmode = GET_MODE (from); fmode != VOIDmode;
4979 fmode = GET_MODE_WIDER_MODE (fmode))
4980 if (CODE_FOR_nothing != can_fix_p (GET_MODE (to), fmode, 0,
4984 REAL_VALUE_TYPE offset;
4985 rtx limit, lab1, lab2, insn;
4987 bitsize = GET_MODE_BITSIZE (GET_MODE (to));
4988 real_2expN (&offset, bitsize - 1);
4989 limit = CONST_DOUBLE_FROM_REAL_VALUE (offset, fmode);
4990 lab1 = gen_label_rtx ();
4991 lab2 = gen_label_rtx ();
4993 if (fmode != GET_MODE (from))
4994 from = convert_to_mode (fmode, from, 0);
4996 /* See if we need to do the subtraction. */
4997 do_pending_stack_adjust ();
4998 emit_cmp_and_jump_insns (from, limit, GE, NULL_RTX, GET_MODE (from),
5001 /* If not, do the signed "fix" and branch around fixup code. */
5002 expand_fix (to, from, 0);
5003 emit_jump_insn (gen_jump (lab2));
5006 /* Otherwise, subtract 2**(N-1), convert to signed number,
5007 then add 2**(N-1). Do the addition using XOR since this
5008 will often generate better code. */
5010 target = expand_binop (GET_MODE (from), sub_optab, from, limit,
5011 NULL_RTX, 0, OPTAB_LIB_WIDEN);
5012 expand_fix (to, target, 0);
5013 target = expand_binop (GET_MODE (to), xor_optab, to,
5015 ((HOST_WIDE_INT) 1 << (bitsize - 1),
5017 to, 1, OPTAB_LIB_WIDEN);
5020 emit_move_insn (to, target);
5024 if (mov_optab->handlers[(int) GET_MODE (to)].insn_code
5025 != CODE_FOR_nothing)
5027 /* Make a place for a REG_NOTE and add it. */
5028 insn = emit_move_insn (to, to);
5029 set_unique_reg_note (insn,
5031 gen_rtx_fmt_e (UNSIGNED_FIX,
5039 /* We can't do it with an insn, so use a library call. But first ensure
5040 that the mode of TO is at least as wide as SImode, since those are the
5041 only library calls we know about. */
5043 if (GET_MODE_SIZE (GET_MODE (to)) < GET_MODE_SIZE (SImode))
5045 target = gen_reg_rtx (SImode);
5047 expand_fix (target, from, unsignedp);
5055 convert_optab tab = unsignedp ? ufix_optab : sfix_optab;
5056 libfunc = tab->handlers[GET_MODE (to)][GET_MODE (from)].libfunc;
5057 gcc_assert (libfunc);
5061 value = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
5062 GET_MODE (to), 1, from,
5064 insns = get_insns ();
5067 emit_libcall_block (insns, target, value,
5068 gen_rtx_fmt_e (unsignedp ? UNSIGNED_FIX : FIX,
5069 GET_MODE (to), from));
5074 if (GET_MODE (to) == GET_MODE (target))
5075 emit_move_insn (to, target);
5077 convert_move (to, target, 0);
5081 /* Generate code to convert FROM to fixed point and store in TO. FROM
5082 must be floating point, TO must be signed. Use the conversion optab
5083 TAB to do the conversion. */
5086 expand_sfix_optab (rtx to, rtx from, convert_optab tab)
5088 enum insn_code icode;
5090 enum machine_mode fmode, imode;
5092 /* We first try to find a pair of modes, one real and one integer, at
5093 least as wide as FROM and TO, respectively, in which we can open-code
5094 this conversion. If the integer mode is wider than the mode of TO,
5095 we can do the conversion either signed or unsigned. */
5097 for (fmode = GET_MODE (from); fmode != VOIDmode;
5098 fmode = GET_MODE_WIDER_MODE (fmode))
5099 for (imode = GET_MODE (to); imode != VOIDmode;
5100 imode = GET_MODE_WIDER_MODE (imode))
5102 icode = tab->handlers[imode][fmode].insn_code;
5103 if (icode != CODE_FOR_nothing)
5105 if (fmode != GET_MODE (from))
5106 from = convert_to_mode (fmode, from, 0);
5108 if (imode != GET_MODE (to))
5109 target = gen_reg_rtx (imode);
5111 emit_unop_insn (icode, target, from, UNKNOWN);
5113 convert_move (to, target, 0);
5121 /* Report whether we have an instruction to perform the operation
5122 specified by CODE on operands of mode MODE. */
5124 have_insn_for (enum rtx_code code, enum machine_mode mode)
5126 return (code_to_optab[(int) code] != 0
5127 && (code_to_optab[(int) code]->handlers[(int) mode].insn_code
5128 != CODE_FOR_nothing));
5131 /* Create a blank optab. */
5136 optab op = ggc_alloc (sizeof (struct optab));
5137 for (i = 0; i < NUM_MACHINE_MODES; i++)
5139 op->handlers[i].insn_code = CODE_FOR_nothing;
5140 op->handlers[i].libfunc = 0;
5146 static convert_optab
5147 new_convert_optab (void)
5150 convert_optab op = ggc_alloc (sizeof (struct convert_optab));
5151 for (i = 0; i < NUM_MACHINE_MODES; i++)
5152 for (j = 0; j < NUM_MACHINE_MODES; j++)
5154 op->handlers[i][j].insn_code = CODE_FOR_nothing;
5155 op->handlers[i][j].libfunc = 0;
5160 /* Same, but fill in its code as CODE, and write it into the
5161 code_to_optab table. */
5163 init_optab (enum rtx_code code)
5165 optab op = new_optab ();
5167 code_to_optab[(int) code] = op;
5171 /* Same, but fill in its code as CODE, and do _not_ write it into
5172 the code_to_optab table. */
5174 init_optabv (enum rtx_code code)
5176 optab op = new_optab ();
5181 /* Conversion optabs never go in the code_to_optab table. */
5182 static inline convert_optab
5183 init_convert_optab (enum rtx_code code)
5185 convert_optab op = new_convert_optab ();
5190 /* Initialize the libfunc fields of an entire group of entries in some
5191 optab. Each entry is set equal to a string consisting of a leading
5192 pair of underscores followed by a generic operation name followed by
5193 a mode name (downshifted to lowercase) followed by a single character
5194 representing the number of operands for the given operation (which is
5195 usually one of the characters '2', '3', or '4').
5197 OPTABLE is the table in which libfunc fields are to be initialized.
5198 FIRST_MODE is the first machine mode index in the given optab to
5200 LAST_MODE is the last machine mode index in the given optab to
5202 OPNAME is the generic (string) name of the operation.
5203 SUFFIX is the character which specifies the number of operands for
5204 the given generic operation.
5208 init_libfuncs (optab optable, int first_mode, int last_mode,
5209 const char *opname, int suffix)
5212 unsigned opname_len = strlen (opname);
5214 for (mode = first_mode; (int) mode <= (int) last_mode;
5215 mode = (enum machine_mode) ((int) mode + 1))
5217 const char *mname = GET_MODE_NAME (mode);
5218 unsigned mname_len = strlen (mname);
5219 char *libfunc_name = alloca (2 + opname_len + mname_len + 1 + 1);
5226 for (q = opname; *q; )
5228 for (q = mname; *q; q++)
5229 *p++ = TOLOWER (*q);
5233 optable->handlers[(int) mode].libfunc
5234 = init_one_libfunc (ggc_alloc_string (libfunc_name, p - libfunc_name));
5238 /* Initialize the libfunc fields of an entire group of entries in some
5239 optab which correspond to all integer mode operations. The parameters
5240 have the same meaning as similarly named ones for the `init_libfuncs'
5241 routine. (See above). */
5244 init_integral_libfuncs (optab optable, const char *opname, int suffix)
5246 int maxsize = 2*BITS_PER_WORD;
5247 if (maxsize < LONG_LONG_TYPE_SIZE)
5248 maxsize = LONG_LONG_TYPE_SIZE;
5249 init_libfuncs (optable, word_mode,
5250 mode_for_size (maxsize, MODE_INT, 0),
5254 /* Initialize the libfunc fields of an entire group of entries in some
5255 optab which correspond to all real mode operations. The parameters
5256 have the same meaning as similarly named ones for the `init_libfuncs'
5257 routine. (See above). */
5260 init_floating_libfuncs (optab optable, const char *opname, int suffix)
5262 char *dec_opname = alloca (sizeof (DECIMAL_PREFIX) + strlen (opname));
5264 /* For BID support, change the name to have either a bid_ or dpd_ prefix
5265 depending on the low level floating format used. */
5266 memcpy (dec_opname, DECIMAL_PREFIX, sizeof (DECIMAL_PREFIX) - 1);
5267 strcpy (dec_opname + sizeof (DECIMAL_PREFIX) - 1, opname);
5269 init_libfuncs (optable, MIN_MODE_FLOAT, MAX_MODE_FLOAT, opname, suffix);
5270 init_libfuncs (optable, MIN_MODE_DECIMAL_FLOAT, MAX_MODE_DECIMAL_FLOAT,
5271 dec_opname, suffix);
5274 /* Initialize the libfunc fields of an entire group of entries of an
5275 inter-mode-class conversion optab. The string formation rules are
5276 similar to the ones for init_libfuncs, above, but instead of having
5277 a mode name and an operand count these functions have two mode names
5278 and no operand count. */
5280 init_interclass_conv_libfuncs (convert_optab tab, const char *opname,
5281 enum mode_class from_class,
5282 enum mode_class to_class)
5284 enum machine_mode first_from_mode = GET_CLASS_NARROWEST_MODE (from_class);
5285 enum machine_mode first_to_mode = GET_CLASS_NARROWEST_MODE (to_class);
5286 size_t opname_len = strlen (opname);
5287 size_t max_mname_len = 0;
5289 enum machine_mode fmode, tmode;
5290 const char *fname, *tname;
5292 char *libfunc_name, *suffix;
5293 char *nondec_name, *dec_name, *nondec_suffix, *dec_suffix;
5296 /* If this is a decimal conversion, add the current BID vs. DPD prefix that
5297 depends on which underlying decimal floating point format is used. */
5298 const size_t dec_len = sizeof (DECIMAL_PREFIX) - 1;
5300 for (fmode = first_from_mode;
5302 fmode = GET_MODE_WIDER_MODE (fmode))
5303 max_mname_len = MAX (max_mname_len, strlen (GET_MODE_NAME (fmode)));
5305 for (tmode = first_to_mode;
5307 tmode = GET_MODE_WIDER_MODE (tmode))
5308 max_mname_len = MAX (max_mname_len, strlen (GET_MODE_NAME (tmode)));
5310 nondec_name = alloca (2 + opname_len + 2*max_mname_len + 1 + 1);
5311 nondec_name[0] = '_';
5312 nondec_name[1] = '_';
5313 memcpy (&nondec_name[2], opname, opname_len);
5314 nondec_suffix = nondec_name + opname_len + 2;
5316 dec_name = alloca (2 + dec_len + opname_len + 2*max_mname_len + 1 + 1);
5319 memcpy (&dec_name[2], DECIMAL_PREFIX, dec_len);
5320 memcpy (&dec_name[2+dec_len], opname, opname_len);
5321 dec_suffix = dec_name + dec_len + opname_len + 2;
5323 for (fmode = first_from_mode; fmode != VOIDmode;
5324 fmode = GET_MODE_WIDER_MODE (fmode))
5325 for (tmode = first_to_mode; tmode != VOIDmode;
5326 tmode = GET_MODE_WIDER_MODE (tmode))
5328 fname = GET_MODE_NAME (fmode);
5329 tname = GET_MODE_NAME (tmode);
5331 if (DECIMAL_FLOAT_MODE_P(fmode) || DECIMAL_FLOAT_MODE_P(tmode))
5333 libfunc_name = dec_name;
5334 suffix = dec_suffix;
5338 libfunc_name = nondec_name;
5339 suffix = nondec_suffix;
5343 for (q = fname; *q; p++, q++)
5345 for (q = tname; *q; p++, q++)
5350 tab->handlers[tmode][fmode].libfunc
5351 = init_one_libfunc (ggc_alloc_string (libfunc_name,
5356 /* Initialize the libfunc fields of an entire group of entries of an
5357 intra-mode-class conversion optab. The string formation rules are
5358 similar to the ones for init_libfunc, above. WIDENING says whether
5359 the optab goes from narrow to wide modes or vice versa. These functions
5360 have two mode names _and_ an operand count. */
5362 init_intraclass_conv_libfuncs (convert_optab tab, const char *opname,
5363 enum mode_class class, bool widening)
5365 enum machine_mode first_mode = GET_CLASS_NARROWEST_MODE (class);
5366 size_t opname_len = strlen (opname);
5367 size_t max_mname_len = 0;
5369 enum machine_mode nmode, wmode;
5370 const char *nname, *wname;
5372 char *nondec_name, *dec_name, *nondec_suffix, *dec_suffix;
5373 char *libfunc_name, *suffix;
5376 /* If this is a decimal conversion, add the current BID vs. DPD prefix that
5377 depends on which underlying decimal floating point format is used. */
5378 const size_t dec_len = sizeof (DECIMAL_PREFIX) - 1;
5380 for (nmode = first_mode; nmode != VOIDmode;
5381 nmode = GET_MODE_WIDER_MODE (nmode))
5382 max_mname_len = MAX (max_mname_len, strlen (GET_MODE_NAME (nmode)));
5384 nondec_name = alloca (2 + opname_len + 2*max_mname_len + 1 + 1);
5385 nondec_name[0] = '_';
5386 nondec_name[1] = '_';
5387 memcpy (&nondec_name[2], opname, opname_len);
5388 nondec_suffix = nondec_name + opname_len + 2;
5390 dec_name = alloca (2 + dec_len + opname_len + 2*max_mname_len + 1 + 1);
5393 memcpy (&dec_name[2], DECIMAL_PREFIX, dec_len);
5394 memcpy (&dec_name[2 + dec_len], opname, opname_len);
5395 dec_suffix = dec_name + dec_len + opname_len + 2;
5397 for (nmode = first_mode; nmode != VOIDmode;
5398 nmode = GET_MODE_WIDER_MODE (nmode))
5399 for (wmode = GET_MODE_WIDER_MODE (nmode); wmode != VOIDmode;
5400 wmode = GET_MODE_WIDER_MODE (wmode))
5402 nname = GET_MODE_NAME (nmode);
5403 wname = GET_MODE_NAME (wmode);
5405 if (DECIMAL_FLOAT_MODE_P(nmode) || DECIMAL_FLOAT_MODE_P(wmode))
5407 libfunc_name = dec_name;
5408 suffix = dec_suffix;
5412 libfunc_name = nondec_name;
5413 suffix = nondec_suffix;
5417 for (q = widening ? nname : wname; *q; p++, q++)
5419 for (q = widening ? wname : nname; *q; p++, q++)
5425 tab->handlers[widening ? wmode : nmode]
5426 [widening ? nmode : wmode].libfunc
5427 = init_one_libfunc (ggc_alloc_string (libfunc_name,
5434 init_one_libfunc (const char *name)
5438 /* Create a FUNCTION_DECL that can be passed to
5439 targetm.encode_section_info. */
5440 /* ??? We don't have any type information except for this is
5441 a function. Pretend this is "int foo()". */
5442 tree decl = build_decl (FUNCTION_DECL, get_identifier (name),
5443 build_function_type (integer_type_node, NULL_TREE));
5444 DECL_ARTIFICIAL (decl) = 1;
5445 DECL_EXTERNAL (decl) = 1;
5446 TREE_PUBLIC (decl) = 1;
5448 symbol = XEXP (DECL_RTL (decl), 0);
5450 /* Zap the nonsensical SYMBOL_REF_DECL for this. What we're left with
5451 are the flags assigned by targetm.encode_section_info. */
5452 SET_SYMBOL_REF_DECL (symbol, 0);
5457 /* Call this to reset the function entry for one optab (OPTABLE) in mode
5458 MODE to NAME, which should be either 0 or a string constant. */
5460 set_optab_libfunc (optab optable, enum machine_mode mode, const char *name)
5463 optable->handlers[mode].libfunc = init_one_libfunc (name);
5465 optable->handlers[mode].libfunc = 0;
5468 /* Call this to reset the function entry for one conversion optab
5469 (OPTABLE) from mode FMODE to mode TMODE to NAME, which should be
5470 either 0 or a string constant. */
5472 set_conv_libfunc (convert_optab optable, enum machine_mode tmode,
5473 enum machine_mode fmode, const char *name)
5476 optable->handlers[tmode][fmode].libfunc = init_one_libfunc (name);
5478 optable->handlers[tmode][fmode].libfunc = 0;
5481 /* Call this once to initialize the contents of the optabs
5482 appropriately for the current target machine. */
5488 enum machine_mode int_mode;
5490 /* Start by initializing all tables to contain CODE_FOR_nothing. */
5492 for (i = 0; i < NUM_RTX_CODE; i++)
5493 setcc_gen_code[i] = CODE_FOR_nothing;
5495 #ifdef HAVE_conditional_move
5496 for (i = 0; i < NUM_MACHINE_MODES; i++)
5497 movcc_gen_code[i] = CODE_FOR_nothing;
5500 for (i = 0; i < NUM_MACHINE_MODES; i++)
5502 vcond_gen_code[i] = CODE_FOR_nothing;
5503 vcondu_gen_code[i] = CODE_FOR_nothing;
5506 add_optab = init_optab (PLUS);
5507 addv_optab = init_optabv (PLUS);
5508 sub_optab = init_optab (MINUS);
5509 subv_optab = init_optabv (MINUS);
5510 smul_optab = init_optab (MULT);
5511 smulv_optab = init_optabv (MULT);
5512 smul_highpart_optab = init_optab (UNKNOWN);
5513 umul_highpart_optab = init_optab (UNKNOWN);
5514 smul_widen_optab = init_optab (UNKNOWN);
5515 umul_widen_optab = init_optab (UNKNOWN);
5516 usmul_widen_optab = init_optab (UNKNOWN);
5517 smadd_widen_optab = init_optab (UNKNOWN);
5518 umadd_widen_optab = init_optab (UNKNOWN);
5519 smsub_widen_optab = init_optab (UNKNOWN);
5520 umsub_widen_optab = init_optab (UNKNOWN);
5521 sdiv_optab = init_optab (DIV);
5522 sdivv_optab = init_optabv (DIV);
5523 sdivmod_optab = init_optab (UNKNOWN);
5524 udiv_optab = init_optab (UDIV);
5525 udivmod_optab = init_optab (UNKNOWN);
5526 smod_optab = init_optab (MOD);
5527 umod_optab = init_optab (UMOD);
5528 fmod_optab = init_optab (UNKNOWN);
5529 remainder_optab = init_optab (UNKNOWN);
5530 ftrunc_optab = init_optab (UNKNOWN);
5531 and_optab = init_optab (AND);
5532 ior_optab = init_optab (IOR);
5533 xor_optab = init_optab (XOR);
5534 ashl_optab = init_optab (ASHIFT);
5535 ashr_optab = init_optab (ASHIFTRT);
5536 lshr_optab = init_optab (LSHIFTRT);
5537 rotl_optab = init_optab (ROTATE);
5538 rotr_optab = init_optab (ROTATERT);
5539 smin_optab = init_optab (SMIN);
5540 smax_optab = init_optab (SMAX);
5541 umin_optab = init_optab (UMIN);
5542 umax_optab = init_optab (UMAX);
5543 pow_optab = init_optab (UNKNOWN);
5544 atan2_optab = init_optab (UNKNOWN);
5546 /* These three have codes assigned exclusively for the sake of
5548 mov_optab = init_optab (SET);
5549 movstrict_optab = init_optab (STRICT_LOW_PART);
5550 cmp_optab = init_optab (COMPARE);
5552 storent_optab = init_optab (UNKNOWN);
5554 ucmp_optab = init_optab (UNKNOWN);
5555 tst_optab = init_optab (UNKNOWN);
5557 eq_optab = init_optab (EQ);
5558 ne_optab = init_optab (NE);
5559 gt_optab = init_optab (GT);
5560 ge_optab = init_optab (GE);
5561 lt_optab = init_optab (LT);
5562 le_optab = init_optab (LE);
5563 unord_optab = init_optab (UNORDERED);
5565 neg_optab = init_optab (NEG);
5566 negv_optab = init_optabv (NEG);
5567 abs_optab = init_optab (ABS);
5568 absv_optab = init_optabv (ABS);
5569 addcc_optab = init_optab (UNKNOWN);
5570 one_cmpl_optab = init_optab (NOT);
5571 bswap_optab = init_optab (BSWAP);
5572 ffs_optab = init_optab (FFS);
5573 clz_optab = init_optab (CLZ);
5574 ctz_optab = init_optab (CTZ);
5575 popcount_optab = init_optab (POPCOUNT);
5576 parity_optab = init_optab (PARITY);
5577 sqrt_optab = init_optab (SQRT);
5578 floor_optab = init_optab (UNKNOWN);
5579 ceil_optab = init_optab (UNKNOWN);
5580 round_optab = init_optab (UNKNOWN);
5581 btrunc_optab = init_optab (UNKNOWN);
5582 nearbyint_optab = init_optab (UNKNOWN);
5583 rint_optab = init_optab (UNKNOWN);
5584 sincos_optab = init_optab (UNKNOWN);
5585 sin_optab = init_optab (UNKNOWN);
5586 asin_optab = init_optab (UNKNOWN);
5587 cos_optab = init_optab (UNKNOWN);
5588 acos_optab = init_optab (UNKNOWN);
5589 exp_optab = init_optab (UNKNOWN);
5590 exp10_optab = init_optab (UNKNOWN);
5591 exp2_optab = init_optab (UNKNOWN);
5592 expm1_optab = init_optab (UNKNOWN);
5593 ldexp_optab = init_optab (UNKNOWN);
5594 scalb_optab = init_optab (UNKNOWN);
5595 logb_optab = init_optab (UNKNOWN);
5596 ilogb_optab = init_optab (UNKNOWN);
5597 log_optab = init_optab (UNKNOWN);
5598 log10_optab = init_optab (UNKNOWN);
5599 log2_optab = init_optab (UNKNOWN);
5600 log1p_optab = init_optab (UNKNOWN);
5601 tan_optab = init_optab (UNKNOWN);
5602 atan_optab = init_optab (UNKNOWN);
5603 copysign_optab = init_optab (UNKNOWN);
5604 signbit_optab = init_optab (UNKNOWN);
5606 isinf_optab = init_optab (UNKNOWN);
5608 strlen_optab = init_optab (UNKNOWN);
5609 cbranch_optab = init_optab (UNKNOWN);
5610 cmov_optab = init_optab (UNKNOWN);
5611 cstore_optab = init_optab (UNKNOWN);
5612 push_optab = init_optab (UNKNOWN);
5614 reduc_smax_optab = init_optab (UNKNOWN);
5615 reduc_umax_optab = init_optab (UNKNOWN);
5616 reduc_smin_optab = init_optab (UNKNOWN);
5617 reduc_umin_optab = init_optab (UNKNOWN);
5618 reduc_splus_optab = init_optab (UNKNOWN);
5619 reduc_uplus_optab = init_optab (UNKNOWN);
5621 ssum_widen_optab = init_optab (UNKNOWN);
5622 usum_widen_optab = init_optab (UNKNOWN);
5623 sdot_prod_optab = init_optab (UNKNOWN);
5624 udot_prod_optab = init_optab (UNKNOWN);
5626 vec_extract_optab = init_optab (UNKNOWN);
5627 vec_extract_even_optab = init_optab (UNKNOWN);
5628 vec_extract_odd_optab = init_optab (UNKNOWN);
5629 vec_interleave_high_optab = init_optab (UNKNOWN);
5630 vec_interleave_low_optab = init_optab (UNKNOWN);
5631 vec_set_optab = init_optab (UNKNOWN);
5632 vec_init_optab = init_optab (UNKNOWN);
5633 vec_shl_optab = init_optab (UNKNOWN);
5634 vec_shr_optab = init_optab (UNKNOWN);
5635 vec_realign_load_optab = init_optab (UNKNOWN);
5636 movmisalign_optab = init_optab (UNKNOWN);
5637 vec_widen_umult_hi_optab = init_optab (UNKNOWN);
5638 vec_widen_umult_lo_optab = init_optab (UNKNOWN);
5639 vec_widen_smult_hi_optab = init_optab (UNKNOWN);
5640 vec_widen_smult_lo_optab = init_optab (UNKNOWN);
5641 vec_unpacks_hi_optab = init_optab (UNKNOWN);
5642 vec_unpacks_lo_optab = init_optab (UNKNOWN);
5643 vec_unpacku_hi_optab = init_optab (UNKNOWN);
5644 vec_unpacku_lo_optab = init_optab (UNKNOWN);
5645 vec_unpacks_float_hi_optab = init_optab (UNKNOWN);
5646 vec_unpacks_float_lo_optab = init_optab (UNKNOWN);
5647 vec_unpacku_float_hi_optab = init_optab (UNKNOWN);
5648 vec_unpacku_float_lo_optab = init_optab (UNKNOWN);
5649 vec_pack_trunc_optab = init_optab (UNKNOWN);
5650 vec_pack_usat_optab = init_optab (UNKNOWN);
5651 vec_pack_ssat_optab = init_optab (UNKNOWN);
5652 vec_pack_ufix_trunc_optab = init_optab (UNKNOWN);
5653 vec_pack_sfix_trunc_optab = init_optab (UNKNOWN);
5655 powi_optab = init_optab (UNKNOWN);
5658 sext_optab = init_convert_optab (SIGN_EXTEND);
5659 zext_optab = init_convert_optab (ZERO_EXTEND);
5660 trunc_optab = init_convert_optab (TRUNCATE);
5661 sfix_optab = init_convert_optab (FIX);
5662 ufix_optab = init_convert_optab (UNSIGNED_FIX);
5663 sfixtrunc_optab = init_convert_optab (UNKNOWN);
5664 ufixtrunc_optab = init_convert_optab (UNKNOWN);
5665 sfloat_optab = init_convert_optab (FLOAT);
5666 ufloat_optab = init_convert_optab (UNSIGNED_FLOAT);
5667 lrint_optab = init_convert_optab (UNKNOWN);
5668 lround_optab = init_convert_optab (UNKNOWN);
5669 lfloor_optab = init_convert_optab (UNKNOWN);
5670 lceil_optab = init_convert_optab (UNKNOWN);
5672 for (i = 0; i < NUM_MACHINE_MODES; i++)
5674 movmem_optab[i] = CODE_FOR_nothing;
5675 cmpstr_optab[i] = CODE_FOR_nothing;
5676 cmpstrn_optab[i] = CODE_FOR_nothing;
5677 cmpmem_optab[i] = CODE_FOR_nothing;
5678 setmem_optab[i] = CODE_FOR_nothing;
5680 sync_add_optab[i] = CODE_FOR_nothing;
5681 sync_sub_optab[i] = CODE_FOR_nothing;
5682 sync_ior_optab[i] = CODE_FOR_nothing;
5683 sync_and_optab[i] = CODE_FOR_nothing;
5684 sync_xor_optab[i] = CODE_FOR_nothing;
5685 sync_nand_optab[i] = CODE_FOR_nothing;
5686 sync_old_add_optab[i] = CODE_FOR_nothing;
5687 sync_old_sub_optab[i] = CODE_FOR_nothing;
5688 sync_old_ior_optab[i] = CODE_FOR_nothing;
5689 sync_old_and_optab[i] = CODE_FOR_nothing;
5690 sync_old_xor_optab[i] = CODE_FOR_nothing;
5691 sync_old_nand_optab[i] = CODE_FOR_nothing;
5692 sync_new_add_optab[i] = CODE_FOR_nothing;
5693 sync_new_sub_optab[i] = CODE_FOR_nothing;
5694 sync_new_ior_optab[i] = CODE_FOR_nothing;
5695 sync_new_and_optab[i] = CODE_FOR_nothing;
5696 sync_new_xor_optab[i] = CODE_FOR_nothing;
5697 sync_new_nand_optab[i] = CODE_FOR_nothing;
5698 sync_compare_and_swap[i] = CODE_FOR_nothing;
5699 sync_compare_and_swap_cc[i] = CODE_FOR_nothing;
5700 sync_lock_test_and_set[i] = CODE_FOR_nothing;
5701 sync_lock_release[i] = CODE_FOR_nothing;
5703 reload_in_optab[i] = reload_out_optab[i] = CODE_FOR_nothing;
5706 /* Fill in the optabs with the insns we support. */
5709 /* The ffs function operates on `int'. Fall back on it if we do not
5710 have a libgcc2 function for that width. */
5711 int_mode = mode_for_size (INT_TYPE_SIZE, MODE_INT, 0);
5712 ffs_optab->handlers[(int) int_mode].libfunc = init_one_libfunc ("ffs");
5714 /* Initialize the optabs with the names of the library functions. */
5715 init_integral_libfuncs (add_optab, "add", '3');
5716 init_floating_libfuncs (add_optab, "add", '3');
5717 init_integral_libfuncs (addv_optab, "addv", '3');
5718 init_floating_libfuncs (addv_optab, "add", '3');
5719 init_integral_libfuncs (sub_optab, "sub", '3');
5720 init_floating_libfuncs (sub_optab, "sub", '3');
5721 init_integral_libfuncs (subv_optab, "subv", '3');
5722 init_floating_libfuncs (subv_optab, "sub", '3');
5723 init_integral_libfuncs (smul_optab, "mul", '3');
5724 init_floating_libfuncs (smul_optab, "mul", '3');
5725 init_integral_libfuncs (smulv_optab, "mulv", '3');
5726 init_floating_libfuncs (smulv_optab, "mul", '3');
5727 init_integral_libfuncs (sdiv_optab, "div", '3');
5728 init_floating_libfuncs (sdiv_optab, "div", '3');
5729 init_integral_libfuncs (sdivv_optab, "divv", '3');
5730 init_integral_libfuncs (udiv_optab, "udiv", '3');
5731 init_integral_libfuncs (sdivmod_optab, "divmod", '4');
5732 init_integral_libfuncs (udivmod_optab, "udivmod", '4');
5733 init_integral_libfuncs (smod_optab, "mod", '3');
5734 init_integral_libfuncs (umod_optab, "umod", '3');
5735 init_floating_libfuncs (ftrunc_optab, "ftrunc", '2');
5736 init_integral_libfuncs (and_optab, "and", '3');
5737 init_integral_libfuncs (ior_optab, "ior", '3');
5738 init_integral_libfuncs (xor_optab, "xor", '3');
5739 init_integral_libfuncs (ashl_optab, "ashl", '3');
5740 init_integral_libfuncs (ashr_optab, "ashr", '3');
5741 init_integral_libfuncs (lshr_optab, "lshr", '3');
5742 init_integral_libfuncs (smin_optab, "min", '3');
5743 init_floating_libfuncs (smin_optab, "min", '3');
5744 init_integral_libfuncs (smax_optab, "max", '3');
5745 init_floating_libfuncs (smax_optab, "max", '3');
5746 init_integral_libfuncs (umin_optab, "umin", '3');
5747 init_integral_libfuncs (umax_optab, "umax", '3');
5748 init_integral_libfuncs (neg_optab, "neg", '2');
5749 init_floating_libfuncs (neg_optab, "neg", '2');
5750 init_integral_libfuncs (negv_optab, "negv", '2');
5751 init_floating_libfuncs (negv_optab, "neg", '2');
5752 init_integral_libfuncs (one_cmpl_optab, "one_cmpl", '2');
5753 init_integral_libfuncs (ffs_optab, "ffs", '2');
5754 init_integral_libfuncs (clz_optab, "clz", '2');
5755 init_integral_libfuncs (ctz_optab, "ctz", '2');
5756 init_integral_libfuncs (popcount_optab, "popcount", '2');
5757 init_integral_libfuncs (parity_optab, "parity", '2');
5759 /* Comparison libcalls for integers MUST come in pairs,
5761 init_integral_libfuncs (cmp_optab, "cmp", '2');
5762 init_integral_libfuncs (ucmp_optab, "ucmp", '2');
5763 init_floating_libfuncs (cmp_optab, "cmp", '2');
5765 /* EQ etc are floating point only. */
5766 init_floating_libfuncs (eq_optab, "eq", '2');
5767 init_floating_libfuncs (ne_optab, "ne", '2');
5768 init_floating_libfuncs (gt_optab, "gt", '2');
5769 init_floating_libfuncs (ge_optab, "ge", '2');
5770 init_floating_libfuncs (lt_optab, "lt", '2');
5771 init_floating_libfuncs (le_optab, "le", '2');
5772 init_floating_libfuncs (unord_optab, "unord", '2');
5774 init_floating_libfuncs (powi_optab, "powi", '2');
5777 init_interclass_conv_libfuncs (sfloat_optab, "float",
5778 MODE_INT, MODE_FLOAT);
5779 init_interclass_conv_libfuncs (sfloat_optab, "float",
5780 MODE_INT, MODE_DECIMAL_FLOAT);
5781 init_interclass_conv_libfuncs (ufloat_optab, "floatun",
5782 MODE_INT, MODE_FLOAT);
5783 init_interclass_conv_libfuncs (ufloat_optab, "floatun",
5784 MODE_INT, MODE_DECIMAL_FLOAT);
5785 init_interclass_conv_libfuncs (sfix_optab, "fix",
5786 MODE_FLOAT, MODE_INT);
5787 init_interclass_conv_libfuncs (sfix_optab, "fix",
5788 MODE_DECIMAL_FLOAT, MODE_INT);
5789 init_interclass_conv_libfuncs (ufix_optab, "fixuns",
5790 MODE_FLOAT, MODE_INT);
5791 init_interclass_conv_libfuncs (ufix_optab, "fixuns",
5792 MODE_DECIMAL_FLOAT, MODE_INT);
5793 init_interclass_conv_libfuncs (ufloat_optab, "floatuns",
5794 MODE_INT, MODE_DECIMAL_FLOAT);
5795 init_interclass_conv_libfuncs (lrint_optab, "lrint",
5796 MODE_INT, MODE_FLOAT);
5797 init_interclass_conv_libfuncs (lround_optab, "lround",
5798 MODE_INT, MODE_FLOAT);
5799 init_interclass_conv_libfuncs (lfloor_optab, "lfloor",
5800 MODE_INT, MODE_FLOAT);
5801 init_interclass_conv_libfuncs (lceil_optab, "lceil",
5802 MODE_INT, MODE_FLOAT);
5804 /* sext_optab is also used for FLOAT_EXTEND. */
5805 init_intraclass_conv_libfuncs (sext_optab, "extend", MODE_FLOAT, true);
5806 init_intraclass_conv_libfuncs (sext_optab, "extend", MODE_DECIMAL_FLOAT, true);
5807 init_interclass_conv_libfuncs (sext_optab, "extend", MODE_FLOAT, MODE_DECIMAL_FLOAT);
5808 init_interclass_conv_libfuncs (sext_optab, "extend", MODE_DECIMAL_FLOAT, MODE_FLOAT);
5809 init_intraclass_conv_libfuncs (trunc_optab, "trunc", MODE_FLOAT, false);
5810 init_intraclass_conv_libfuncs (trunc_optab, "trunc", MODE_DECIMAL_FLOAT, false);
5811 init_interclass_conv_libfuncs (trunc_optab, "trunc", MODE_FLOAT, MODE_DECIMAL_FLOAT);
5812 init_interclass_conv_libfuncs (trunc_optab, "trunc", MODE_DECIMAL_FLOAT, MODE_FLOAT);
5814 /* Explicitly initialize the bswap libfuncs since we need them to be
5815 valid for things other than word_mode. */
5816 set_optab_libfunc (bswap_optab, SImode, "__bswapsi2");
5817 set_optab_libfunc (bswap_optab, DImode, "__bswapdi2");
5819 /* Use cabs for double complex abs, since systems generally have cabs.
5820 Don't define any libcall for float complex, so that cabs will be used. */
5821 if (complex_double_type_node)
5822 abs_optab->handlers[TYPE_MODE (complex_double_type_node)].libfunc
5823 = init_one_libfunc ("cabs");
5825 abort_libfunc = init_one_libfunc ("abort");
5826 memcpy_libfunc = init_one_libfunc ("memcpy");
5827 memmove_libfunc = init_one_libfunc ("memmove");
5828 memcmp_libfunc = init_one_libfunc ("memcmp");
5829 memset_libfunc = init_one_libfunc ("memset");
5830 setbits_libfunc = init_one_libfunc ("__setbits");
5832 #ifndef DONT_USE_BUILTIN_SETJMP
5833 setjmp_libfunc = init_one_libfunc ("__builtin_setjmp");
5834 longjmp_libfunc = init_one_libfunc ("__builtin_longjmp");
5836 setjmp_libfunc = init_one_libfunc ("setjmp");
5837 longjmp_libfunc = init_one_libfunc ("longjmp");
5839 unwind_sjlj_register_libfunc = init_one_libfunc ("_Unwind_SjLj_Register");
5840 unwind_sjlj_unregister_libfunc
5841 = init_one_libfunc ("_Unwind_SjLj_Unregister");
5843 /* For function entry/exit instrumentation. */
5844 profile_function_entry_libfunc
5845 = init_one_libfunc ("__cyg_profile_func_enter");
5846 profile_function_exit_libfunc
5847 = init_one_libfunc ("__cyg_profile_func_exit");
5849 gcov_flush_libfunc = init_one_libfunc ("__gcov_flush");
5851 if (HAVE_conditional_trap)
5852 trap_rtx = gen_rtx_fmt_ee (EQ, VOIDmode, NULL_RTX, NULL_RTX);
5854 /* Allow the target to add more libcalls or rename some, etc. */
5855 targetm.init_libfuncs ();
5860 /* Print information about the current contents of the optabs on
5864 debug_optab_libfuncs (void)
5870 /* Dump the arithmetic optabs. */
5871 for (i = 0; i != (int) OTI_MAX; i++)
5872 for (j = 0; j < NUM_MACHINE_MODES; ++j)
5875 struct optab_handlers *h;
5878 h = &o->handlers[j];
5881 gcc_assert (GET_CODE (h->libfunc) == SYMBOL_REF);
5882 fprintf (stderr, "%s\t%s:\t%s\n",
5883 GET_RTX_NAME (o->code),
5885 XSTR (h->libfunc, 0));
5889 /* Dump the conversion optabs. */
5890 for (i = 0; i < (int) COI_MAX; ++i)
5891 for (j = 0; j < NUM_MACHINE_MODES; ++j)
5892 for (k = 0; k < NUM_MACHINE_MODES; ++k)
5895 struct optab_handlers *h;
5897 o = &convert_optab_table[i];
5898 h = &o->handlers[j][k];
5901 gcc_assert (GET_CODE (h->libfunc) == SYMBOL_REF);
5902 fprintf (stderr, "%s\t%s\t%s:\t%s\n",
5903 GET_RTX_NAME (o->code),
5906 XSTR (h->libfunc, 0));
5914 /* Generate insns to trap with code TCODE if OP1 and OP2 satisfy condition
5915 CODE. Return 0 on failure. */
5918 gen_cond_trap (enum rtx_code code ATTRIBUTE_UNUSED, rtx op1,
5919 rtx op2 ATTRIBUTE_UNUSED, rtx tcode ATTRIBUTE_UNUSED)
5921 enum machine_mode mode = GET_MODE (op1);
5922 enum insn_code icode;
5925 if (!HAVE_conditional_trap)
5928 if (mode == VOIDmode)
5931 icode = cmp_optab->handlers[(int) mode].insn_code;
5932 if (icode == CODE_FOR_nothing)
5936 op1 = prepare_operand (icode, op1, 0, mode, mode, 0);
5937 op2 = prepare_operand (icode, op2, 1, mode, mode, 0);
5943 emit_insn (GEN_FCN (icode) (op1, op2));
5945 PUT_CODE (trap_rtx, code);
5946 gcc_assert (HAVE_conditional_trap);
5947 insn = gen_conditional_trap (trap_rtx, tcode);
5951 insn = get_insns ();
5958 /* Return rtx code for TCODE. Use UNSIGNEDP to select signed
5959 or unsigned operation code. */
5961 static enum rtx_code
5962 get_rtx_code (enum tree_code tcode, bool unsignedp)
5974 code = unsignedp ? LTU : LT;
5977 code = unsignedp ? LEU : LE;
5980 code = unsignedp ? GTU : GT;
5983 code = unsignedp ? GEU : GE;
5986 case UNORDERED_EXPR:
6017 /* Return comparison rtx for COND. Use UNSIGNEDP to select signed or
6018 unsigned operators. Do not generate compare instruction. */
6021 vector_compare_rtx (tree cond, bool unsignedp, enum insn_code icode)
6023 enum rtx_code rcode;
6025 rtx rtx_op0, rtx_op1;
6027 /* This is unlikely. While generating VEC_COND_EXPR, auto vectorizer
6028 ensures that condition is a relational operation. */
6029 gcc_assert (COMPARISON_CLASS_P (cond));
6031 rcode = get_rtx_code (TREE_CODE (cond), unsignedp);
6032 t_op0 = TREE_OPERAND (cond, 0);
6033 t_op1 = TREE_OPERAND (cond, 1);
6035 /* Expand operands. */
6036 rtx_op0 = expand_expr (t_op0, NULL_RTX, TYPE_MODE (TREE_TYPE (t_op0)),
6038 rtx_op1 = expand_expr (t_op1, NULL_RTX, TYPE_MODE (TREE_TYPE (t_op1)),
6041 if (!insn_data[icode].operand[4].predicate (rtx_op0, GET_MODE (rtx_op0))
6042 && GET_MODE (rtx_op0) != VOIDmode)
6043 rtx_op0 = force_reg (GET_MODE (rtx_op0), rtx_op0);
6045 if (!insn_data[icode].operand[5].predicate (rtx_op1, GET_MODE (rtx_op1))
6046 && GET_MODE (rtx_op1) != VOIDmode)
6047 rtx_op1 = force_reg (GET_MODE (rtx_op1), rtx_op1);
6049 return gen_rtx_fmt_ee (rcode, VOIDmode, rtx_op0, rtx_op1);
6052 /* Return insn code for VEC_COND_EXPR EXPR. */
6054 static inline enum insn_code
6055 get_vcond_icode (tree expr, enum machine_mode mode)
6057 enum insn_code icode = CODE_FOR_nothing;
6059 if (TYPE_UNSIGNED (TREE_TYPE (expr)))
6060 icode = vcondu_gen_code[mode];
6062 icode = vcond_gen_code[mode];
6066 /* Return TRUE iff, appropriate vector insns are available
6067 for vector cond expr expr in VMODE mode. */
6070 expand_vec_cond_expr_p (tree expr, enum machine_mode vmode)
6072 if (get_vcond_icode (expr, vmode) == CODE_FOR_nothing)
6077 /* Generate insns for VEC_COND_EXPR. */
6080 expand_vec_cond_expr (tree vec_cond_expr, rtx target)
6082 enum insn_code icode;
6083 rtx comparison, rtx_op1, rtx_op2, cc_op0, cc_op1;
6084 enum machine_mode mode = TYPE_MODE (TREE_TYPE (vec_cond_expr));
6085 bool unsignedp = TYPE_UNSIGNED (TREE_TYPE (vec_cond_expr));
6087 icode = get_vcond_icode (vec_cond_expr, mode);
6088 if (icode == CODE_FOR_nothing)
6091 if (!target || !insn_data[icode].operand[0].predicate (target, mode))
6092 target = gen_reg_rtx (mode);
6094 /* Get comparison rtx. First expand both cond expr operands. */
6095 comparison = vector_compare_rtx (TREE_OPERAND (vec_cond_expr, 0),
6097 cc_op0 = XEXP (comparison, 0);
6098 cc_op1 = XEXP (comparison, 1);
6099 /* Expand both operands and force them in reg, if required. */
6100 rtx_op1 = expand_normal (TREE_OPERAND (vec_cond_expr, 1));
6101 if (!insn_data[icode].operand[1].predicate (rtx_op1, mode)
6102 && mode != VOIDmode)
6103 rtx_op1 = force_reg (mode, rtx_op1);
6105 rtx_op2 = expand_normal (TREE_OPERAND (vec_cond_expr, 2));
6106 if (!insn_data[icode].operand[2].predicate (rtx_op2, mode)
6107 && mode != VOIDmode)
6108 rtx_op2 = force_reg (mode, rtx_op2);
6110 /* Emit instruction! */
6111 emit_insn (GEN_FCN (icode) (target, rtx_op1, rtx_op2,
6112 comparison, cc_op0, cc_op1));
6118 /* This is an internal subroutine of the other compare_and_swap expanders.
6119 MEM, OLD_VAL and NEW_VAL are as you'd expect for a compare-and-swap
6120 operation. TARGET is an optional place to store the value result of
6121 the operation. ICODE is the particular instruction to expand. Return
6122 the result of the operation. */
6125 expand_val_compare_and_swap_1 (rtx mem, rtx old_val, rtx new_val,
6126 rtx target, enum insn_code icode)
6128 enum machine_mode mode = GET_MODE (mem);
6131 if (!target || !insn_data[icode].operand[0].predicate (target, mode))
6132 target = gen_reg_rtx (mode);
6134 if (GET_MODE (old_val) != VOIDmode && GET_MODE (old_val) != mode)
6135 old_val = convert_modes (mode, GET_MODE (old_val), old_val, 1);
6136 if (!insn_data[icode].operand[2].predicate (old_val, mode))
6137 old_val = force_reg (mode, old_val);
6139 if (GET_MODE (new_val) != VOIDmode && GET_MODE (new_val) != mode)
6140 new_val = convert_modes (mode, GET_MODE (new_val), new_val, 1);
6141 if (!insn_data[icode].operand[3].predicate (new_val, mode))
6142 new_val = force_reg (mode, new_val);
6144 insn = GEN_FCN (icode) (target, mem, old_val, new_val);
6145 if (insn == NULL_RTX)
6152 /* Expand a compare-and-swap operation and return its value. */
6155 expand_val_compare_and_swap (rtx mem, rtx old_val, rtx new_val, rtx target)
6157 enum machine_mode mode = GET_MODE (mem);
6158 enum insn_code icode = sync_compare_and_swap[mode];
6160 if (icode == CODE_FOR_nothing)
6163 return expand_val_compare_and_swap_1 (mem, old_val, new_val, target, icode);
6166 /* Expand a compare-and-swap operation and store true into the result if
6167 the operation was successful and false otherwise. Return the result.
6168 Unlike other routines, TARGET is not optional. */
6171 expand_bool_compare_and_swap (rtx mem, rtx old_val, rtx new_val, rtx target)
6173 enum machine_mode mode = GET_MODE (mem);
6174 enum insn_code icode;
6175 rtx subtarget, label0, label1;
6177 /* If the target supports a compare-and-swap pattern that simultaneously
6178 sets some flag for success, then use it. Otherwise use the regular
6179 compare-and-swap and follow that immediately with a compare insn. */
6180 icode = sync_compare_and_swap_cc[mode];
6184 subtarget = expand_val_compare_and_swap_1 (mem, old_val, new_val,
6186 if (subtarget != NULL_RTX)
6190 case CODE_FOR_nothing:
6191 icode = sync_compare_and_swap[mode];
6192 if (icode == CODE_FOR_nothing)
6195 /* Ensure that if old_val == mem, that we're not comparing
6196 against an old value. */
6197 if (MEM_P (old_val))
6198 old_val = force_reg (mode, old_val);
6200 subtarget = expand_val_compare_and_swap_1 (mem, old_val, new_val,
6202 if (subtarget == NULL_RTX)
6205 emit_cmp_insn (subtarget, old_val, EQ, const0_rtx, mode, true);
6208 /* If the target has a sane STORE_FLAG_VALUE, then go ahead and use a
6209 setcc instruction from the beginning. We don't work too hard here,
6210 but it's nice to not be stupid about initial code gen either. */
6211 if (STORE_FLAG_VALUE == 1)
6213 icode = setcc_gen_code[EQ];
6214 if (icode != CODE_FOR_nothing)
6216 enum machine_mode cmode = insn_data[icode].operand[0].mode;
6220 if (!insn_data[icode].operand[0].predicate (target, cmode))
6221 subtarget = gen_reg_rtx (cmode);
6223 insn = GEN_FCN (icode) (subtarget);
6227 if (GET_MODE (target) != GET_MODE (subtarget))
6229 convert_move (target, subtarget, 1);
6237 /* Without an appropriate setcc instruction, use a set of branches to
6238 get 1 and 0 stored into target. Presumably if the target has a
6239 STORE_FLAG_VALUE that isn't 1, then this will get cleaned up by ifcvt. */
6241 label0 = gen_label_rtx ();
6242 label1 = gen_label_rtx ();
6244 emit_jump_insn (bcc_gen_fctn[EQ] (label0));
6245 emit_move_insn (target, const0_rtx);
6246 emit_jump_insn (gen_jump (label1));
6248 emit_label (label0);
6249 emit_move_insn (target, const1_rtx);
6250 emit_label (label1);
6255 /* This is a helper function for the other atomic operations. This function
6256 emits a loop that contains SEQ that iterates until a compare-and-swap
6257 operation at the end succeeds. MEM is the memory to be modified. SEQ is
6258 a set of instructions that takes a value from OLD_REG as an input and
6259 produces a value in NEW_REG as an output. Before SEQ, OLD_REG will be
6260 set to the current contents of MEM. After SEQ, a compare-and-swap will
6261 attempt to update MEM with NEW_REG. The function returns true when the
6262 loop was generated successfully. */
6265 expand_compare_and_swap_loop (rtx mem, rtx old_reg, rtx new_reg, rtx seq)
6267 enum machine_mode mode = GET_MODE (mem);
6268 enum insn_code icode;
6269 rtx label, cmp_reg, subtarget;
6271 /* The loop we want to generate looks like
6277 cmp_reg = compare-and-swap(mem, old_reg, new_reg)
6278 if (cmp_reg != old_reg)
6281 Note that we only do the plain load from memory once. Subsequent
6282 iterations use the value loaded by the compare-and-swap pattern. */
6284 label = gen_label_rtx ();
6285 cmp_reg = gen_reg_rtx (mode);
6287 emit_move_insn (cmp_reg, mem);
6289 emit_move_insn (old_reg, cmp_reg);
6293 /* If the target supports a compare-and-swap pattern that simultaneously
6294 sets some flag for success, then use it. Otherwise use the regular
6295 compare-and-swap and follow that immediately with a compare insn. */
6296 icode = sync_compare_and_swap_cc[mode];
6300 subtarget = expand_val_compare_and_swap_1 (mem, old_reg, new_reg,
6302 if (subtarget != NULL_RTX)
6304 gcc_assert (subtarget == cmp_reg);
6309 case CODE_FOR_nothing:
6310 icode = sync_compare_and_swap[mode];
6311 if (icode == CODE_FOR_nothing)
6314 subtarget = expand_val_compare_and_swap_1 (mem, old_reg, new_reg,
6316 if (subtarget == NULL_RTX)
6318 if (subtarget != cmp_reg)
6319 emit_move_insn (cmp_reg, subtarget);
6321 emit_cmp_insn (cmp_reg, old_reg, EQ, const0_rtx, mode, true);
6324 /* ??? Mark this jump predicted not taken? */
6325 emit_jump_insn (bcc_gen_fctn[NE] (label));
6330 /* This function generates the atomic operation MEM CODE= VAL. In this
6331 case, we do not care about any resulting value. Returns NULL if we
6332 cannot generate the operation. */
6335 expand_sync_operation (rtx mem, rtx val, enum rtx_code code)
6337 enum machine_mode mode = GET_MODE (mem);
6338 enum insn_code icode;
6341 /* Look to see if the target supports the operation directly. */
6345 icode = sync_add_optab[mode];
6348 icode = sync_ior_optab[mode];
6351 icode = sync_xor_optab[mode];
6354 icode = sync_and_optab[mode];
6357 icode = sync_nand_optab[mode];
6361 icode = sync_sub_optab[mode];
6362 if (icode == CODE_FOR_nothing)
6364 icode = sync_add_optab[mode];
6365 if (icode != CODE_FOR_nothing)
6367 val = expand_simple_unop (mode, NEG, val, NULL_RTX, 1);
6377 /* Generate the direct operation, if present. */
6378 if (icode != CODE_FOR_nothing)
6380 if (GET_MODE (val) != VOIDmode && GET_MODE (val) != mode)
6381 val = convert_modes (mode, GET_MODE (val), val, 1);
6382 if (!insn_data[icode].operand[1].predicate (val, mode))
6383 val = force_reg (mode, val);
6385 insn = GEN_FCN (icode) (mem, val);
6393 /* Failing that, generate a compare-and-swap loop in which we perform the
6394 operation with normal arithmetic instructions. */
6395 if (sync_compare_and_swap[mode] != CODE_FOR_nothing)
6397 rtx t0 = gen_reg_rtx (mode), t1;
6404 t1 = expand_simple_unop (mode, NOT, t1, NULL_RTX, true);
6407 t1 = expand_simple_binop (mode, code, t1, val, NULL_RTX,
6408 true, OPTAB_LIB_WIDEN);
6410 insn = get_insns ();
6413 if (t1 != NULL && expand_compare_and_swap_loop (mem, t0, t1, insn))
6420 /* This function generates the atomic operation MEM CODE= VAL. In this
6421 case, we do care about the resulting value: if AFTER is true then
6422 return the value MEM holds after the operation, if AFTER is false
6423 then return the value MEM holds before the operation. TARGET is an
6424 optional place for the result value to be stored. */
6427 expand_sync_fetch_operation (rtx mem, rtx val, enum rtx_code code,
6428 bool after, rtx target)
6430 enum machine_mode mode = GET_MODE (mem);
6431 enum insn_code old_code, new_code, icode;
6435 /* Look to see if the target supports the operation directly. */
6439 old_code = sync_old_add_optab[mode];
6440 new_code = sync_new_add_optab[mode];
6443 old_code = sync_old_ior_optab[mode];
6444 new_code = sync_new_ior_optab[mode];
6447 old_code = sync_old_xor_optab[mode];
6448 new_code = sync_new_xor_optab[mode];
6451 old_code = sync_old_and_optab[mode];
6452 new_code = sync_new_and_optab[mode];
6455 old_code = sync_old_nand_optab[mode];
6456 new_code = sync_new_nand_optab[mode];
6460 old_code = sync_old_sub_optab[mode];
6461 new_code = sync_new_sub_optab[mode];
6462 if (old_code == CODE_FOR_nothing && new_code == CODE_FOR_nothing)
6464 old_code = sync_old_add_optab[mode];
6465 new_code = sync_new_add_optab[mode];
6466 if (old_code != CODE_FOR_nothing || new_code != CODE_FOR_nothing)
6468 val = expand_simple_unop (mode, NEG, val, NULL_RTX, 1);
6478 /* If the target does supports the proper new/old operation, great. But
6479 if we only support the opposite old/new operation, check to see if we
6480 can compensate. In the case in which the old value is supported, then
6481 we can always perform the operation again with normal arithmetic. In
6482 the case in which the new value is supported, then we can only handle
6483 this in the case the operation is reversible. */
6488 if (icode == CODE_FOR_nothing)
6491 if (icode != CODE_FOR_nothing)
6498 if (icode == CODE_FOR_nothing
6499 && (code == PLUS || code == MINUS || code == XOR))
6502 if (icode != CODE_FOR_nothing)
6507 /* If we found something supported, great. */
6508 if (icode != CODE_FOR_nothing)
6510 if (!target || !insn_data[icode].operand[0].predicate (target, mode))
6511 target = gen_reg_rtx (mode);
6513 if (GET_MODE (val) != VOIDmode && GET_MODE (val) != mode)
6514 val = convert_modes (mode, GET_MODE (val), val, 1);
6515 if (!insn_data[icode].operand[2].predicate (val, mode))
6516 val = force_reg (mode, val);
6518 insn = GEN_FCN (icode) (target, mem, val);
6523 /* If we need to compensate for using an operation with the
6524 wrong return value, do so now. */
6531 else if (code == MINUS)
6536 target = expand_simple_unop (mode, NOT, target, NULL_RTX, true);
6537 target = expand_simple_binop (mode, code, target, val, NULL_RTX,
6538 true, OPTAB_LIB_WIDEN);
6545 /* Failing that, generate a compare-and-swap loop in which we perform the
6546 operation with normal arithmetic instructions. */
6547 if (sync_compare_and_swap[mode] != CODE_FOR_nothing)
6549 rtx t0 = gen_reg_rtx (mode), t1;
6551 if (!target || !register_operand (target, mode))
6552 target = gen_reg_rtx (mode);
6557 emit_move_insn (target, t0);
6561 t1 = expand_simple_unop (mode, NOT, t1, NULL_RTX, true);
6564 t1 = expand_simple_binop (mode, code, t1, val, NULL_RTX,
6565 true, OPTAB_LIB_WIDEN);
6567 emit_move_insn (target, t1);
6569 insn = get_insns ();
6572 if (t1 != NULL && expand_compare_and_swap_loop (mem, t0, t1, insn))
6579 /* This function expands a test-and-set operation. Ideally we atomically
6580 store VAL in MEM and return the previous value in MEM. Some targets
6581 may not support this operation and only support VAL with the constant 1;
6582 in this case while the return value will be 0/1, but the exact value
6583 stored in MEM is target defined. TARGET is an option place to stick
6584 the return value. */
6587 expand_sync_lock_test_and_set (rtx mem, rtx val, rtx target)
6589 enum machine_mode mode = GET_MODE (mem);
6590 enum insn_code icode;
6593 /* If the target supports the test-and-set directly, great. */
6594 icode = sync_lock_test_and_set[mode];
6595 if (icode != CODE_FOR_nothing)
6597 if (!target || !insn_data[icode].operand[0].predicate (target, mode))
6598 target = gen_reg_rtx (mode);
6600 if (GET_MODE (val) != VOIDmode && GET_MODE (val) != mode)
6601 val = convert_modes (mode, GET_MODE (val), val, 1);
6602 if (!insn_data[icode].operand[2].predicate (val, mode))
6603 val = force_reg (mode, val);
6605 insn = GEN_FCN (icode) (target, mem, val);
6613 /* Otherwise, use a compare-and-swap loop for the exchange. */
6614 if (sync_compare_and_swap[mode] != CODE_FOR_nothing)
6616 if (!target || !register_operand (target, mode))
6617 target = gen_reg_rtx (mode);
6618 if (GET_MODE (val) != VOIDmode && GET_MODE (val) != mode)
6619 val = convert_modes (mode, GET_MODE (val), val, 1);
6620 if (expand_compare_and_swap_loop (mem, target, val, NULL_RTX))
6627 #include "gt-optabs.h"