1 /* Expand the basic unary and binary arithmetic operations, for GNU compiler.
2 Copyright (C) 1987, 1988, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
3 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007
4 Free Software Foundation, Inc.
6 This file is part of GCC.
8 GCC is free software; you can redistribute it and/or modify it under
9 the terms of the GNU General Public License as published by the Free
10 Software Foundation; either version 2, or (at your option) any later
13 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
14 WARRANTY; without even the implied warranty of MERCHANTABILITY or
15 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING. If not, write to the Free
20 Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
26 #include "coretypes.h"
30 /* Include insn-config.h before expr.h so that HAVE_conditional_move
31 is properly defined. */
32 #include "insn-config.h"
46 #include "basic-block.h"
49 /* Each optab contains info on how this target machine
50 can perform a particular operation
51 for all sizes and kinds of operands.
53 The operation to be performed is often specified
54 by passing one of these optabs as an argument.
56 See expr.h for documentation of these optabs. */
58 optab optab_table[OTI_MAX];
60 rtx libfunc_table[LTI_MAX];
62 /* Tables of patterns for converting one mode to another. */
63 convert_optab convert_optab_table[COI_MAX];
65 /* Contains the optab used for each rtx code. */
66 optab code_to_optab[NUM_RTX_CODE + 1];
68 /* Indexed by the rtx-code for a conditional (eg. EQ, LT,...)
69 gives the gen_function to make a branch to test that condition. */
71 rtxfun bcc_gen_fctn[NUM_RTX_CODE];
73 /* Indexed by the rtx-code for a conditional (eg. EQ, LT,...)
74 gives the insn code to make a store-condition insn
75 to test that condition. */
77 enum insn_code setcc_gen_code[NUM_RTX_CODE];
79 #ifdef HAVE_conditional_move
80 /* Indexed by the machine mode, gives the insn code to make a conditional
81 move insn. This is not indexed by the rtx-code like bcc_gen_fctn and
82 setcc_gen_code to cut down on the number of named patterns. Consider a day
83 when a lot more rtx codes are conditional (eg: for the ARM). */
85 enum insn_code movcc_gen_code[NUM_MACHINE_MODES];
88 /* Indexed by the machine mode, gives the insn code for vector conditional
91 enum insn_code vcond_gen_code[NUM_MACHINE_MODES];
92 enum insn_code vcondu_gen_code[NUM_MACHINE_MODES];
94 /* The insn generating function can not take an rtx_code argument.
95 TRAP_RTX is used as an rtx argument. Its code is replaced with
96 the code to be used in the trap insn and all other fields are ignored. */
97 static GTY(()) rtx trap_rtx;
99 static int add_equal_note (rtx, rtx, enum rtx_code, rtx, rtx);
100 static rtx widen_operand (rtx, enum machine_mode, enum machine_mode, int,
102 static void prepare_cmp_insn (rtx *, rtx *, enum rtx_code *, rtx,
103 enum machine_mode *, int *,
104 enum can_compare_purpose);
105 static enum insn_code can_fix_p (enum machine_mode, enum machine_mode, int,
107 static enum insn_code can_float_p (enum machine_mode, enum machine_mode, int);
108 static optab new_optab (void);
109 static convert_optab new_convert_optab (void);
110 static inline optab init_optab (enum rtx_code);
111 static inline optab init_optabv (enum rtx_code);
112 static inline convert_optab init_convert_optab (enum rtx_code);
113 static void init_libfuncs (optab, int, int, const char *, int);
114 static void init_integral_libfuncs (optab, const char *, int);
115 static void init_floating_libfuncs (optab, const char *, int);
116 static void init_interclass_conv_libfuncs (convert_optab, const char *,
117 enum mode_class, enum mode_class);
118 static void init_intraclass_conv_libfuncs (convert_optab, const char *,
119 enum mode_class, bool);
120 static void emit_cmp_and_jump_insn_1 (rtx, rtx, enum machine_mode,
121 enum rtx_code, int, rtx);
122 static void prepare_float_lib_cmp (rtx *, rtx *, enum rtx_code *,
123 enum machine_mode *, int *);
124 static rtx widen_clz (enum machine_mode, rtx, rtx);
125 static rtx expand_parity (enum machine_mode, rtx, rtx);
126 static enum rtx_code get_rtx_code (enum tree_code, bool);
127 static rtx vector_compare_rtx (tree, bool, enum insn_code);
129 /* Current libcall id. It doesn't matter what these are, as long
130 as they are unique to each libcall that is emitted. */
131 static HOST_WIDE_INT libcall_id = 0;
133 #ifndef HAVE_conditional_trap
134 #define HAVE_conditional_trap 0
135 #define gen_conditional_trap(a,b) (gcc_unreachable (), NULL_RTX)
138 /* Prefixes for the current version of decimal floating point (BID vs. DPD) */
139 #if ENABLE_DECIMAL_BID_FORMAT
140 #define DECIMAL_PREFIX "bid_"
142 #define DECIMAL_PREFIX "dpd_"
146 /* Add a REG_EQUAL note to the last insn in INSNS. TARGET is being set to
147 the result of operation CODE applied to OP0 (and OP1 if it is a binary
150 If the last insn does not set TARGET, don't do anything, but return 1.
152 If a previous insn sets TARGET and TARGET is one of OP0 or OP1,
153 don't add the REG_EQUAL note but return 0. Our caller can then try
154 again, ensuring that TARGET is not one of the operands. */
157 add_equal_note (rtx insns, rtx target, enum rtx_code code, rtx op0, rtx op1)
159 rtx last_insn, insn, set;
162 gcc_assert (insns && INSN_P (insns) && NEXT_INSN (insns));
164 if (GET_RTX_CLASS (code) != RTX_COMM_ARITH
165 && GET_RTX_CLASS (code) != RTX_BIN_ARITH
166 && GET_RTX_CLASS (code) != RTX_COMM_COMPARE
167 && GET_RTX_CLASS (code) != RTX_COMPARE
168 && GET_RTX_CLASS (code) != RTX_UNARY)
171 if (GET_CODE (target) == ZERO_EXTRACT)
174 for (last_insn = insns;
175 NEXT_INSN (last_insn) != NULL_RTX;
176 last_insn = NEXT_INSN (last_insn))
179 set = single_set (last_insn);
183 if (! rtx_equal_p (SET_DEST (set), target)
184 /* For a STRICT_LOW_PART, the REG_NOTE applies to what is inside it. */
185 && (GET_CODE (SET_DEST (set)) != STRICT_LOW_PART
186 || ! rtx_equal_p (XEXP (SET_DEST (set), 0), target)))
189 /* If TARGET is in OP0 or OP1, check if anything in SEQ sets TARGET
190 besides the last insn. */
191 if (reg_overlap_mentioned_p (target, op0)
192 || (op1 && reg_overlap_mentioned_p (target, op1)))
194 insn = PREV_INSN (last_insn);
195 while (insn != NULL_RTX)
197 if (reg_set_p (target, insn))
200 insn = PREV_INSN (insn);
204 if (GET_RTX_CLASS (code) == RTX_UNARY)
205 note = gen_rtx_fmt_e (code, GET_MODE (target), copy_rtx (op0));
207 note = gen_rtx_fmt_ee (code, GET_MODE (target), copy_rtx (op0), copy_rtx (op1));
209 set_unique_reg_note (last_insn, REG_EQUAL, note);
214 /* Widen OP to MODE and return the rtx for the widened operand. UNSIGNEDP
215 says whether OP is signed or unsigned. NO_EXTEND is nonzero if we need
216 not actually do a sign-extend or zero-extend, but can leave the
217 higher-order bits of the result rtx undefined, for example, in the case
218 of logical operations, but not right shifts. */
221 widen_operand (rtx op, enum machine_mode mode, enum machine_mode oldmode,
222 int unsignedp, int no_extend)
226 /* If we don't have to extend and this is a constant, return it. */
227 if (no_extend && GET_MODE (op) == VOIDmode)
230 /* If we must extend do so. If OP is a SUBREG for a promoted object, also
231 extend since it will be more efficient to do so unless the signedness of
232 a promoted object differs from our extension. */
234 || (GET_CODE (op) == SUBREG && SUBREG_PROMOTED_VAR_P (op)
235 && SUBREG_PROMOTED_UNSIGNED_P (op) == unsignedp))
236 return convert_modes (mode, oldmode, op, unsignedp);
238 /* If MODE is no wider than a single word, we return a paradoxical
240 if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
241 return gen_rtx_SUBREG (mode, force_reg (GET_MODE (op), op), 0);
243 /* Otherwise, get an object of MODE, clobber it, and set the low-order
246 result = gen_reg_rtx (mode);
247 emit_insn (gen_rtx_CLOBBER (VOIDmode, result));
248 emit_move_insn (gen_lowpart (GET_MODE (op), result), op);
252 /* Return the optab used for computing the operation given by
253 the tree code, CODE. This function is not always usable (for
254 example, it cannot give complete results for multiplication
255 or division) but probably ought to be relied on more widely
256 throughout the expander. */
258 optab_for_tree_code (enum tree_code code, tree type)
270 return one_cmpl_optab;
279 return TYPE_UNSIGNED (type) ? umod_optab : smod_optab;
287 return TYPE_UNSIGNED (type) ? udiv_optab : sdiv_optab;
293 return TYPE_UNSIGNED (type) ? lshr_optab : ashr_optab;
302 return TYPE_UNSIGNED (type) ? umax_optab : smax_optab;
305 return TYPE_UNSIGNED (type) ? umin_optab : smin_optab;
307 case REALIGN_LOAD_EXPR:
308 return vec_realign_load_optab;
311 return TYPE_UNSIGNED (type) ? usum_widen_optab : ssum_widen_optab;
314 return TYPE_UNSIGNED (type) ? udot_prod_optab : sdot_prod_optab;
317 return TYPE_UNSIGNED (type) ? reduc_umax_optab : reduc_smax_optab;
320 return TYPE_UNSIGNED (type) ? reduc_umin_optab : reduc_smin_optab;
322 case REDUC_PLUS_EXPR:
323 return TYPE_UNSIGNED (type) ? reduc_uplus_optab : reduc_splus_optab;
325 case VEC_LSHIFT_EXPR:
326 return vec_shl_optab;
328 case VEC_RSHIFT_EXPR:
329 return vec_shr_optab;
331 case VEC_WIDEN_MULT_HI_EXPR:
332 return TYPE_UNSIGNED (type) ?
333 vec_widen_umult_hi_optab : vec_widen_smult_hi_optab;
335 case VEC_WIDEN_MULT_LO_EXPR:
336 return TYPE_UNSIGNED (type) ?
337 vec_widen_umult_lo_optab : vec_widen_smult_lo_optab;
339 case VEC_UNPACK_HI_EXPR:
340 return TYPE_UNSIGNED (type) ?
341 vec_unpacku_hi_optab : vec_unpacks_hi_optab;
343 case VEC_UNPACK_LO_EXPR:
344 return TYPE_UNSIGNED (type) ?
345 vec_unpacku_lo_optab : vec_unpacks_lo_optab;
347 case VEC_UNPACK_FLOAT_HI_EXPR:
348 /* The signedness is determined from input operand. */
349 return TYPE_UNSIGNED (type) ?
350 vec_unpacku_float_hi_optab : vec_unpacks_float_hi_optab;
352 case VEC_UNPACK_FLOAT_LO_EXPR:
353 /* The signedness is determined from input operand. */
354 return TYPE_UNSIGNED (type) ?
355 vec_unpacku_float_lo_optab : vec_unpacks_float_lo_optab;
357 case VEC_PACK_TRUNC_EXPR:
358 return vec_pack_trunc_optab;
360 case VEC_PACK_SAT_EXPR:
361 return TYPE_UNSIGNED (type) ? vec_pack_usat_optab : vec_pack_ssat_optab;
363 case VEC_PACK_FIX_TRUNC_EXPR:
364 /* The signedness is determined from output operand. */
365 return TYPE_UNSIGNED (type) ?
366 vec_pack_ufix_trunc_optab : vec_pack_sfix_trunc_optab;
372 trapv = INTEGRAL_TYPE_P (type) && TYPE_OVERFLOW_TRAPS (type);
375 case POINTER_PLUS_EXPR:
377 return trapv ? addv_optab : add_optab;
380 return trapv ? subv_optab : sub_optab;
383 return trapv ? smulv_optab : smul_optab;
386 return trapv ? negv_optab : neg_optab;
389 return trapv ? absv_optab : abs_optab;
391 case VEC_EXTRACT_EVEN_EXPR:
392 return vec_extract_even_optab;
394 case VEC_EXTRACT_ODD_EXPR:
395 return vec_extract_odd_optab;
397 case VEC_INTERLEAVE_HIGH_EXPR:
398 return vec_interleave_high_optab;
400 case VEC_INTERLEAVE_LOW_EXPR:
401 return vec_interleave_low_optab;
409 /* Expand vector widening operations.
411 There are two different classes of operations handled here:
412 1) Operations whose result is wider than all the arguments to the operation.
413 Examples: VEC_UNPACK_HI/LO_EXPR, VEC_WIDEN_MULT_HI/LO_EXPR
414 In this case OP0 and optionally OP1 would be initialized,
415 but WIDE_OP wouldn't (not relevant for this case).
416 2) Operations whose result is of the same size as the last argument to the
417 operation, but wider than all the other arguments to the operation.
418 Examples: WIDEN_SUM_EXPR, VEC_DOT_PROD_EXPR.
419 In the case WIDE_OP, OP0 and optionally OP1 would be initialized.
421 E.g, when called to expand the following operations, this is how
422 the arguments will be initialized:
424 widening-sum 2 oprnd0 - oprnd1
425 widening-dot-product 3 oprnd0 oprnd1 oprnd2
426 widening-mult 2 oprnd0 oprnd1 -
427 type-promotion (vec-unpack) 1 oprnd0 - - */
430 expand_widen_pattern_expr (tree exp, rtx op0, rtx op1, rtx wide_op, rtx target,
433 tree oprnd0, oprnd1, oprnd2;
434 enum machine_mode wmode = 0, tmode0, tmode1 = 0;
435 optab widen_pattern_optab;
437 enum machine_mode xmode0, xmode1 = 0, wxmode = 0;
440 rtx xop0, xop1, wxop;
441 int nops = TREE_OPERAND_LENGTH (exp);
443 oprnd0 = TREE_OPERAND (exp, 0);
444 tmode0 = TYPE_MODE (TREE_TYPE (oprnd0));
445 widen_pattern_optab =
446 optab_for_tree_code (TREE_CODE (exp), TREE_TYPE (oprnd0));
447 icode = (int) widen_pattern_optab->handlers[(int) tmode0].insn_code;
448 gcc_assert (icode != CODE_FOR_nothing);
449 xmode0 = insn_data[icode].operand[1].mode;
453 oprnd1 = TREE_OPERAND (exp, 1);
454 tmode1 = TYPE_MODE (TREE_TYPE (oprnd1));
455 xmode1 = insn_data[icode].operand[2].mode;
458 /* The last operand is of a wider mode than the rest of the operands. */
466 gcc_assert (tmode1 == tmode0);
468 oprnd2 = TREE_OPERAND (exp, 2);
469 wmode = TYPE_MODE (TREE_TYPE (oprnd2));
470 wxmode = insn_data[icode].operand[3].mode;
474 wmode = wxmode = insn_data[icode].operand[0].mode;
477 || ! (*insn_data[icode].operand[0].predicate) (target, wmode))
478 temp = gen_reg_rtx (wmode);
486 /* In case the insn wants input operands in modes different from
487 those of the actual operands, convert the operands. It would
488 seem that we don't need to convert CONST_INTs, but we do, so
489 that they're properly zero-extended, sign-extended or truncated
492 if (GET_MODE (op0) != xmode0 && xmode0 != VOIDmode)
493 xop0 = convert_modes (xmode0,
494 GET_MODE (op0) != VOIDmode
500 if (GET_MODE (op1) != xmode1 && xmode1 != VOIDmode)
501 xop1 = convert_modes (xmode1,
502 GET_MODE (op1) != VOIDmode
508 if (GET_MODE (wide_op) != wxmode && wxmode != VOIDmode)
509 wxop = convert_modes (wxmode,
510 GET_MODE (wide_op) != VOIDmode
515 /* Now, if insn's predicates don't allow our operands, put them into
518 if (! (*insn_data[icode].operand[1].predicate) (xop0, xmode0)
519 && xmode0 != VOIDmode)
520 xop0 = copy_to_mode_reg (xmode0, xop0);
524 if (! (*insn_data[icode].operand[2].predicate) (xop1, xmode1)
525 && xmode1 != VOIDmode)
526 xop1 = copy_to_mode_reg (xmode1, xop1);
530 if (! (*insn_data[icode].operand[3].predicate) (wxop, wxmode)
531 && wxmode != VOIDmode)
532 wxop = copy_to_mode_reg (wxmode, wxop);
534 pat = GEN_FCN (icode) (temp, xop0, xop1, wxop);
537 pat = GEN_FCN (icode) (temp, xop0, xop1);
543 if (! (*insn_data[icode].operand[2].predicate) (wxop, wxmode)
544 && wxmode != VOIDmode)
545 wxop = copy_to_mode_reg (wxmode, wxop);
547 pat = GEN_FCN (icode) (temp, xop0, wxop);
550 pat = GEN_FCN (icode) (temp, xop0);
557 /* Generate code to perform an operation specified by TERNARY_OPTAB
558 on operands OP0, OP1 and OP2, with result having machine-mode MODE.
560 UNSIGNEDP is for the case where we have to widen the operands
561 to perform the operation. It says to use zero-extension.
563 If TARGET is nonzero, the value
564 is generated there, if it is convenient to do so.
565 In all cases an rtx is returned for the locus of the value;
566 this may or may not be TARGET. */
569 expand_ternary_op (enum machine_mode mode, optab ternary_optab, rtx op0,
570 rtx op1, rtx op2, rtx target, int unsignedp)
572 int icode = (int) ternary_optab->handlers[(int) mode].insn_code;
573 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
574 enum machine_mode mode1 = insn_data[icode].operand[2].mode;
575 enum machine_mode mode2 = insn_data[icode].operand[3].mode;
578 rtx xop0 = op0, xop1 = op1, xop2 = op2;
580 gcc_assert (ternary_optab->handlers[(int) mode].insn_code
581 != CODE_FOR_nothing);
583 if (!target || !insn_data[icode].operand[0].predicate (target, mode))
584 temp = gen_reg_rtx (mode);
588 /* In case the insn wants input operands in modes different from
589 those of the actual operands, convert the operands. It would
590 seem that we don't need to convert CONST_INTs, but we do, so
591 that they're properly zero-extended, sign-extended or truncated
594 if (GET_MODE (op0) != mode0 && mode0 != VOIDmode)
595 xop0 = convert_modes (mode0,
596 GET_MODE (op0) != VOIDmode
601 if (GET_MODE (op1) != mode1 && mode1 != VOIDmode)
602 xop1 = convert_modes (mode1,
603 GET_MODE (op1) != VOIDmode
608 if (GET_MODE (op2) != mode2 && mode2 != VOIDmode)
609 xop2 = convert_modes (mode2,
610 GET_MODE (op2) != VOIDmode
615 /* Now, if insn's predicates don't allow our operands, put them into
618 if (!insn_data[icode].operand[1].predicate (xop0, mode0)
619 && mode0 != VOIDmode)
620 xop0 = copy_to_mode_reg (mode0, xop0);
622 if (!insn_data[icode].operand[2].predicate (xop1, mode1)
623 && mode1 != VOIDmode)
624 xop1 = copy_to_mode_reg (mode1, xop1);
626 if (!insn_data[icode].operand[3].predicate (xop2, mode2)
627 && mode2 != VOIDmode)
628 xop2 = copy_to_mode_reg (mode2, xop2);
630 pat = GEN_FCN (icode) (temp, xop0, xop1, xop2);
637 /* Like expand_binop, but return a constant rtx if the result can be
638 calculated at compile time. The arguments and return value are
639 otherwise the same as for expand_binop. */
642 simplify_expand_binop (enum machine_mode mode, optab binoptab,
643 rtx op0, rtx op1, rtx target, int unsignedp,
644 enum optab_methods methods)
646 if (CONSTANT_P (op0) && CONSTANT_P (op1))
648 rtx x = simplify_binary_operation (binoptab->code, mode, op0, op1);
654 return expand_binop (mode, binoptab, op0, op1, target, unsignedp, methods);
657 /* Like simplify_expand_binop, but always put the result in TARGET.
658 Return true if the expansion succeeded. */
661 force_expand_binop (enum machine_mode mode, optab binoptab,
662 rtx op0, rtx op1, rtx target, int unsignedp,
663 enum optab_methods methods)
665 rtx x = simplify_expand_binop (mode, binoptab, op0, op1,
666 target, unsignedp, methods);
670 emit_move_insn (target, x);
674 /* Generate insns for VEC_LSHIFT_EXPR, VEC_RSHIFT_EXPR. */
677 expand_vec_shift_expr (tree vec_shift_expr, rtx target)
679 enum insn_code icode;
680 rtx rtx_op1, rtx_op2;
681 enum machine_mode mode1;
682 enum machine_mode mode2;
683 enum machine_mode mode = TYPE_MODE (TREE_TYPE (vec_shift_expr));
684 tree vec_oprnd = TREE_OPERAND (vec_shift_expr, 0);
685 tree shift_oprnd = TREE_OPERAND (vec_shift_expr, 1);
689 switch (TREE_CODE (vec_shift_expr))
691 case VEC_RSHIFT_EXPR:
692 shift_optab = vec_shr_optab;
694 case VEC_LSHIFT_EXPR:
695 shift_optab = vec_shl_optab;
701 icode = (int) shift_optab->handlers[(int) mode].insn_code;
702 gcc_assert (icode != CODE_FOR_nothing);
704 mode1 = insn_data[icode].operand[1].mode;
705 mode2 = insn_data[icode].operand[2].mode;
707 rtx_op1 = expand_normal (vec_oprnd);
708 if (!(*insn_data[icode].operand[1].predicate) (rtx_op1, mode1)
709 && mode1 != VOIDmode)
710 rtx_op1 = force_reg (mode1, rtx_op1);
712 rtx_op2 = expand_normal (shift_oprnd);
713 if (!(*insn_data[icode].operand[2].predicate) (rtx_op2, mode2)
714 && mode2 != VOIDmode)
715 rtx_op2 = force_reg (mode2, rtx_op2);
718 || ! (*insn_data[icode].operand[0].predicate) (target, mode))
719 target = gen_reg_rtx (mode);
721 /* Emit instruction */
722 pat = GEN_FCN (icode) (target, rtx_op1, rtx_op2);
729 /* This subroutine of expand_doubleword_shift handles the cases in which
730 the effective shift value is >= BITS_PER_WORD. The arguments and return
731 value are the same as for the parent routine, except that SUPERWORD_OP1
732 is the shift count to use when shifting OUTOF_INPUT into INTO_TARGET.
733 INTO_TARGET may be null if the caller has decided to calculate it. */
736 expand_superword_shift (optab binoptab, rtx outof_input, rtx superword_op1,
737 rtx outof_target, rtx into_target,
738 int unsignedp, enum optab_methods methods)
740 if (into_target != 0)
741 if (!force_expand_binop (word_mode, binoptab, outof_input, superword_op1,
742 into_target, unsignedp, methods))
745 if (outof_target != 0)
747 /* For a signed right shift, we must fill OUTOF_TARGET with copies
748 of the sign bit, otherwise we must fill it with zeros. */
749 if (binoptab != ashr_optab)
750 emit_move_insn (outof_target, CONST0_RTX (word_mode));
752 if (!force_expand_binop (word_mode, binoptab,
753 outof_input, GEN_INT (BITS_PER_WORD - 1),
754 outof_target, unsignedp, methods))
760 /* This subroutine of expand_doubleword_shift handles the cases in which
761 the effective shift value is < BITS_PER_WORD. The arguments and return
762 value are the same as for the parent routine. */
765 expand_subword_shift (enum machine_mode op1_mode, optab binoptab,
766 rtx outof_input, rtx into_input, rtx op1,
767 rtx outof_target, rtx into_target,
768 int unsignedp, enum optab_methods methods,
769 unsigned HOST_WIDE_INT shift_mask)
771 optab reverse_unsigned_shift, unsigned_shift;
774 reverse_unsigned_shift = (binoptab == ashl_optab ? lshr_optab : ashl_optab);
775 unsigned_shift = (binoptab == ashl_optab ? ashl_optab : lshr_optab);
777 /* The low OP1 bits of INTO_TARGET come from the high bits of OUTOF_INPUT.
778 We therefore need to shift OUTOF_INPUT by (BITS_PER_WORD - OP1) bits in
779 the opposite direction to BINOPTAB. */
780 if (CONSTANT_P (op1) || shift_mask >= BITS_PER_WORD)
782 carries = outof_input;
783 tmp = immed_double_const (BITS_PER_WORD, 0, op1_mode);
784 tmp = simplify_expand_binop (op1_mode, sub_optab, tmp, op1,
789 /* We must avoid shifting by BITS_PER_WORD bits since that is either
790 the same as a zero shift (if shift_mask == BITS_PER_WORD - 1) or
791 has unknown behavior. Do a single shift first, then shift by the
792 remainder. It's OK to use ~OP1 as the remainder if shift counts
793 are truncated to the mode size. */
794 carries = expand_binop (word_mode, reverse_unsigned_shift,
795 outof_input, const1_rtx, 0, unsignedp, methods);
796 if (shift_mask == BITS_PER_WORD - 1)
798 tmp = immed_double_const (-1, -1, op1_mode);
799 tmp = simplify_expand_binop (op1_mode, xor_optab, op1, tmp,
804 tmp = immed_double_const (BITS_PER_WORD - 1, 0, op1_mode);
805 tmp = simplify_expand_binop (op1_mode, sub_optab, tmp, op1,
809 if (tmp == 0 || carries == 0)
811 carries = expand_binop (word_mode, reverse_unsigned_shift,
812 carries, tmp, 0, unsignedp, methods);
816 /* Shift INTO_INPUT logically by OP1. This is the last use of INTO_INPUT
817 so the result can go directly into INTO_TARGET if convenient. */
818 tmp = expand_binop (word_mode, unsigned_shift, into_input, op1,
819 into_target, unsignedp, methods);
823 /* Now OR in the bits carried over from OUTOF_INPUT. */
824 if (!force_expand_binop (word_mode, ior_optab, tmp, carries,
825 into_target, unsignedp, methods))
828 /* Use a standard word_mode shift for the out-of half. */
829 if (outof_target != 0)
830 if (!force_expand_binop (word_mode, binoptab, outof_input, op1,
831 outof_target, unsignedp, methods))
838 #ifdef HAVE_conditional_move
839 /* Try implementing expand_doubleword_shift using conditional moves.
840 The shift is by < BITS_PER_WORD if (CMP_CODE CMP1 CMP2) is true,
841 otherwise it is by >= BITS_PER_WORD. SUBWORD_OP1 and SUPERWORD_OP1
842 are the shift counts to use in the former and latter case. All other
843 arguments are the same as the parent routine. */
846 expand_doubleword_shift_condmove (enum machine_mode op1_mode, optab binoptab,
847 enum rtx_code cmp_code, rtx cmp1, rtx cmp2,
848 rtx outof_input, rtx into_input,
849 rtx subword_op1, rtx superword_op1,
850 rtx outof_target, rtx into_target,
851 int unsignedp, enum optab_methods methods,
852 unsigned HOST_WIDE_INT shift_mask)
854 rtx outof_superword, into_superword;
856 /* Put the superword version of the output into OUTOF_SUPERWORD and
858 outof_superword = outof_target != 0 ? gen_reg_rtx (word_mode) : 0;
859 if (outof_target != 0 && subword_op1 == superword_op1)
861 /* The value INTO_TARGET >> SUBWORD_OP1, which we later store in
862 OUTOF_TARGET, is the same as the value of INTO_SUPERWORD. */
863 into_superword = outof_target;
864 if (!expand_superword_shift (binoptab, outof_input, superword_op1,
865 outof_superword, 0, unsignedp, methods))
870 into_superword = gen_reg_rtx (word_mode);
871 if (!expand_superword_shift (binoptab, outof_input, superword_op1,
872 outof_superword, into_superword,
877 /* Put the subword version directly in OUTOF_TARGET and INTO_TARGET. */
878 if (!expand_subword_shift (op1_mode, binoptab,
879 outof_input, into_input, subword_op1,
880 outof_target, into_target,
881 unsignedp, methods, shift_mask))
884 /* Select between them. Do the INTO half first because INTO_SUPERWORD
885 might be the current value of OUTOF_TARGET. */
886 if (!emit_conditional_move (into_target, cmp_code, cmp1, cmp2, op1_mode,
887 into_target, into_superword, word_mode, false))
890 if (outof_target != 0)
891 if (!emit_conditional_move (outof_target, cmp_code, cmp1, cmp2, op1_mode,
892 outof_target, outof_superword,
900 /* Expand a doubleword shift (ashl, ashr or lshr) using word-mode shifts.
901 OUTOF_INPUT and INTO_INPUT are the two word-sized halves of the first
902 input operand; the shift moves bits in the direction OUTOF_INPUT->
903 INTO_TARGET. OUTOF_TARGET and INTO_TARGET are the equivalent words
904 of the target. OP1 is the shift count and OP1_MODE is its mode.
905 If OP1 is constant, it will have been truncated as appropriate
906 and is known to be nonzero.
908 If SHIFT_MASK is zero, the result of word shifts is undefined when the
909 shift count is outside the range [0, BITS_PER_WORD). This routine must
910 avoid generating such shifts for OP1s in the range [0, BITS_PER_WORD * 2).
912 If SHIFT_MASK is nonzero, all word-mode shift counts are effectively
913 masked by it and shifts in the range [BITS_PER_WORD, SHIFT_MASK) will
914 fill with zeros or sign bits as appropriate.
916 If SHIFT_MASK is BITS_PER_WORD - 1, this routine will synthesize
917 a doubleword shift whose equivalent mask is BITS_PER_WORD * 2 - 1.
918 Doing this preserves semantics required by SHIFT_COUNT_TRUNCATED.
919 In all other cases, shifts by values outside [0, BITS_PER_UNIT * 2)
922 BINOPTAB, UNSIGNEDP and METHODS are as for expand_binop. This function
923 may not use INTO_INPUT after modifying INTO_TARGET, and similarly for
924 OUTOF_INPUT and OUTOF_TARGET. OUTOF_TARGET can be null if the parent
925 function wants to calculate it itself.
927 Return true if the shift could be successfully synthesized. */
930 expand_doubleword_shift (enum machine_mode op1_mode, optab binoptab,
931 rtx outof_input, rtx into_input, rtx op1,
932 rtx outof_target, rtx into_target,
933 int unsignedp, enum optab_methods methods,
934 unsigned HOST_WIDE_INT shift_mask)
936 rtx superword_op1, tmp, cmp1, cmp2;
937 rtx subword_label, done_label;
938 enum rtx_code cmp_code;
940 /* See if word-mode shifts by BITS_PER_WORD...BITS_PER_WORD * 2 - 1 will
941 fill the result with sign or zero bits as appropriate. If so, the value
942 of OUTOF_TARGET will always be (SHIFT OUTOF_INPUT OP1). Recursively call
943 this routine to calculate INTO_TARGET (which depends on both OUTOF_INPUT
944 and INTO_INPUT), then emit code to set up OUTOF_TARGET.
946 This isn't worthwhile for constant shifts since the optimizers will
947 cope better with in-range shift counts. */
948 if (shift_mask >= BITS_PER_WORD
950 && !CONSTANT_P (op1))
952 if (!expand_doubleword_shift (op1_mode, binoptab,
953 outof_input, into_input, op1,
955 unsignedp, methods, shift_mask))
957 if (!force_expand_binop (word_mode, binoptab, outof_input, op1,
958 outof_target, unsignedp, methods))
963 /* Set CMP_CODE, CMP1 and CMP2 so that the rtx (CMP_CODE CMP1 CMP2)
964 is true when the effective shift value is less than BITS_PER_WORD.
965 Set SUPERWORD_OP1 to the shift count that should be used to shift
966 OUTOF_INPUT into INTO_TARGET when the condition is false. */
967 tmp = immed_double_const (BITS_PER_WORD, 0, op1_mode);
968 if (!CONSTANT_P (op1) && shift_mask == BITS_PER_WORD - 1)
970 /* Set CMP1 to OP1 & BITS_PER_WORD. The result is zero iff OP1
971 is a subword shift count. */
972 cmp1 = simplify_expand_binop (op1_mode, and_optab, op1, tmp,
974 cmp2 = CONST0_RTX (op1_mode);
980 /* Set CMP1 to OP1 - BITS_PER_WORD. */
981 cmp1 = simplify_expand_binop (op1_mode, sub_optab, op1, tmp,
983 cmp2 = CONST0_RTX (op1_mode);
985 superword_op1 = cmp1;
990 /* If we can compute the condition at compile time, pick the
991 appropriate subroutine. */
992 tmp = simplify_relational_operation (cmp_code, SImode, op1_mode, cmp1, cmp2);
993 if (tmp != 0 && GET_CODE (tmp) == CONST_INT)
995 if (tmp == const0_rtx)
996 return expand_superword_shift (binoptab, outof_input, superword_op1,
997 outof_target, into_target,
1000 return expand_subword_shift (op1_mode, binoptab,
1001 outof_input, into_input, op1,
1002 outof_target, into_target,
1003 unsignedp, methods, shift_mask);
1006 #ifdef HAVE_conditional_move
1007 /* Try using conditional moves to generate straight-line code. */
1009 rtx start = get_last_insn ();
1010 if (expand_doubleword_shift_condmove (op1_mode, binoptab,
1011 cmp_code, cmp1, cmp2,
1012 outof_input, into_input,
1014 outof_target, into_target,
1015 unsignedp, methods, shift_mask))
1017 delete_insns_since (start);
1021 /* As a last resort, use branches to select the correct alternative. */
1022 subword_label = gen_label_rtx ();
1023 done_label = gen_label_rtx ();
1026 do_compare_rtx_and_jump (cmp1, cmp2, cmp_code, false, op1_mode,
1027 0, 0, subword_label);
1030 if (!expand_superword_shift (binoptab, outof_input, superword_op1,
1031 outof_target, into_target,
1032 unsignedp, methods))
1035 emit_jump_insn (gen_jump (done_label));
1037 emit_label (subword_label);
1039 if (!expand_subword_shift (op1_mode, binoptab,
1040 outof_input, into_input, op1,
1041 outof_target, into_target,
1042 unsignedp, methods, shift_mask))
1045 emit_label (done_label);
1049 /* Subroutine of expand_binop. Perform a double word multiplication of
1050 operands OP0 and OP1 both of mode MODE, which is exactly twice as wide
1051 as the target's word_mode. This function return NULL_RTX if anything
1052 goes wrong, in which case it may have already emitted instructions
1053 which need to be deleted.
1055 If we want to multiply two two-word values and have normal and widening
1056 multiplies of single-word values, we can do this with three smaller
1057 multiplications. Note that we do not make a REG_NO_CONFLICT block here
1058 because we are not operating on one word at a time.
1060 The multiplication proceeds as follows:
1061 _______________________
1062 [__op0_high_|__op0_low__]
1063 _______________________
1064 * [__op1_high_|__op1_low__]
1065 _______________________________________________
1066 _______________________
1067 (1) [__op0_low__*__op1_low__]
1068 _______________________
1069 (2a) [__op0_low__*__op1_high_]
1070 _______________________
1071 (2b) [__op0_high_*__op1_low__]
1072 _______________________
1073 (3) [__op0_high_*__op1_high_]
1076 This gives a 4-word result. Since we are only interested in the
1077 lower 2 words, partial result (3) and the upper words of (2a) and
1078 (2b) don't need to be calculated. Hence (2a) and (2b) can be
1079 calculated using non-widening multiplication.
1081 (1), however, needs to be calculated with an unsigned widening
1082 multiplication. If this operation is not directly supported we
1083 try using a signed widening multiplication and adjust the result.
1084 This adjustment works as follows:
1086 If both operands are positive then no adjustment is needed.
1088 If the operands have different signs, for example op0_low < 0 and
1089 op1_low >= 0, the instruction treats the most significant bit of
1090 op0_low as a sign bit instead of a bit with significance
1091 2**(BITS_PER_WORD-1), i.e. the instruction multiplies op1_low
1092 with 2**BITS_PER_WORD - op0_low, and two's complements the
1093 result. Conclusion: We need to add op1_low * 2**BITS_PER_WORD to
1096 Similarly, if both operands are negative, we need to add
1097 (op0_low + op1_low) * 2**BITS_PER_WORD.
1099 We use a trick to adjust quickly. We logically shift op0_low right
1100 (op1_low) BITS_PER_WORD-1 steps to get 0 or 1, and add this to
1101 op0_high (op1_high) before it is used to calculate 2b (2a). If no
1102 logical shift exists, we do an arithmetic right shift and subtract
1106 expand_doubleword_mult (enum machine_mode mode, rtx op0, rtx op1, rtx target,
1107 bool umulp, enum optab_methods methods)
1109 int low = (WORDS_BIG_ENDIAN ? 1 : 0);
1110 int high = (WORDS_BIG_ENDIAN ? 0 : 1);
1111 rtx wordm1 = umulp ? NULL_RTX : GEN_INT (BITS_PER_WORD - 1);
1112 rtx product, adjust, product_high, temp;
1114 rtx op0_high = operand_subword_force (op0, high, mode);
1115 rtx op0_low = operand_subword_force (op0, low, mode);
1116 rtx op1_high = operand_subword_force (op1, high, mode);
1117 rtx op1_low = operand_subword_force (op1, low, mode);
1119 /* If we're using an unsigned multiply to directly compute the product
1120 of the low-order words of the operands and perform any required
1121 adjustments of the operands, we begin by trying two more multiplications
1122 and then computing the appropriate sum.
1124 We have checked above that the required addition is provided.
1125 Full-word addition will normally always succeed, especially if
1126 it is provided at all, so we don't worry about its failure. The
1127 multiplication may well fail, however, so we do handle that. */
1131 /* ??? This could be done with emit_store_flag where available. */
1132 temp = expand_binop (word_mode, lshr_optab, op0_low, wordm1,
1133 NULL_RTX, 1, methods);
1135 op0_high = expand_binop (word_mode, add_optab, op0_high, temp,
1136 NULL_RTX, 0, OPTAB_DIRECT);
1139 temp = expand_binop (word_mode, ashr_optab, op0_low, wordm1,
1140 NULL_RTX, 0, methods);
1143 op0_high = expand_binop (word_mode, sub_optab, op0_high, temp,
1144 NULL_RTX, 0, OPTAB_DIRECT);
1151 adjust = expand_binop (word_mode, smul_optab, op0_high, op1_low,
1152 NULL_RTX, 0, OPTAB_DIRECT);
1156 /* OP0_HIGH should now be dead. */
1160 /* ??? This could be done with emit_store_flag where available. */
1161 temp = expand_binop (word_mode, lshr_optab, op1_low, wordm1,
1162 NULL_RTX, 1, methods);
1164 op1_high = expand_binop (word_mode, add_optab, op1_high, temp,
1165 NULL_RTX, 0, OPTAB_DIRECT);
1168 temp = expand_binop (word_mode, ashr_optab, op1_low, wordm1,
1169 NULL_RTX, 0, methods);
1172 op1_high = expand_binop (word_mode, sub_optab, op1_high, temp,
1173 NULL_RTX, 0, OPTAB_DIRECT);
1180 temp = expand_binop (word_mode, smul_optab, op1_high, op0_low,
1181 NULL_RTX, 0, OPTAB_DIRECT);
1185 /* OP1_HIGH should now be dead. */
1187 adjust = expand_binop (word_mode, add_optab, adjust, temp,
1188 adjust, 0, OPTAB_DIRECT);
1190 if (target && !REG_P (target))
1194 product = expand_binop (mode, umul_widen_optab, op0_low, op1_low,
1195 target, 1, OPTAB_DIRECT);
1197 product = expand_binop (mode, smul_widen_optab, op0_low, op1_low,
1198 target, 1, OPTAB_DIRECT);
1203 product_high = operand_subword (product, high, 1, mode);
1204 adjust = expand_binop (word_mode, add_optab, product_high, adjust,
1205 REG_P (product_high) ? product_high : adjust,
1207 emit_move_insn (product_high, adjust);
1211 /* Wrapper around expand_binop which takes an rtx code to specify
1212 the operation to perform, not an optab pointer. All other
1213 arguments are the same. */
1215 expand_simple_binop (enum machine_mode mode, enum rtx_code code, rtx op0,
1216 rtx op1, rtx target, int unsignedp,
1217 enum optab_methods methods)
1219 optab binop = code_to_optab[(int) code];
1222 return expand_binop (mode, binop, op0, op1, target, unsignedp, methods);
1225 /* Return whether OP0 and OP1 should be swapped when expanding a commutative
1226 binop. Order them according to commutative_operand_precedence and, if
1227 possible, try to put TARGET or a pseudo first. */
1229 swap_commutative_operands_with_target (rtx target, rtx op0, rtx op1)
1231 int op0_prec = commutative_operand_precedence (op0);
1232 int op1_prec = commutative_operand_precedence (op1);
1234 if (op0_prec < op1_prec)
1237 if (op0_prec > op1_prec)
1240 /* With equal precedence, both orders are ok, but it is better if the
1241 first operand is TARGET, or if both TARGET and OP0 are pseudos. */
1242 if (target == 0 || REG_P (target))
1243 return (REG_P (op1) && !REG_P (op0)) || target == op1;
1245 return rtx_equal_p (op1, target);
1249 /* Helper function for expand_binop: handle the case where there
1250 is an insn that directly implements the indicated operation.
1251 Returns null if this is not possible. */
1253 expand_binop_directly (enum machine_mode mode, optab binoptab,
1255 rtx target, int unsignedp, enum optab_methods methods,
1256 int commutative_op, rtx last)
1258 int icode = (int) binoptab->handlers[(int) mode].insn_code;
1259 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
1260 enum machine_mode mode1 = insn_data[icode].operand[2].mode;
1261 enum machine_mode tmp_mode;
1263 rtx xop0 = op0, xop1 = op1;
1269 temp = gen_reg_rtx (mode);
1271 /* If it is a commutative operator and the modes would match
1272 if we would swap the operands, we can save the conversions. */
1275 if (GET_MODE (op0) != mode0 && GET_MODE (op1) != mode1
1276 && GET_MODE (op0) == mode1 && GET_MODE (op1) == mode0)
1280 tmp = op0; op0 = op1; op1 = tmp;
1281 tmp = xop0; xop0 = xop1; xop1 = tmp;
1285 /* In case the insn wants input operands in modes different from
1286 those of the actual operands, convert the operands. It would
1287 seem that we don't need to convert CONST_INTs, but we do, so
1288 that they're properly zero-extended, sign-extended or truncated
1291 if (GET_MODE (op0) != mode0 && mode0 != VOIDmode)
1292 xop0 = convert_modes (mode0,
1293 GET_MODE (op0) != VOIDmode
1298 if (GET_MODE (op1) != mode1 && mode1 != VOIDmode)
1299 xop1 = convert_modes (mode1,
1300 GET_MODE (op1) != VOIDmode
1305 /* Now, if insn's predicates don't allow our operands, put them into
1308 if (!insn_data[icode].operand[1].predicate (xop0, mode0)
1309 && mode0 != VOIDmode)
1310 xop0 = copy_to_mode_reg (mode0, xop0);
1312 if (!insn_data[icode].operand[2].predicate (xop1, mode1)
1313 && mode1 != VOIDmode)
1314 xop1 = copy_to_mode_reg (mode1, xop1);
1316 if (binoptab == vec_pack_trunc_optab
1317 || binoptab == vec_pack_usat_optab
1318 || binoptab == vec_pack_ssat_optab
1319 || binoptab == vec_pack_ufix_trunc_optab
1320 || binoptab == vec_pack_sfix_trunc_optab)
1322 /* The mode of the result is different then the mode of the
1324 tmp_mode = insn_data[icode].operand[0].mode;
1325 if (GET_MODE_NUNITS (tmp_mode) != 2 * GET_MODE_NUNITS (mode))
1331 if (!insn_data[icode].operand[0].predicate (temp, tmp_mode))
1332 temp = gen_reg_rtx (tmp_mode);
1334 pat = GEN_FCN (icode) (temp, xop0, xop1);
1337 /* If PAT is composed of more than one insn, try to add an appropriate
1338 REG_EQUAL note to it. If we can't because TEMP conflicts with an
1339 operand, call expand_binop again, this time without a target. */
1340 if (INSN_P (pat) && NEXT_INSN (pat) != NULL_RTX
1341 && ! add_equal_note (pat, temp, binoptab->code, xop0, xop1))
1343 delete_insns_since (last);
1344 return expand_binop (mode, binoptab, op0, op1, NULL_RTX,
1345 unsignedp, methods);
1352 delete_insns_since (last);
1356 /* Generate code to perform an operation specified by BINOPTAB
1357 on operands OP0 and OP1, with result having machine-mode MODE.
1359 UNSIGNEDP is for the case where we have to widen the operands
1360 to perform the operation. It says to use zero-extension.
1362 If TARGET is nonzero, the value
1363 is generated there, if it is convenient to do so.
1364 In all cases an rtx is returned for the locus of the value;
1365 this may or may not be TARGET. */
1368 expand_binop (enum machine_mode mode, optab binoptab, rtx op0, rtx op1,
1369 rtx target, int unsignedp, enum optab_methods methods)
1371 enum optab_methods next_methods
1372 = (methods == OPTAB_LIB || methods == OPTAB_LIB_WIDEN
1373 ? OPTAB_WIDEN : methods);
1374 enum mode_class class;
1375 enum machine_mode wider_mode;
1377 int commutative_op = 0;
1378 int shift_op = (binoptab->code == ASHIFT
1379 || binoptab->code == ASHIFTRT
1380 || binoptab->code == LSHIFTRT
1381 || binoptab->code == ROTATE
1382 || binoptab->code == ROTATERT);
1383 rtx entry_last = get_last_insn ();
1386 class = GET_MODE_CLASS (mode);
1388 /* If subtracting an integer constant, convert this into an addition of
1389 the negated constant. */
1391 if (binoptab == sub_optab && GET_CODE (op1) == CONST_INT)
1393 op1 = negate_rtx (mode, op1);
1394 binoptab = add_optab;
1397 /* If we are inside an appropriately-short loop and we are optimizing,
1398 force expensive constants into a register. */
1399 if (CONSTANT_P (op0) && optimize
1400 && rtx_cost (op0, binoptab->code) > COSTS_N_INSNS (1))
1402 if (GET_MODE (op0) != VOIDmode)
1403 op0 = convert_modes (mode, VOIDmode, op0, unsignedp);
1404 op0 = force_reg (mode, op0);
1407 if (CONSTANT_P (op1) && optimize
1408 && ! shift_op && rtx_cost (op1, binoptab->code) > COSTS_N_INSNS (1))
1410 if (GET_MODE (op1) != VOIDmode)
1411 op1 = convert_modes (mode, VOIDmode, op1, unsignedp);
1412 op1 = force_reg (mode, op1);
1415 /* Record where to delete back to if we backtrack. */
1416 last = get_last_insn ();
1418 /* If operation is commutative,
1419 try to make the first operand a register.
1420 Even better, try to make it the same as the target.
1421 Also try to make the last operand a constant. */
1422 if (GET_RTX_CLASS (binoptab->code) == RTX_COMM_ARITH
1423 || binoptab == smul_widen_optab
1424 || binoptab == umul_widen_optab
1425 || binoptab == smul_highpart_optab
1426 || binoptab == umul_highpart_optab)
1430 if (swap_commutative_operands_with_target (target, op0, op1))
1438 /* If we can do it with a three-operand insn, do so. */
1440 if (methods != OPTAB_MUST_WIDEN
1441 && binoptab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
1443 temp = expand_binop_directly (mode, binoptab, op0, op1, target,
1444 unsignedp, methods, commutative_op, last);
1449 /* If we were trying to rotate, and that didn't work, try rotating
1450 the other direction before falling back to shifts and bitwise-or. */
1451 if (((binoptab == rotl_optab
1452 && rotr_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
1453 || (binoptab == rotr_optab
1454 && rotl_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing))
1455 && class == MODE_INT)
1457 optab otheroptab = (binoptab == rotl_optab ? rotr_optab : rotl_optab);
1459 unsigned int bits = GET_MODE_BITSIZE (mode);
1461 if (GET_CODE (op1) == CONST_INT)
1462 newop1 = GEN_INT (bits - INTVAL (op1));
1463 else if (targetm.shift_truncation_mask (mode) == bits - 1)
1464 newop1 = negate_rtx (mode, op1);
1466 newop1 = expand_binop (mode, sub_optab,
1467 GEN_INT (bits), op1,
1468 NULL_RTX, unsignedp, OPTAB_DIRECT);
1470 temp = expand_binop_directly (mode, otheroptab, op0, newop1,
1471 target, unsignedp, methods,
1472 commutative_op, last);
1477 /* If this is a multiply, see if we can do a widening operation that
1478 takes operands of this mode and makes a wider mode. */
1480 if (binoptab == smul_optab
1481 && GET_MODE_WIDER_MODE (mode) != VOIDmode
1482 && (((unsignedp ? umul_widen_optab : smul_widen_optab)
1483 ->handlers[(int) GET_MODE_WIDER_MODE (mode)].insn_code)
1484 != CODE_FOR_nothing))
1486 temp = expand_binop (GET_MODE_WIDER_MODE (mode),
1487 unsignedp ? umul_widen_optab : smul_widen_optab,
1488 op0, op1, NULL_RTX, unsignedp, OPTAB_DIRECT);
1492 if (GET_MODE_CLASS (mode) == MODE_INT
1493 && TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode),
1494 GET_MODE_BITSIZE (GET_MODE (temp))))
1495 return gen_lowpart (mode, temp);
1497 return convert_to_mode (mode, temp, unsignedp);
1501 /* Look for a wider mode of the same class for which we think we
1502 can open-code the operation. Check for a widening multiply at the
1503 wider mode as well. */
1505 if (CLASS_HAS_WIDER_MODES_P (class)
1506 && methods != OPTAB_DIRECT && methods != OPTAB_LIB)
1507 for (wider_mode = GET_MODE_WIDER_MODE (mode);
1508 wider_mode != VOIDmode;
1509 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
1511 if (binoptab->handlers[(int) wider_mode].insn_code != CODE_FOR_nothing
1512 || (binoptab == smul_optab
1513 && GET_MODE_WIDER_MODE (wider_mode) != VOIDmode
1514 && (((unsignedp ? umul_widen_optab : smul_widen_optab)
1515 ->handlers[(int) GET_MODE_WIDER_MODE (wider_mode)].insn_code)
1516 != CODE_FOR_nothing)))
1518 rtx xop0 = op0, xop1 = op1;
1521 /* For certain integer operations, we need not actually extend
1522 the narrow operands, as long as we will truncate
1523 the results to the same narrowness. */
1525 if ((binoptab == ior_optab || binoptab == and_optab
1526 || binoptab == xor_optab
1527 || binoptab == add_optab || binoptab == sub_optab
1528 || binoptab == smul_optab || binoptab == ashl_optab)
1529 && class == MODE_INT)
1532 xop0 = widen_operand (xop0, wider_mode, mode, unsignedp, no_extend);
1534 /* The second operand of a shift must always be extended. */
1535 xop1 = widen_operand (xop1, wider_mode, mode, unsignedp,
1536 no_extend && binoptab != ashl_optab);
1538 temp = expand_binop (wider_mode, binoptab, xop0, xop1, NULL_RTX,
1539 unsignedp, OPTAB_DIRECT);
1542 if (class != MODE_INT
1543 || !TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode),
1544 GET_MODE_BITSIZE (wider_mode)))
1547 target = gen_reg_rtx (mode);
1548 convert_move (target, temp, 0);
1552 return gen_lowpart (mode, temp);
1555 delete_insns_since (last);
1559 /* These can be done a word at a time. */
1560 if ((binoptab == and_optab || binoptab == ior_optab || binoptab == xor_optab)
1561 && class == MODE_INT
1562 && GET_MODE_SIZE (mode) > UNITS_PER_WORD
1563 && binoptab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing)
1569 /* If TARGET is the same as one of the operands, the REG_EQUAL note
1570 won't be accurate, so use a new target. */
1571 if (target == 0 || target == op0 || target == op1)
1572 target = gen_reg_rtx (mode);
1576 /* Do the actual arithmetic. */
1577 for (i = 0; i < GET_MODE_BITSIZE (mode) / BITS_PER_WORD; i++)
1579 rtx target_piece = operand_subword (target, i, 1, mode);
1580 rtx x = expand_binop (word_mode, binoptab,
1581 operand_subword_force (op0, i, mode),
1582 operand_subword_force (op1, i, mode),
1583 target_piece, unsignedp, next_methods);
1588 if (target_piece != x)
1589 emit_move_insn (target_piece, x);
1592 insns = get_insns ();
1595 if (i == GET_MODE_BITSIZE (mode) / BITS_PER_WORD)
1597 if (binoptab->code != UNKNOWN)
1599 = gen_rtx_fmt_ee (binoptab->code, mode,
1600 copy_rtx (op0), copy_rtx (op1));
1604 emit_no_conflict_block (insns, target, op0, op1, equiv_value);
1609 /* Synthesize double word shifts from single word shifts. */
1610 if ((binoptab == lshr_optab || binoptab == ashl_optab
1611 || binoptab == ashr_optab)
1612 && class == MODE_INT
1613 && (GET_CODE (op1) == CONST_INT || !optimize_size)
1614 && GET_MODE_SIZE (mode) == 2 * UNITS_PER_WORD
1615 && binoptab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing
1616 && ashl_optab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing
1617 && lshr_optab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing)
1619 unsigned HOST_WIDE_INT shift_mask, double_shift_mask;
1620 enum machine_mode op1_mode;
1622 double_shift_mask = targetm.shift_truncation_mask (mode);
1623 shift_mask = targetm.shift_truncation_mask (word_mode);
1624 op1_mode = GET_MODE (op1) != VOIDmode ? GET_MODE (op1) : word_mode;
1626 /* Apply the truncation to constant shifts. */
1627 if (double_shift_mask > 0 && GET_CODE (op1) == CONST_INT)
1628 op1 = GEN_INT (INTVAL (op1) & double_shift_mask);
1630 if (op1 == CONST0_RTX (op1_mode))
1633 /* Make sure that this is a combination that expand_doubleword_shift
1634 can handle. See the comments there for details. */
1635 if (double_shift_mask == 0
1636 || (shift_mask == BITS_PER_WORD - 1
1637 && double_shift_mask == BITS_PER_WORD * 2 - 1))
1639 rtx insns, equiv_value;
1640 rtx into_target, outof_target;
1641 rtx into_input, outof_input;
1642 int left_shift, outof_word;
1644 /* If TARGET is the same as one of the operands, the REG_EQUAL note
1645 won't be accurate, so use a new target. */
1646 if (target == 0 || target == op0 || target == op1)
1647 target = gen_reg_rtx (mode);
1651 /* OUTOF_* is the word we are shifting bits away from, and
1652 INTO_* is the word that we are shifting bits towards, thus
1653 they differ depending on the direction of the shift and
1654 WORDS_BIG_ENDIAN. */
1656 left_shift = binoptab == ashl_optab;
1657 outof_word = left_shift ^ ! WORDS_BIG_ENDIAN;
1659 outof_target = operand_subword (target, outof_word, 1, mode);
1660 into_target = operand_subword (target, 1 - outof_word, 1, mode);
1662 outof_input = operand_subword_force (op0, outof_word, mode);
1663 into_input = operand_subword_force (op0, 1 - outof_word, mode);
1665 if (expand_doubleword_shift (op1_mode, binoptab,
1666 outof_input, into_input, op1,
1667 outof_target, into_target,
1668 unsignedp, next_methods, shift_mask))
1670 insns = get_insns ();
1673 equiv_value = gen_rtx_fmt_ee (binoptab->code, mode, op0, op1);
1674 emit_no_conflict_block (insns, target, op0, op1, equiv_value);
1681 /* Synthesize double word rotates from single word shifts. */
1682 if ((binoptab == rotl_optab || binoptab == rotr_optab)
1683 && class == MODE_INT
1684 && GET_CODE (op1) == CONST_INT
1685 && GET_MODE_SIZE (mode) == 2 * UNITS_PER_WORD
1686 && ashl_optab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing
1687 && lshr_optab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing)
1690 rtx into_target, outof_target;
1691 rtx into_input, outof_input;
1693 int shift_count, left_shift, outof_word;
1695 /* If TARGET is the same as one of the operands, the REG_EQUAL note
1696 won't be accurate, so use a new target. Do this also if target is not
1697 a REG, first because having a register instead may open optimization
1698 opportunities, and second because if target and op0 happen to be MEMs
1699 designating the same location, we would risk clobbering it too early
1700 in the code sequence we generate below. */
1701 if (target == 0 || target == op0 || target == op1 || ! REG_P (target))
1702 target = gen_reg_rtx (mode);
1706 shift_count = INTVAL (op1);
1708 /* OUTOF_* is the word we are shifting bits away from, and
1709 INTO_* is the word that we are shifting bits towards, thus
1710 they differ depending on the direction of the shift and
1711 WORDS_BIG_ENDIAN. */
1713 left_shift = (binoptab == rotl_optab);
1714 outof_word = left_shift ^ ! WORDS_BIG_ENDIAN;
1716 outof_target = operand_subword (target, outof_word, 1, mode);
1717 into_target = operand_subword (target, 1 - outof_word, 1, mode);
1719 outof_input = operand_subword_force (op0, outof_word, mode);
1720 into_input = operand_subword_force (op0, 1 - outof_word, mode);
1722 if (shift_count == BITS_PER_WORD)
1724 /* This is just a word swap. */
1725 emit_move_insn (outof_target, into_input);
1726 emit_move_insn (into_target, outof_input);
1731 rtx into_temp1, into_temp2, outof_temp1, outof_temp2;
1732 rtx first_shift_count, second_shift_count;
1733 optab reverse_unsigned_shift, unsigned_shift;
1735 reverse_unsigned_shift = (left_shift ^ (shift_count < BITS_PER_WORD)
1736 ? lshr_optab : ashl_optab);
1738 unsigned_shift = (left_shift ^ (shift_count < BITS_PER_WORD)
1739 ? ashl_optab : lshr_optab);
1741 if (shift_count > BITS_PER_WORD)
1743 first_shift_count = GEN_INT (shift_count - BITS_PER_WORD);
1744 second_shift_count = GEN_INT (2 * BITS_PER_WORD - shift_count);
1748 first_shift_count = GEN_INT (BITS_PER_WORD - shift_count);
1749 second_shift_count = GEN_INT (shift_count);
1752 into_temp1 = expand_binop (word_mode, unsigned_shift,
1753 outof_input, first_shift_count,
1754 NULL_RTX, unsignedp, next_methods);
1755 into_temp2 = expand_binop (word_mode, reverse_unsigned_shift,
1756 into_input, second_shift_count,
1757 NULL_RTX, unsignedp, next_methods);
1759 if (into_temp1 != 0 && into_temp2 != 0)
1760 inter = expand_binop (word_mode, ior_optab, into_temp1, into_temp2,
1761 into_target, unsignedp, next_methods);
1765 if (inter != 0 && inter != into_target)
1766 emit_move_insn (into_target, inter);
1768 outof_temp1 = expand_binop (word_mode, unsigned_shift,
1769 into_input, first_shift_count,
1770 NULL_RTX, unsignedp, next_methods);
1771 outof_temp2 = expand_binop (word_mode, reverse_unsigned_shift,
1772 outof_input, second_shift_count,
1773 NULL_RTX, unsignedp, next_methods);
1775 if (inter != 0 && outof_temp1 != 0 && outof_temp2 != 0)
1776 inter = expand_binop (word_mode, ior_optab,
1777 outof_temp1, outof_temp2,
1778 outof_target, unsignedp, next_methods);
1780 if (inter != 0 && inter != outof_target)
1781 emit_move_insn (outof_target, inter);
1784 insns = get_insns ();
1789 /* One may be tempted to wrap the insns in a REG_NO_CONFLICT
1790 block to help the register allocator a bit. But a multi-word
1791 rotate will need all the input bits when setting the output
1792 bits, so there clearly is a conflict between the input and
1793 output registers. So we can't use a no-conflict block here. */
1799 /* These can be done a word at a time by propagating carries. */
1800 if ((binoptab == add_optab || binoptab == sub_optab)
1801 && class == MODE_INT
1802 && GET_MODE_SIZE (mode) >= 2 * UNITS_PER_WORD
1803 && binoptab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing)
1806 optab otheroptab = binoptab == add_optab ? sub_optab : add_optab;
1807 const unsigned int nwords = GET_MODE_BITSIZE (mode) / BITS_PER_WORD;
1808 rtx carry_in = NULL_RTX, carry_out = NULL_RTX;
1809 rtx xop0, xop1, xtarget;
1811 /* We can handle either a 1 or -1 value for the carry. If STORE_FLAG
1812 value is one of those, use it. Otherwise, use 1 since it is the
1813 one easiest to get. */
1814 #if STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1
1815 int normalizep = STORE_FLAG_VALUE;
1820 /* Prepare the operands. */
1821 xop0 = force_reg (mode, op0);
1822 xop1 = force_reg (mode, op1);
1824 xtarget = gen_reg_rtx (mode);
1826 if (target == 0 || !REG_P (target))
1829 /* Indicate for flow that the entire target reg is being set. */
1831 emit_insn (gen_rtx_CLOBBER (VOIDmode, xtarget));
1833 /* Do the actual arithmetic. */
1834 for (i = 0; i < nwords; i++)
1836 int index = (WORDS_BIG_ENDIAN ? nwords - i - 1 : i);
1837 rtx target_piece = operand_subword (xtarget, index, 1, mode);
1838 rtx op0_piece = operand_subword_force (xop0, index, mode);
1839 rtx op1_piece = operand_subword_force (xop1, index, mode);
1842 /* Main add/subtract of the input operands. */
1843 x = expand_binop (word_mode, binoptab,
1844 op0_piece, op1_piece,
1845 target_piece, unsignedp, next_methods);
1851 /* Store carry from main add/subtract. */
1852 carry_out = gen_reg_rtx (word_mode);
1853 carry_out = emit_store_flag_force (carry_out,
1854 (binoptab == add_optab
1857 word_mode, 1, normalizep);
1864 /* Add/subtract previous carry to main result. */
1865 newx = expand_binop (word_mode,
1866 normalizep == 1 ? binoptab : otheroptab,
1868 NULL_RTX, 1, next_methods);
1872 /* Get out carry from adding/subtracting carry in. */
1873 rtx carry_tmp = gen_reg_rtx (word_mode);
1874 carry_tmp = emit_store_flag_force (carry_tmp,
1875 (binoptab == add_optab
1878 word_mode, 1, normalizep);
1880 /* Logical-ior the two poss. carry together. */
1881 carry_out = expand_binop (word_mode, ior_optab,
1882 carry_out, carry_tmp,
1883 carry_out, 0, next_methods);
1887 emit_move_insn (target_piece, newx);
1891 if (x != target_piece)
1892 emit_move_insn (target_piece, x);
1895 carry_in = carry_out;
1898 if (i == GET_MODE_BITSIZE (mode) / (unsigned) BITS_PER_WORD)
1900 if (mov_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing
1901 || ! rtx_equal_p (target, xtarget))
1903 rtx temp = emit_move_insn (target, xtarget);
1905 set_unique_reg_note (temp,
1907 gen_rtx_fmt_ee (binoptab->code, mode,
1918 delete_insns_since (last);
1921 /* Attempt to synthesize double word multiplies using a sequence of word
1922 mode multiplications. We first attempt to generate a sequence using a
1923 more efficient unsigned widening multiply, and if that fails we then
1924 try using a signed widening multiply. */
1926 if (binoptab == smul_optab
1927 && class == MODE_INT
1928 && GET_MODE_SIZE (mode) == 2 * UNITS_PER_WORD
1929 && smul_optab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing
1930 && add_optab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing)
1932 rtx product = NULL_RTX;
1934 if (umul_widen_optab->handlers[(int) mode].insn_code
1935 != CODE_FOR_nothing)
1937 product = expand_doubleword_mult (mode, op0, op1, target,
1940 delete_insns_since (last);
1943 if (product == NULL_RTX
1944 && smul_widen_optab->handlers[(int) mode].insn_code
1945 != CODE_FOR_nothing)
1947 product = expand_doubleword_mult (mode, op0, op1, target,
1950 delete_insns_since (last);
1953 if (product != NULL_RTX)
1955 if (mov_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
1957 temp = emit_move_insn (target ? target : product, product);
1958 set_unique_reg_note (temp,
1960 gen_rtx_fmt_ee (MULT, mode,
1968 /* It can't be open-coded in this mode.
1969 Use a library call if one is available and caller says that's ok. */
1971 if (binoptab->handlers[(int) mode].libfunc
1972 && (methods == OPTAB_LIB || methods == OPTAB_LIB_WIDEN))
1976 enum machine_mode op1_mode = mode;
1983 op1_mode = targetm.libgcc_shift_count_mode ();
1984 /* Specify unsigned here,
1985 since negative shift counts are meaningless. */
1986 op1x = convert_to_mode (op1_mode, op1, 1);
1989 if (GET_MODE (op0) != VOIDmode
1990 && GET_MODE (op0) != mode)
1991 op0 = convert_to_mode (mode, op0, unsignedp);
1993 /* Pass 1 for NO_QUEUE so we don't lose any increments
1994 if the libcall is cse'd or moved. */
1995 value = emit_library_call_value (binoptab->handlers[(int) mode].libfunc,
1996 NULL_RTX, LCT_CONST, mode, 2,
1997 op0, mode, op1x, op1_mode);
1999 insns = get_insns ();
2002 target = gen_reg_rtx (mode);
2003 emit_libcall_block (insns, target, value,
2004 gen_rtx_fmt_ee (binoptab->code, mode, op0, op1));
2009 delete_insns_since (last);
2011 /* It can't be done in this mode. Can we do it in a wider mode? */
2013 if (! (methods == OPTAB_WIDEN || methods == OPTAB_LIB_WIDEN
2014 || methods == OPTAB_MUST_WIDEN))
2016 /* Caller says, don't even try. */
2017 delete_insns_since (entry_last);
2021 /* Compute the value of METHODS to pass to recursive calls.
2022 Don't allow widening to be tried recursively. */
2024 methods = (methods == OPTAB_LIB_WIDEN ? OPTAB_LIB : OPTAB_DIRECT);
2026 /* Look for a wider mode of the same class for which it appears we can do
2029 if (CLASS_HAS_WIDER_MODES_P (class))
2031 for (wider_mode = GET_MODE_WIDER_MODE (mode);
2032 wider_mode != VOIDmode;
2033 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
2035 if ((binoptab->handlers[(int) wider_mode].insn_code
2036 != CODE_FOR_nothing)
2037 || (methods == OPTAB_LIB
2038 && binoptab->handlers[(int) wider_mode].libfunc))
2040 rtx xop0 = op0, xop1 = op1;
2043 /* For certain integer operations, we need not actually extend
2044 the narrow operands, as long as we will truncate
2045 the results to the same narrowness. */
2047 if ((binoptab == ior_optab || binoptab == and_optab
2048 || binoptab == xor_optab
2049 || binoptab == add_optab || binoptab == sub_optab
2050 || binoptab == smul_optab || binoptab == ashl_optab)
2051 && class == MODE_INT)
2054 xop0 = widen_operand (xop0, wider_mode, mode,
2055 unsignedp, no_extend);
2057 /* The second operand of a shift must always be extended. */
2058 xop1 = widen_operand (xop1, wider_mode, mode, unsignedp,
2059 no_extend && binoptab != ashl_optab);
2061 temp = expand_binop (wider_mode, binoptab, xop0, xop1, NULL_RTX,
2062 unsignedp, methods);
2065 if (class != MODE_INT
2066 || !TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode),
2067 GET_MODE_BITSIZE (wider_mode)))
2070 target = gen_reg_rtx (mode);
2071 convert_move (target, temp, 0);
2075 return gen_lowpart (mode, temp);
2078 delete_insns_since (last);
2083 delete_insns_since (entry_last);
2087 /* Expand a binary operator which has both signed and unsigned forms.
2088 UOPTAB is the optab for unsigned operations, and SOPTAB is for
2091 If we widen unsigned operands, we may use a signed wider operation instead
2092 of an unsigned wider operation, since the result would be the same. */
2095 sign_expand_binop (enum machine_mode mode, optab uoptab, optab soptab,
2096 rtx op0, rtx op1, rtx target, int unsignedp,
2097 enum optab_methods methods)
2100 optab direct_optab = unsignedp ? uoptab : soptab;
2101 struct optab wide_soptab;
2103 /* Do it without widening, if possible. */
2104 temp = expand_binop (mode, direct_optab, op0, op1, target,
2105 unsignedp, OPTAB_DIRECT);
2106 if (temp || methods == OPTAB_DIRECT)
2109 /* Try widening to a signed int. Make a fake signed optab that
2110 hides any signed insn for direct use. */
2111 wide_soptab = *soptab;
2112 wide_soptab.handlers[(int) mode].insn_code = CODE_FOR_nothing;
2113 wide_soptab.handlers[(int) mode].libfunc = 0;
2115 temp = expand_binop (mode, &wide_soptab, op0, op1, target,
2116 unsignedp, OPTAB_WIDEN);
2118 /* For unsigned operands, try widening to an unsigned int. */
2119 if (temp == 0 && unsignedp)
2120 temp = expand_binop (mode, uoptab, op0, op1, target,
2121 unsignedp, OPTAB_WIDEN);
2122 if (temp || methods == OPTAB_WIDEN)
2125 /* Use the right width lib call if that exists. */
2126 temp = expand_binop (mode, direct_optab, op0, op1, target, unsignedp, OPTAB_LIB);
2127 if (temp || methods == OPTAB_LIB)
2130 /* Must widen and use a lib call, use either signed or unsigned. */
2131 temp = expand_binop (mode, &wide_soptab, op0, op1, target,
2132 unsignedp, methods);
2136 return expand_binop (mode, uoptab, op0, op1, target,
2137 unsignedp, methods);
2141 /* Generate code to perform an operation specified by UNOPPTAB
2142 on operand OP0, with two results to TARG0 and TARG1.
2143 We assume that the order of the operands for the instruction
2144 is TARG0, TARG1, OP0.
2146 Either TARG0 or TARG1 may be zero, but what that means is that
2147 the result is not actually wanted. We will generate it into
2148 a dummy pseudo-reg and discard it. They may not both be zero.
2150 Returns 1 if this operation can be performed; 0 if not. */
2153 expand_twoval_unop (optab unoptab, rtx op0, rtx targ0, rtx targ1,
2156 enum machine_mode mode = GET_MODE (targ0 ? targ0 : targ1);
2157 enum mode_class class;
2158 enum machine_mode wider_mode;
2159 rtx entry_last = get_last_insn ();
2162 class = GET_MODE_CLASS (mode);
2165 targ0 = gen_reg_rtx (mode);
2167 targ1 = gen_reg_rtx (mode);
2169 /* Record where to go back to if we fail. */
2170 last = get_last_insn ();
2172 if (unoptab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
2174 int icode = (int) unoptab->handlers[(int) mode].insn_code;
2175 enum machine_mode mode0 = insn_data[icode].operand[2].mode;
2179 if (GET_MODE (xop0) != VOIDmode
2180 && GET_MODE (xop0) != mode0)
2181 xop0 = convert_to_mode (mode0, xop0, unsignedp);
2183 /* Now, if insn doesn't accept these operands, put them into pseudos. */
2184 if (!insn_data[icode].operand[2].predicate (xop0, mode0))
2185 xop0 = copy_to_mode_reg (mode0, xop0);
2187 /* We could handle this, but we should always be called with a pseudo
2188 for our targets and all insns should take them as outputs. */
2189 gcc_assert (insn_data[icode].operand[0].predicate (targ0, mode));
2190 gcc_assert (insn_data[icode].operand[1].predicate (targ1, mode));
2192 pat = GEN_FCN (icode) (targ0, targ1, xop0);
2199 delete_insns_since (last);
2202 /* It can't be done in this mode. Can we do it in a wider mode? */
2204 if (CLASS_HAS_WIDER_MODES_P (class))
2206 for (wider_mode = GET_MODE_WIDER_MODE (mode);
2207 wider_mode != VOIDmode;
2208 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
2210 if (unoptab->handlers[(int) wider_mode].insn_code
2211 != CODE_FOR_nothing)
2213 rtx t0 = gen_reg_rtx (wider_mode);
2214 rtx t1 = gen_reg_rtx (wider_mode);
2215 rtx cop0 = convert_modes (wider_mode, mode, op0, unsignedp);
2217 if (expand_twoval_unop (unoptab, cop0, t0, t1, unsignedp))
2219 convert_move (targ0, t0, unsignedp);
2220 convert_move (targ1, t1, unsignedp);
2224 delete_insns_since (last);
2229 delete_insns_since (entry_last);
2233 /* Generate code to perform an operation specified by BINOPTAB
2234 on operands OP0 and OP1, with two results to TARG1 and TARG2.
2235 We assume that the order of the operands for the instruction
2236 is TARG0, OP0, OP1, TARG1, which would fit a pattern like
2237 [(set TARG0 (operate OP0 OP1)) (set TARG1 (operate ...))].
2239 Either TARG0 or TARG1 may be zero, but what that means is that
2240 the result is not actually wanted. We will generate it into
2241 a dummy pseudo-reg and discard it. They may not both be zero.
2243 Returns 1 if this operation can be performed; 0 if not. */
2246 expand_twoval_binop (optab binoptab, rtx op0, rtx op1, rtx targ0, rtx targ1,
2249 enum machine_mode mode = GET_MODE (targ0 ? targ0 : targ1);
2250 enum mode_class class;
2251 enum machine_mode wider_mode;
2252 rtx entry_last = get_last_insn ();
2255 class = GET_MODE_CLASS (mode);
2257 /* If we are inside an appropriately-short loop and we are optimizing,
2258 force expensive constants into a register. */
2259 if (CONSTANT_P (op0) && optimize
2260 && rtx_cost (op0, binoptab->code) > COSTS_N_INSNS (1))
2261 op0 = force_reg (mode, op0);
2263 if (CONSTANT_P (op1) && optimize
2264 && rtx_cost (op1, binoptab->code) > COSTS_N_INSNS (1))
2265 op1 = force_reg (mode, op1);
2268 targ0 = gen_reg_rtx (mode);
2270 targ1 = gen_reg_rtx (mode);
2272 /* Record where to go back to if we fail. */
2273 last = get_last_insn ();
2275 if (binoptab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
2277 int icode = (int) binoptab->handlers[(int) mode].insn_code;
2278 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
2279 enum machine_mode mode1 = insn_data[icode].operand[2].mode;
2281 rtx xop0 = op0, xop1 = op1;
2283 /* In case the insn wants input operands in modes different from
2284 those of the actual operands, convert the operands. It would
2285 seem that we don't need to convert CONST_INTs, but we do, so
2286 that they're properly zero-extended, sign-extended or truncated
2289 if (GET_MODE (op0) != mode0 && mode0 != VOIDmode)
2290 xop0 = convert_modes (mode0,
2291 GET_MODE (op0) != VOIDmode
2296 if (GET_MODE (op1) != mode1 && mode1 != VOIDmode)
2297 xop1 = convert_modes (mode1,
2298 GET_MODE (op1) != VOIDmode
2303 /* Now, if insn doesn't accept these operands, put them into pseudos. */
2304 if (!insn_data[icode].operand[1].predicate (xop0, mode0))
2305 xop0 = copy_to_mode_reg (mode0, xop0);
2307 if (!insn_data[icode].operand[2].predicate (xop1, mode1))
2308 xop1 = copy_to_mode_reg (mode1, xop1);
2310 /* We could handle this, but we should always be called with a pseudo
2311 for our targets and all insns should take them as outputs. */
2312 gcc_assert (insn_data[icode].operand[0].predicate (targ0, mode));
2313 gcc_assert (insn_data[icode].operand[3].predicate (targ1, mode));
2315 pat = GEN_FCN (icode) (targ0, xop0, xop1, targ1);
2322 delete_insns_since (last);
2325 /* It can't be done in this mode. Can we do it in a wider mode? */
2327 if (CLASS_HAS_WIDER_MODES_P (class))
2329 for (wider_mode = GET_MODE_WIDER_MODE (mode);
2330 wider_mode != VOIDmode;
2331 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
2333 if (binoptab->handlers[(int) wider_mode].insn_code
2334 != CODE_FOR_nothing)
2336 rtx t0 = gen_reg_rtx (wider_mode);
2337 rtx t1 = gen_reg_rtx (wider_mode);
2338 rtx cop0 = convert_modes (wider_mode, mode, op0, unsignedp);
2339 rtx cop1 = convert_modes (wider_mode, mode, op1, unsignedp);
2341 if (expand_twoval_binop (binoptab, cop0, cop1,
2344 convert_move (targ0, t0, unsignedp);
2345 convert_move (targ1, t1, unsignedp);
2349 delete_insns_since (last);
2354 delete_insns_since (entry_last);
2358 /* Expand the two-valued library call indicated by BINOPTAB, but
2359 preserve only one of the values. If TARG0 is non-NULL, the first
2360 value is placed into TARG0; otherwise the second value is placed
2361 into TARG1. Exactly one of TARG0 and TARG1 must be non-NULL. The
2362 value stored into TARG0 or TARG1 is equivalent to (CODE OP0 OP1).
2363 This routine assumes that the value returned by the library call is
2364 as if the return value was of an integral mode twice as wide as the
2365 mode of OP0. Returns 1 if the call was successful. */
2368 expand_twoval_binop_libfunc (optab binoptab, rtx op0, rtx op1,
2369 rtx targ0, rtx targ1, enum rtx_code code)
2371 enum machine_mode mode;
2372 enum machine_mode libval_mode;
2376 /* Exactly one of TARG0 or TARG1 should be non-NULL. */
2377 gcc_assert (!targ0 != !targ1);
2379 mode = GET_MODE (op0);
2380 if (!binoptab->handlers[(int) mode].libfunc)
2383 /* The value returned by the library function will have twice as
2384 many bits as the nominal MODE. */
2385 libval_mode = smallest_mode_for_size (2 * GET_MODE_BITSIZE (mode),
2388 libval = emit_library_call_value (binoptab->handlers[(int) mode].libfunc,
2389 NULL_RTX, LCT_CONST,
2393 /* Get the part of VAL containing the value that we want. */
2394 libval = simplify_gen_subreg (mode, libval, libval_mode,
2395 targ0 ? 0 : GET_MODE_SIZE (mode));
2396 insns = get_insns ();
2398 /* Move the into the desired location. */
2399 emit_libcall_block (insns, targ0 ? targ0 : targ1, libval,
2400 gen_rtx_fmt_ee (code, mode, op0, op1));
2406 /* Wrapper around expand_unop which takes an rtx code to specify
2407 the operation to perform, not an optab pointer. All other
2408 arguments are the same. */
2410 expand_simple_unop (enum machine_mode mode, enum rtx_code code, rtx op0,
2411 rtx target, int unsignedp)
2413 optab unop = code_to_optab[(int) code];
2416 return expand_unop (mode, unop, op0, target, unsignedp);
2422 (clz:wide (zero_extend:wide x)) - ((width wide) - (width narrow)). */
2424 widen_clz (enum machine_mode mode, rtx op0, rtx target)
2426 enum mode_class class = GET_MODE_CLASS (mode);
2427 if (CLASS_HAS_WIDER_MODES_P (class))
2429 enum machine_mode wider_mode;
2430 for (wider_mode = GET_MODE_WIDER_MODE (mode);
2431 wider_mode != VOIDmode;
2432 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
2434 if (clz_optab->handlers[(int) wider_mode].insn_code
2435 != CODE_FOR_nothing)
2437 rtx xop0, temp, last;
2439 last = get_last_insn ();
2442 target = gen_reg_rtx (mode);
2443 xop0 = widen_operand (op0, wider_mode, mode, true, false);
2444 temp = expand_unop (wider_mode, clz_optab, xop0, NULL_RTX, true);
2446 temp = expand_binop (wider_mode, sub_optab, temp,
2447 GEN_INT (GET_MODE_BITSIZE (wider_mode)
2448 - GET_MODE_BITSIZE (mode)),
2449 target, true, OPTAB_DIRECT);
2451 delete_insns_since (last);
2463 (lshiftrt:wide (bswap:wide x) ((width wide) - (width narrow))). */
2465 widen_bswap (enum machine_mode mode, rtx op0, rtx target)
2467 enum mode_class class = GET_MODE_CLASS (mode);
2468 enum machine_mode wider_mode;
2471 if (!CLASS_HAS_WIDER_MODES_P (class))
2474 for (wider_mode = GET_MODE_WIDER_MODE (mode);
2475 wider_mode != VOIDmode;
2476 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
2477 if (bswap_optab->handlers[wider_mode].insn_code != CODE_FOR_nothing)
2482 last = get_last_insn ();
2484 x = widen_operand (op0, wider_mode, mode, true, true);
2485 x = expand_unop (wider_mode, bswap_optab, x, NULL_RTX, true);
2488 x = expand_shift (RSHIFT_EXPR, wider_mode, x,
2489 size_int (GET_MODE_BITSIZE (wider_mode)
2490 - GET_MODE_BITSIZE (mode)),
2496 target = gen_reg_rtx (mode);
2497 emit_move_insn (target, gen_lowpart (mode, x));
2500 delete_insns_since (last);
2505 /* Try calculating bswap as two bswaps of two word-sized operands. */
2508 expand_doubleword_bswap (enum machine_mode mode, rtx op, rtx target)
2512 t1 = expand_unop (word_mode, bswap_optab,
2513 operand_subword_force (op, 0, mode), NULL_RTX, true);
2514 t0 = expand_unop (word_mode, bswap_optab,
2515 operand_subword_force (op, 1, mode), NULL_RTX, true);
2518 target = gen_reg_rtx (mode);
2520 emit_insn (gen_rtx_CLOBBER (VOIDmode, target));
2521 emit_move_insn (operand_subword (target, 0, 1, mode), t0);
2522 emit_move_insn (operand_subword (target, 1, 1, mode), t1);
2527 /* Try calculating (parity x) as (and (popcount x) 1), where
2528 popcount can also be done in a wider mode. */
2530 expand_parity (enum machine_mode mode, rtx op0, rtx target)
2532 enum mode_class class = GET_MODE_CLASS (mode);
2533 if (CLASS_HAS_WIDER_MODES_P (class))
2535 enum machine_mode wider_mode;
2536 for (wider_mode = mode; wider_mode != VOIDmode;
2537 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
2539 if (popcount_optab->handlers[(int) wider_mode].insn_code
2540 != CODE_FOR_nothing)
2542 rtx xop0, temp, last;
2544 last = get_last_insn ();
2547 target = gen_reg_rtx (mode);
2548 xop0 = widen_operand (op0, wider_mode, mode, true, false);
2549 temp = expand_unop (wider_mode, popcount_optab, xop0, NULL_RTX,
2552 temp = expand_binop (wider_mode, and_optab, temp, const1_rtx,
2553 target, true, OPTAB_DIRECT);
2555 delete_insns_since (last);
2564 /* Extract the OMODE lowpart from VAL, which has IMODE. Under certain
2565 conditions, VAL may already be a SUBREG against which we cannot generate
2566 a further SUBREG. In this case, we expect forcing the value into a
2567 register will work around the situation. */
2570 lowpart_subreg_maybe_copy (enum machine_mode omode, rtx val,
2571 enum machine_mode imode)
2574 ret = lowpart_subreg (omode, val, imode);
2577 val = force_reg (imode, val);
2578 ret = lowpart_subreg (omode, val, imode);
2579 gcc_assert (ret != NULL);
2584 /* Expand a floating point absolute value or negation operation via a
2585 logical operation on the sign bit. */
2588 expand_absneg_bit (enum rtx_code code, enum machine_mode mode,
2589 rtx op0, rtx target)
2591 const struct real_format *fmt;
2592 int bitpos, word, nwords, i;
2593 enum machine_mode imode;
2594 HOST_WIDE_INT hi, lo;
2597 /* The format has to have a simple sign bit. */
2598 fmt = REAL_MODE_FORMAT (mode);
2602 bitpos = fmt->signbit_rw;
2606 /* Don't create negative zeros if the format doesn't support them. */
2607 if (code == NEG && !fmt->has_signed_zero)
2610 if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
2612 imode = int_mode_for_mode (mode);
2613 if (imode == BLKmode)
2622 if (FLOAT_WORDS_BIG_ENDIAN)
2623 word = (GET_MODE_BITSIZE (mode) - bitpos) / BITS_PER_WORD;
2625 word = bitpos / BITS_PER_WORD;
2626 bitpos = bitpos % BITS_PER_WORD;
2627 nwords = (GET_MODE_BITSIZE (mode) + BITS_PER_WORD - 1) / BITS_PER_WORD;
2630 if (bitpos < HOST_BITS_PER_WIDE_INT)
2633 lo = (HOST_WIDE_INT) 1 << bitpos;
2637 hi = (HOST_WIDE_INT) 1 << (bitpos - HOST_BITS_PER_WIDE_INT);
2643 if (target == 0 || target == op0)
2644 target = gen_reg_rtx (mode);
2650 for (i = 0; i < nwords; ++i)
2652 rtx targ_piece = operand_subword (target, i, 1, mode);
2653 rtx op0_piece = operand_subword_force (op0, i, mode);
2657 temp = expand_binop (imode, code == ABS ? and_optab : xor_optab,
2659 immed_double_const (lo, hi, imode),
2660 targ_piece, 1, OPTAB_LIB_WIDEN);
2661 if (temp != targ_piece)
2662 emit_move_insn (targ_piece, temp);
2665 emit_move_insn (targ_piece, op0_piece);
2668 insns = get_insns ();
2671 temp = gen_rtx_fmt_e (code, mode, copy_rtx (op0));
2672 emit_no_conflict_block (insns, target, op0, NULL_RTX, temp);
2676 temp = expand_binop (imode, code == ABS ? and_optab : xor_optab,
2677 gen_lowpart (imode, op0),
2678 immed_double_const (lo, hi, imode),
2679 gen_lowpart (imode, target), 1, OPTAB_LIB_WIDEN);
2680 target = lowpart_subreg_maybe_copy (mode, temp, imode);
2682 set_unique_reg_note (get_last_insn (), REG_EQUAL,
2683 gen_rtx_fmt_e (code, mode, copy_rtx (op0)));
2689 /* Generate code to perform an operation specified by UNOPTAB
2690 on operand OP0, with result having machine-mode MODE.
2692 UNSIGNEDP is for the case where we have to widen the operands
2693 to perform the operation. It says to use zero-extension.
2695 If TARGET is nonzero, the value
2696 is generated there, if it is convenient to do so.
2697 In all cases an rtx is returned for the locus of the value;
2698 this may or may not be TARGET. */
2701 expand_unop (enum machine_mode mode, optab unoptab, rtx op0, rtx target,
2704 enum mode_class class;
2705 enum machine_mode wider_mode;
2707 rtx last = get_last_insn ();
2710 class = GET_MODE_CLASS (mode);
2712 if (unoptab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
2714 int icode = (int) unoptab->handlers[(int) mode].insn_code;
2715 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
2721 temp = gen_reg_rtx (mode);
2723 if (GET_MODE (xop0) != VOIDmode
2724 && GET_MODE (xop0) != mode0)
2725 xop0 = convert_to_mode (mode0, xop0, unsignedp);
2727 /* Now, if insn doesn't accept our operand, put it into a pseudo. */
2729 if (!insn_data[icode].operand[1].predicate (xop0, mode0))
2730 xop0 = copy_to_mode_reg (mode0, xop0);
2732 if (!insn_data[icode].operand[0].predicate (temp, mode))
2733 temp = gen_reg_rtx (mode);
2735 pat = GEN_FCN (icode) (temp, xop0);
2738 if (INSN_P (pat) && NEXT_INSN (pat) != NULL_RTX
2739 && ! add_equal_note (pat, temp, unoptab->code, xop0, NULL_RTX))
2741 delete_insns_since (last);
2742 return expand_unop (mode, unoptab, op0, NULL_RTX, unsignedp);
2750 delete_insns_since (last);
2753 /* It can't be done in this mode. Can we open-code it in a wider mode? */
2755 /* Widening clz needs special treatment. */
2756 if (unoptab == clz_optab)
2758 temp = widen_clz (mode, op0, target);
2765 /* Widening (or narrowing) bswap needs special treatment. */
2766 if (unoptab == bswap_optab)
2768 temp = widen_bswap (mode, op0, target);
2772 if (GET_MODE_SIZE (mode) == 2 * UNITS_PER_WORD
2773 && unoptab->handlers[word_mode].insn_code != CODE_FOR_nothing)
2775 temp = expand_doubleword_bswap (mode, op0, target);
2783 if (CLASS_HAS_WIDER_MODES_P (class))
2784 for (wider_mode = GET_MODE_WIDER_MODE (mode);
2785 wider_mode != VOIDmode;
2786 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
2788 if (unoptab->handlers[(int) wider_mode].insn_code != CODE_FOR_nothing)
2792 /* For certain operations, we need not actually extend
2793 the narrow operand, as long as we will truncate the
2794 results to the same narrowness. */
2796 xop0 = widen_operand (xop0, wider_mode, mode, unsignedp,
2797 (unoptab == neg_optab
2798 || unoptab == one_cmpl_optab)
2799 && class == MODE_INT);
2801 temp = expand_unop (wider_mode, unoptab, xop0, NULL_RTX,
2806 if (class != MODE_INT
2807 || !TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode),
2808 GET_MODE_BITSIZE (wider_mode)))
2811 target = gen_reg_rtx (mode);
2812 convert_move (target, temp, 0);
2816 return gen_lowpart (mode, temp);
2819 delete_insns_since (last);
2823 /* These can be done a word at a time. */
2824 if (unoptab == one_cmpl_optab
2825 && class == MODE_INT
2826 && GET_MODE_SIZE (mode) > UNITS_PER_WORD
2827 && unoptab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing)
2832 if (target == 0 || target == op0)
2833 target = gen_reg_rtx (mode);
2837 /* Do the actual arithmetic. */
2838 for (i = 0; i < GET_MODE_BITSIZE (mode) / BITS_PER_WORD; i++)
2840 rtx target_piece = operand_subword (target, i, 1, mode);
2841 rtx x = expand_unop (word_mode, unoptab,
2842 operand_subword_force (op0, i, mode),
2843 target_piece, unsignedp);
2845 if (target_piece != x)
2846 emit_move_insn (target_piece, x);
2849 insns = get_insns ();
2852 emit_no_conflict_block (insns, target, op0, NULL_RTX,
2853 gen_rtx_fmt_e (unoptab->code, mode,
2858 if (unoptab->code == NEG)
2860 /* Try negating floating point values by flipping the sign bit. */
2861 if (SCALAR_FLOAT_MODE_P (mode))
2863 temp = expand_absneg_bit (NEG, mode, op0, target);
2868 /* If there is no negation pattern, and we have no negative zero,
2869 try subtracting from zero. */
2870 if (!HONOR_SIGNED_ZEROS (mode))
2872 temp = expand_binop (mode, (unoptab == negv_optab
2873 ? subv_optab : sub_optab),
2874 CONST0_RTX (mode), op0, target,
2875 unsignedp, OPTAB_DIRECT);
2881 /* Try calculating parity (x) as popcount (x) % 2. */
2882 if (unoptab == parity_optab)
2884 temp = expand_parity (mode, op0, target);
2890 /* Now try a library call in this mode. */
2891 if (unoptab->handlers[(int) mode].libfunc)
2895 enum machine_mode outmode = mode;
2897 /* All of these functions return small values. Thus we choose to
2898 have them return something that isn't a double-word. */
2899 if (unoptab == ffs_optab || unoptab == clz_optab || unoptab == ctz_optab
2900 || unoptab == popcount_optab || unoptab == parity_optab)
2902 = GET_MODE (hard_libcall_value (TYPE_MODE (integer_type_node)));
2906 /* Pass 1 for NO_QUEUE so we don't lose any increments
2907 if the libcall is cse'd or moved. */
2908 value = emit_library_call_value (unoptab->handlers[(int) mode].libfunc,
2909 NULL_RTX, LCT_CONST, outmode,
2911 insns = get_insns ();
2914 target = gen_reg_rtx (outmode);
2915 emit_libcall_block (insns, target, value,
2916 gen_rtx_fmt_e (unoptab->code, outmode, op0));
2921 /* It can't be done in this mode. Can we do it in a wider mode? */
2923 if (CLASS_HAS_WIDER_MODES_P (class))
2925 for (wider_mode = GET_MODE_WIDER_MODE (mode);
2926 wider_mode != VOIDmode;
2927 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
2929 if ((unoptab->handlers[(int) wider_mode].insn_code
2930 != CODE_FOR_nothing)
2931 || unoptab->handlers[(int) wider_mode].libfunc)
2935 /* For certain operations, we need not actually extend
2936 the narrow operand, as long as we will truncate the
2937 results to the same narrowness. */
2939 xop0 = widen_operand (xop0, wider_mode, mode, unsignedp,
2940 (unoptab == neg_optab
2941 || unoptab == one_cmpl_optab)
2942 && class == MODE_INT);
2944 temp = expand_unop (wider_mode, unoptab, xop0, NULL_RTX,
2947 /* If we are generating clz using wider mode, adjust the
2949 if (unoptab == clz_optab && temp != 0)
2950 temp = expand_binop (wider_mode, sub_optab, temp,
2951 GEN_INT (GET_MODE_BITSIZE (wider_mode)
2952 - GET_MODE_BITSIZE (mode)),
2953 target, true, OPTAB_DIRECT);
2957 if (class != MODE_INT)
2960 target = gen_reg_rtx (mode);
2961 convert_move (target, temp, 0);
2965 return gen_lowpart (mode, temp);
2968 delete_insns_since (last);
2973 /* One final attempt at implementing negation via subtraction,
2974 this time allowing widening of the operand. */
2975 if (unoptab->code == NEG && !HONOR_SIGNED_ZEROS (mode))
2978 temp = expand_binop (mode,
2979 unoptab == negv_optab ? subv_optab : sub_optab,
2980 CONST0_RTX (mode), op0,
2981 target, unsignedp, OPTAB_LIB_WIDEN);
2989 /* Emit code to compute the absolute value of OP0, with result to
2990 TARGET if convenient. (TARGET may be 0.) The return value says
2991 where the result actually is to be found.
2993 MODE is the mode of the operand; the mode of the result is
2994 different but can be deduced from MODE.
2999 expand_abs_nojump (enum machine_mode mode, rtx op0, rtx target,
3000 int result_unsignedp)
3005 result_unsignedp = 1;
3007 /* First try to do it with a special abs instruction. */
3008 temp = expand_unop (mode, result_unsignedp ? abs_optab : absv_optab,
3013 /* For floating point modes, try clearing the sign bit. */
3014 if (SCALAR_FLOAT_MODE_P (mode))
3016 temp = expand_absneg_bit (ABS, mode, op0, target);
3021 /* If we have a MAX insn, we can do this as MAX (x, -x). */
3022 if (smax_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing
3023 && !HONOR_SIGNED_ZEROS (mode))
3025 rtx last = get_last_insn ();
3027 temp = expand_unop (mode, neg_optab, op0, NULL_RTX, 0);
3029 temp = expand_binop (mode, smax_optab, op0, temp, target, 0,
3035 delete_insns_since (last);
3038 /* If this machine has expensive jumps, we can do integer absolute
3039 value of X as (((signed) x >> (W-1)) ^ x) - ((signed) x >> (W-1)),
3040 where W is the width of MODE. */
3042 if (GET_MODE_CLASS (mode) == MODE_INT && BRANCH_COST >= 2)
3044 rtx extended = expand_shift (RSHIFT_EXPR, mode, op0,
3045 size_int (GET_MODE_BITSIZE (mode) - 1),
3048 temp = expand_binop (mode, xor_optab, extended, op0, target, 0,
3051 temp = expand_binop (mode, result_unsignedp ? sub_optab : subv_optab,
3052 temp, extended, target, 0, OPTAB_LIB_WIDEN);
3062 expand_abs (enum machine_mode mode, rtx op0, rtx target,
3063 int result_unsignedp, int safe)
3068 result_unsignedp = 1;
3070 temp = expand_abs_nojump (mode, op0, target, result_unsignedp);
3074 /* If that does not win, use conditional jump and negate. */
3076 /* It is safe to use the target if it is the same
3077 as the source if this is also a pseudo register */
3078 if (op0 == target && REG_P (op0)
3079 && REGNO (op0) >= FIRST_PSEUDO_REGISTER)
3082 op1 = gen_label_rtx ();
3083 if (target == 0 || ! safe
3084 || GET_MODE (target) != mode
3085 || (MEM_P (target) && MEM_VOLATILE_P (target))
3087 && REGNO (target) < FIRST_PSEUDO_REGISTER))
3088 target = gen_reg_rtx (mode);
3090 emit_move_insn (target, op0);
3093 do_compare_rtx_and_jump (target, CONST0_RTX (mode), GE, 0, mode,
3094 NULL_RTX, NULL_RTX, op1);
3096 op0 = expand_unop (mode, result_unsignedp ? neg_optab : negv_optab,
3099 emit_move_insn (target, op0);
3105 /* A subroutine of expand_copysign, perform the copysign operation using the
3106 abs and neg primitives advertised to exist on the target. The assumption
3107 is that we have a split register file, and leaving op0 in fp registers,
3108 and not playing with subregs so much, will help the register allocator. */
3111 expand_copysign_absneg (enum machine_mode mode, rtx op0, rtx op1, rtx target,
3112 int bitpos, bool op0_is_abs)
3114 enum machine_mode imode;
3115 HOST_WIDE_INT hi, lo;
3124 op0 = expand_unop (mode, abs_optab, op0, target, 0);
3131 if (target == NULL_RTX)
3132 target = copy_to_reg (op0);
3134 emit_move_insn (target, op0);
3137 if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
3139 imode = int_mode_for_mode (mode);
3140 if (imode == BLKmode)
3142 op1 = gen_lowpart (imode, op1);
3147 if (FLOAT_WORDS_BIG_ENDIAN)
3148 word = (GET_MODE_BITSIZE (mode) - bitpos) / BITS_PER_WORD;
3150 word = bitpos / BITS_PER_WORD;
3151 bitpos = bitpos % BITS_PER_WORD;
3152 op1 = operand_subword_force (op1, word, mode);
3155 if (bitpos < HOST_BITS_PER_WIDE_INT)
3158 lo = (HOST_WIDE_INT) 1 << bitpos;
3162 hi = (HOST_WIDE_INT) 1 << (bitpos - HOST_BITS_PER_WIDE_INT);
3166 op1 = expand_binop (imode, and_optab, op1,
3167 immed_double_const (lo, hi, imode),
3168 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3170 label = gen_label_rtx ();
3171 emit_cmp_and_jump_insns (op1, const0_rtx, EQ, NULL_RTX, imode, 1, label);
3173 if (GET_CODE (op0) == CONST_DOUBLE)
3174 op0 = simplify_unary_operation (NEG, mode, op0, mode);
3176 op0 = expand_unop (mode, neg_optab, op0, target, 0);
3178 emit_move_insn (target, op0);
3186 /* A subroutine of expand_copysign, perform the entire copysign operation
3187 with integer bitmasks. BITPOS is the position of the sign bit; OP0_IS_ABS
3188 is true if op0 is known to have its sign bit clear. */
3191 expand_copysign_bit (enum machine_mode mode, rtx op0, rtx op1, rtx target,
3192 int bitpos, bool op0_is_abs)
3194 enum machine_mode imode;
3195 HOST_WIDE_INT hi, lo;
3196 int word, nwords, i;
3199 if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
3201 imode = int_mode_for_mode (mode);
3202 if (imode == BLKmode)
3211 if (FLOAT_WORDS_BIG_ENDIAN)
3212 word = (GET_MODE_BITSIZE (mode) - bitpos) / BITS_PER_WORD;
3214 word = bitpos / BITS_PER_WORD;
3215 bitpos = bitpos % BITS_PER_WORD;
3216 nwords = (GET_MODE_BITSIZE (mode) + BITS_PER_WORD - 1) / BITS_PER_WORD;
3219 if (bitpos < HOST_BITS_PER_WIDE_INT)
3222 lo = (HOST_WIDE_INT) 1 << bitpos;
3226 hi = (HOST_WIDE_INT) 1 << (bitpos - HOST_BITS_PER_WIDE_INT);
3230 if (target == 0 || target == op0 || target == op1)
3231 target = gen_reg_rtx (mode);
3237 for (i = 0; i < nwords; ++i)
3239 rtx targ_piece = operand_subword (target, i, 1, mode);
3240 rtx op0_piece = operand_subword_force (op0, i, mode);
3245 op0_piece = expand_binop (imode, and_optab, op0_piece,
3246 immed_double_const (~lo, ~hi, imode),
3247 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3249 op1 = expand_binop (imode, and_optab,
3250 operand_subword_force (op1, i, mode),
3251 immed_double_const (lo, hi, imode),
3252 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3254 temp = expand_binop (imode, ior_optab, op0_piece, op1,
3255 targ_piece, 1, OPTAB_LIB_WIDEN);
3256 if (temp != targ_piece)
3257 emit_move_insn (targ_piece, temp);
3260 emit_move_insn (targ_piece, op0_piece);
3263 insns = get_insns ();
3266 emit_no_conflict_block (insns, target, op0, op1, NULL_RTX);
3270 op1 = expand_binop (imode, and_optab, gen_lowpart (imode, op1),
3271 immed_double_const (lo, hi, imode),
3272 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3274 op0 = gen_lowpart (imode, op0);
3276 op0 = expand_binop (imode, and_optab, op0,
3277 immed_double_const (~lo, ~hi, imode),
3278 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3280 temp = expand_binop (imode, ior_optab, op0, op1,
3281 gen_lowpart (imode, target), 1, OPTAB_LIB_WIDEN);
3282 target = lowpart_subreg_maybe_copy (mode, temp, imode);
3288 /* Expand the C99 copysign operation. OP0 and OP1 must be the same
3289 scalar floating point mode. Return NULL if we do not know how to
3290 expand the operation inline. */
3293 expand_copysign (rtx op0, rtx op1, rtx target)
3295 enum machine_mode mode = GET_MODE (op0);
3296 const struct real_format *fmt;
3300 gcc_assert (SCALAR_FLOAT_MODE_P (mode));
3301 gcc_assert (GET_MODE (op1) == mode);
3303 /* First try to do it with a special instruction. */
3304 temp = expand_binop (mode, copysign_optab, op0, op1,
3305 target, 0, OPTAB_DIRECT);
3309 fmt = REAL_MODE_FORMAT (mode);
3310 if (fmt == NULL || !fmt->has_signed_zero)
3314 if (GET_CODE (op0) == CONST_DOUBLE)
3316 if (real_isneg (CONST_DOUBLE_REAL_VALUE (op0)))
3317 op0 = simplify_unary_operation (ABS, mode, op0, mode);
3321 if (fmt->signbit_ro >= 0
3322 && (GET_CODE (op0) == CONST_DOUBLE
3323 || (neg_optab->handlers[mode].insn_code != CODE_FOR_nothing
3324 && abs_optab->handlers[mode].insn_code != CODE_FOR_nothing)))
3326 temp = expand_copysign_absneg (mode, op0, op1, target,
3327 fmt->signbit_ro, op0_is_abs);
3332 if (fmt->signbit_rw < 0)
3334 return expand_copysign_bit (mode, op0, op1, target,
3335 fmt->signbit_rw, op0_is_abs);
3338 /* Generate an instruction whose insn-code is INSN_CODE,
3339 with two operands: an output TARGET and an input OP0.
3340 TARGET *must* be nonzero, and the output is always stored there.
3341 CODE is an rtx code such that (CODE OP0) is an rtx that describes
3342 the value that is stored into TARGET. */
3345 emit_unop_insn (int icode, rtx target, rtx op0, enum rtx_code code)
3348 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
3353 /* Now, if insn does not accept our operands, put them into pseudos. */
3355 if (!insn_data[icode].operand[1].predicate (op0, mode0))
3356 op0 = copy_to_mode_reg (mode0, op0);
3358 if (!insn_data[icode].operand[0].predicate (temp, GET_MODE (temp)))
3359 temp = gen_reg_rtx (GET_MODE (temp));
3361 pat = GEN_FCN (icode) (temp, op0);
3363 if (INSN_P (pat) && NEXT_INSN (pat) != NULL_RTX && code != UNKNOWN)
3364 add_equal_note (pat, temp, code, op0, NULL_RTX);
3369 emit_move_insn (target, temp);
3372 struct no_conflict_data
3374 rtx target, first, insn;
3378 /* Called via note_stores by emit_no_conflict_block and emit_libcall_block.
3379 Set P->must_stay if the currently examined clobber / store has to stay
3380 in the list of insns that constitute the actual no_conflict block /
3383 no_conflict_move_test (rtx dest, rtx set, void *p0)
3385 struct no_conflict_data *p= p0;
3387 /* If this inns directly contributes to setting the target, it must stay. */
3388 if (reg_overlap_mentioned_p (p->target, dest))
3389 p->must_stay = true;
3390 /* If we haven't committed to keeping any other insns in the list yet,
3391 there is nothing more to check. */
3392 else if (p->insn == p->first)
3394 /* If this insn sets / clobbers a register that feeds one of the insns
3395 already in the list, this insn has to stay too. */
3396 else if (reg_overlap_mentioned_p (dest, PATTERN (p->first))
3397 || (CALL_P (p->first) && (find_reg_fusage (p->first, USE, dest)))
3398 || reg_used_between_p (dest, p->first, p->insn)
3399 /* Likewise if this insn depends on a register set by a previous
3400 insn in the list, or if it sets a result (presumably a hard
3401 register) that is set or clobbered by a previous insn.
3402 N.B. the modified_*_p (SET_DEST...) tests applied to a MEM
3403 SET_DEST perform the former check on the address, and the latter
3404 check on the MEM. */
3405 || (GET_CODE (set) == SET
3406 && (modified_in_p (SET_SRC (set), p->first)
3407 || modified_in_p (SET_DEST (set), p->first)
3408 || modified_between_p (SET_SRC (set), p->first, p->insn)
3409 || modified_between_p (SET_DEST (set), p->first, p->insn))))
3410 p->must_stay = true;
3413 /* Encapsulate the block starting at FIRST and ending with LAST, which is
3414 logically equivalent to EQUIV, so it gets manipulated as a unit if it
3415 is possible to do so. */
3418 maybe_encapsulate_block (rtx first, rtx last, rtx equiv)
3420 if (!flag_non_call_exceptions || !may_trap_p (equiv))
3422 /* We can't attach the REG_LIBCALL and REG_RETVAL notes when the
3423 encapsulated region would not be in one basic block, i.e. when
3424 there is a control_flow_insn_p insn between FIRST and LAST. */
3425 bool attach_libcall_retval_notes = true;
3426 rtx insn, next = NEXT_INSN (last);
3428 for (insn = first; insn != next; insn = NEXT_INSN (insn))
3429 if (control_flow_insn_p (insn))
3431 attach_libcall_retval_notes = false;
3435 if (attach_libcall_retval_notes)
3437 REG_NOTES (first) = gen_rtx_INSN_LIST (REG_LIBCALL, last,
3439 REG_NOTES (last) = gen_rtx_INSN_LIST (REG_RETVAL, first,
3441 next = NEXT_INSN (last);
3442 for (insn = first; insn != next; insn = NEXT_INSN (insn))
3443 REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_LIBCALL_ID,
3444 GEN_INT (libcall_id),
3451 /* Emit code to perform a series of operations on a multi-word quantity, one
3454 Such a block is preceded by a CLOBBER of the output, consists of multiple
3455 insns, each setting one word of the output, and followed by a SET copying
3456 the output to itself.
3458 Each of the insns setting words of the output receives a REG_NO_CONFLICT
3459 note indicating that it doesn't conflict with the (also multi-word)
3460 inputs. The entire block is surrounded by REG_LIBCALL and REG_RETVAL
3463 INSNS is a block of code generated to perform the operation, not including
3464 the CLOBBER and final copy. All insns that compute intermediate values
3465 are first emitted, followed by the block as described above.
3467 TARGET, OP0, and OP1 are the output and inputs of the operations,
3468 respectively. OP1 may be zero for a unary operation.
3470 EQUIV, if nonzero, is an expression to be placed into a REG_EQUAL note
3473 If TARGET is not a register, INSNS is simply emitted with no special
3474 processing. Likewise if anything in INSNS is not an INSN or if
3475 there is a libcall block inside INSNS.
3477 The final insn emitted is returned. */
3480 emit_no_conflict_block (rtx insns, rtx target, rtx op0, rtx op1, rtx equiv)
3482 rtx prev, next, first, last, insn;
3484 if (!REG_P (target) || reload_in_progress)
3485 return emit_insn (insns);
3487 for (insn = insns; insn; insn = NEXT_INSN (insn))
3488 if (!NONJUMP_INSN_P (insn)
3489 || find_reg_note (insn, REG_LIBCALL, NULL_RTX))
3490 return emit_insn (insns);
3492 /* First emit all insns that do not store into words of the output and remove
3493 these from the list. */
3494 for (insn = insns; insn; insn = next)
3497 struct no_conflict_data data;
3499 next = NEXT_INSN (insn);
3501 /* Some ports (cris) create a libcall regions at their own. We must
3502 avoid any potential nesting of LIBCALLs. */
3503 if ((note = find_reg_note (insn, REG_LIBCALL, NULL)) != NULL)
3504 remove_note (insn, note);
3505 if ((note = find_reg_note (insn, REG_RETVAL, NULL)) != NULL)
3506 remove_note (insn, note);
3507 if ((note = find_reg_note (insn, REG_LIBCALL_ID, NULL)) != NULL)
3508 remove_note (insn, note);
3510 data.target = target;
3514 note_stores (PATTERN (insn), no_conflict_move_test, &data);
3515 if (! data.must_stay)
3517 if (PREV_INSN (insn))
3518 NEXT_INSN (PREV_INSN (insn)) = next;
3523 PREV_INSN (next) = PREV_INSN (insn);
3529 prev = get_last_insn ();
3531 /* Now write the CLOBBER of the output, followed by the setting of each
3532 of the words, followed by the final copy. */
3533 if (target != op0 && target != op1)
3534 emit_insn (gen_rtx_CLOBBER (VOIDmode, target));
3536 for (insn = insns; insn; insn = next)
3538 next = NEXT_INSN (insn);
3541 if (op1 && REG_P (op1))
3542 REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_NO_CONFLICT, op1,
3545 if (op0 && REG_P (op0))
3546 REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_NO_CONFLICT, op0,
3550 if (mov_optab->handlers[(int) GET_MODE (target)].insn_code
3551 != CODE_FOR_nothing)
3553 last = emit_move_insn (target, target);
3555 set_unique_reg_note (last, REG_EQUAL, equiv);
3559 last = get_last_insn ();
3561 /* Remove any existing REG_EQUAL note from "last", or else it will
3562 be mistaken for a note referring to the full contents of the
3563 alleged libcall value when found together with the REG_RETVAL
3564 note added below. An existing note can come from an insn
3565 expansion at "last". */
3566 remove_note (last, find_reg_note (last, REG_EQUAL, NULL_RTX));
3570 first = get_insns ();
3572 first = NEXT_INSN (prev);
3574 maybe_encapsulate_block (first, last, equiv);
3579 /* Emit code to make a call to a constant function or a library call.
3581 INSNS is a list containing all insns emitted in the call.
3582 These insns leave the result in RESULT. Our block is to copy RESULT
3583 to TARGET, which is logically equivalent to EQUIV.
3585 We first emit any insns that set a pseudo on the assumption that these are
3586 loading constants into registers; doing so allows them to be safely cse'ed
3587 between blocks. Then we emit all the other insns in the block, followed by
3588 an insn to move RESULT to TARGET. This last insn will have a REQ_EQUAL
3589 note with an operand of EQUIV.
3591 Moving assignments to pseudos outside of the block is done to improve
3592 the generated code, but is not required to generate correct code,
3593 hence being unable to move an assignment is not grounds for not making
3594 a libcall block. There are two reasons why it is safe to leave these
3595 insns inside the block: First, we know that these pseudos cannot be
3596 used in generated RTL outside the block since they are created for
3597 temporary purposes within the block. Second, CSE will not record the
3598 values of anything set inside a libcall block, so we know they must
3599 be dead at the end of the block.
3601 Except for the first group of insns (the ones setting pseudos), the
3602 block is delimited by REG_RETVAL and REG_LIBCALL notes. */
3604 emit_libcall_block (rtx insns, rtx target, rtx result, rtx equiv)
3606 rtx final_dest = target;
3607 rtx prev, next, first, last, insn;
3609 /* If this is a reg with REG_USERVAR_P set, then it could possibly turn
3610 into a MEM later. Protect the libcall block from this change. */
3611 if (! REG_P (target) || REG_USERVAR_P (target))
3612 target = gen_reg_rtx (GET_MODE (target));
3614 /* If we're using non-call exceptions, a libcall corresponding to an
3615 operation that may trap may also trap. */
3616 if (flag_non_call_exceptions && may_trap_p (equiv))
3618 for (insn = insns; insn; insn = NEXT_INSN (insn))
3621 rtx note = find_reg_note (insn, REG_EH_REGION, NULL_RTX);
3623 if (note != 0 && INTVAL (XEXP (note, 0)) <= 0)
3624 remove_note (insn, note);
3628 /* look for any CALL_INSNs in this sequence, and attach a REG_EH_REGION
3629 reg note to indicate that this call cannot throw or execute a nonlocal
3630 goto (unless there is already a REG_EH_REGION note, in which case
3632 for (insn = insns; insn; insn = NEXT_INSN (insn))
3635 rtx note = find_reg_note (insn, REG_EH_REGION, NULL_RTX);
3638 XEXP (note, 0) = constm1_rtx;
3640 REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_EH_REGION, constm1_rtx,
3644 /* First emit all insns that set pseudos. Remove them from the list as
3645 we go. Avoid insns that set pseudos which were referenced in previous
3646 insns. These can be generated by move_by_pieces, for example,
3647 to update an address. Similarly, avoid insns that reference things
3648 set in previous insns. */
3650 for (insn = insns; insn; insn = next)
3652 rtx set = single_set (insn);
3655 /* Some ports (cris) create a libcall regions at their own. We must
3656 avoid any potential nesting of LIBCALLs. */
3657 if ((note = find_reg_note (insn, REG_LIBCALL, NULL)) != NULL)
3658 remove_note (insn, note);
3659 if ((note = find_reg_note (insn, REG_RETVAL, NULL)) != NULL)
3660 remove_note (insn, note);
3661 if ((note = find_reg_note (insn, REG_LIBCALL_ID, NULL)) != NULL)
3662 remove_note (insn, note);
3664 next = NEXT_INSN (insn);
3666 if (set != 0 && REG_P (SET_DEST (set))
3667 && REGNO (SET_DEST (set)) >= FIRST_PSEUDO_REGISTER)
3669 struct no_conflict_data data;
3671 data.target = const0_rtx;
3675 note_stores (PATTERN (insn), no_conflict_move_test, &data);
3676 if (! data.must_stay)
3678 if (PREV_INSN (insn))
3679 NEXT_INSN (PREV_INSN (insn)) = next;
3684 PREV_INSN (next) = PREV_INSN (insn);
3690 /* Some ports use a loop to copy large arguments onto the stack.
3691 Don't move anything outside such a loop. */
3696 prev = get_last_insn ();
3698 /* Write the remaining insns followed by the final copy. */
3700 for (insn = insns; insn; insn = next)
3702 next = NEXT_INSN (insn);
3707 last = emit_move_insn (target, result);
3708 if (mov_optab->handlers[(int) GET_MODE (target)].insn_code
3709 != CODE_FOR_nothing)
3710 set_unique_reg_note (last, REG_EQUAL, copy_rtx (equiv));
3713 /* Remove any existing REG_EQUAL note from "last", or else it will
3714 be mistaken for a note referring to the full contents of the
3715 libcall value when found together with the REG_RETVAL note added
3716 below. An existing note can come from an insn expansion at
3718 remove_note (last, find_reg_note (last, REG_EQUAL, NULL_RTX));
3721 if (final_dest != target)
3722 emit_move_insn (final_dest, target);
3725 first = get_insns ();
3727 first = NEXT_INSN (prev);
3729 maybe_encapsulate_block (first, last, equiv);
3732 /* Nonzero if we can perform a comparison of mode MODE straightforwardly.
3733 PURPOSE describes how this comparison will be used. CODE is the rtx
3734 comparison code we will be using.
3736 ??? Actually, CODE is slightly weaker than that. A target is still
3737 required to implement all of the normal bcc operations, but not
3738 required to implement all (or any) of the unordered bcc operations. */
3741 can_compare_p (enum rtx_code code, enum machine_mode mode,
3742 enum can_compare_purpose purpose)
3746 if (cmp_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
3748 if (purpose == ccp_jump)
3749 return bcc_gen_fctn[(int) code] != NULL;
3750 else if (purpose == ccp_store_flag)
3751 return setcc_gen_code[(int) code] != CODE_FOR_nothing;
3753 /* There's only one cmov entry point, and it's allowed to fail. */
3756 if (purpose == ccp_jump
3757 && cbranch_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
3759 if (purpose == ccp_cmov
3760 && cmov_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
3762 if (purpose == ccp_store_flag
3763 && cstore_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
3765 mode = GET_MODE_WIDER_MODE (mode);
3767 while (mode != VOIDmode);
3772 /* This function is called when we are going to emit a compare instruction that
3773 compares the values found in *PX and *PY, using the rtl operator COMPARISON.
3775 *PMODE is the mode of the inputs (in case they are const_int).
3776 *PUNSIGNEDP nonzero says that the operands are unsigned;
3777 this matters if they need to be widened.
3779 If they have mode BLKmode, then SIZE specifies the size of both operands.
3781 This function performs all the setup necessary so that the caller only has
3782 to emit a single comparison insn. This setup can involve doing a BLKmode
3783 comparison or emitting a library call to perform the comparison if no insn
3784 is available to handle it.
3785 The values which are passed in through pointers can be modified; the caller
3786 should perform the comparison on the modified values. Constant
3787 comparisons must have already been folded. */
3790 prepare_cmp_insn (rtx *px, rtx *py, enum rtx_code *pcomparison, rtx size,
3791 enum machine_mode *pmode, int *punsignedp,
3792 enum can_compare_purpose purpose)
3794 enum machine_mode mode = *pmode;
3795 rtx x = *px, y = *py;
3796 int unsignedp = *punsignedp;
3798 /* If we are inside an appropriately-short loop and we are optimizing,
3799 force expensive constants into a register. */
3800 if (CONSTANT_P (x) && optimize
3801 && rtx_cost (x, COMPARE) > COSTS_N_INSNS (1))
3802 x = force_reg (mode, x);
3804 if (CONSTANT_P (y) && optimize
3805 && rtx_cost (y, COMPARE) > COSTS_N_INSNS (1))
3806 y = force_reg (mode, y);
3809 /* Make sure if we have a canonical comparison. The RTL
3810 documentation states that canonical comparisons are required only
3811 for targets which have cc0. */
3812 gcc_assert (!CONSTANT_P (x) || CONSTANT_P (y));
3815 /* Don't let both operands fail to indicate the mode. */
3816 if (GET_MODE (x) == VOIDmode && GET_MODE (y) == VOIDmode)
3817 x = force_reg (mode, x);
3819 /* Handle all BLKmode compares. */
3821 if (mode == BLKmode)
3823 enum machine_mode cmp_mode, result_mode;
3824 enum insn_code cmp_code;
3829 = GEN_INT (MIN (MEM_ALIGN (x), MEM_ALIGN (y)) / BITS_PER_UNIT);
3833 /* Try to use a memory block compare insn - either cmpstr
3834 or cmpmem will do. */
3835 for (cmp_mode = GET_CLASS_NARROWEST_MODE (MODE_INT);
3836 cmp_mode != VOIDmode;
3837 cmp_mode = GET_MODE_WIDER_MODE (cmp_mode))
3839 cmp_code = cmpmem_optab[cmp_mode];
3840 if (cmp_code == CODE_FOR_nothing)
3841 cmp_code = cmpstr_optab[cmp_mode];
3842 if (cmp_code == CODE_FOR_nothing)
3843 cmp_code = cmpstrn_optab[cmp_mode];
3844 if (cmp_code == CODE_FOR_nothing)
3847 /* Must make sure the size fits the insn's mode. */
3848 if ((GET_CODE (size) == CONST_INT
3849 && INTVAL (size) >= (1 << GET_MODE_BITSIZE (cmp_mode)))
3850 || (GET_MODE_BITSIZE (GET_MODE (size))
3851 > GET_MODE_BITSIZE (cmp_mode)))
3854 result_mode = insn_data[cmp_code].operand[0].mode;
3855 result = gen_reg_rtx (result_mode);
3856 size = convert_to_mode (cmp_mode, size, 1);
3857 emit_insn (GEN_FCN (cmp_code) (result, x, y, size, opalign));
3861 *pmode = result_mode;
3865 /* Otherwise call a library function, memcmp. */
3866 libfunc = memcmp_libfunc;
3867 length_type = sizetype;
3868 result_mode = TYPE_MODE (integer_type_node);
3869 cmp_mode = TYPE_MODE (length_type);
3870 size = convert_to_mode (TYPE_MODE (length_type), size,
3871 TYPE_UNSIGNED (length_type));
3873 result = emit_library_call_value (libfunc, 0, LCT_PURE_MAKE_BLOCK,
3880 *pmode = result_mode;
3884 /* Don't allow operands to the compare to trap, as that can put the
3885 compare and branch in different basic blocks. */
3886 if (flag_non_call_exceptions)
3889 x = force_reg (mode, x);
3891 y = force_reg (mode, y);
3896 if (can_compare_p (*pcomparison, mode, purpose))
3899 /* Handle a lib call just for the mode we are using. */
3901 if (cmp_optab->handlers[(int) mode].libfunc && !SCALAR_FLOAT_MODE_P (mode))
3903 rtx libfunc = cmp_optab->handlers[(int) mode].libfunc;
3906 /* If we want unsigned, and this mode has a distinct unsigned
3907 comparison routine, use that. */
3908 if (unsignedp && ucmp_optab->handlers[(int) mode].libfunc)
3909 libfunc = ucmp_optab->handlers[(int) mode].libfunc;
3911 result = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST_MAKE_BLOCK,
3912 targetm.libgcc_cmp_return_mode (),
3913 2, x, mode, y, mode);
3915 /* There are two kinds of comparison routines. Biased routines
3916 return 0/1/2, and unbiased routines return -1/0/1. Other parts
3917 of gcc expect that the comparison operation is equivalent
3918 to the modified comparison. For signed comparisons compare the
3919 result against 1 in the biased case, and zero in the unbiased
3920 case. For unsigned comparisons always compare against 1 after
3921 biasing the unbiased result by adding 1. This gives us a way to
3927 if (!TARGET_LIB_INT_CMP_BIASED)
3930 *px = plus_constant (result, 1);
3937 gcc_assert (SCALAR_FLOAT_MODE_P (mode));
3938 prepare_float_lib_cmp (px, py, pcomparison, pmode, punsignedp);
3941 /* Before emitting an insn with code ICODE, make sure that X, which is going
3942 to be used for operand OPNUM of the insn, is converted from mode MODE to
3943 WIDER_MODE (UNSIGNEDP determines whether it is an unsigned conversion), and
3944 that it is accepted by the operand predicate. Return the new value. */
3947 prepare_operand (int icode, rtx x, int opnum, enum machine_mode mode,
3948 enum machine_mode wider_mode, int unsignedp)
3950 if (mode != wider_mode)
3951 x = convert_modes (wider_mode, mode, x, unsignedp);
3953 if (!insn_data[icode].operand[opnum].predicate
3954 (x, insn_data[icode].operand[opnum].mode))
3958 x = copy_to_mode_reg (insn_data[icode].operand[opnum].mode, x);
3964 /* Subroutine of emit_cmp_and_jump_insns; this function is called when we know
3965 we can do the comparison.
3966 The arguments are the same as for emit_cmp_and_jump_insns; but LABEL may
3967 be NULL_RTX which indicates that only a comparison is to be generated. */
3970 emit_cmp_and_jump_insn_1 (rtx x, rtx y, enum machine_mode mode,
3971 enum rtx_code comparison, int unsignedp, rtx label)
3973 rtx test = gen_rtx_fmt_ee (comparison, mode, x, y);
3974 enum mode_class class = GET_MODE_CLASS (mode);
3975 enum machine_mode wider_mode = mode;
3977 /* Try combined insns first. */
3980 enum insn_code icode;
3981 PUT_MODE (test, wider_mode);
3985 icode = cbranch_optab->handlers[(int) wider_mode].insn_code;
3987 if (icode != CODE_FOR_nothing
3988 && insn_data[icode].operand[0].predicate (test, wider_mode))
3990 x = prepare_operand (icode, x, 1, mode, wider_mode, unsignedp);
3991 y = prepare_operand (icode, y, 2, mode, wider_mode, unsignedp);
3992 emit_jump_insn (GEN_FCN (icode) (test, x, y, label));
3997 /* Handle some compares against zero. */
3998 icode = (int) tst_optab->handlers[(int) wider_mode].insn_code;
3999 if (y == CONST0_RTX (mode) && icode != CODE_FOR_nothing)
4001 x = prepare_operand (icode, x, 0, mode, wider_mode, unsignedp);
4002 emit_insn (GEN_FCN (icode) (x));
4004 emit_jump_insn (bcc_gen_fctn[(int) comparison] (label));
4008 /* Handle compares for which there is a directly suitable insn. */
4010 icode = (int) cmp_optab->handlers[(int) wider_mode].insn_code;
4011 if (icode != CODE_FOR_nothing)
4013 x = prepare_operand (icode, x, 0, mode, wider_mode, unsignedp);
4014 y = prepare_operand (icode, y, 1, mode, wider_mode, unsignedp);
4015 emit_insn (GEN_FCN (icode) (x, y));
4017 emit_jump_insn (bcc_gen_fctn[(int) comparison] (label));
4021 if (!CLASS_HAS_WIDER_MODES_P (class))
4024 wider_mode = GET_MODE_WIDER_MODE (wider_mode);
4026 while (wider_mode != VOIDmode);
4031 /* Generate code to compare X with Y so that the condition codes are
4032 set and to jump to LABEL if the condition is true. If X is a
4033 constant and Y is not a constant, then the comparison is swapped to
4034 ensure that the comparison RTL has the canonical form.
4036 UNSIGNEDP nonzero says that X and Y are unsigned; this matters if they
4037 need to be widened by emit_cmp_insn. UNSIGNEDP is also used to select
4038 the proper branch condition code.
4040 If X and Y have mode BLKmode, then SIZE specifies the size of both X and Y.
4042 MODE is the mode of the inputs (in case they are const_int).
4044 COMPARISON is the rtl operator to compare with (EQ, NE, GT, etc.). It will
4045 be passed unchanged to emit_cmp_insn, then potentially converted into an
4046 unsigned variant based on UNSIGNEDP to select a proper jump instruction. */
4049 emit_cmp_and_jump_insns (rtx x, rtx y, enum rtx_code comparison, rtx size,
4050 enum machine_mode mode, int unsignedp, rtx label)
4052 rtx op0 = x, op1 = y;
4054 /* Swap operands and condition to ensure canonical RTL. */
4055 if (swap_commutative_operands_p (x, y))
4057 /* If we're not emitting a branch, this means some caller
4062 comparison = swap_condition (comparison);
4066 /* If OP0 is still a constant, then both X and Y must be constants.
4067 Force X into a register to create canonical RTL. */
4068 if (CONSTANT_P (op0))
4069 op0 = force_reg (mode, op0);
4073 comparison = unsigned_condition (comparison);
4075 prepare_cmp_insn (&op0, &op1, &comparison, size, &mode, &unsignedp,
4077 emit_cmp_and_jump_insn_1 (op0, op1, mode, comparison, unsignedp, label);
4080 /* Like emit_cmp_and_jump_insns, but generate only the comparison. */
4083 emit_cmp_insn (rtx x, rtx y, enum rtx_code comparison, rtx size,
4084 enum machine_mode mode, int unsignedp)
4086 emit_cmp_and_jump_insns (x, y, comparison, size, mode, unsignedp, 0);
4089 /* Emit a library call comparison between floating point X and Y.
4090 COMPARISON is the rtl operator to compare with (EQ, NE, GT, etc.). */
4093 prepare_float_lib_cmp (rtx *px, rtx *py, enum rtx_code *pcomparison,
4094 enum machine_mode *pmode, int *punsignedp)
4096 enum rtx_code comparison = *pcomparison;
4097 enum rtx_code swapped = swap_condition (comparison);
4098 enum rtx_code reversed = reverse_condition_maybe_unordered (comparison);
4101 enum machine_mode orig_mode = GET_MODE (x);
4102 enum machine_mode mode;
4103 rtx value, target, insns, equiv;
4105 bool reversed_p = false;
4107 for (mode = orig_mode;
4109 mode = GET_MODE_WIDER_MODE (mode))
4111 if ((libfunc = code_to_optab[comparison]->handlers[mode].libfunc))
4114 if ((libfunc = code_to_optab[swapped]->handlers[mode].libfunc))
4117 tmp = x; x = y; y = tmp;
4118 comparison = swapped;
4122 if ((libfunc = code_to_optab[reversed]->handlers[mode].libfunc)
4123 && FLOAT_LIB_COMPARE_RETURNS_BOOL (mode, reversed))
4125 comparison = reversed;
4131 gcc_assert (mode != VOIDmode);
4133 if (mode != orig_mode)
4135 x = convert_to_mode (mode, x, 0);
4136 y = convert_to_mode (mode, y, 0);
4139 /* Attach a REG_EQUAL note describing the semantics of the libcall to
4140 the RTL. The allows the RTL optimizers to delete the libcall if the
4141 condition can be determined at compile-time. */
4142 if (comparison == UNORDERED)
4144 rtx temp = simplify_gen_relational (NE, word_mode, mode, x, x);
4145 equiv = simplify_gen_relational (NE, word_mode, mode, y, y);
4146 equiv = simplify_gen_ternary (IF_THEN_ELSE, word_mode, word_mode,
4147 temp, const_true_rtx, equiv);
4151 equiv = simplify_gen_relational (comparison, word_mode, mode, x, y);
4152 if (! FLOAT_LIB_COMPARE_RETURNS_BOOL (mode, comparison))
4154 rtx true_rtx, false_rtx;
4159 true_rtx = const0_rtx;
4160 false_rtx = const_true_rtx;
4164 true_rtx = const_true_rtx;
4165 false_rtx = const0_rtx;
4169 true_rtx = const1_rtx;
4170 false_rtx = const0_rtx;
4174 true_rtx = const0_rtx;
4175 false_rtx = constm1_rtx;
4179 true_rtx = constm1_rtx;
4180 false_rtx = const0_rtx;
4184 true_rtx = const0_rtx;
4185 false_rtx = const1_rtx;
4191 equiv = simplify_gen_ternary (IF_THEN_ELSE, word_mode, word_mode,
4192 equiv, true_rtx, false_rtx);
4197 value = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
4198 word_mode, 2, x, mode, y, mode);
4199 insns = get_insns ();
4202 target = gen_reg_rtx (word_mode);
4203 emit_libcall_block (insns, target, value, equiv);
4205 if (comparison == UNORDERED
4206 || FLOAT_LIB_COMPARE_RETURNS_BOOL (mode, comparison))
4207 comparison = reversed_p ? EQ : NE;
4212 *pcomparison = comparison;
4216 /* Generate code to indirectly jump to a location given in the rtx LOC. */
4219 emit_indirect_jump (rtx loc)
4221 if (!insn_data[(int) CODE_FOR_indirect_jump].operand[0].predicate
4223 loc = copy_to_mode_reg (Pmode, loc);
4225 emit_jump_insn (gen_indirect_jump (loc));
4229 #ifdef HAVE_conditional_move
4231 /* Emit a conditional move instruction if the machine supports one for that
4232 condition and machine mode.
4234 OP0 and OP1 are the operands that should be compared using CODE. CMODE is
4235 the mode to use should they be constants. If it is VOIDmode, they cannot
4238 OP2 should be stored in TARGET if the comparison is true, otherwise OP3
4239 should be stored there. MODE is the mode to use should they be constants.
4240 If it is VOIDmode, they cannot both be constants.
4242 The result is either TARGET (perhaps modified) or NULL_RTX if the operation
4243 is not supported. */
4246 emit_conditional_move (rtx target, enum rtx_code code, rtx op0, rtx op1,
4247 enum machine_mode cmode, rtx op2, rtx op3,
4248 enum machine_mode mode, int unsignedp)
4250 rtx tem, subtarget, comparison, insn;
4251 enum insn_code icode;
4252 enum rtx_code reversed;
4254 /* If one operand is constant, make it the second one. Only do this
4255 if the other operand is not constant as well. */
4257 if (swap_commutative_operands_p (op0, op1))
4262 code = swap_condition (code);
4265 /* get_condition will prefer to generate LT and GT even if the old
4266 comparison was against zero, so undo that canonicalization here since
4267 comparisons against zero are cheaper. */
4268 if (code == LT && op1 == const1_rtx)
4269 code = LE, op1 = const0_rtx;
4270 else if (code == GT && op1 == constm1_rtx)
4271 code = GE, op1 = const0_rtx;
4273 if (cmode == VOIDmode)
4274 cmode = GET_MODE (op0);
4276 if (swap_commutative_operands_p (op2, op3)
4277 && ((reversed = reversed_comparison_code_parts (code, op0, op1, NULL))
4286 if (mode == VOIDmode)
4287 mode = GET_MODE (op2);
4289 icode = movcc_gen_code[mode];
4291 if (icode == CODE_FOR_nothing)
4295 target = gen_reg_rtx (mode);
4299 /* If the insn doesn't accept these operands, put them in pseudos. */
4301 if (!insn_data[icode].operand[0].predicate
4302 (subtarget, insn_data[icode].operand[0].mode))
4303 subtarget = gen_reg_rtx (insn_data[icode].operand[0].mode);
4305 if (!insn_data[icode].operand[2].predicate
4306 (op2, insn_data[icode].operand[2].mode))
4307 op2 = copy_to_mode_reg (insn_data[icode].operand[2].mode, op2);
4309 if (!insn_data[icode].operand[3].predicate
4310 (op3, insn_data[icode].operand[3].mode))
4311 op3 = copy_to_mode_reg (insn_data[icode].operand[3].mode, op3);
4313 /* Everything should now be in the suitable form, so emit the compare insn
4314 and then the conditional move. */
4317 = compare_from_rtx (op0, op1, code, unsignedp, cmode, NULL_RTX);
4319 /* ??? Watch for const0_rtx (nop) and const_true_rtx (unconditional)? */
4320 /* We can get const0_rtx or const_true_rtx in some circumstances. Just
4321 return NULL and let the caller figure out how best to deal with this
4323 if (GET_CODE (comparison) != code)
4326 insn = GEN_FCN (icode) (subtarget, comparison, op2, op3);
4328 /* If that failed, then give up. */
4334 if (subtarget != target)
4335 convert_move (target, subtarget, 0);
4340 /* Return nonzero if a conditional move of mode MODE is supported.
4342 This function is for combine so it can tell whether an insn that looks
4343 like a conditional move is actually supported by the hardware. If we
4344 guess wrong we lose a bit on optimization, but that's it. */
4345 /* ??? sparc64 supports conditionally moving integers values based on fp
4346 comparisons, and vice versa. How do we handle them? */
4349 can_conditionally_move_p (enum machine_mode mode)
4351 if (movcc_gen_code[mode] != CODE_FOR_nothing)
4357 #endif /* HAVE_conditional_move */
4359 /* Emit a conditional addition instruction if the machine supports one for that
4360 condition and machine mode.
4362 OP0 and OP1 are the operands that should be compared using CODE. CMODE is
4363 the mode to use should they be constants. If it is VOIDmode, they cannot
4366 OP2 should be stored in TARGET if the comparison is true, otherwise OP2+OP3
4367 should be stored there. MODE is the mode to use should they be constants.
4368 If it is VOIDmode, they cannot both be constants.
4370 The result is either TARGET (perhaps modified) or NULL_RTX if the operation
4371 is not supported. */
4374 emit_conditional_add (rtx target, enum rtx_code code, rtx op0, rtx op1,
4375 enum machine_mode cmode, rtx op2, rtx op3,
4376 enum machine_mode mode, int unsignedp)
4378 rtx tem, subtarget, comparison, insn;
4379 enum insn_code icode;
4380 enum rtx_code reversed;
4382 /* If one operand is constant, make it the second one. Only do this
4383 if the other operand is not constant as well. */
4385 if (swap_commutative_operands_p (op0, op1))
4390 code = swap_condition (code);
4393 /* get_condition will prefer to generate LT and GT even if the old
4394 comparison was against zero, so undo that canonicalization here since
4395 comparisons against zero are cheaper. */
4396 if (code == LT && op1 == const1_rtx)
4397 code = LE, op1 = const0_rtx;
4398 else if (code == GT && op1 == constm1_rtx)
4399 code = GE, op1 = const0_rtx;
4401 if (cmode == VOIDmode)
4402 cmode = GET_MODE (op0);
4404 if (swap_commutative_operands_p (op2, op3)
4405 && ((reversed = reversed_comparison_code_parts (code, op0, op1, NULL))
4414 if (mode == VOIDmode)
4415 mode = GET_MODE (op2);
4417 icode = addcc_optab->handlers[(int) mode].insn_code;
4419 if (icode == CODE_FOR_nothing)
4423 target = gen_reg_rtx (mode);
4425 /* If the insn doesn't accept these operands, put them in pseudos. */
4427 if (!insn_data[icode].operand[0].predicate
4428 (target, insn_data[icode].operand[0].mode))
4429 subtarget = gen_reg_rtx (insn_data[icode].operand[0].mode);
4433 if (!insn_data[icode].operand[2].predicate
4434 (op2, insn_data[icode].operand[2].mode))
4435 op2 = copy_to_mode_reg (insn_data[icode].operand[2].mode, op2);
4437 if (!insn_data[icode].operand[3].predicate
4438 (op3, insn_data[icode].operand[3].mode))
4439 op3 = copy_to_mode_reg (insn_data[icode].operand[3].mode, op3);
4441 /* Everything should now be in the suitable form, so emit the compare insn
4442 and then the conditional move. */
4445 = compare_from_rtx (op0, op1, code, unsignedp, cmode, NULL_RTX);
4447 /* ??? Watch for const0_rtx (nop) and const_true_rtx (unconditional)? */
4448 /* We can get const0_rtx or const_true_rtx in some circumstances. Just
4449 return NULL and let the caller figure out how best to deal with this
4451 if (GET_CODE (comparison) != code)
4454 insn = GEN_FCN (icode) (subtarget, comparison, op2, op3);
4456 /* If that failed, then give up. */
4462 if (subtarget != target)
4463 convert_move (target, subtarget, 0);
4468 /* These functions attempt to generate an insn body, rather than
4469 emitting the insn, but if the gen function already emits them, we
4470 make no attempt to turn them back into naked patterns. */
4472 /* Generate and return an insn body to add Y to X. */
4475 gen_add2_insn (rtx x, rtx y)
4477 int icode = (int) add_optab->handlers[(int) GET_MODE (x)].insn_code;
4479 gcc_assert (insn_data[icode].operand[0].predicate
4480 (x, insn_data[icode].operand[0].mode));
4481 gcc_assert (insn_data[icode].operand[1].predicate
4482 (x, insn_data[icode].operand[1].mode));
4483 gcc_assert (insn_data[icode].operand[2].predicate
4484 (y, insn_data[icode].operand[2].mode));
4486 return GEN_FCN (icode) (x, x, y);
4489 /* Generate and return an insn body to add r1 and c,
4490 storing the result in r0. */
4492 gen_add3_insn (rtx r0, rtx r1, rtx c)
4494 int icode = (int) add_optab->handlers[(int) GET_MODE (r0)].insn_code;
4496 if (icode == CODE_FOR_nothing
4497 || !(insn_data[icode].operand[0].predicate
4498 (r0, insn_data[icode].operand[0].mode))
4499 || !(insn_data[icode].operand[1].predicate
4500 (r1, insn_data[icode].operand[1].mode))
4501 || !(insn_data[icode].operand[2].predicate
4502 (c, insn_data[icode].operand[2].mode)))
4505 return GEN_FCN (icode) (r0, r1, c);
4509 have_add2_insn (rtx x, rtx y)
4513 gcc_assert (GET_MODE (x) != VOIDmode);
4515 icode = (int) add_optab->handlers[(int) GET_MODE (x)].insn_code;
4517 if (icode == CODE_FOR_nothing)
4520 if (!(insn_data[icode].operand[0].predicate
4521 (x, insn_data[icode].operand[0].mode))
4522 || !(insn_data[icode].operand[1].predicate
4523 (x, insn_data[icode].operand[1].mode))
4524 || !(insn_data[icode].operand[2].predicate
4525 (y, insn_data[icode].operand[2].mode)))
4531 /* Generate and return an insn body to subtract Y from X. */
4534 gen_sub2_insn (rtx x, rtx y)
4536 int icode = (int) sub_optab->handlers[(int) GET_MODE (x)].insn_code;
4538 gcc_assert (insn_data[icode].operand[0].predicate
4539 (x, insn_data[icode].operand[0].mode));
4540 gcc_assert (insn_data[icode].operand[1].predicate
4541 (x, insn_data[icode].operand[1].mode));
4542 gcc_assert (insn_data[icode].operand[2].predicate
4543 (y, insn_data[icode].operand[2].mode));
4545 return GEN_FCN (icode) (x, x, y);
4548 /* Generate and return an insn body to subtract r1 and c,
4549 storing the result in r0. */
4551 gen_sub3_insn (rtx r0, rtx r1, rtx c)
4553 int icode = (int) sub_optab->handlers[(int) GET_MODE (r0)].insn_code;
4555 if (icode == CODE_FOR_nothing
4556 || !(insn_data[icode].operand[0].predicate
4557 (r0, insn_data[icode].operand[0].mode))
4558 || !(insn_data[icode].operand[1].predicate
4559 (r1, insn_data[icode].operand[1].mode))
4560 || !(insn_data[icode].operand[2].predicate
4561 (c, insn_data[icode].operand[2].mode)))
4564 return GEN_FCN (icode) (r0, r1, c);
4568 have_sub2_insn (rtx x, rtx y)
4572 gcc_assert (GET_MODE (x) != VOIDmode);
4574 icode = (int) sub_optab->handlers[(int) GET_MODE (x)].insn_code;
4576 if (icode == CODE_FOR_nothing)
4579 if (!(insn_data[icode].operand[0].predicate
4580 (x, insn_data[icode].operand[0].mode))
4581 || !(insn_data[icode].operand[1].predicate
4582 (x, insn_data[icode].operand[1].mode))
4583 || !(insn_data[icode].operand[2].predicate
4584 (y, insn_data[icode].operand[2].mode)))
4590 /* Generate the body of an instruction to copy Y into X.
4591 It may be a list of insns, if one insn isn't enough. */
4594 gen_move_insn (rtx x, rtx y)
4599 emit_move_insn_1 (x, y);
4605 /* Return the insn code used to extend FROM_MODE to TO_MODE.
4606 UNSIGNEDP specifies zero-extension instead of sign-extension. If
4607 no such operation exists, CODE_FOR_nothing will be returned. */
4610 can_extend_p (enum machine_mode to_mode, enum machine_mode from_mode,
4614 #ifdef HAVE_ptr_extend
4616 return CODE_FOR_ptr_extend;
4619 tab = unsignedp ? zext_optab : sext_optab;
4620 return tab->handlers[to_mode][from_mode].insn_code;
4623 /* Generate the body of an insn to extend Y (with mode MFROM)
4624 into X (with mode MTO). Do zero-extension if UNSIGNEDP is nonzero. */
4627 gen_extend_insn (rtx x, rtx y, enum machine_mode mto,
4628 enum machine_mode mfrom, int unsignedp)
4630 enum insn_code icode = can_extend_p (mto, mfrom, unsignedp);
4631 return GEN_FCN (icode) (x, y);
4634 /* can_fix_p and can_float_p say whether the target machine
4635 can directly convert a given fixed point type to
4636 a given floating point type, or vice versa.
4637 The returned value is the CODE_FOR_... value to use,
4638 or CODE_FOR_nothing if these modes cannot be directly converted.
4640 *TRUNCP_PTR is set to 1 if it is necessary to output
4641 an explicit FTRUNC insn before the fix insn; otherwise 0. */
4643 static enum insn_code
4644 can_fix_p (enum machine_mode fixmode, enum machine_mode fltmode,
4645 int unsignedp, int *truncp_ptr)
4648 enum insn_code icode;
4650 tab = unsignedp ? ufixtrunc_optab : sfixtrunc_optab;
4651 icode = tab->handlers[fixmode][fltmode].insn_code;
4652 if (icode != CODE_FOR_nothing)
4658 /* FIXME: This requires a port to define both FIX and FTRUNC pattern
4659 for this to work. We need to rework the fix* and ftrunc* patterns
4660 and documentation. */
4661 tab = unsignedp ? ufix_optab : sfix_optab;
4662 icode = tab->handlers[fixmode][fltmode].insn_code;
4663 if (icode != CODE_FOR_nothing
4664 && ftrunc_optab->handlers[fltmode].insn_code != CODE_FOR_nothing)
4671 return CODE_FOR_nothing;
4674 static enum insn_code
4675 can_float_p (enum machine_mode fltmode, enum machine_mode fixmode,
4680 tab = unsignedp ? ufloat_optab : sfloat_optab;
4681 return tab->handlers[fltmode][fixmode].insn_code;
4684 /* Generate code to convert FROM to floating point
4685 and store in TO. FROM must be fixed point and not VOIDmode.
4686 UNSIGNEDP nonzero means regard FROM as unsigned.
4687 Normally this is done by correcting the final value
4688 if it is negative. */
4691 expand_float (rtx to, rtx from, int unsignedp)
4693 enum insn_code icode;
4695 enum machine_mode fmode, imode;
4696 bool can_do_signed = false;
4698 /* Crash now, because we won't be able to decide which mode to use. */
4699 gcc_assert (GET_MODE (from) != VOIDmode);
4701 /* Look for an insn to do the conversion. Do it in the specified
4702 modes if possible; otherwise convert either input, output or both to
4703 wider mode. If the integer mode is wider than the mode of FROM,
4704 we can do the conversion signed even if the input is unsigned. */
4706 for (fmode = GET_MODE (to); fmode != VOIDmode;
4707 fmode = GET_MODE_WIDER_MODE (fmode))
4708 for (imode = GET_MODE (from); imode != VOIDmode;
4709 imode = GET_MODE_WIDER_MODE (imode))
4711 int doing_unsigned = unsignedp;
4713 if (fmode != GET_MODE (to)
4714 && significand_size (fmode) < GET_MODE_BITSIZE (GET_MODE (from)))
4717 icode = can_float_p (fmode, imode, unsignedp);
4718 if (icode == CODE_FOR_nothing && unsignedp)
4720 enum insn_code scode = can_float_p (fmode, imode, 0);
4721 if (scode != CODE_FOR_nothing)
4722 can_do_signed = true;
4723 if (imode != GET_MODE (from))
4724 icode = scode, doing_unsigned = 0;
4727 if (icode != CODE_FOR_nothing)
4729 if (imode != GET_MODE (from))
4730 from = convert_to_mode (imode, from, unsignedp);
4732 if (fmode != GET_MODE (to))
4733 target = gen_reg_rtx (fmode);
4735 emit_unop_insn (icode, target, from,
4736 doing_unsigned ? UNSIGNED_FLOAT : FLOAT);
4739 convert_move (to, target, 0);
4744 /* Unsigned integer, and no way to convert directly. For binary
4745 floating point modes, convert as signed, then conditionally adjust
4747 if (unsignedp && can_do_signed && !DECIMAL_FLOAT_MODE_P (GET_MODE (to)))
4749 rtx label = gen_label_rtx ();
4751 REAL_VALUE_TYPE offset;
4753 /* Look for a usable floating mode FMODE wider than the source and at
4754 least as wide as the target. Using FMODE will avoid rounding woes
4755 with unsigned values greater than the signed maximum value. */
4757 for (fmode = GET_MODE (to); fmode != VOIDmode;
4758 fmode = GET_MODE_WIDER_MODE (fmode))
4759 if (GET_MODE_BITSIZE (GET_MODE (from)) < GET_MODE_BITSIZE (fmode)
4760 && can_float_p (fmode, GET_MODE (from), 0) != CODE_FOR_nothing)
4763 if (fmode == VOIDmode)
4765 /* There is no such mode. Pretend the target is wide enough. */
4766 fmode = GET_MODE (to);
4768 /* Avoid double-rounding when TO is narrower than FROM. */
4769 if ((significand_size (fmode) + 1)
4770 < GET_MODE_BITSIZE (GET_MODE (from)))
4773 rtx neglabel = gen_label_rtx ();
4775 /* Don't use TARGET if it isn't a register, is a hard register,
4776 or is the wrong mode. */
4778 || REGNO (target) < FIRST_PSEUDO_REGISTER
4779 || GET_MODE (target) != fmode)
4780 target = gen_reg_rtx (fmode);
4782 imode = GET_MODE (from);
4783 do_pending_stack_adjust ();
4785 /* Test whether the sign bit is set. */
4786 emit_cmp_and_jump_insns (from, const0_rtx, LT, NULL_RTX, imode,
4789 /* The sign bit is not set. Convert as signed. */
4790 expand_float (target, from, 0);
4791 emit_jump_insn (gen_jump (label));
4794 /* The sign bit is set.
4795 Convert to a usable (positive signed) value by shifting right
4796 one bit, while remembering if a nonzero bit was shifted
4797 out; i.e., compute (from & 1) | (from >> 1). */
4799 emit_label (neglabel);
4800 temp = expand_binop (imode, and_optab, from, const1_rtx,
4801 NULL_RTX, 1, OPTAB_LIB_WIDEN);
4802 temp1 = expand_shift (RSHIFT_EXPR, imode, from, integer_one_node,
4804 temp = expand_binop (imode, ior_optab, temp, temp1, temp, 1,
4806 expand_float (target, temp, 0);
4808 /* Multiply by 2 to undo the shift above. */
4809 temp = expand_binop (fmode, add_optab, target, target,
4810 target, 0, OPTAB_LIB_WIDEN);
4812 emit_move_insn (target, temp);
4814 do_pending_stack_adjust ();
4820 /* If we are about to do some arithmetic to correct for an
4821 unsigned operand, do it in a pseudo-register. */
4823 if (GET_MODE (to) != fmode
4824 || !REG_P (to) || REGNO (to) < FIRST_PSEUDO_REGISTER)
4825 target = gen_reg_rtx (fmode);
4827 /* Convert as signed integer to floating. */
4828 expand_float (target, from, 0);
4830 /* If FROM is negative (and therefore TO is negative),
4831 correct its value by 2**bitwidth. */
4833 do_pending_stack_adjust ();
4834 emit_cmp_and_jump_insns (from, const0_rtx, GE, NULL_RTX, GET_MODE (from),
4838 real_2expN (&offset, GET_MODE_BITSIZE (GET_MODE (from)));
4839 temp = expand_binop (fmode, add_optab, target,
4840 CONST_DOUBLE_FROM_REAL_VALUE (offset, fmode),
4841 target, 0, OPTAB_LIB_WIDEN);
4843 emit_move_insn (target, temp);
4845 do_pending_stack_adjust ();
4850 /* No hardware instruction available; call a library routine. */
4855 convert_optab tab = unsignedp ? ufloat_optab : sfloat_optab;
4857 if (GET_MODE_SIZE (GET_MODE (from)) < GET_MODE_SIZE (SImode))
4858 from = convert_to_mode (SImode, from, unsignedp);
4860 libfunc = tab->handlers[GET_MODE (to)][GET_MODE (from)].libfunc;
4861 gcc_assert (libfunc);
4865 value = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
4866 GET_MODE (to), 1, from,
4868 insns = get_insns ();
4871 emit_libcall_block (insns, target, value,
4872 gen_rtx_FLOAT (GET_MODE (to), from));
4877 /* Copy result to requested destination
4878 if we have been computing in a temp location. */
4882 if (GET_MODE (target) == GET_MODE (to))
4883 emit_move_insn (to, target);
4885 convert_move (to, target, 0);
4889 /* Generate code to convert FROM to fixed point and store in TO. FROM
4890 must be floating point. */
4893 expand_fix (rtx to, rtx from, int unsignedp)
4895 enum insn_code icode;
4897 enum machine_mode fmode, imode;
4900 /* We first try to find a pair of modes, one real and one integer, at
4901 least as wide as FROM and TO, respectively, in which we can open-code
4902 this conversion. If the integer mode is wider than the mode of TO,
4903 we can do the conversion either signed or unsigned. */
4905 for (fmode = GET_MODE (from); fmode != VOIDmode;
4906 fmode = GET_MODE_WIDER_MODE (fmode))
4907 for (imode = GET_MODE (to); imode != VOIDmode;
4908 imode = GET_MODE_WIDER_MODE (imode))
4910 int doing_unsigned = unsignedp;
4912 icode = can_fix_p (imode, fmode, unsignedp, &must_trunc);
4913 if (icode == CODE_FOR_nothing && imode != GET_MODE (to) && unsignedp)
4914 icode = can_fix_p (imode, fmode, 0, &must_trunc), doing_unsigned = 0;
4916 if (icode != CODE_FOR_nothing)
4918 if (fmode != GET_MODE (from))
4919 from = convert_to_mode (fmode, from, 0);
4923 rtx temp = gen_reg_rtx (GET_MODE (from));
4924 from = expand_unop (GET_MODE (from), ftrunc_optab, from,
4928 if (imode != GET_MODE (to))
4929 target = gen_reg_rtx (imode);
4931 emit_unop_insn (icode, target, from,
4932 doing_unsigned ? UNSIGNED_FIX : FIX);
4934 convert_move (to, target, unsignedp);
4939 /* For an unsigned conversion, there is one more way to do it.
4940 If we have a signed conversion, we generate code that compares
4941 the real value to the largest representable positive number. If if
4942 is smaller, the conversion is done normally. Otherwise, subtract
4943 one plus the highest signed number, convert, and add it back.
4945 We only need to check all real modes, since we know we didn't find
4946 anything with a wider integer mode.
4948 This code used to extend FP value into mode wider than the destination.
4949 This is not needed. Consider, for instance conversion from SFmode
4952 The hot path through the code is dealing with inputs smaller than 2^63
4953 and doing just the conversion, so there is no bits to lose.
4955 In the other path we know the value is positive in the range 2^63..2^64-1
4956 inclusive. (as for other imput overflow happens and result is undefined)
4957 So we know that the most important bit set in mantissa corresponds to
4958 2^63. The subtraction of 2^63 should not generate any rounding as it
4959 simply clears out that bit. The rest is trivial. */
4961 if (unsignedp && GET_MODE_BITSIZE (GET_MODE (to)) <= HOST_BITS_PER_WIDE_INT)
4962 for (fmode = GET_MODE (from); fmode != VOIDmode;
4963 fmode = GET_MODE_WIDER_MODE (fmode))
4964 if (CODE_FOR_nothing != can_fix_p (GET_MODE (to), fmode, 0,
4968 REAL_VALUE_TYPE offset;
4969 rtx limit, lab1, lab2, insn;
4971 bitsize = GET_MODE_BITSIZE (GET_MODE (to));
4972 real_2expN (&offset, bitsize - 1);
4973 limit = CONST_DOUBLE_FROM_REAL_VALUE (offset, fmode);
4974 lab1 = gen_label_rtx ();
4975 lab2 = gen_label_rtx ();
4977 if (fmode != GET_MODE (from))
4978 from = convert_to_mode (fmode, from, 0);
4980 /* See if we need to do the subtraction. */
4981 do_pending_stack_adjust ();
4982 emit_cmp_and_jump_insns (from, limit, GE, NULL_RTX, GET_MODE (from),
4985 /* If not, do the signed "fix" and branch around fixup code. */
4986 expand_fix (to, from, 0);
4987 emit_jump_insn (gen_jump (lab2));
4990 /* Otherwise, subtract 2**(N-1), convert to signed number,
4991 then add 2**(N-1). Do the addition using XOR since this
4992 will often generate better code. */
4994 target = expand_binop (GET_MODE (from), sub_optab, from, limit,
4995 NULL_RTX, 0, OPTAB_LIB_WIDEN);
4996 expand_fix (to, target, 0);
4997 target = expand_binop (GET_MODE (to), xor_optab, to,
4999 ((HOST_WIDE_INT) 1 << (bitsize - 1),
5001 to, 1, OPTAB_LIB_WIDEN);
5004 emit_move_insn (to, target);
5008 if (mov_optab->handlers[(int) GET_MODE (to)].insn_code
5009 != CODE_FOR_nothing)
5011 /* Make a place for a REG_NOTE and add it. */
5012 insn = emit_move_insn (to, to);
5013 set_unique_reg_note (insn,
5015 gen_rtx_fmt_e (UNSIGNED_FIX,
5023 /* We can't do it with an insn, so use a library call. But first ensure
5024 that the mode of TO is at least as wide as SImode, since those are the
5025 only library calls we know about. */
5027 if (GET_MODE_SIZE (GET_MODE (to)) < GET_MODE_SIZE (SImode))
5029 target = gen_reg_rtx (SImode);
5031 expand_fix (target, from, unsignedp);
5039 convert_optab tab = unsignedp ? ufix_optab : sfix_optab;
5040 libfunc = tab->handlers[GET_MODE (to)][GET_MODE (from)].libfunc;
5041 gcc_assert (libfunc);
5045 value = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
5046 GET_MODE (to), 1, from,
5048 insns = get_insns ();
5051 emit_libcall_block (insns, target, value,
5052 gen_rtx_fmt_e (unsignedp ? UNSIGNED_FIX : FIX,
5053 GET_MODE (to), from));
5058 if (GET_MODE (to) == GET_MODE (target))
5059 emit_move_insn (to, target);
5061 convert_move (to, target, 0);
5065 /* Generate code to convert FROM to fixed point and store in TO. FROM
5066 must be floating point, TO must be signed. Use the conversion optab
5067 TAB to do the conversion. */
5070 expand_sfix_optab (rtx to, rtx from, convert_optab tab)
5072 enum insn_code icode;
5074 enum machine_mode fmode, imode;
5076 /* We first try to find a pair of modes, one real and one integer, at
5077 least as wide as FROM and TO, respectively, in which we can open-code
5078 this conversion. If the integer mode is wider than the mode of TO,
5079 we can do the conversion either signed or unsigned. */
5081 for (fmode = GET_MODE (from); fmode != VOIDmode;
5082 fmode = GET_MODE_WIDER_MODE (fmode))
5083 for (imode = GET_MODE (to); imode != VOIDmode;
5084 imode = GET_MODE_WIDER_MODE (imode))
5086 icode = tab->handlers[imode][fmode].insn_code;
5087 if (icode != CODE_FOR_nothing)
5089 if (fmode != GET_MODE (from))
5090 from = convert_to_mode (fmode, from, 0);
5092 if (imode != GET_MODE (to))
5093 target = gen_reg_rtx (imode);
5095 emit_unop_insn (icode, target, from, UNKNOWN);
5097 convert_move (to, target, 0);
5105 /* Report whether we have an instruction to perform the operation
5106 specified by CODE on operands of mode MODE. */
5108 have_insn_for (enum rtx_code code, enum machine_mode mode)
5110 return (code_to_optab[(int) code] != 0
5111 && (code_to_optab[(int) code]->handlers[(int) mode].insn_code
5112 != CODE_FOR_nothing));
5115 /* Create a blank optab. */
5120 optab op = ggc_alloc (sizeof (struct optab));
5121 for (i = 0; i < NUM_MACHINE_MODES; i++)
5123 op->handlers[i].insn_code = CODE_FOR_nothing;
5124 op->handlers[i].libfunc = 0;
5130 static convert_optab
5131 new_convert_optab (void)
5134 convert_optab op = ggc_alloc (sizeof (struct convert_optab));
5135 for (i = 0; i < NUM_MACHINE_MODES; i++)
5136 for (j = 0; j < NUM_MACHINE_MODES; j++)
5138 op->handlers[i][j].insn_code = CODE_FOR_nothing;
5139 op->handlers[i][j].libfunc = 0;
5144 /* Same, but fill in its code as CODE, and write it into the
5145 code_to_optab table. */
5147 init_optab (enum rtx_code code)
5149 optab op = new_optab ();
5151 code_to_optab[(int) code] = op;
5155 /* Same, but fill in its code as CODE, and do _not_ write it into
5156 the code_to_optab table. */
5158 init_optabv (enum rtx_code code)
5160 optab op = new_optab ();
5165 /* Conversion optabs never go in the code_to_optab table. */
5166 static inline convert_optab
5167 init_convert_optab (enum rtx_code code)
5169 convert_optab op = new_convert_optab ();
5174 /* Initialize the libfunc fields of an entire group of entries in some
5175 optab. Each entry is set equal to a string consisting of a leading
5176 pair of underscores followed by a generic operation name followed by
5177 a mode name (downshifted to lowercase) followed by a single character
5178 representing the number of operands for the given operation (which is
5179 usually one of the characters '2', '3', or '4').
5181 OPTABLE is the table in which libfunc fields are to be initialized.
5182 FIRST_MODE is the first machine mode index in the given optab to
5184 LAST_MODE is the last machine mode index in the given optab to
5186 OPNAME is the generic (string) name of the operation.
5187 SUFFIX is the character which specifies the number of operands for
5188 the given generic operation.
5192 init_libfuncs (optab optable, int first_mode, int last_mode,
5193 const char *opname, int suffix)
5196 unsigned opname_len = strlen (opname);
5198 for (mode = first_mode; (int) mode <= (int) last_mode;
5199 mode = (enum machine_mode) ((int) mode + 1))
5201 const char *mname = GET_MODE_NAME (mode);
5202 unsigned mname_len = strlen (mname);
5203 char *libfunc_name = alloca (2 + opname_len + mname_len + 1 + 1);
5210 for (q = opname; *q; )
5212 for (q = mname; *q; q++)
5213 *p++ = TOLOWER (*q);
5217 optable->handlers[(int) mode].libfunc
5218 = init_one_libfunc (ggc_alloc_string (libfunc_name, p - libfunc_name));
5222 /* Initialize the libfunc fields of an entire group of entries in some
5223 optab which correspond to all integer mode operations. The parameters
5224 have the same meaning as similarly named ones for the `init_libfuncs'
5225 routine. (See above). */
5228 init_integral_libfuncs (optab optable, const char *opname, int suffix)
5230 int maxsize = 2*BITS_PER_WORD;
5231 if (maxsize < LONG_LONG_TYPE_SIZE)
5232 maxsize = LONG_LONG_TYPE_SIZE;
5233 init_libfuncs (optable, word_mode,
5234 mode_for_size (maxsize, MODE_INT, 0),
5238 /* Initialize the libfunc fields of an entire group of entries in some
5239 optab which correspond to all real mode operations. The parameters
5240 have the same meaning as similarly named ones for the `init_libfuncs'
5241 routine. (See above). */
5244 init_floating_libfuncs (optab optable, const char *opname, int suffix)
5246 char *dec_opname = alloca (sizeof (DECIMAL_PREFIX) + strlen (opname));
5248 /* For BID support, change the name to have either a bid_ or dpd_ prefix
5249 depending on the low level floating format used. */
5250 memcpy (dec_opname, DECIMAL_PREFIX, sizeof (DECIMAL_PREFIX) - 1);
5251 strcpy (dec_opname + sizeof (DECIMAL_PREFIX) - 1, opname);
5253 init_libfuncs (optable, MIN_MODE_FLOAT, MAX_MODE_FLOAT, opname, suffix);
5254 init_libfuncs (optable, MIN_MODE_DECIMAL_FLOAT, MAX_MODE_DECIMAL_FLOAT,
5255 dec_opname, suffix);
5258 /* Initialize the libfunc fields of an entire group of entries of an
5259 inter-mode-class conversion optab. The string formation rules are
5260 similar to the ones for init_libfuncs, above, but instead of having
5261 a mode name and an operand count these functions have two mode names
5262 and no operand count. */
5264 init_interclass_conv_libfuncs (convert_optab tab, const char *opname,
5265 enum mode_class from_class,
5266 enum mode_class to_class)
5268 enum machine_mode first_from_mode = GET_CLASS_NARROWEST_MODE (from_class);
5269 enum machine_mode first_to_mode = GET_CLASS_NARROWEST_MODE (to_class);
5270 size_t opname_len = strlen (opname);
5271 size_t max_mname_len = 0;
5273 enum machine_mode fmode, tmode;
5274 const char *fname, *tname;
5276 char *libfunc_name, *suffix;
5277 char *nondec_name, *dec_name, *nondec_suffix, *dec_suffix;
5280 /* If this is a decimal conversion, add the current BID vs. DPD prefix that
5281 depends on which underlying decimal floating point format is used. */
5282 const size_t dec_len = sizeof (DECIMAL_PREFIX) - 1;
5284 for (fmode = first_from_mode;
5286 fmode = GET_MODE_WIDER_MODE (fmode))
5287 max_mname_len = MAX (max_mname_len, strlen (GET_MODE_NAME (fmode)));
5289 for (tmode = first_to_mode;
5291 tmode = GET_MODE_WIDER_MODE (tmode))
5292 max_mname_len = MAX (max_mname_len, strlen (GET_MODE_NAME (tmode)));
5294 nondec_name = alloca (2 + opname_len + 2*max_mname_len + 1 + 1);
5295 nondec_name[0] = '_';
5296 nondec_name[1] = '_';
5297 memcpy (&nondec_name[2], opname, opname_len);
5298 nondec_suffix = nondec_name + opname_len + 2;
5300 dec_name = alloca (2 + dec_len + opname_len + 2*max_mname_len + 1 + 1);
5303 memcpy (&dec_name[2], DECIMAL_PREFIX, dec_len);
5304 memcpy (&dec_name[2+dec_len], opname, opname_len);
5305 dec_suffix = dec_name + dec_len + opname_len + 2;
5307 for (fmode = first_from_mode; fmode != VOIDmode;
5308 fmode = GET_MODE_WIDER_MODE (fmode))
5309 for (tmode = first_to_mode; tmode != VOIDmode;
5310 tmode = GET_MODE_WIDER_MODE (tmode))
5312 fname = GET_MODE_NAME (fmode);
5313 tname = GET_MODE_NAME (tmode);
5315 if (DECIMAL_FLOAT_MODE_P(fmode) || DECIMAL_FLOAT_MODE_P(tmode))
5317 libfunc_name = dec_name;
5318 suffix = dec_suffix;
5322 libfunc_name = nondec_name;
5323 suffix = nondec_suffix;
5327 for (q = fname; *q; p++, q++)
5329 for (q = tname; *q; p++, q++)
5334 tab->handlers[tmode][fmode].libfunc
5335 = init_one_libfunc (ggc_alloc_string (libfunc_name,
5340 /* Initialize the libfunc fields of an entire group of entries of an
5341 intra-mode-class conversion optab. The string formation rules are
5342 similar to the ones for init_libfunc, above. WIDENING says whether
5343 the optab goes from narrow to wide modes or vice versa. These functions
5344 have two mode names _and_ an operand count. */
5346 init_intraclass_conv_libfuncs (convert_optab tab, const char *opname,
5347 enum mode_class class, bool widening)
5349 enum machine_mode first_mode = GET_CLASS_NARROWEST_MODE (class);
5350 size_t opname_len = strlen (opname);
5351 size_t max_mname_len = 0;
5353 enum machine_mode nmode, wmode;
5354 const char *nname, *wname;
5356 char *nondec_name, *dec_name, *nondec_suffix, *dec_suffix;
5357 char *libfunc_name, *suffix;
5360 /* If this is a decimal conversion, add the current BID vs. DPD prefix that
5361 depends on which underlying decimal floating point format is used. */
5362 const size_t dec_len = sizeof (DECIMAL_PREFIX) - 1;
5364 for (nmode = first_mode; nmode != VOIDmode;
5365 nmode = GET_MODE_WIDER_MODE (nmode))
5366 max_mname_len = MAX (max_mname_len, strlen (GET_MODE_NAME (nmode)));
5368 nondec_name = alloca (2 + opname_len + 2*max_mname_len + 1 + 1);
5369 nondec_name[0] = '_';
5370 nondec_name[1] = '_';
5371 memcpy (&nondec_name[2], opname, opname_len);
5372 nondec_suffix = nondec_name + opname_len + 2;
5374 dec_name = alloca (2 + dec_len + opname_len + 2*max_mname_len + 1 + 1);
5377 memcpy (&dec_name[2], DECIMAL_PREFIX, dec_len);
5378 memcpy (&dec_name[2 + dec_len], opname, opname_len);
5379 dec_suffix = dec_name + dec_len + opname_len + 2;
5381 for (nmode = first_mode; nmode != VOIDmode;
5382 nmode = GET_MODE_WIDER_MODE (nmode))
5383 for (wmode = GET_MODE_WIDER_MODE (nmode); wmode != VOIDmode;
5384 wmode = GET_MODE_WIDER_MODE (wmode))
5386 nname = GET_MODE_NAME (nmode);
5387 wname = GET_MODE_NAME (wmode);
5389 if (DECIMAL_FLOAT_MODE_P(nmode) || DECIMAL_FLOAT_MODE_P(wmode))
5391 libfunc_name = dec_name;
5392 suffix = dec_suffix;
5396 libfunc_name = nondec_name;
5397 suffix = nondec_suffix;
5401 for (q = widening ? nname : wname; *q; p++, q++)
5403 for (q = widening ? wname : nname; *q; p++, q++)
5409 tab->handlers[widening ? wmode : nmode]
5410 [widening ? nmode : wmode].libfunc
5411 = init_one_libfunc (ggc_alloc_string (libfunc_name,
5418 init_one_libfunc (const char *name)
5422 /* Create a FUNCTION_DECL that can be passed to
5423 targetm.encode_section_info. */
5424 /* ??? We don't have any type information except for this is
5425 a function. Pretend this is "int foo()". */
5426 tree decl = build_decl (FUNCTION_DECL, get_identifier (name),
5427 build_function_type (integer_type_node, NULL_TREE));
5428 DECL_ARTIFICIAL (decl) = 1;
5429 DECL_EXTERNAL (decl) = 1;
5430 TREE_PUBLIC (decl) = 1;
5432 symbol = XEXP (DECL_RTL (decl), 0);
5434 /* Zap the nonsensical SYMBOL_REF_DECL for this. What we're left with
5435 are the flags assigned by targetm.encode_section_info. */
5436 SET_SYMBOL_REF_DECL (symbol, 0);
5441 /* Call this to reset the function entry for one optab (OPTABLE) in mode
5442 MODE to NAME, which should be either 0 or a string constant. */
5444 set_optab_libfunc (optab optable, enum machine_mode mode, const char *name)
5447 optable->handlers[mode].libfunc = init_one_libfunc (name);
5449 optable->handlers[mode].libfunc = 0;
5452 /* Call this to reset the function entry for one conversion optab
5453 (OPTABLE) from mode FMODE to mode TMODE to NAME, which should be
5454 either 0 or a string constant. */
5456 set_conv_libfunc (convert_optab optable, enum machine_mode tmode,
5457 enum machine_mode fmode, const char *name)
5460 optable->handlers[tmode][fmode].libfunc = init_one_libfunc (name);
5462 optable->handlers[tmode][fmode].libfunc = 0;
5465 /* Call this once to initialize the contents of the optabs
5466 appropriately for the current target machine. */
5472 enum machine_mode int_mode;
5474 /* Start by initializing all tables to contain CODE_FOR_nothing. */
5476 for (i = 0; i < NUM_RTX_CODE; i++)
5477 setcc_gen_code[i] = CODE_FOR_nothing;
5479 #ifdef HAVE_conditional_move
5480 for (i = 0; i < NUM_MACHINE_MODES; i++)
5481 movcc_gen_code[i] = CODE_FOR_nothing;
5484 for (i = 0; i < NUM_MACHINE_MODES; i++)
5486 vcond_gen_code[i] = CODE_FOR_nothing;
5487 vcondu_gen_code[i] = CODE_FOR_nothing;
5490 add_optab = init_optab (PLUS);
5491 addv_optab = init_optabv (PLUS);
5492 sub_optab = init_optab (MINUS);
5493 subv_optab = init_optabv (MINUS);
5494 smul_optab = init_optab (MULT);
5495 smulv_optab = init_optabv (MULT);
5496 smul_highpart_optab = init_optab (UNKNOWN);
5497 umul_highpart_optab = init_optab (UNKNOWN);
5498 smul_widen_optab = init_optab (UNKNOWN);
5499 umul_widen_optab = init_optab (UNKNOWN);
5500 usmul_widen_optab = init_optab (UNKNOWN);
5501 smadd_widen_optab = init_optab (UNKNOWN);
5502 umadd_widen_optab = init_optab (UNKNOWN);
5503 smsub_widen_optab = init_optab (UNKNOWN);
5504 umsub_widen_optab = init_optab (UNKNOWN);
5505 sdiv_optab = init_optab (DIV);
5506 sdivv_optab = init_optabv (DIV);
5507 sdivmod_optab = init_optab (UNKNOWN);
5508 udiv_optab = init_optab (UDIV);
5509 udivmod_optab = init_optab (UNKNOWN);
5510 smod_optab = init_optab (MOD);
5511 umod_optab = init_optab (UMOD);
5512 fmod_optab = init_optab (UNKNOWN);
5513 remainder_optab = init_optab (UNKNOWN);
5514 ftrunc_optab = init_optab (UNKNOWN);
5515 and_optab = init_optab (AND);
5516 ior_optab = init_optab (IOR);
5517 xor_optab = init_optab (XOR);
5518 ashl_optab = init_optab (ASHIFT);
5519 ashr_optab = init_optab (ASHIFTRT);
5520 lshr_optab = init_optab (LSHIFTRT);
5521 rotl_optab = init_optab (ROTATE);
5522 rotr_optab = init_optab (ROTATERT);
5523 smin_optab = init_optab (SMIN);
5524 smax_optab = init_optab (SMAX);
5525 umin_optab = init_optab (UMIN);
5526 umax_optab = init_optab (UMAX);
5527 pow_optab = init_optab (UNKNOWN);
5528 atan2_optab = init_optab (UNKNOWN);
5530 /* These three have codes assigned exclusively for the sake of
5532 mov_optab = init_optab (SET);
5533 movstrict_optab = init_optab (STRICT_LOW_PART);
5534 cmp_optab = init_optab (COMPARE);
5536 storent_optab = init_optab (UNKNOWN);
5538 ucmp_optab = init_optab (UNKNOWN);
5539 tst_optab = init_optab (UNKNOWN);
5541 eq_optab = init_optab (EQ);
5542 ne_optab = init_optab (NE);
5543 gt_optab = init_optab (GT);
5544 ge_optab = init_optab (GE);
5545 lt_optab = init_optab (LT);
5546 le_optab = init_optab (LE);
5547 unord_optab = init_optab (UNORDERED);
5549 neg_optab = init_optab (NEG);
5550 negv_optab = init_optabv (NEG);
5551 abs_optab = init_optab (ABS);
5552 absv_optab = init_optabv (ABS);
5553 addcc_optab = init_optab (UNKNOWN);
5554 one_cmpl_optab = init_optab (NOT);
5555 bswap_optab = init_optab (BSWAP);
5556 ffs_optab = init_optab (FFS);
5557 clz_optab = init_optab (CLZ);
5558 ctz_optab = init_optab (CTZ);
5559 popcount_optab = init_optab (POPCOUNT);
5560 parity_optab = init_optab (PARITY);
5561 sqrt_optab = init_optab (SQRT);
5562 floor_optab = init_optab (UNKNOWN);
5563 ceil_optab = init_optab (UNKNOWN);
5564 round_optab = init_optab (UNKNOWN);
5565 btrunc_optab = init_optab (UNKNOWN);
5566 nearbyint_optab = init_optab (UNKNOWN);
5567 rint_optab = init_optab (UNKNOWN);
5568 sincos_optab = init_optab (UNKNOWN);
5569 sin_optab = init_optab (UNKNOWN);
5570 asin_optab = init_optab (UNKNOWN);
5571 cos_optab = init_optab (UNKNOWN);
5572 acos_optab = init_optab (UNKNOWN);
5573 exp_optab = init_optab (UNKNOWN);
5574 exp10_optab = init_optab (UNKNOWN);
5575 exp2_optab = init_optab (UNKNOWN);
5576 expm1_optab = init_optab (UNKNOWN);
5577 ldexp_optab = init_optab (UNKNOWN);
5578 scalb_optab = init_optab (UNKNOWN);
5579 logb_optab = init_optab (UNKNOWN);
5580 ilogb_optab = init_optab (UNKNOWN);
5581 log_optab = init_optab (UNKNOWN);
5582 log10_optab = init_optab (UNKNOWN);
5583 log2_optab = init_optab (UNKNOWN);
5584 log1p_optab = init_optab (UNKNOWN);
5585 tan_optab = init_optab (UNKNOWN);
5586 atan_optab = init_optab (UNKNOWN);
5587 copysign_optab = init_optab (UNKNOWN);
5589 isinf_optab = init_optab (UNKNOWN);
5591 strlen_optab = init_optab (UNKNOWN);
5592 cbranch_optab = init_optab (UNKNOWN);
5593 cmov_optab = init_optab (UNKNOWN);
5594 cstore_optab = init_optab (UNKNOWN);
5595 push_optab = init_optab (UNKNOWN);
5597 reduc_smax_optab = init_optab (UNKNOWN);
5598 reduc_umax_optab = init_optab (UNKNOWN);
5599 reduc_smin_optab = init_optab (UNKNOWN);
5600 reduc_umin_optab = init_optab (UNKNOWN);
5601 reduc_splus_optab = init_optab (UNKNOWN);
5602 reduc_uplus_optab = init_optab (UNKNOWN);
5604 ssum_widen_optab = init_optab (UNKNOWN);
5605 usum_widen_optab = init_optab (UNKNOWN);
5606 sdot_prod_optab = init_optab (UNKNOWN);
5607 udot_prod_optab = init_optab (UNKNOWN);
5609 vec_extract_optab = init_optab (UNKNOWN);
5610 vec_extract_even_optab = init_optab (UNKNOWN);
5611 vec_extract_odd_optab = init_optab (UNKNOWN);
5612 vec_interleave_high_optab = init_optab (UNKNOWN);
5613 vec_interleave_low_optab = init_optab (UNKNOWN);
5614 vec_set_optab = init_optab (UNKNOWN);
5615 vec_init_optab = init_optab (UNKNOWN);
5616 vec_shl_optab = init_optab (UNKNOWN);
5617 vec_shr_optab = init_optab (UNKNOWN);
5618 vec_realign_load_optab = init_optab (UNKNOWN);
5619 movmisalign_optab = init_optab (UNKNOWN);
5620 vec_widen_umult_hi_optab = init_optab (UNKNOWN);
5621 vec_widen_umult_lo_optab = init_optab (UNKNOWN);
5622 vec_widen_smult_hi_optab = init_optab (UNKNOWN);
5623 vec_widen_smult_lo_optab = init_optab (UNKNOWN);
5624 vec_unpacks_hi_optab = init_optab (UNKNOWN);
5625 vec_unpacks_lo_optab = init_optab (UNKNOWN);
5626 vec_unpacku_hi_optab = init_optab (UNKNOWN);
5627 vec_unpacku_lo_optab = init_optab (UNKNOWN);
5628 vec_unpacks_float_hi_optab = init_optab (UNKNOWN);
5629 vec_unpacks_float_lo_optab = init_optab (UNKNOWN);
5630 vec_unpacku_float_hi_optab = init_optab (UNKNOWN);
5631 vec_unpacku_float_lo_optab = init_optab (UNKNOWN);
5632 vec_pack_trunc_optab = init_optab (UNKNOWN);
5633 vec_pack_usat_optab = init_optab (UNKNOWN);
5634 vec_pack_ssat_optab = init_optab (UNKNOWN);
5635 vec_pack_ufix_trunc_optab = init_optab (UNKNOWN);
5636 vec_pack_sfix_trunc_optab = init_optab (UNKNOWN);
5638 powi_optab = init_optab (UNKNOWN);
5641 sext_optab = init_convert_optab (SIGN_EXTEND);
5642 zext_optab = init_convert_optab (ZERO_EXTEND);
5643 trunc_optab = init_convert_optab (TRUNCATE);
5644 sfix_optab = init_convert_optab (FIX);
5645 ufix_optab = init_convert_optab (UNSIGNED_FIX);
5646 sfixtrunc_optab = init_convert_optab (UNKNOWN);
5647 ufixtrunc_optab = init_convert_optab (UNKNOWN);
5648 sfloat_optab = init_convert_optab (FLOAT);
5649 ufloat_optab = init_convert_optab (UNSIGNED_FLOAT);
5650 lrint_optab = init_convert_optab (UNKNOWN);
5651 lround_optab = init_convert_optab (UNKNOWN);
5652 lfloor_optab = init_convert_optab (UNKNOWN);
5653 lceil_optab = init_convert_optab (UNKNOWN);
5655 for (i = 0; i < NUM_MACHINE_MODES; i++)
5657 movmem_optab[i] = CODE_FOR_nothing;
5658 cmpstr_optab[i] = CODE_FOR_nothing;
5659 cmpstrn_optab[i] = CODE_FOR_nothing;
5660 cmpmem_optab[i] = CODE_FOR_nothing;
5661 setmem_optab[i] = CODE_FOR_nothing;
5663 sync_add_optab[i] = CODE_FOR_nothing;
5664 sync_sub_optab[i] = CODE_FOR_nothing;
5665 sync_ior_optab[i] = CODE_FOR_nothing;
5666 sync_and_optab[i] = CODE_FOR_nothing;
5667 sync_xor_optab[i] = CODE_FOR_nothing;
5668 sync_nand_optab[i] = CODE_FOR_nothing;
5669 sync_old_add_optab[i] = CODE_FOR_nothing;
5670 sync_old_sub_optab[i] = CODE_FOR_nothing;
5671 sync_old_ior_optab[i] = CODE_FOR_nothing;
5672 sync_old_and_optab[i] = CODE_FOR_nothing;
5673 sync_old_xor_optab[i] = CODE_FOR_nothing;
5674 sync_old_nand_optab[i] = CODE_FOR_nothing;
5675 sync_new_add_optab[i] = CODE_FOR_nothing;
5676 sync_new_sub_optab[i] = CODE_FOR_nothing;
5677 sync_new_ior_optab[i] = CODE_FOR_nothing;
5678 sync_new_and_optab[i] = CODE_FOR_nothing;
5679 sync_new_xor_optab[i] = CODE_FOR_nothing;
5680 sync_new_nand_optab[i] = CODE_FOR_nothing;
5681 sync_compare_and_swap[i] = CODE_FOR_nothing;
5682 sync_compare_and_swap_cc[i] = CODE_FOR_nothing;
5683 sync_lock_test_and_set[i] = CODE_FOR_nothing;
5684 sync_lock_release[i] = CODE_FOR_nothing;
5686 reload_in_optab[i] = reload_out_optab[i] = CODE_FOR_nothing;
5689 /* Fill in the optabs with the insns we support. */
5692 /* The ffs function operates on `int'. Fall back on it if we do not
5693 have a libgcc2 function for that width. */
5694 int_mode = mode_for_size (INT_TYPE_SIZE, MODE_INT, 0);
5695 ffs_optab->handlers[(int) int_mode].libfunc = init_one_libfunc ("ffs");
5697 /* Initialize the optabs with the names of the library functions. */
5698 init_integral_libfuncs (add_optab, "add", '3');
5699 init_floating_libfuncs (add_optab, "add", '3');
5700 init_integral_libfuncs (addv_optab, "addv", '3');
5701 init_floating_libfuncs (addv_optab, "add", '3');
5702 init_integral_libfuncs (sub_optab, "sub", '3');
5703 init_floating_libfuncs (sub_optab, "sub", '3');
5704 init_integral_libfuncs (subv_optab, "subv", '3');
5705 init_floating_libfuncs (subv_optab, "sub", '3');
5706 init_integral_libfuncs (smul_optab, "mul", '3');
5707 init_floating_libfuncs (smul_optab, "mul", '3');
5708 init_integral_libfuncs (smulv_optab, "mulv", '3');
5709 init_floating_libfuncs (smulv_optab, "mul", '3');
5710 init_integral_libfuncs (sdiv_optab, "div", '3');
5711 init_floating_libfuncs (sdiv_optab, "div", '3');
5712 init_integral_libfuncs (sdivv_optab, "divv", '3');
5713 init_integral_libfuncs (udiv_optab, "udiv", '3');
5714 init_integral_libfuncs (sdivmod_optab, "divmod", '4');
5715 init_integral_libfuncs (udivmod_optab, "udivmod", '4');
5716 init_integral_libfuncs (smod_optab, "mod", '3');
5717 init_integral_libfuncs (umod_optab, "umod", '3');
5718 init_floating_libfuncs (ftrunc_optab, "ftrunc", '2');
5719 init_integral_libfuncs (and_optab, "and", '3');
5720 init_integral_libfuncs (ior_optab, "ior", '3');
5721 init_integral_libfuncs (xor_optab, "xor", '3');
5722 init_integral_libfuncs (ashl_optab, "ashl", '3');
5723 init_integral_libfuncs (ashr_optab, "ashr", '3');
5724 init_integral_libfuncs (lshr_optab, "lshr", '3');
5725 init_integral_libfuncs (smin_optab, "min", '3');
5726 init_floating_libfuncs (smin_optab, "min", '3');
5727 init_integral_libfuncs (smax_optab, "max", '3');
5728 init_floating_libfuncs (smax_optab, "max", '3');
5729 init_integral_libfuncs (umin_optab, "umin", '3');
5730 init_integral_libfuncs (umax_optab, "umax", '3');
5731 init_integral_libfuncs (neg_optab, "neg", '2');
5732 init_floating_libfuncs (neg_optab, "neg", '2');
5733 init_integral_libfuncs (negv_optab, "negv", '2');
5734 init_floating_libfuncs (negv_optab, "neg", '2');
5735 init_integral_libfuncs (one_cmpl_optab, "one_cmpl", '2');
5736 init_integral_libfuncs (ffs_optab, "ffs", '2');
5737 init_integral_libfuncs (clz_optab, "clz", '2');
5738 init_integral_libfuncs (ctz_optab, "ctz", '2');
5739 init_integral_libfuncs (popcount_optab, "popcount", '2');
5740 init_integral_libfuncs (parity_optab, "parity", '2');
5742 /* Comparison libcalls for integers MUST come in pairs,
5744 init_integral_libfuncs (cmp_optab, "cmp", '2');
5745 init_integral_libfuncs (ucmp_optab, "ucmp", '2');
5746 init_floating_libfuncs (cmp_optab, "cmp", '2');
5748 /* EQ etc are floating point only. */
5749 init_floating_libfuncs (eq_optab, "eq", '2');
5750 init_floating_libfuncs (ne_optab, "ne", '2');
5751 init_floating_libfuncs (gt_optab, "gt", '2');
5752 init_floating_libfuncs (ge_optab, "ge", '2');
5753 init_floating_libfuncs (lt_optab, "lt", '2');
5754 init_floating_libfuncs (le_optab, "le", '2');
5755 init_floating_libfuncs (unord_optab, "unord", '2');
5757 init_floating_libfuncs (powi_optab, "powi", '2');
5760 init_interclass_conv_libfuncs (sfloat_optab, "float",
5761 MODE_INT, MODE_FLOAT);
5762 init_interclass_conv_libfuncs (sfloat_optab, "float",
5763 MODE_INT, MODE_DECIMAL_FLOAT);
5764 init_interclass_conv_libfuncs (ufloat_optab, "floatun",
5765 MODE_INT, MODE_FLOAT);
5766 init_interclass_conv_libfuncs (ufloat_optab, "floatun",
5767 MODE_INT, MODE_DECIMAL_FLOAT);
5768 init_interclass_conv_libfuncs (sfix_optab, "fix",
5769 MODE_FLOAT, MODE_INT);
5770 init_interclass_conv_libfuncs (sfix_optab, "fix",
5771 MODE_DECIMAL_FLOAT, MODE_INT);
5772 init_interclass_conv_libfuncs (ufix_optab, "fixuns",
5773 MODE_FLOAT, MODE_INT);
5774 init_interclass_conv_libfuncs (ufix_optab, "fixuns",
5775 MODE_DECIMAL_FLOAT, MODE_INT);
5776 init_interclass_conv_libfuncs (ufloat_optab, "floatuns",
5777 MODE_INT, MODE_DECIMAL_FLOAT);
5778 init_interclass_conv_libfuncs (lrint_optab, "lrint",
5779 MODE_INT, MODE_FLOAT);
5780 init_interclass_conv_libfuncs (lround_optab, "lround",
5781 MODE_INT, MODE_FLOAT);
5782 init_interclass_conv_libfuncs (lfloor_optab, "lfloor",
5783 MODE_INT, MODE_FLOAT);
5784 init_interclass_conv_libfuncs (lceil_optab, "lceil",
5785 MODE_INT, MODE_FLOAT);
5787 /* sext_optab is also used for FLOAT_EXTEND. */
5788 init_intraclass_conv_libfuncs (sext_optab, "extend", MODE_FLOAT, true);
5789 init_intraclass_conv_libfuncs (sext_optab, "extend", MODE_DECIMAL_FLOAT, true);
5790 init_interclass_conv_libfuncs (sext_optab, "extend", MODE_FLOAT, MODE_DECIMAL_FLOAT);
5791 init_interclass_conv_libfuncs (sext_optab, "extend", MODE_DECIMAL_FLOAT, MODE_FLOAT);
5792 init_intraclass_conv_libfuncs (trunc_optab, "trunc", MODE_FLOAT, false);
5793 init_intraclass_conv_libfuncs (trunc_optab, "trunc", MODE_DECIMAL_FLOAT, false);
5794 init_interclass_conv_libfuncs (trunc_optab, "trunc", MODE_FLOAT, MODE_DECIMAL_FLOAT);
5795 init_interclass_conv_libfuncs (trunc_optab, "trunc", MODE_DECIMAL_FLOAT, MODE_FLOAT);
5797 /* Explicitly initialize the bswap libfuncs since we need them to be
5798 valid for things other than word_mode. */
5799 set_optab_libfunc (bswap_optab, SImode, "__bswapsi2");
5800 set_optab_libfunc (bswap_optab, DImode, "__bswapdi2");
5802 /* Use cabs for double complex abs, since systems generally have cabs.
5803 Don't define any libcall for float complex, so that cabs will be used. */
5804 if (complex_double_type_node)
5805 abs_optab->handlers[TYPE_MODE (complex_double_type_node)].libfunc
5806 = init_one_libfunc ("cabs");
5808 abort_libfunc = init_one_libfunc ("abort");
5809 memcpy_libfunc = init_one_libfunc ("memcpy");
5810 memmove_libfunc = init_one_libfunc ("memmove");
5811 memcmp_libfunc = init_one_libfunc ("memcmp");
5812 memset_libfunc = init_one_libfunc ("memset");
5813 setbits_libfunc = init_one_libfunc ("__setbits");
5815 #ifndef DONT_USE_BUILTIN_SETJMP
5816 setjmp_libfunc = init_one_libfunc ("__builtin_setjmp");
5817 longjmp_libfunc = init_one_libfunc ("__builtin_longjmp");
5819 setjmp_libfunc = init_one_libfunc ("setjmp");
5820 longjmp_libfunc = init_one_libfunc ("longjmp");
5822 unwind_sjlj_register_libfunc = init_one_libfunc ("_Unwind_SjLj_Register");
5823 unwind_sjlj_unregister_libfunc
5824 = init_one_libfunc ("_Unwind_SjLj_Unregister");
5826 /* For function entry/exit instrumentation. */
5827 profile_function_entry_libfunc
5828 = init_one_libfunc ("__cyg_profile_func_enter");
5829 profile_function_exit_libfunc
5830 = init_one_libfunc ("__cyg_profile_func_exit");
5832 gcov_flush_libfunc = init_one_libfunc ("__gcov_flush");
5834 if (HAVE_conditional_trap)
5835 trap_rtx = gen_rtx_fmt_ee (EQ, VOIDmode, NULL_RTX, NULL_RTX);
5837 /* Allow the target to add more libcalls or rename some, etc. */
5838 targetm.init_libfuncs ();
5843 /* Print information about the current contents of the optabs on
5847 debug_optab_libfuncs (void)
5853 /* Dump the arithmetic optabs. */
5854 for (i = 0; i != (int) OTI_MAX; i++)
5855 for (j = 0; j < NUM_MACHINE_MODES; ++j)
5858 struct optab_handlers *h;
5861 h = &o->handlers[j];
5864 gcc_assert (GET_CODE (h->libfunc) == SYMBOL_REF);
5865 fprintf (stderr, "%s\t%s:\t%s\n",
5866 GET_RTX_NAME (o->code),
5868 XSTR (h->libfunc, 0));
5872 /* Dump the conversion optabs. */
5873 for (i = 0; i < (int) COI_MAX; ++i)
5874 for (j = 0; j < NUM_MACHINE_MODES; ++j)
5875 for (k = 0; k < NUM_MACHINE_MODES; ++k)
5878 struct optab_handlers *h;
5880 o = &convert_optab_table[i];
5881 h = &o->handlers[j][k];
5884 gcc_assert (GET_CODE (h->libfunc) == SYMBOL_REF);
5885 fprintf (stderr, "%s\t%s\t%s:\t%s\n",
5886 GET_RTX_NAME (o->code),
5889 XSTR (h->libfunc, 0));
5897 /* Generate insns to trap with code TCODE if OP1 and OP2 satisfy condition
5898 CODE. Return 0 on failure. */
5901 gen_cond_trap (enum rtx_code code ATTRIBUTE_UNUSED, rtx op1,
5902 rtx op2 ATTRIBUTE_UNUSED, rtx tcode ATTRIBUTE_UNUSED)
5904 enum machine_mode mode = GET_MODE (op1);
5905 enum insn_code icode;
5908 if (!HAVE_conditional_trap)
5911 if (mode == VOIDmode)
5914 icode = cmp_optab->handlers[(int) mode].insn_code;
5915 if (icode == CODE_FOR_nothing)
5919 op1 = prepare_operand (icode, op1, 0, mode, mode, 0);
5920 op2 = prepare_operand (icode, op2, 1, mode, mode, 0);
5926 emit_insn (GEN_FCN (icode) (op1, op2));
5928 PUT_CODE (trap_rtx, code);
5929 gcc_assert (HAVE_conditional_trap);
5930 insn = gen_conditional_trap (trap_rtx, tcode);
5934 insn = get_insns ();
5941 /* Return rtx code for TCODE. Use UNSIGNEDP to select signed
5942 or unsigned operation code. */
5944 static enum rtx_code
5945 get_rtx_code (enum tree_code tcode, bool unsignedp)
5957 code = unsignedp ? LTU : LT;
5960 code = unsignedp ? LEU : LE;
5963 code = unsignedp ? GTU : GT;
5966 code = unsignedp ? GEU : GE;
5969 case UNORDERED_EXPR:
6000 /* Return comparison rtx for COND. Use UNSIGNEDP to select signed or
6001 unsigned operators. Do not generate compare instruction. */
6004 vector_compare_rtx (tree cond, bool unsignedp, enum insn_code icode)
6006 enum rtx_code rcode;
6008 rtx rtx_op0, rtx_op1;
6010 /* This is unlikely. While generating VEC_COND_EXPR, auto vectorizer
6011 ensures that condition is a relational operation. */
6012 gcc_assert (COMPARISON_CLASS_P (cond));
6014 rcode = get_rtx_code (TREE_CODE (cond), unsignedp);
6015 t_op0 = TREE_OPERAND (cond, 0);
6016 t_op1 = TREE_OPERAND (cond, 1);
6018 /* Expand operands. */
6019 rtx_op0 = expand_expr (t_op0, NULL_RTX, TYPE_MODE (TREE_TYPE (t_op0)),
6021 rtx_op1 = expand_expr (t_op1, NULL_RTX, TYPE_MODE (TREE_TYPE (t_op1)),
6024 if (!insn_data[icode].operand[4].predicate (rtx_op0, GET_MODE (rtx_op0))
6025 && GET_MODE (rtx_op0) != VOIDmode)
6026 rtx_op0 = force_reg (GET_MODE (rtx_op0), rtx_op0);
6028 if (!insn_data[icode].operand[5].predicate (rtx_op1, GET_MODE (rtx_op1))
6029 && GET_MODE (rtx_op1) != VOIDmode)
6030 rtx_op1 = force_reg (GET_MODE (rtx_op1), rtx_op1);
6032 return gen_rtx_fmt_ee (rcode, VOIDmode, rtx_op0, rtx_op1);
6035 /* Return insn code for VEC_COND_EXPR EXPR. */
6037 static inline enum insn_code
6038 get_vcond_icode (tree expr, enum machine_mode mode)
6040 enum insn_code icode = CODE_FOR_nothing;
6042 if (TYPE_UNSIGNED (TREE_TYPE (expr)))
6043 icode = vcondu_gen_code[mode];
6045 icode = vcond_gen_code[mode];
6049 /* Return TRUE iff, appropriate vector insns are available
6050 for vector cond expr expr in VMODE mode. */
6053 expand_vec_cond_expr_p (tree expr, enum machine_mode vmode)
6055 if (get_vcond_icode (expr, vmode) == CODE_FOR_nothing)
6060 /* Generate insns for VEC_COND_EXPR. */
6063 expand_vec_cond_expr (tree vec_cond_expr, rtx target)
6065 enum insn_code icode;
6066 rtx comparison, rtx_op1, rtx_op2, cc_op0, cc_op1;
6067 enum machine_mode mode = TYPE_MODE (TREE_TYPE (vec_cond_expr));
6068 bool unsignedp = TYPE_UNSIGNED (TREE_TYPE (vec_cond_expr));
6070 icode = get_vcond_icode (vec_cond_expr, mode);
6071 if (icode == CODE_FOR_nothing)
6074 if (!target || !insn_data[icode].operand[0].predicate (target, mode))
6075 target = gen_reg_rtx (mode);
6077 /* Get comparison rtx. First expand both cond expr operands. */
6078 comparison = vector_compare_rtx (TREE_OPERAND (vec_cond_expr, 0),
6080 cc_op0 = XEXP (comparison, 0);
6081 cc_op1 = XEXP (comparison, 1);
6082 /* Expand both operands and force them in reg, if required. */
6083 rtx_op1 = expand_normal (TREE_OPERAND (vec_cond_expr, 1));
6084 if (!insn_data[icode].operand[1].predicate (rtx_op1, mode)
6085 && mode != VOIDmode)
6086 rtx_op1 = force_reg (mode, rtx_op1);
6088 rtx_op2 = expand_normal (TREE_OPERAND (vec_cond_expr, 2));
6089 if (!insn_data[icode].operand[2].predicate (rtx_op2, mode)
6090 && mode != VOIDmode)
6091 rtx_op2 = force_reg (mode, rtx_op2);
6093 /* Emit instruction! */
6094 emit_insn (GEN_FCN (icode) (target, rtx_op1, rtx_op2,
6095 comparison, cc_op0, cc_op1));
6101 /* This is an internal subroutine of the other compare_and_swap expanders.
6102 MEM, OLD_VAL and NEW_VAL are as you'd expect for a compare-and-swap
6103 operation. TARGET is an optional place to store the value result of
6104 the operation. ICODE is the particular instruction to expand. Return
6105 the result of the operation. */
6108 expand_val_compare_and_swap_1 (rtx mem, rtx old_val, rtx new_val,
6109 rtx target, enum insn_code icode)
6111 enum machine_mode mode = GET_MODE (mem);
6114 if (!target || !insn_data[icode].operand[0].predicate (target, mode))
6115 target = gen_reg_rtx (mode);
6117 if (GET_MODE (old_val) != VOIDmode && GET_MODE (old_val) != mode)
6118 old_val = convert_modes (mode, GET_MODE (old_val), old_val, 1);
6119 if (!insn_data[icode].operand[2].predicate (old_val, mode))
6120 old_val = force_reg (mode, old_val);
6122 if (GET_MODE (new_val) != VOIDmode && GET_MODE (new_val) != mode)
6123 new_val = convert_modes (mode, GET_MODE (new_val), new_val, 1);
6124 if (!insn_data[icode].operand[3].predicate (new_val, mode))
6125 new_val = force_reg (mode, new_val);
6127 insn = GEN_FCN (icode) (target, mem, old_val, new_val);
6128 if (insn == NULL_RTX)
6135 /* Expand a compare-and-swap operation and return its value. */
6138 expand_val_compare_and_swap (rtx mem, rtx old_val, rtx new_val, rtx target)
6140 enum machine_mode mode = GET_MODE (mem);
6141 enum insn_code icode = sync_compare_and_swap[mode];
6143 if (icode == CODE_FOR_nothing)
6146 return expand_val_compare_and_swap_1 (mem, old_val, new_val, target, icode);
6149 /* Expand a compare-and-swap operation and store true into the result if
6150 the operation was successful and false otherwise. Return the result.
6151 Unlike other routines, TARGET is not optional. */
6154 expand_bool_compare_and_swap (rtx mem, rtx old_val, rtx new_val, rtx target)
6156 enum machine_mode mode = GET_MODE (mem);
6157 enum insn_code icode;
6158 rtx subtarget, label0, label1;
6160 /* If the target supports a compare-and-swap pattern that simultaneously
6161 sets some flag for success, then use it. Otherwise use the regular
6162 compare-and-swap and follow that immediately with a compare insn. */
6163 icode = sync_compare_and_swap_cc[mode];
6167 subtarget = expand_val_compare_and_swap_1 (mem, old_val, new_val,
6169 if (subtarget != NULL_RTX)
6173 case CODE_FOR_nothing:
6174 icode = sync_compare_and_swap[mode];
6175 if (icode == CODE_FOR_nothing)
6178 /* Ensure that if old_val == mem, that we're not comparing
6179 against an old value. */
6180 if (MEM_P (old_val))
6181 old_val = force_reg (mode, old_val);
6183 subtarget = expand_val_compare_and_swap_1 (mem, old_val, new_val,
6185 if (subtarget == NULL_RTX)
6188 emit_cmp_insn (subtarget, old_val, EQ, const0_rtx, mode, true);
6191 /* If the target has a sane STORE_FLAG_VALUE, then go ahead and use a
6192 setcc instruction from the beginning. We don't work too hard here,
6193 but it's nice to not be stupid about initial code gen either. */
6194 if (STORE_FLAG_VALUE == 1)
6196 icode = setcc_gen_code[EQ];
6197 if (icode != CODE_FOR_nothing)
6199 enum machine_mode cmode = insn_data[icode].operand[0].mode;
6203 if (!insn_data[icode].operand[0].predicate (target, cmode))
6204 subtarget = gen_reg_rtx (cmode);
6206 insn = GEN_FCN (icode) (subtarget);
6210 if (GET_MODE (target) != GET_MODE (subtarget))
6212 convert_move (target, subtarget, 1);
6220 /* Without an appropriate setcc instruction, use a set of branches to
6221 get 1 and 0 stored into target. Presumably if the target has a
6222 STORE_FLAG_VALUE that isn't 1, then this will get cleaned up by ifcvt. */
6224 label0 = gen_label_rtx ();
6225 label1 = gen_label_rtx ();
6227 emit_jump_insn (bcc_gen_fctn[EQ] (label0));
6228 emit_move_insn (target, const0_rtx);
6229 emit_jump_insn (gen_jump (label1));
6231 emit_label (label0);
6232 emit_move_insn (target, const1_rtx);
6233 emit_label (label1);
6238 /* This is a helper function for the other atomic operations. This function
6239 emits a loop that contains SEQ that iterates until a compare-and-swap
6240 operation at the end succeeds. MEM is the memory to be modified. SEQ is
6241 a set of instructions that takes a value from OLD_REG as an input and
6242 produces a value in NEW_REG as an output. Before SEQ, OLD_REG will be
6243 set to the current contents of MEM. After SEQ, a compare-and-swap will
6244 attempt to update MEM with NEW_REG. The function returns true when the
6245 loop was generated successfully. */
6248 expand_compare_and_swap_loop (rtx mem, rtx old_reg, rtx new_reg, rtx seq)
6250 enum machine_mode mode = GET_MODE (mem);
6251 enum insn_code icode;
6252 rtx label, cmp_reg, subtarget;
6254 /* The loop we want to generate looks like
6260 cmp_reg = compare-and-swap(mem, old_reg, new_reg)
6261 if (cmp_reg != old_reg)
6264 Note that we only do the plain load from memory once. Subsequent
6265 iterations use the value loaded by the compare-and-swap pattern. */
6267 label = gen_label_rtx ();
6268 cmp_reg = gen_reg_rtx (mode);
6270 emit_move_insn (cmp_reg, mem);
6272 emit_move_insn (old_reg, cmp_reg);
6276 /* If the target supports a compare-and-swap pattern that simultaneously
6277 sets some flag for success, then use it. Otherwise use the regular
6278 compare-and-swap and follow that immediately with a compare insn. */
6279 icode = sync_compare_and_swap_cc[mode];
6283 subtarget = expand_val_compare_and_swap_1 (mem, old_reg, new_reg,
6285 if (subtarget != NULL_RTX)
6287 gcc_assert (subtarget == cmp_reg);
6292 case CODE_FOR_nothing:
6293 icode = sync_compare_and_swap[mode];
6294 if (icode == CODE_FOR_nothing)
6297 subtarget = expand_val_compare_and_swap_1 (mem, old_reg, new_reg,
6299 if (subtarget == NULL_RTX)
6301 if (subtarget != cmp_reg)
6302 emit_move_insn (cmp_reg, subtarget);
6304 emit_cmp_insn (cmp_reg, old_reg, EQ, const0_rtx, mode, true);
6307 /* ??? Mark this jump predicted not taken? */
6308 emit_jump_insn (bcc_gen_fctn[NE] (label));
6313 /* This function generates the atomic operation MEM CODE= VAL. In this
6314 case, we do not care about any resulting value. Returns NULL if we
6315 cannot generate the operation. */
6318 expand_sync_operation (rtx mem, rtx val, enum rtx_code code)
6320 enum machine_mode mode = GET_MODE (mem);
6321 enum insn_code icode;
6324 /* Look to see if the target supports the operation directly. */
6328 icode = sync_add_optab[mode];
6331 icode = sync_ior_optab[mode];
6334 icode = sync_xor_optab[mode];
6337 icode = sync_and_optab[mode];
6340 icode = sync_nand_optab[mode];
6344 icode = sync_sub_optab[mode];
6345 if (icode == CODE_FOR_nothing)
6347 icode = sync_add_optab[mode];
6348 if (icode != CODE_FOR_nothing)
6350 val = expand_simple_unop (mode, NEG, val, NULL_RTX, 1);
6360 /* Generate the direct operation, if present. */
6361 if (icode != CODE_FOR_nothing)
6363 if (GET_MODE (val) != VOIDmode && GET_MODE (val) != mode)
6364 val = convert_modes (mode, GET_MODE (val), val, 1);
6365 if (!insn_data[icode].operand[1].predicate (val, mode))
6366 val = force_reg (mode, val);
6368 insn = GEN_FCN (icode) (mem, val);
6376 /* Failing that, generate a compare-and-swap loop in which we perform the
6377 operation with normal arithmetic instructions. */
6378 if (sync_compare_and_swap[mode] != CODE_FOR_nothing)
6380 rtx t0 = gen_reg_rtx (mode), t1;
6387 t1 = expand_simple_unop (mode, NOT, t1, NULL_RTX, true);
6390 t1 = expand_simple_binop (mode, code, t1, val, NULL_RTX,
6391 true, OPTAB_LIB_WIDEN);
6393 insn = get_insns ();
6396 if (t1 != NULL && expand_compare_and_swap_loop (mem, t0, t1, insn))
6403 /* This function generates the atomic operation MEM CODE= VAL. In this
6404 case, we do care about the resulting value: if AFTER is true then
6405 return the value MEM holds after the operation, if AFTER is false
6406 then return the value MEM holds before the operation. TARGET is an
6407 optional place for the result value to be stored. */
6410 expand_sync_fetch_operation (rtx mem, rtx val, enum rtx_code code,
6411 bool after, rtx target)
6413 enum machine_mode mode = GET_MODE (mem);
6414 enum insn_code old_code, new_code, icode;
6418 /* Look to see if the target supports the operation directly. */
6422 old_code = sync_old_add_optab[mode];
6423 new_code = sync_new_add_optab[mode];
6426 old_code = sync_old_ior_optab[mode];
6427 new_code = sync_new_ior_optab[mode];
6430 old_code = sync_old_xor_optab[mode];
6431 new_code = sync_new_xor_optab[mode];
6434 old_code = sync_old_and_optab[mode];
6435 new_code = sync_new_and_optab[mode];
6438 old_code = sync_old_nand_optab[mode];
6439 new_code = sync_new_nand_optab[mode];
6443 old_code = sync_old_sub_optab[mode];
6444 new_code = sync_new_sub_optab[mode];
6445 if (old_code == CODE_FOR_nothing && new_code == CODE_FOR_nothing)
6447 old_code = sync_old_add_optab[mode];
6448 new_code = sync_new_add_optab[mode];
6449 if (old_code != CODE_FOR_nothing || new_code != CODE_FOR_nothing)
6451 val = expand_simple_unop (mode, NEG, val, NULL_RTX, 1);
6461 /* If the target does supports the proper new/old operation, great. But
6462 if we only support the opposite old/new operation, check to see if we
6463 can compensate. In the case in which the old value is supported, then
6464 we can always perform the operation again with normal arithmetic. In
6465 the case in which the new value is supported, then we can only handle
6466 this in the case the operation is reversible. */
6471 if (icode == CODE_FOR_nothing)
6474 if (icode != CODE_FOR_nothing)
6481 if (icode == CODE_FOR_nothing
6482 && (code == PLUS || code == MINUS || code == XOR))
6485 if (icode != CODE_FOR_nothing)
6490 /* If we found something supported, great. */
6491 if (icode != CODE_FOR_nothing)
6493 if (!target || !insn_data[icode].operand[0].predicate (target, mode))
6494 target = gen_reg_rtx (mode);
6496 if (GET_MODE (val) != VOIDmode && GET_MODE (val) != mode)
6497 val = convert_modes (mode, GET_MODE (val), val, 1);
6498 if (!insn_data[icode].operand[2].predicate (val, mode))
6499 val = force_reg (mode, val);
6501 insn = GEN_FCN (icode) (target, mem, val);
6506 /* If we need to compensate for using an operation with the
6507 wrong return value, do so now. */
6514 else if (code == MINUS)
6519 target = expand_simple_unop (mode, NOT, target, NULL_RTX, true);
6520 target = expand_simple_binop (mode, code, target, val, NULL_RTX,
6521 true, OPTAB_LIB_WIDEN);
6528 /* Failing that, generate a compare-and-swap loop in which we perform the
6529 operation with normal arithmetic instructions. */
6530 if (sync_compare_and_swap[mode] != CODE_FOR_nothing)
6532 rtx t0 = gen_reg_rtx (mode), t1;
6534 if (!target || !register_operand (target, mode))
6535 target = gen_reg_rtx (mode);
6540 emit_move_insn (target, t0);
6544 t1 = expand_simple_unop (mode, NOT, t1, NULL_RTX, true);
6547 t1 = expand_simple_binop (mode, code, t1, val, NULL_RTX,
6548 true, OPTAB_LIB_WIDEN);
6550 emit_move_insn (target, t1);
6552 insn = get_insns ();
6555 if (t1 != NULL && expand_compare_and_swap_loop (mem, t0, t1, insn))
6562 /* This function expands a test-and-set operation. Ideally we atomically
6563 store VAL in MEM and return the previous value in MEM. Some targets
6564 may not support this operation and only support VAL with the constant 1;
6565 in this case while the return value will be 0/1, but the exact value
6566 stored in MEM is target defined. TARGET is an option place to stick
6567 the return value. */
6570 expand_sync_lock_test_and_set (rtx mem, rtx val, rtx target)
6572 enum machine_mode mode = GET_MODE (mem);
6573 enum insn_code icode;
6576 /* If the target supports the test-and-set directly, great. */
6577 icode = sync_lock_test_and_set[mode];
6578 if (icode != CODE_FOR_nothing)
6580 if (!target || !insn_data[icode].operand[0].predicate (target, mode))
6581 target = gen_reg_rtx (mode);
6583 if (GET_MODE (val) != VOIDmode && GET_MODE (val) != mode)
6584 val = convert_modes (mode, GET_MODE (val), val, 1);
6585 if (!insn_data[icode].operand[2].predicate (val, mode))
6586 val = force_reg (mode, val);
6588 insn = GEN_FCN (icode) (target, mem, val);
6596 /* Otherwise, use a compare-and-swap loop for the exchange. */
6597 if (sync_compare_and_swap[mode] != CODE_FOR_nothing)
6599 if (!target || !register_operand (target, mode))
6600 target = gen_reg_rtx (mode);
6601 if (GET_MODE (val) != VOIDmode && GET_MODE (val) != mode)
6602 val = convert_modes (mode, GET_MODE (val), val, 1);
6603 if (expand_compare_and_swap_loop (mem, target, val, NULL_RTX))
6610 #include "gt-optabs.h"