1 /* Expand the basic unary and binary arithmetic operations, for GNU compiler.
2 Copyright (C) 1987, 1988, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
3 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007
4 Free Software Foundation, Inc.
6 This file is part of GCC.
8 GCC is free software; you can redistribute it and/or modify it under
9 the terms of the GNU General Public License as published by the Free
10 Software Foundation; either version 2, or (at your option) any later
13 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
14 WARRANTY; without even the implied warranty of MERCHANTABILITY or
15 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING. If not, write to the Free
20 Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
26 #include "coretypes.h"
30 /* Include insn-config.h before expr.h so that HAVE_conditional_move
31 is properly defined. */
32 #include "insn-config.h"
46 #include "basic-block.h"
49 /* Each optab contains info on how this target machine
50 can perform a particular operation
51 for all sizes and kinds of operands.
53 The operation to be performed is often specified
54 by passing one of these optabs as an argument.
56 See expr.h for documentation of these optabs. */
58 optab optab_table[OTI_MAX];
60 rtx libfunc_table[LTI_MAX];
62 /* Tables of patterns for converting one mode to another. */
63 convert_optab convert_optab_table[COI_MAX];
65 /* Contains the optab used for each rtx code. */
66 optab code_to_optab[NUM_RTX_CODE + 1];
68 /* Indexed by the rtx-code for a conditional (eg. EQ, LT,...)
69 gives the gen_function to make a branch to test that condition. */
71 rtxfun bcc_gen_fctn[NUM_RTX_CODE];
73 /* Indexed by the rtx-code for a conditional (eg. EQ, LT,...)
74 gives the insn code to make a store-condition insn
75 to test that condition. */
77 enum insn_code setcc_gen_code[NUM_RTX_CODE];
79 #ifdef HAVE_conditional_move
80 /* Indexed by the machine mode, gives the insn code to make a conditional
81 move insn. This is not indexed by the rtx-code like bcc_gen_fctn and
82 setcc_gen_code to cut down on the number of named patterns. Consider a day
83 when a lot more rtx codes are conditional (eg: for the ARM). */
85 enum insn_code movcc_gen_code[NUM_MACHINE_MODES];
88 /* Indexed by the machine mode, gives the insn code for vector conditional
91 enum insn_code vcond_gen_code[NUM_MACHINE_MODES];
92 enum insn_code vcondu_gen_code[NUM_MACHINE_MODES];
94 /* The insn generating function can not take an rtx_code argument.
95 TRAP_RTX is used as an rtx argument. Its code is replaced with
96 the code to be used in the trap insn and all other fields are ignored. */
97 static GTY(()) rtx trap_rtx;
99 static int add_equal_note (rtx, rtx, enum rtx_code, rtx, rtx);
100 static rtx widen_operand (rtx, enum machine_mode, enum machine_mode, int,
102 static void prepare_cmp_insn (rtx *, rtx *, enum rtx_code *, rtx,
103 enum machine_mode *, int *,
104 enum can_compare_purpose);
105 static enum insn_code can_fix_p (enum machine_mode, enum machine_mode, int,
107 static enum insn_code can_float_p (enum machine_mode, enum machine_mode, int);
108 static optab new_optab (void);
109 static convert_optab new_convert_optab (void);
110 static inline optab init_optab (enum rtx_code);
111 static inline optab init_optabv (enum rtx_code);
112 static inline convert_optab init_convert_optab (enum rtx_code);
113 static void init_libfuncs (optab, int, int, const char *, int);
114 static void init_integral_libfuncs (optab, const char *, int);
115 static void init_floating_libfuncs (optab, const char *, int);
116 static void init_interclass_conv_libfuncs (convert_optab, const char *,
117 enum mode_class, enum mode_class);
118 static void init_intraclass_conv_libfuncs (convert_optab, const char *,
119 enum mode_class, bool);
120 static void emit_cmp_and_jump_insn_1 (rtx, rtx, enum machine_mode,
121 enum rtx_code, int, rtx);
122 static void prepare_float_lib_cmp (rtx *, rtx *, enum rtx_code *,
123 enum machine_mode *, int *);
124 static rtx widen_clz (enum machine_mode, rtx, rtx);
125 static rtx expand_parity (enum machine_mode, rtx, rtx);
126 static enum rtx_code get_rtx_code (enum tree_code, bool);
127 static rtx vector_compare_rtx (tree, bool, enum insn_code);
129 /* Current libcall id. It doesn't matter what these are, as long
130 as they are unique to each libcall that is emitted. */
131 static HOST_WIDE_INT libcall_id = 0;
133 #ifndef HAVE_conditional_trap
134 #define HAVE_conditional_trap 0
135 #define gen_conditional_trap(a,b) (gcc_unreachable (), NULL_RTX)
138 /* Prefixes for the current version of decimal floating point (BID vs. DPD) */
139 #if ENABLE_DECIMAL_BID_FORMAT
140 #define DECIMAL_PREFIX "bid_"
142 #define DECIMAL_PREFIX "dpd_"
146 /* Add a REG_EQUAL note to the last insn in INSNS. TARGET is being set to
147 the result of operation CODE applied to OP0 (and OP1 if it is a binary
150 If the last insn does not set TARGET, don't do anything, but return 1.
152 If a previous insn sets TARGET and TARGET is one of OP0 or OP1,
153 don't add the REG_EQUAL note but return 0. Our caller can then try
154 again, ensuring that TARGET is not one of the operands. */
157 add_equal_note (rtx insns, rtx target, enum rtx_code code, rtx op0, rtx op1)
159 rtx last_insn, insn, set;
162 gcc_assert (insns && INSN_P (insns) && NEXT_INSN (insns));
164 if (GET_RTX_CLASS (code) != RTX_COMM_ARITH
165 && GET_RTX_CLASS (code) != RTX_BIN_ARITH
166 && GET_RTX_CLASS (code) != RTX_COMM_COMPARE
167 && GET_RTX_CLASS (code) != RTX_COMPARE
168 && GET_RTX_CLASS (code) != RTX_UNARY)
171 if (GET_CODE (target) == ZERO_EXTRACT)
174 for (last_insn = insns;
175 NEXT_INSN (last_insn) != NULL_RTX;
176 last_insn = NEXT_INSN (last_insn))
179 set = single_set (last_insn);
183 if (! rtx_equal_p (SET_DEST (set), target)
184 /* For a STRICT_LOW_PART, the REG_NOTE applies to what is inside it. */
185 && (GET_CODE (SET_DEST (set)) != STRICT_LOW_PART
186 || ! rtx_equal_p (XEXP (SET_DEST (set), 0), target)))
189 /* If TARGET is in OP0 or OP1, check if anything in SEQ sets TARGET
190 besides the last insn. */
191 if (reg_overlap_mentioned_p (target, op0)
192 || (op1 && reg_overlap_mentioned_p (target, op1)))
194 insn = PREV_INSN (last_insn);
195 while (insn != NULL_RTX)
197 if (reg_set_p (target, insn))
200 insn = PREV_INSN (insn);
204 if (GET_RTX_CLASS (code) == RTX_UNARY)
205 note = gen_rtx_fmt_e (code, GET_MODE (target), copy_rtx (op0));
207 note = gen_rtx_fmt_ee (code, GET_MODE (target), copy_rtx (op0), copy_rtx (op1));
209 set_unique_reg_note (last_insn, REG_EQUAL, note);
214 /* Widen OP to MODE and return the rtx for the widened operand. UNSIGNEDP
215 says whether OP is signed or unsigned. NO_EXTEND is nonzero if we need
216 not actually do a sign-extend or zero-extend, but can leave the
217 higher-order bits of the result rtx undefined, for example, in the case
218 of logical operations, but not right shifts. */
221 widen_operand (rtx op, enum machine_mode mode, enum machine_mode oldmode,
222 int unsignedp, int no_extend)
226 /* If we don't have to extend and this is a constant, return it. */
227 if (no_extend && GET_MODE (op) == VOIDmode)
230 /* If we must extend do so. If OP is a SUBREG for a promoted object, also
231 extend since it will be more efficient to do so unless the signedness of
232 a promoted object differs from our extension. */
234 || (GET_CODE (op) == SUBREG && SUBREG_PROMOTED_VAR_P (op)
235 && SUBREG_PROMOTED_UNSIGNED_P (op) == unsignedp))
236 return convert_modes (mode, oldmode, op, unsignedp);
238 /* If MODE is no wider than a single word, we return a paradoxical
240 if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
241 return gen_rtx_SUBREG (mode, force_reg (GET_MODE (op), op), 0);
243 /* Otherwise, get an object of MODE, clobber it, and set the low-order
246 result = gen_reg_rtx (mode);
247 emit_insn (gen_rtx_CLOBBER (VOIDmode, result));
248 emit_move_insn (gen_lowpart (GET_MODE (op), result), op);
252 /* Return the optab used for computing the operation given by
253 the tree code, CODE. This function is not always usable (for
254 example, it cannot give complete results for multiplication
255 or division) but probably ought to be relied on more widely
256 throughout the expander. */
258 optab_for_tree_code (enum tree_code code, tree type)
270 return one_cmpl_optab;
279 return TYPE_UNSIGNED (type) ? umod_optab : smod_optab;
287 return TYPE_UNSIGNED (type) ? udiv_optab : sdiv_optab;
293 return TYPE_UNSIGNED (type) ? lshr_optab : ashr_optab;
302 return TYPE_UNSIGNED (type) ? umax_optab : smax_optab;
305 return TYPE_UNSIGNED (type) ? umin_optab : smin_optab;
307 case REALIGN_LOAD_EXPR:
308 return vec_realign_load_optab;
311 return TYPE_UNSIGNED (type) ? usum_widen_optab : ssum_widen_optab;
314 return TYPE_UNSIGNED (type) ? udot_prod_optab : sdot_prod_optab;
317 return TYPE_UNSIGNED (type) ? reduc_umax_optab : reduc_smax_optab;
320 return TYPE_UNSIGNED (type) ? reduc_umin_optab : reduc_smin_optab;
322 case REDUC_PLUS_EXPR:
323 return TYPE_UNSIGNED (type) ? reduc_uplus_optab : reduc_splus_optab;
325 case VEC_LSHIFT_EXPR:
326 return vec_shl_optab;
328 case VEC_RSHIFT_EXPR:
329 return vec_shr_optab;
331 case VEC_WIDEN_MULT_HI_EXPR:
332 return TYPE_UNSIGNED (type) ?
333 vec_widen_umult_hi_optab : vec_widen_smult_hi_optab;
335 case VEC_WIDEN_MULT_LO_EXPR:
336 return TYPE_UNSIGNED (type) ?
337 vec_widen_umult_lo_optab : vec_widen_smult_lo_optab;
339 case VEC_UNPACK_HI_EXPR:
340 return TYPE_UNSIGNED (type) ?
341 vec_unpacku_hi_optab : vec_unpacks_hi_optab;
343 case VEC_UNPACK_LO_EXPR:
344 return TYPE_UNSIGNED (type) ?
345 vec_unpacku_lo_optab : vec_unpacks_lo_optab;
347 case VEC_UNPACK_FLOAT_HI_EXPR:
348 /* The signedness is determined from input operand. */
349 return TYPE_UNSIGNED (type) ?
350 vec_unpacku_float_hi_optab : vec_unpacks_float_hi_optab;
352 case VEC_UNPACK_FLOAT_LO_EXPR:
353 /* The signedness is determined from input operand. */
354 return TYPE_UNSIGNED (type) ?
355 vec_unpacku_float_lo_optab : vec_unpacks_float_lo_optab;
357 case VEC_PACK_TRUNC_EXPR:
358 return vec_pack_trunc_optab;
360 case VEC_PACK_SAT_EXPR:
361 return TYPE_UNSIGNED (type) ? vec_pack_usat_optab : vec_pack_ssat_optab;
363 case VEC_PACK_FIX_TRUNC_EXPR:
364 /* The signedness is determined from output operand. */
365 return TYPE_UNSIGNED (type) ?
366 vec_pack_ufix_trunc_optab : vec_pack_sfix_trunc_optab;
372 trapv = INTEGRAL_TYPE_P (type) && TYPE_OVERFLOW_TRAPS (type);
375 case POINTER_PLUS_EXPR:
377 return trapv ? addv_optab : add_optab;
380 return trapv ? subv_optab : sub_optab;
383 return trapv ? smulv_optab : smul_optab;
386 return trapv ? negv_optab : neg_optab;
389 return trapv ? absv_optab : abs_optab;
391 case VEC_EXTRACT_EVEN_EXPR:
392 return vec_extract_even_optab;
394 case VEC_EXTRACT_ODD_EXPR:
395 return vec_extract_odd_optab;
397 case VEC_INTERLEAVE_HIGH_EXPR:
398 return vec_interleave_high_optab;
400 case VEC_INTERLEAVE_LOW_EXPR:
401 return vec_interleave_low_optab;
409 /* Expand vector widening operations.
411 There are two different classes of operations handled here:
412 1) Operations whose result is wider than all the arguments to the operation.
413 Examples: VEC_UNPACK_HI/LO_EXPR, VEC_WIDEN_MULT_HI/LO_EXPR
414 In this case OP0 and optionally OP1 would be initialized,
415 but WIDE_OP wouldn't (not relevant for this case).
416 2) Operations whose result is of the same size as the last argument to the
417 operation, but wider than all the other arguments to the operation.
418 Examples: WIDEN_SUM_EXPR, VEC_DOT_PROD_EXPR.
419 In the case WIDE_OP, OP0 and optionally OP1 would be initialized.
421 E.g, when called to expand the following operations, this is how
422 the arguments will be initialized:
424 widening-sum 2 oprnd0 - oprnd1
425 widening-dot-product 3 oprnd0 oprnd1 oprnd2
426 widening-mult 2 oprnd0 oprnd1 -
427 type-promotion (vec-unpack) 1 oprnd0 - - */
430 expand_widen_pattern_expr (tree exp, rtx op0, rtx op1, rtx wide_op, rtx target,
433 tree oprnd0, oprnd1, oprnd2;
434 enum machine_mode wmode = 0, tmode0, tmode1 = 0;
435 optab widen_pattern_optab;
437 enum machine_mode xmode0, xmode1 = 0, wxmode = 0;
440 rtx xop0, xop1, wxop;
441 int nops = TREE_OPERAND_LENGTH (exp);
443 oprnd0 = TREE_OPERAND (exp, 0);
444 tmode0 = TYPE_MODE (TREE_TYPE (oprnd0));
445 widen_pattern_optab =
446 optab_for_tree_code (TREE_CODE (exp), TREE_TYPE (oprnd0));
447 icode = (int) widen_pattern_optab->handlers[(int) tmode0].insn_code;
448 gcc_assert (icode != CODE_FOR_nothing);
449 xmode0 = insn_data[icode].operand[1].mode;
453 oprnd1 = TREE_OPERAND (exp, 1);
454 tmode1 = TYPE_MODE (TREE_TYPE (oprnd1));
455 xmode1 = insn_data[icode].operand[2].mode;
458 /* The last operand is of a wider mode than the rest of the operands. */
466 gcc_assert (tmode1 == tmode0);
468 oprnd2 = TREE_OPERAND (exp, 2);
469 wmode = TYPE_MODE (TREE_TYPE (oprnd2));
470 wxmode = insn_data[icode].operand[3].mode;
474 wmode = wxmode = insn_data[icode].operand[0].mode;
477 || ! (*insn_data[icode].operand[0].predicate) (target, wmode))
478 temp = gen_reg_rtx (wmode);
486 /* In case the insn wants input operands in modes different from
487 those of the actual operands, convert the operands. It would
488 seem that we don't need to convert CONST_INTs, but we do, so
489 that they're properly zero-extended, sign-extended or truncated
492 if (GET_MODE (op0) != xmode0 && xmode0 != VOIDmode)
493 xop0 = convert_modes (xmode0,
494 GET_MODE (op0) != VOIDmode
500 if (GET_MODE (op1) != xmode1 && xmode1 != VOIDmode)
501 xop1 = convert_modes (xmode1,
502 GET_MODE (op1) != VOIDmode
508 if (GET_MODE (wide_op) != wxmode && wxmode != VOIDmode)
509 wxop = convert_modes (wxmode,
510 GET_MODE (wide_op) != VOIDmode
515 /* Now, if insn's predicates don't allow our operands, put them into
518 if (! (*insn_data[icode].operand[1].predicate) (xop0, xmode0)
519 && xmode0 != VOIDmode)
520 xop0 = copy_to_mode_reg (xmode0, xop0);
524 if (! (*insn_data[icode].operand[2].predicate) (xop1, xmode1)
525 && xmode1 != VOIDmode)
526 xop1 = copy_to_mode_reg (xmode1, xop1);
530 if (! (*insn_data[icode].operand[3].predicate) (wxop, wxmode)
531 && wxmode != VOIDmode)
532 wxop = copy_to_mode_reg (wxmode, wxop);
534 pat = GEN_FCN (icode) (temp, xop0, xop1, wxop);
537 pat = GEN_FCN (icode) (temp, xop0, xop1);
543 if (! (*insn_data[icode].operand[2].predicate) (wxop, wxmode)
544 && wxmode != VOIDmode)
545 wxop = copy_to_mode_reg (wxmode, wxop);
547 pat = GEN_FCN (icode) (temp, xop0, wxop);
550 pat = GEN_FCN (icode) (temp, xop0);
557 /* Generate code to perform an operation specified by TERNARY_OPTAB
558 on operands OP0, OP1 and OP2, with result having machine-mode MODE.
560 UNSIGNEDP is for the case where we have to widen the operands
561 to perform the operation. It says to use zero-extension.
563 If TARGET is nonzero, the value
564 is generated there, if it is convenient to do so.
565 In all cases an rtx is returned for the locus of the value;
566 this may or may not be TARGET. */
569 expand_ternary_op (enum machine_mode mode, optab ternary_optab, rtx op0,
570 rtx op1, rtx op2, rtx target, int unsignedp)
572 int icode = (int) ternary_optab->handlers[(int) mode].insn_code;
573 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
574 enum machine_mode mode1 = insn_data[icode].operand[2].mode;
575 enum machine_mode mode2 = insn_data[icode].operand[3].mode;
578 rtx xop0 = op0, xop1 = op1, xop2 = op2;
580 gcc_assert (ternary_optab->handlers[(int) mode].insn_code
581 != CODE_FOR_nothing);
583 if (!target || !insn_data[icode].operand[0].predicate (target, mode))
584 temp = gen_reg_rtx (mode);
588 /* In case the insn wants input operands in modes different from
589 those of the actual operands, convert the operands. It would
590 seem that we don't need to convert CONST_INTs, but we do, so
591 that they're properly zero-extended, sign-extended or truncated
594 if (GET_MODE (op0) != mode0 && mode0 != VOIDmode)
595 xop0 = convert_modes (mode0,
596 GET_MODE (op0) != VOIDmode
601 if (GET_MODE (op1) != mode1 && mode1 != VOIDmode)
602 xop1 = convert_modes (mode1,
603 GET_MODE (op1) != VOIDmode
608 if (GET_MODE (op2) != mode2 && mode2 != VOIDmode)
609 xop2 = convert_modes (mode2,
610 GET_MODE (op2) != VOIDmode
615 /* Now, if insn's predicates don't allow our operands, put them into
618 if (!insn_data[icode].operand[1].predicate (xop0, mode0)
619 && mode0 != VOIDmode)
620 xop0 = copy_to_mode_reg (mode0, xop0);
622 if (!insn_data[icode].operand[2].predicate (xop1, mode1)
623 && mode1 != VOIDmode)
624 xop1 = copy_to_mode_reg (mode1, xop1);
626 if (!insn_data[icode].operand[3].predicate (xop2, mode2)
627 && mode2 != VOIDmode)
628 xop2 = copy_to_mode_reg (mode2, xop2);
630 pat = GEN_FCN (icode) (temp, xop0, xop1, xop2);
637 /* Like expand_binop, but return a constant rtx if the result can be
638 calculated at compile time. The arguments and return value are
639 otherwise the same as for expand_binop. */
642 simplify_expand_binop (enum machine_mode mode, optab binoptab,
643 rtx op0, rtx op1, rtx target, int unsignedp,
644 enum optab_methods methods)
646 if (CONSTANT_P (op0) && CONSTANT_P (op1))
648 rtx x = simplify_binary_operation (binoptab->code, mode, op0, op1);
654 return expand_binop (mode, binoptab, op0, op1, target, unsignedp, methods);
657 /* Like simplify_expand_binop, but always put the result in TARGET.
658 Return true if the expansion succeeded. */
661 force_expand_binop (enum machine_mode mode, optab binoptab,
662 rtx op0, rtx op1, rtx target, int unsignedp,
663 enum optab_methods methods)
665 rtx x = simplify_expand_binop (mode, binoptab, op0, op1,
666 target, unsignedp, methods);
670 emit_move_insn (target, x);
674 /* Generate insns for VEC_LSHIFT_EXPR, VEC_RSHIFT_EXPR. */
677 expand_vec_shift_expr (tree vec_shift_expr, rtx target)
679 enum insn_code icode;
680 rtx rtx_op1, rtx_op2;
681 enum machine_mode mode1;
682 enum machine_mode mode2;
683 enum machine_mode mode = TYPE_MODE (TREE_TYPE (vec_shift_expr));
684 tree vec_oprnd = TREE_OPERAND (vec_shift_expr, 0);
685 tree shift_oprnd = TREE_OPERAND (vec_shift_expr, 1);
689 switch (TREE_CODE (vec_shift_expr))
691 case VEC_RSHIFT_EXPR:
692 shift_optab = vec_shr_optab;
694 case VEC_LSHIFT_EXPR:
695 shift_optab = vec_shl_optab;
701 icode = (int) shift_optab->handlers[(int) mode].insn_code;
702 gcc_assert (icode != CODE_FOR_nothing);
704 mode1 = insn_data[icode].operand[1].mode;
705 mode2 = insn_data[icode].operand[2].mode;
707 rtx_op1 = expand_normal (vec_oprnd);
708 if (!(*insn_data[icode].operand[1].predicate) (rtx_op1, mode1)
709 && mode1 != VOIDmode)
710 rtx_op1 = force_reg (mode1, rtx_op1);
712 rtx_op2 = expand_normal (shift_oprnd);
713 if (!(*insn_data[icode].operand[2].predicate) (rtx_op2, mode2)
714 && mode2 != VOIDmode)
715 rtx_op2 = force_reg (mode2, rtx_op2);
718 || ! (*insn_data[icode].operand[0].predicate) (target, mode))
719 target = gen_reg_rtx (mode);
721 /* Emit instruction */
722 pat = GEN_FCN (icode) (target, rtx_op1, rtx_op2);
729 /* This subroutine of expand_doubleword_shift handles the cases in which
730 the effective shift value is >= BITS_PER_WORD. The arguments and return
731 value are the same as for the parent routine, except that SUPERWORD_OP1
732 is the shift count to use when shifting OUTOF_INPUT into INTO_TARGET.
733 INTO_TARGET may be null if the caller has decided to calculate it. */
736 expand_superword_shift (optab binoptab, rtx outof_input, rtx superword_op1,
737 rtx outof_target, rtx into_target,
738 int unsignedp, enum optab_methods methods)
740 if (into_target != 0)
741 if (!force_expand_binop (word_mode, binoptab, outof_input, superword_op1,
742 into_target, unsignedp, methods))
745 if (outof_target != 0)
747 /* For a signed right shift, we must fill OUTOF_TARGET with copies
748 of the sign bit, otherwise we must fill it with zeros. */
749 if (binoptab != ashr_optab)
750 emit_move_insn (outof_target, CONST0_RTX (word_mode));
752 if (!force_expand_binop (word_mode, binoptab,
753 outof_input, GEN_INT (BITS_PER_WORD - 1),
754 outof_target, unsignedp, methods))
760 /* This subroutine of expand_doubleword_shift handles the cases in which
761 the effective shift value is < BITS_PER_WORD. The arguments and return
762 value are the same as for the parent routine. */
765 expand_subword_shift (enum machine_mode op1_mode, optab binoptab,
766 rtx outof_input, rtx into_input, rtx op1,
767 rtx outof_target, rtx into_target,
768 int unsignedp, enum optab_methods methods,
769 unsigned HOST_WIDE_INT shift_mask)
771 optab reverse_unsigned_shift, unsigned_shift;
774 reverse_unsigned_shift = (binoptab == ashl_optab ? lshr_optab : ashl_optab);
775 unsigned_shift = (binoptab == ashl_optab ? ashl_optab : lshr_optab);
777 /* The low OP1 bits of INTO_TARGET come from the high bits of OUTOF_INPUT.
778 We therefore need to shift OUTOF_INPUT by (BITS_PER_WORD - OP1) bits in
779 the opposite direction to BINOPTAB. */
780 if (CONSTANT_P (op1) || shift_mask >= BITS_PER_WORD)
782 carries = outof_input;
783 tmp = immed_double_const (BITS_PER_WORD, 0, op1_mode);
784 tmp = simplify_expand_binop (op1_mode, sub_optab, tmp, op1,
789 /* We must avoid shifting by BITS_PER_WORD bits since that is either
790 the same as a zero shift (if shift_mask == BITS_PER_WORD - 1) or
791 has unknown behavior. Do a single shift first, then shift by the
792 remainder. It's OK to use ~OP1 as the remainder if shift counts
793 are truncated to the mode size. */
794 carries = expand_binop (word_mode, reverse_unsigned_shift,
795 outof_input, const1_rtx, 0, unsignedp, methods);
796 if (shift_mask == BITS_PER_WORD - 1)
798 tmp = immed_double_const (-1, -1, op1_mode);
799 tmp = simplify_expand_binop (op1_mode, xor_optab, op1, tmp,
804 tmp = immed_double_const (BITS_PER_WORD - 1, 0, op1_mode);
805 tmp = simplify_expand_binop (op1_mode, sub_optab, tmp, op1,
809 if (tmp == 0 || carries == 0)
811 carries = expand_binop (word_mode, reverse_unsigned_shift,
812 carries, tmp, 0, unsignedp, methods);
816 /* Shift INTO_INPUT logically by OP1. This is the last use of INTO_INPUT
817 so the result can go directly into INTO_TARGET if convenient. */
818 tmp = expand_binop (word_mode, unsigned_shift, into_input, op1,
819 into_target, unsignedp, methods);
823 /* Now OR in the bits carried over from OUTOF_INPUT. */
824 if (!force_expand_binop (word_mode, ior_optab, tmp, carries,
825 into_target, unsignedp, methods))
828 /* Use a standard word_mode shift for the out-of half. */
829 if (outof_target != 0)
830 if (!force_expand_binop (word_mode, binoptab, outof_input, op1,
831 outof_target, unsignedp, methods))
838 #ifdef HAVE_conditional_move
839 /* Try implementing expand_doubleword_shift using conditional moves.
840 The shift is by < BITS_PER_WORD if (CMP_CODE CMP1 CMP2) is true,
841 otherwise it is by >= BITS_PER_WORD. SUBWORD_OP1 and SUPERWORD_OP1
842 are the shift counts to use in the former and latter case. All other
843 arguments are the same as the parent routine. */
846 expand_doubleword_shift_condmove (enum machine_mode op1_mode, optab binoptab,
847 enum rtx_code cmp_code, rtx cmp1, rtx cmp2,
848 rtx outof_input, rtx into_input,
849 rtx subword_op1, rtx superword_op1,
850 rtx outof_target, rtx into_target,
851 int unsignedp, enum optab_methods methods,
852 unsigned HOST_WIDE_INT shift_mask)
854 rtx outof_superword, into_superword;
856 /* Put the superword version of the output into OUTOF_SUPERWORD and
858 outof_superword = outof_target != 0 ? gen_reg_rtx (word_mode) : 0;
859 if (outof_target != 0 && subword_op1 == superword_op1)
861 /* The value INTO_TARGET >> SUBWORD_OP1, which we later store in
862 OUTOF_TARGET, is the same as the value of INTO_SUPERWORD. */
863 into_superword = outof_target;
864 if (!expand_superword_shift (binoptab, outof_input, superword_op1,
865 outof_superword, 0, unsignedp, methods))
870 into_superword = gen_reg_rtx (word_mode);
871 if (!expand_superword_shift (binoptab, outof_input, superword_op1,
872 outof_superword, into_superword,
877 /* Put the subword version directly in OUTOF_TARGET and INTO_TARGET. */
878 if (!expand_subword_shift (op1_mode, binoptab,
879 outof_input, into_input, subword_op1,
880 outof_target, into_target,
881 unsignedp, methods, shift_mask))
884 /* Select between them. Do the INTO half first because INTO_SUPERWORD
885 might be the current value of OUTOF_TARGET. */
886 if (!emit_conditional_move (into_target, cmp_code, cmp1, cmp2, op1_mode,
887 into_target, into_superword, word_mode, false))
890 if (outof_target != 0)
891 if (!emit_conditional_move (outof_target, cmp_code, cmp1, cmp2, op1_mode,
892 outof_target, outof_superword,
900 /* Expand a doubleword shift (ashl, ashr or lshr) using word-mode shifts.
901 OUTOF_INPUT and INTO_INPUT are the two word-sized halves of the first
902 input operand; the shift moves bits in the direction OUTOF_INPUT->
903 INTO_TARGET. OUTOF_TARGET and INTO_TARGET are the equivalent words
904 of the target. OP1 is the shift count and OP1_MODE is its mode.
905 If OP1 is constant, it will have been truncated as appropriate
906 and is known to be nonzero.
908 If SHIFT_MASK is zero, the result of word shifts is undefined when the
909 shift count is outside the range [0, BITS_PER_WORD). This routine must
910 avoid generating such shifts for OP1s in the range [0, BITS_PER_WORD * 2).
912 If SHIFT_MASK is nonzero, all word-mode shift counts are effectively
913 masked by it and shifts in the range [BITS_PER_WORD, SHIFT_MASK) will
914 fill with zeros or sign bits as appropriate.
916 If SHIFT_MASK is BITS_PER_WORD - 1, this routine will synthesize
917 a doubleword shift whose equivalent mask is BITS_PER_WORD * 2 - 1.
918 Doing this preserves semantics required by SHIFT_COUNT_TRUNCATED.
919 In all other cases, shifts by values outside [0, BITS_PER_UNIT * 2)
922 BINOPTAB, UNSIGNEDP and METHODS are as for expand_binop. This function
923 may not use INTO_INPUT after modifying INTO_TARGET, and similarly for
924 OUTOF_INPUT and OUTOF_TARGET. OUTOF_TARGET can be null if the parent
925 function wants to calculate it itself.
927 Return true if the shift could be successfully synthesized. */
930 expand_doubleword_shift (enum machine_mode op1_mode, optab binoptab,
931 rtx outof_input, rtx into_input, rtx op1,
932 rtx outof_target, rtx into_target,
933 int unsignedp, enum optab_methods methods,
934 unsigned HOST_WIDE_INT shift_mask)
936 rtx superword_op1, tmp, cmp1, cmp2;
937 rtx subword_label, done_label;
938 enum rtx_code cmp_code;
940 /* See if word-mode shifts by BITS_PER_WORD...BITS_PER_WORD * 2 - 1 will
941 fill the result with sign or zero bits as appropriate. If so, the value
942 of OUTOF_TARGET will always be (SHIFT OUTOF_INPUT OP1). Recursively call
943 this routine to calculate INTO_TARGET (which depends on both OUTOF_INPUT
944 and INTO_INPUT), then emit code to set up OUTOF_TARGET.
946 This isn't worthwhile for constant shifts since the optimizers will
947 cope better with in-range shift counts. */
948 if (shift_mask >= BITS_PER_WORD
950 && !CONSTANT_P (op1))
952 if (!expand_doubleword_shift (op1_mode, binoptab,
953 outof_input, into_input, op1,
955 unsignedp, methods, shift_mask))
957 if (!force_expand_binop (word_mode, binoptab, outof_input, op1,
958 outof_target, unsignedp, methods))
963 /* Set CMP_CODE, CMP1 and CMP2 so that the rtx (CMP_CODE CMP1 CMP2)
964 is true when the effective shift value is less than BITS_PER_WORD.
965 Set SUPERWORD_OP1 to the shift count that should be used to shift
966 OUTOF_INPUT into INTO_TARGET when the condition is false. */
967 tmp = immed_double_const (BITS_PER_WORD, 0, op1_mode);
968 if (!CONSTANT_P (op1) && shift_mask == BITS_PER_WORD - 1)
970 /* Set CMP1 to OP1 & BITS_PER_WORD. The result is zero iff OP1
971 is a subword shift count. */
972 cmp1 = simplify_expand_binop (op1_mode, and_optab, op1, tmp,
974 cmp2 = CONST0_RTX (op1_mode);
980 /* Set CMP1 to OP1 - BITS_PER_WORD. */
981 cmp1 = simplify_expand_binop (op1_mode, sub_optab, op1, tmp,
983 cmp2 = CONST0_RTX (op1_mode);
985 superword_op1 = cmp1;
990 /* If we can compute the condition at compile time, pick the
991 appropriate subroutine. */
992 tmp = simplify_relational_operation (cmp_code, SImode, op1_mode, cmp1, cmp2);
993 if (tmp != 0 && GET_CODE (tmp) == CONST_INT)
995 if (tmp == const0_rtx)
996 return expand_superword_shift (binoptab, outof_input, superword_op1,
997 outof_target, into_target,
1000 return expand_subword_shift (op1_mode, binoptab,
1001 outof_input, into_input, op1,
1002 outof_target, into_target,
1003 unsignedp, methods, shift_mask);
1006 #ifdef HAVE_conditional_move
1007 /* Try using conditional moves to generate straight-line code. */
1009 rtx start = get_last_insn ();
1010 if (expand_doubleword_shift_condmove (op1_mode, binoptab,
1011 cmp_code, cmp1, cmp2,
1012 outof_input, into_input,
1014 outof_target, into_target,
1015 unsignedp, methods, shift_mask))
1017 delete_insns_since (start);
1021 /* As a last resort, use branches to select the correct alternative. */
1022 subword_label = gen_label_rtx ();
1023 done_label = gen_label_rtx ();
1026 do_compare_rtx_and_jump (cmp1, cmp2, cmp_code, false, op1_mode,
1027 0, 0, subword_label);
1030 if (!expand_superword_shift (binoptab, outof_input, superword_op1,
1031 outof_target, into_target,
1032 unsignedp, methods))
1035 emit_jump_insn (gen_jump (done_label));
1037 emit_label (subword_label);
1039 if (!expand_subword_shift (op1_mode, binoptab,
1040 outof_input, into_input, op1,
1041 outof_target, into_target,
1042 unsignedp, methods, shift_mask))
1045 emit_label (done_label);
1049 /* Subroutine of expand_binop. Perform a double word multiplication of
1050 operands OP0 and OP1 both of mode MODE, which is exactly twice as wide
1051 as the target's word_mode. This function return NULL_RTX if anything
1052 goes wrong, in which case it may have already emitted instructions
1053 which need to be deleted.
1055 If we want to multiply two two-word values and have normal and widening
1056 multiplies of single-word values, we can do this with three smaller
1057 multiplications. Note that we do not make a REG_NO_CONFLICT block here
1058 because we are not operating on one word at a time.
1060 The multiplication proceeds as follows:
1061 _______________________
1062 [__op0_high_|__op0_low__]
1063 _______________________
1064 * [__op1_high_|__op1_low__]
1065 _______________________________________________
1066 _______________________
1067 (1) [__op0_low__*__op1_low__]
1068 _______________________
1069 (2a) [__op0_low__*__op1_high_]
1070 _______________________
1071 (2b) [__op0_high_*__op1_low__]
1072 _______________________
1073 (3) [__op0_high_*__op1_high_]
1076 This gives a 4-word result. Since we are only interested in the
1077 lower 2 words, partial result (3) and the upper words of (2a) and
1078 (2b) don't need to be calculated. Hence (2a) and (2b) can be
1079 calculated using non-widening multiplication.
1081 (1), however, needs to be calculated with an unsigned widening
1082 multiplication. If this operation is not directly supported we
1083 try using a signed widening multiplication and adjust the result.
1084 This adjustment works as follows:
1086 If both operands are positive then no adjustment is needed.
1088 If the operands have different signs, for example op0_low < 0 and
1089 op1_low >= 0, the instruction treats the most significant bit of
1090 op0_low as a sign bit instead of a bit with significance
1091 2**(BITS_PER_WORD-1), i.e. the instruction multiplies op1_low
1092 with 2**BITS_PER_WORD - op0_low, and two's complements the
1093 result. Conclusion: We need to add op1_low * 2**BITS_PER_WORD to
1096 Similarly, if both operands are negative, we need to add
1097 (op0_low + op1_low) * 2**BITS_PER_WORD.
1099 We use a trick to adjust quickly. We logically shift op0_low right
1100 (op1_low) BITS_PER_WORD-1 steps to get 0 or 1, and add this to
1101 op0_high (op1_high) before it is used to calculate 2b (2a). If no
1102 logical shift exists, we do an arithmetic right shift and subtract
1106 expand_doubleword_mult (enum machine_mode mode, rtx op0, rtx op1, rtx target,
1107 bool umulp, enum optab_methods methods)
1109 int low = (WORDS_BIG_ENDIAN ? 1 : 0);
1110 int high = (WORDS_BIG_ENDIAN ? 0 : 1);
1111 rtx wordm1 = umulp ? NULL_RTX : GEN_INT (BITS_PER_WORD - 1);
1112 rtx product, adjust, product_high, temp;
1114 rtx op0_high = operand_subword_force (op0, high, mode);
1115 rtx op0_low = operand_subword_force (op0, low, mode);
1116 rtx op1_high = operand_subword_force (op1, high, mode);
1117 rtx op1_low = operand_subword_force (op1, low, mode);
1119 /* If we're using an unsigned multiply to directly compute the product
1120 of the low-order words of the operands and perform any required
1121 adjustments of the operands, we begin by trying two more multiplications
1122 and then computing the appropriate sum.
1124 We have checked above that the required addition is provided.
1125 Full-word addition will normally always succeed, especially if
1126 it is provided at all, so we don't worry about its failure. The
1127 multiplication may well fail, however, so we do handle that. */
1131 /* ??? This could be done with emit_store_flag where available. */
1132 temp = expand_binop (word_mode, lshr_optab, op0_low, wordm1,
1133 NULL_RTX, 1, methods);
1135 op0_high = expand_binop (word_mode, add_optab, op0_high, temp,
1136 NULL_RTX, 0, OPTAB_DIRECT);
1139 temp = expand_binop (word_mode, ashr_optab, op0_low, wordm1,
1140 NULL_RTX, 0, methods);
1143 op0_high = expand_binop (word_mode, sub_optab, op0_high, temp,
1144 NULL_RTX, 0, OPTAB_DIRECT);
1151 adjust = expand_binop (word_mode, smul_optab, op0_high, op1_low,
1152 NULL_RTX, 0, OPTAB_DIRECT);
1156 /* OP0_HIGH should now be dead. */
1160 /* ??? This could be done with emit_store_flag where available. */
1161 temp = expand_binop (word_mode, lshr_optab, op1_low, wordm1,
1162 NULL_RTX, 1, methods);
1164 op1_high = expand_binop (word_mode, add_optab, op1_high, temp,
1165 NULL_RTX, 0, OPTAB_DIRECT);
1168 temp = expand_binop (word_mode, ashr_optab, op1_low, wordm1,
1169 NULL_RTX, 0, methods);
1172 op1_high = expand_binop (word_mode, sub_optab, op1_high, temp,
1173 NULL_RTX, 0, OPTAB_DIRECT);
1180 temp = expand_binop (word_mode, smul_optab, op1_high, op0_low,
1181 NULL_RTX, 0, OPTAB_DIRECT);
1185 /* OP1_HIGH should now be dead. */
1187 adjust = expand_binop (word_mode, add_optab, adjust, temp,
1188 adjust, 0, OPTAB_DIRECT);
1190 if (target && !REG_P (target))
1194 product = expand_binop (mode, umul_widen_optab, op0_low, op1_low,
1195 target, 1, OPTAB_DIRECT);
1197 product = expand_binop (mode, smul_widen_optab, op0_low, op1_low,
1198 target, 1, OPTAB_DIRECT);
1203 product_high = operand_subword (product, high, 1, mode);
1204 adjust = expand_binop (word_mode, add_optab, product_high, adjust,
1205 REG_P (product_high) ? product_high : adjust,
1207 emit_move_insn (product_high, adjust);
1211 /* Wrapper around expand_binop which takes an rtx code to specify
1212 the operation to perform, not an optab pointer. All other
1213 arguments are the same. */
1215 expand_simple_binop (enum machine_mode mode, enum rtx_code code, rtx op0,
1216 rtx op1, rtx target, int unsignedp,
1217 enum optab_methods methods)
1219 optab binop = code_to_optab[(int) code];
1222 return expand_binop (mode, binop, op0, op1, target, unsignedp, methods);
1225 /* Return whether OP0 and OP1 should be swapped when expanding a commutative
1226 binop. Order them according to commutative_operand_precedence and, if
1227 possible, try to put TARGET or a pseudo first. */
1229 swap_commutative_operands_with_target (rtx target, rtx op0, rtx op1)
1231 int op0_prec = commutative_operand_precedence (op0);
1232 int op1_prec = commutative_operand_precedence (op1);
1234 if (op0_prec < op1_prec)
1237 if (op0_prec > op1_prec)
1240 /* With equal precedence, both orders are ok, but it is better if the
1241 first operand is TARGET, or if both TARGET and OP0 are pseudos. */
1242 if (target == 0 || REG_P (target))
1243 return (REG_P (op1) && !REG_P (op0)) || target == op1;
1245 return rtx_equal_p (op1, target);
1249 /* Generate code to perform an operation specified by BINOPTAB
1250 on operands OP0 and OP1, with result having machine-mode MODE.
1252 UNSIGNEDP is for the case where we have to widen the operands
1253 to perform the operation. It says to use zero-extension.
1255 If TARGET is nonzero, the value
1256 is generated there, if it is convenient to do so.
1257 In all cases an rtx is returned for the locus of the value;
1258 this may or may not be TARGET. */
1261 expand_binop (enum machine_mode mode, optab binoptab, rtx op0, rtx op1,
1262 rtx target, int unsignedp, enum optab_methods methods)
1264 enum optab_methods next_methods
1265 = (methods == OPTAB_LIB || methods == OPTAB_LIB_WIDEN
1266 ? OPTAB_WIDEN : methods);
1267 enum mode_class class;
1268 enum machine_mode wider_mode;
1270 int commutative_op = 0;
1271 int shift_op = (binoptab->code == ASHIFT
1272 || binoptab->code == ASHIFTRT
1273 || binoptab->code == LSHIFTRT
1274 || binoptab->code == ROTATE
1275 || binoptab->code == ROTATERT);
1276 rtx entry_last = get_last_insn ();
1278 bool first_pass_p = true;
1280 class = GET_MODE_CLASS (mode);
1282 /* If subtracting an integer constant, convert this into an addition of
1283 the negated constant. */
1285 if (binoptab == sub_optab && GET_CODE (op1) == CONST_INT)
1287 op1 = negate_rtx (mode, op1);
1288 binoptab = add_optab;
1291 /* If we are inside an appropriately-short loop and we are optimizing,
1292 force expensive constants into a register. */
1293 if (CONSTANT_P (op0) && optimize
1294 && rtx_cost (op0, binoptab->code) > COSTS_N_INSNS (1))
1296 if (GET_MODE (op0) != VOIDmode)
1297 op0 = convert_modes (mode, VOIDmode, op0, unsignedp);
1298 op0 = force_reg (mode, op0);
1301 if (CONSTANT_P (op1) && optimize
1302 && ! shift_op && rtx_cost (op1, binoptab->code) > COSTS_N_INSNS (1))
1304 if (GET_MODE (op1) != VOIDmode)
1305 op1 = convert_modes (mode, VOIDmode, op1, unsignedp);
1306 op1 = force_reg (mode, op1);
1309 /* Record where to delete back to if we backtrack. */
1310 last = get_last_insn ();
1312 /* If operation is commutative,
1313 try to make the first operand a register.
1314 Even better, try to make it the same as the target.
1315 Also try to make the last operand a constant. */
1316 if (GET_RTX_CLASS (binoptab->code) == RTX_COMM_ARITH
1317 || binoptab == smul_widen_optab
1318 || binoptab == umul_widen_optab
1319 || binoptab == smul_highpart_optab
1320 || binoptab == umul_highpart_optab)
1324 if (swap_commutative_operands_with_target (target, op0, op1))
1334 /* If we can do it with a three-operand insn, do so. */
1336 if (methods != OPTAB_MUST_WIDEN
1337 && binoptab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
1339 int icode = (int) binoptab->handlers[(int) mode].insn_code;
1340 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
1341 enum machine_mode mode1 = insn_data[icode].operand[2].mode;
1342 enum machine_mode tmp_mode;
1344 rtx xop0 = op0, xop1 = op1;
1349 temp = gen_reg_rtx (mode);
1351 /* If it is a commutative operator and the modes would match
1352 if we would swap the operands, we can save the conversions. */
1355 if (GET_MODE (op0) != mode0 && GET_MODE (op1) != mode1
1356 && GET_MODE (op0) == mode1 && GET_MODE (op1) == mode0)
1360 tmp = op0; op0 = op1; op1 = tmp;
1361 tmp = xop0; xop0 = xop1; xop1 = tmp;
1365 /* In case the insn wants input operands in modes different from
1366 those of the actual operands, convert the operands. It would
1367 seem that we don't need to convert CONST_INTs, but we do, so
1368 that they're properly zero-extended, sign-extended or truncated
1371 if (GET_MODE (op0) != mode0 && mode0 != VOIDmode)
1372 xop0 = convert_modes (mode0,
1373 GET_MODE (op0) != VOIDmode
1378 if (GET_MODE (op1) != mode1 && mode1 != VOIDmode)
1379 xop1 = convert_modes (mode1,
1380 GET_MODE (op1) != VOIDmode
1385 /* Now, if insn's predicates don't allow our operands, put them into
1388 if (!insn_data[icode].operand[1].predicate (xop0, mode0)
1389 && mode0 != VOIDmode)
1390 xop0 = copy_to_mode_reg (mode0, xop0);
1392 if (!insn_data[icode].operand[2].predicate (xop1, mode1)
1393 && mode1 != VOIDmode)
1394 xop1 = copy_to_mode_reg (mode1, xop1);
1396 if (binoptab == vec_pack_trunc_optab
1397 || binoptab == vec_pack_usat_optab
1398 || binoptab == vec_pack_ssat_optab
1399 || binoptab == vec_pack_ufix_trunc_optab
1400 || binoptab == vec_pack_sfix_trunc_optab)
1402 /* The mode of the result is different then the mode of the
1404 tmp_mode = insn_data[icode].operand[0].mode;
1405 if (GET_MODE_NUNITS (tmp_mode) != 2 * GET_MODE_NUNITS (mode))
1411 if (!insn_data[icode].operand[0].predicate (temp, tmp_mode))
1412 temp = gen_reg_rtx (tmp_mode);
1414 pat = GEN_FCN (icode) (temp, xop0, xop1);
1417 /* If PAT is composed of more than one insn, try to add an appropriate
1418 REG_EQUAL note to it. If we can't because TEMP conflicts with an
1419 operand, call ourselves again, this time without a target. */
1420 if (INSN_P (pat) && NEXT_INSN (pat) != NULL_RTX
1421 && ! add_equal_note (pat, temp, binoptab->code, xop0, xop1))
1423 delete_insns_since (last);
1424 return expand_binop (mode, binoptab, op0, op1, NULL_RTX,
1425 unsignedp, methods);
1432 delete_insns_since (last);
1435 /* If we were trying to rotate by a constant value, and that didn't
1436 work, try rotating the other direction before falling back to
1437 shifts and bitwise-or. */
1439 && (binoptab == rotl_optab || binoptab == rotr_optab)
1440 && class == MODE_INT
1441 && GET_CODE (op1) == CONST_INT
1443 && (unsigned int) INTVAL (op1) < GET_MODE_BITSIZE (mode))
1445 first_pass_p = false;
1446 op1 = GEN_INT (GET_MODE_BITSIZE (mode) - INTVAL (op1));
1447 binoptab = binoptab == rotl_optab ? rotr_optab : rotl_optab;
1451 /* If this is a multiply, see if we can do a widening operation that
1452 takes operands of this mode and makes a wider mode. */
1454 if (binoptab == smul_optab
1455 && GET_MODE_WIDER_MODE (mode) != VOIDmode
1456 && (((unsignedp ? umul_widen_optab : smul_widen_optab)
1457 ->handlers[(int) GET_MODE_WIDER_MODE (mode)].insn_code)
1458 != CODE_FOR_nothing))
1460 temp = expand_binop (GET_MODE_WIDER_MODE (mode),
1461 unsignedp ? umul_widen_optab : smul_widen_optab,
1462 op0, op1, NULL_RTX, unsignedp, OPTAB_DIRECT);
1466 if (GET_MODE_CLASS (mode) == MODE_INT
1467 && TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode),
1468 GET_MODE_BITSIZE (GET_MODE (temp))))
1469 return gen_lowpart (mode, temp);
1471 return convert_to_mode (mode, temp, unsignedp);
1475 /* Look for a wider mode of the same class for which we think we
1476 can open-code the operation. Check for a widening multiply at the
1477 wider mode as well. */
1479 if (CLASS_HAS_WIDER_MODES_P (class)
1480 && methods != OPTAB_DIRECT && methods != OPTAB_LIB)
1481 for (wider_mode = GET_MODE_WIDER_MODE (mode);
1482 wider_mode != VOIDmode;
1483 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
1485 if (binoptab->handlers[(int) wider_mode].insn_code != CODE_FOR_nothing
1486 || (binoptab == smul_optab
1487 && GET_MODE_WIDER_MODE (wider_mode) != VOIDmode
1488 && (((unsignedp ? umul_widen_optab : smul_widen_optab)
1489 ->handlers[(int) GET_MODE_WIDER_MODE (wider_mode)].insn_code)
1490 != CODE_FOR_nothing)))
1492 rtx xop0 = op0, xop1 = op1;
1495 /* For certain integer operations, we need not actually extend
1496 the narrow operands, as long as we will truncate
1497 the results to the same narrowness. */
1499 if ((binoptab == ior_optab || binoptab == and_optab
1500 || binoptab == xor_optab
1501 || binoptab == add_optab || binoptab == sub_optab
1502 || binoptab == smul_optab || binoptab == ashl_optab)
1503 && class == MODE_INT)
1506 xop0 = widen_operand (xop0, wider_mode, mode, unsignedp, no_extend);
1508 /* The second operand of a shift must always be extended. */
1509 xop1 = widen_operand (xop1, wider_mode, mode, unsignedp,
1510 no_extend && binoptab != ashl_optab);
1512 temp = expand_binop (wider_mode, binoptab, xop0, xop1, NULL_RTX,
1513 unsignedp, OPTAB_DIRECT);
1516 if (class != MODE_INT
1517 || !TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode),
1518 GET_MODE_BITSIZE (wider_mode)))
1521 target = gen_reg_rtx (mode);
1522 convert_move (target, temp, 0);
1526 return gen_lowpart (mode, temp);
1529 delete_insns_since (last);
1533 /* These can be done a word at a time. */
1534 if ((binoptab == and_optab || binoptab == ior_optab || binoptab == xor_optab)
1535 && class == MODE_INT
1536 && GET_MODE_SIZE (mode) > UNITS_PER_WORD
1537 && binoptab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing)
1543 /* If TARGET is the same as one of the operands, the REG_EQUAL note
1544 won't be accurate, so use a new target. */
1545 if (target == 0 || target == op0 || target == op1)
1546 target = gen_reg_rtx (mode);
1550 /* Do the actual arithmetic. */
1551 for (i = 0; i < GET_MODE_BITSIZE (mode) / BITS_PER_WORD; i++)
1553 rtx target_piece = operand_subword (target, i, 1, mode);
1554 rtx x = expand_binop (word_mode, binoptab,
1555 operand_subword_force (op0, i, mode),
1556 operand_subword_force (op1, i, mode),
1557 target_piece, unsignedp, next_methods);
1562 if (target_piece != x)
1563 emit_move_insn (target_piece, x);
1566 insns = get_insns ();
1569 if (i == GET_MODE_BITSIZE (mode) / BITS_PER_WORD)
1571 if (binoptab->code != UNKNOWN)
1573 = gen_rtx_fmt_ee (binoptab->code, mode,
1574 copy_rtx (op0), copy_rtx (op1));
1578 emit_no_conflict_block (insns, target, op0, op1, equiv_value);
1583 /* Synthesize double word shifts from single word shifts. */
1584 if ((binoptab == lshr_optab || binoptab == ashl_optab
1585 || binoptab == ashr_optab)
1586 && class == MODE_INT
1587 && (GET_CODE (op1) == CONST_INT || !optimize_size)
1588 && GET_MODE_SIZE (mode) == 2 * UNITS_PER_WORD
1589 && binoptab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing
1590 && ashl_optab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing
1591 && lshr_optab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing)
1593 unsigned HOST_WIDE_INT shift_mask, double_shift_mask;
1594 enum machine_mode op1_mode;
1596 double_shift_mask = targetm.shift_truncation_mask (mode);
1597 shift_mask = targetm.shift_truncation_mask (word_mode);
1598 op1_mode = GET_MODE (op1) != VOIDmode ? GET_MODE (op1) : word_mode;
1600 /* Apply the truncation to constant shifts. */
1601 if (double_shift_mask > 0 && GET_CODE (op1) == CONST_INT)
1602 op1 = GEN_INT (INTVAL (op1) & double_shift_mask);
1604 if (op1 == CONST0_RTX (op1_mode))
1607 /* Make sure that this is a combination that expand_doubleword_shift
1608 can handle. See the comments there for details. */
1609 if (double_shift_mask == 0
1610 || (shift_mask == BITS_PER_WORD - 1
1611 && double_shift_mask == BITS_PER_WORD * 2 - 1))
1613 rtx insns, equiv_value;
1614 rtx into_target, outof_target;
1615 rtx into_input, outof_input;
1616 int left_shift, outof_word;
1618 /* If TARGET is the same as one of the operands, the REG_EQUAL note
1619 won't be accurate, so use a new target. */
1620 if (target == 0 || target == op0 || target == op1)
1621 target = gen_reg_rtx (mode);
1625 /* OUTOF_* is the word we are shifting bits away from, and
1626 INTO_* is the word that we are shifting bits towards, thus
1627 they differ depending on the direction of the shift and
1628 WORDS_BIG_ENDIAN. */
1630 left_shift = binoptab == ashl_optab;
1631 outof_word = left_shift ^ ! WORDS_BIG_ENDIAN;
1633 outof_target = operand_subword (target, outof_word, 1, mode);
1634 into_target = operand_subword (target, 1 - outof_word, 1, mode);
1636 outof_input = operand_subword_force (op0, outof_word, mode);
1637 into_input = operand_subword_force (op0, 1 - outof_word, mode);
1639 if (expand_doubleword_shift (op1_mode, binoptab,
1640 outof_input, into_input, op1,
1641 outof_target, into_target,
1642 unsignedp, next_methods, shift_mask))
1644 insns = get_insns ();
1647 equiv_value = gen_rtx_fmt_ee (binoptab->code, mode, op0, op1);
1648 emit_no_conflict_block (insns, target, op0, op1, equiv_value);
1655 /* Synthesize double word rotates from single word shifts. */
1656 if ((binoptab == rotl_optab || binoptab == rotr_optab)
1657 && class == MODE_INT
1658 && GET_CODE (op1) == CONST_INT
1659 && GET_MODE_SIZE (mode) == 2 * UNITS_PER_WORD
1660 && ashl_optab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing
1661 && lshr_optab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing)
1664 rtx into_target, outof_target;
1665 rtx into_input, outof_input;
1667 int shift_count, left_shift, outof_word;
1669 /* If TARGET is the same as one of the operands, the REG_EQUAL note
1670 won't be accurate, so use a new target. Do this also if target is not
1671 a REG, first because having a register instead may open optimization
1672 opportunities, and second because if target and op0 happen to be MEMs
1673 designating the same location, we would risk clobbering it too early
1674 in the code sequence we generate below. */
1675 if (target == 0 || target == op0 || target == op1 || ! REG_P (target))
1676 target = gen_reg_rtx (mode);
1680 shift_count = INTVAL (op1);
1682 /* OUTOF_* is the word we are shifting bits away from, and
1683 INTO_* is the word that we are shifting bits towards, thus
1684 they differ depending on the direction of the shift and
1685 WORDS_BIG_ENDIAN. */
1687 left_shift = (binoptab == rotl_optab);
1688 outof_word = left_shift ^ ! WORDS_BIG_ENDIAN;
1690 outof_target = operand_subword (target, outof_word, 1, mode);
1691 into_target = operand_subword (target, 1 - outof_word, 1, mode);
1693 outof_input = operand_subword_force (op0, outof_word, mode);
1694 into_input = operand_subword_force (op0, 1 - outof_word, mode);
1696 if (shift_count == BITS_PER_WORD)
1698 /* This is just a word swap. */
1699 emit_move_insn (outof_target, into_input);
1700 emit_move_insn (into_target, outof_input);
1705 rtx into_temp1, into_temp2, outof_temp1, outof_temp2;
1706 rtx first_shift_count, second_shift_count;
1707 optab reverse_unsigned_shift, unsigned_shift;
1709 reverse_unsigned_shift = (left_shift ^ (shift_count < BITS_PER_WORD)
1710 ? lshr_optab : ashl_optab);
1712 unsigned_shift = (left_shift ^ (shift_count < BITS_PER_WORD)
1713 ? ashl_optab : lshr_optab);
1715 if (shift_count > BITS_PER_WORD)
1717 first_shift_count = GEN_INT (shift_count - BITS_PER_WORD);
1718 second_shift_count = GEN_INT (2 * BITS_PER_WORD - shift_count);
1722 first_shift_count = GEN_INT (BITS_PER_WORD - shift_count);
1723 second_shift_count = GEN_INT (shift_count);
1726 into_temp1 = expand_binop (word_mode, unsigned_shift,
1727 outof_input, first_shift_count,
1728 NULL_RTX, unsignedp, next_methods);
1729 into_temp2 = expand_binop (word_mode, reverse_unsigned_shift,
1730 into_input, second_shift_count,
1731 NULL_RTX, unsignedp, next_methods);
1733 if (into_temp1 != 0 && into_temp2 != 0)
1734 inter = expand_binop (word_mode, ior_optab, into_temp1, into_temp2,
1735 into_target, unsignedp, next_methods);
1739 if (inter != 0 && inter != into_target)
1740 emit_move_insn (into_target, inter);
1742 outof_temp1 = expand_binop (word_mode, unsigned_shift,
1743 into_input, first_shift_count,
1744 NULL_RTX, unsignedp, next_methods);
1745 outof_temp2 = expand_binop (word_mode, reverse_unsigned_shift,
1746 outof_input, second_shift_count,
1747 NULL_RTX, unsignedp, next_methods);
1749 if (inter != 0 && outof_temp1 != 0 && outof_temp2 != 0)
1750 inter = expand_binop (word_mode, ior_optab,
1751 outof_temp1, outof_temp2,
1752 outof_target, unsignedp, next_methods);
1754 if (inter != 0 && inter != outof_target)
1755 emit_move_insn (outof_target, inter);
1758 insns = get_insns ();
1763 /* One may be tempted to wrap the insns in a REG_NO_CONFLICT
1764 block to help the register allocator a bit. But a multi-word
1765 rotate will need all the input bits when setting the output
1766 bits, so there clearly is a conflict between the input and
1767 output registers. So we can't use a no-conflict block here. */
1773 /* These can be done a word at a time by propagating carries. */
1774 if ((binoptab == add_optab || binoptab == sub_optab)
1775 && class == MODE_INT
1776 && GET_MODE_SIZE (mode) >= 2 * UNITS_PER_WORD
1777 && binoptab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing)
1780 optab otheroptab = binoptab == add_optab ? sub_optab : add_optab;
1781 const unsigned int nwords = GET_MODE_BITSIZE (mode) / BITS_PER_WORD;
1782 rtx carry_in = NULL_RTX, carry_out = NULL_RTX;
1783 rtx xop0, xop1, xtarget;
1785 /* We can handle either a 1 or -1 value for the carry. If STORE_FLAG
1786 value is one of those, use it. Otherwise, use 1 since it is the
1787 one easiest to get. */
1788 #if STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1
1789 int normalizep = STORE_FLAG_VALUE;
1794 /* Prepare the operands. */
1795 xop0 = force_reg (mode, op0);
1796 xop1 = force_reg (mode, op1);
1798 xtarget = gen_reg_rtx (mode);
1800 if (target == 0 || !REG_P (target))
1803 /* Indicate for flow that the entire target reg is being set. */
1805 emit_insn (gen_rtx_CLOBBER (VOIDmode, xtarget));
1807 /* Do the actual arithmetic. */
1808 for (i = 0; i < nwords; i++)
1810 int index = (WORDS_BIG_ENDIAN ? nwords - i - 1 : i);
1811 rtx target_piece = operand_subword (xtarget, index, 1, mode);
1812 rtx op0_piece = operand_subword_force (xop0, index, mode);
1813 rtx op1_piece = operand_subword_force (xop1, index, mode);
1816 /* Main add/subtract of the input operands. */
1817 x = expand_binop (word_mode, binoptab,
1818 op0_piece, op1_piece,
1819 target_piece, unsignedp, next_methods);
1825 /* Store carry from main add/subtract. */
1826 carry_out = gen_reg_rtx (word_mode);
1827 carry_out = emit_store_flag_force (carry_out,
1828 (binoptab == add_optab
1831 word_mode, 1, normalizep);
1838 /* Add/subtract previous carry to main result. */
1839 newx = expand_binop (word_mode,
1840 normalizep == 1 ? binoptab : otheroptab,
1842 NULL_RTX, 1, next_methods);
1846 /* Get out carry from adding/subtracting carry in. */
1847 rtx carry_tmp = gen_reg_rtx (word_mode);
1848 carry_tmp = emit_store_flag_force (carry_tmp,
1849 (binoptab == add_optab
1852 word_mode, 1, normalizep);
1854 /* Logical-ior the two poss. carry together. */
1855 carry_out = expand_binop (word_mode, ior_optab,
1856 carry_out, carry_tmp,
1857 carry_out, 0, next_methods);
1861 emit_move_insn (target_piece, newx);
1865 if (x != target_piece)
1866 emit_move_insn (target_piece, x);
1869 carry_in = carry_out;
1872 if (i == GET_MODE_BITSIZE (mode) / (unsigned) BITS_PER_WORD)
1874 if (mov_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing
1875 || ! rtx_equal_p (target, xtarget))
1877 rtx temp = emit_move_insn (target, xtarget);
1879 set_unique_reg_note (temp,
1881 gen_rtx_fmt_ee (binoptab->code, mode,
1892 delete_insns_since (last);
1895 /* Attempt to synthesize double word multiplies using a sequence of word
1896 mode multiplications. We first attempt to generate a sequence using a
1897 more efficient unsigned widening multiply, and if that fails we then
1898 try using a signed widening multiply. */
1900 if (binoptab == smul_optab
1901 && class == MODE_INT
1902 && GET_MODE_SIZE (mode) == 2 * UNITS_PER_WORD
1903 && smul_optab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing
1904 && add_optab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing)
1906 rtx product = NULL_RTX;
1908 if (umul_widen_optab->handlers[(int) mode].insn_code
1909 != CODE_FOR_nothing)
1911 product = expand_doubleword_mult (mode, op0, op1, target,
1914 delete_insns_since (last);
1917 if (product == NULL_RTX
1918 && smul_widen_optab->handlers[(int) mode].insn_code
1919 != CODE_FOR_nothing)
1921 product = expand_doubleword_mult (mode, op0, op1, target,
1924 delete_insns_since (last);
1927 if (product != NULL_RTX)
1929 if (mov_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
1931 temp = emit_move_insn (target ? target : product, product);
1932 set_unique_reg_note (temp,
1934 gen_rtx_fmt_ee (MULT, mode,
1942 /* It can't be open-coded in this mode.
1943 Use a library call if one is available and caller says that's ok. */
1945 if (binoptab->handlers[(int) mode].libfunc
1946 && (methods == OPTAB_LIB || methods == OPTAB_LIB_WIDEN))
1950 enum machine_mode op1_mode = mode;
1957 op1_mode = word_mode;
1958 /* Specify unsigned here,
1959 since negative shift counts are meaningless. */
1960 op1x = convert_to_mode (word_mode, op1, 1);
1963 if (GET_MODE (op0) != VOIDmode
1964 && GET_MODE (op0) != mode)
1965 op0 = convert_to_mode (mode, op0, unsignedp);
1967 /* Pass 1 for NO_QUEUE so we don't lose any increments
1968 if the libcall is cse'd or moved. */
1969 value = emit_library_call_value (binoptab->handlers[(int) mode].libfunc,
1970 NULL_RTX, LCT_CONST, mode, 2,
1971 op0, mode, op1x, op1_mode);
1973 insns = get_insns ();
1976 target = gen_reg_rtx (mode);
1977 emit_libcall_block (insns, target, value,
1978 gen_rtx_fmt_ee (binoptab->code, mode, op0, op1));
1983 delete_insns_since (last);
1985 /* It can't be done in this mode. Can we do it in a wider mode? */
1987 if (! (methods == OPTAB_WIDEN || methods == OPTAB_LIB_WIDEN
1988 || methods == OPTAB_MUST_WIDEN))
1990 /* Caller says, don't even try. */
1991 delete_insns_since (entry_last);
1995 /* Compute the value of METHODS to pass to recursive calls.
1996 Don't allow widening to be tried recursively. */
1998 methods = (methods == OPTAB_LIB_WIDEN ? OPTAB_LIB : OPTAB_DIRECT);
2000 /* Look for a wider mode of the same class for which it appears we can do
2003 if (CLASS_HAS_WIDER_MODES_P (class))
2005 for (wider_mode = GET_MODE_WIDER_MODE (mode);
2006 wider_mode != VOIDmode;
2007 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
2009 if ((binoptab->handlers[(int) wider_mode].insn_code
2010 != CODE_FOR_nothing)
2011 || (methods == OPTAB_LIB
2012 && binoptab->handlers[(int) wider_mode].libfunc))
2014 rtx xop0 = op0, xop1 = op1;
2017 /* For certain integer operations, we need not actually extend
2018 the narrow operands, as long as we will truncate
2019 the results to the same narrowness. */
2021 if ((binoptab == ior_optab || binoptab == and_optab
2022 || binoptab == xor_optab
2023 || binoptab == add_optab || binoptab == sub_optab
2024 || binoptab == smul_optab || binoptab == ashl_optab)
2025 && class == MODE_INT)
2028 xop0 = widen_operand (xop0, wider_mode, mode,
2029 unsignedp, no_extend);
2031 /* The second operand of a shift must always be extended. */
2032 xop1 = widen_operand (xop1, wider_mode, mode, unsignedp,
2033 no_extend && binoptab != ashl_optab);
2035 temp = expand_binop (wider_mode, binoptab, xop0, xop1, NULL_RTX,
2036 unsignedp, methods);
2039 if (class != MODE_INT
2040 || !TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode),
2041 GET_MODE_BITSIZE (wider_mode)))
2044 target = gen_reg_rtx (mode);
2045 convert_move (target, temp, 0);
2049 return gen_lowpart (mode, temp);
2052 delete_insns_since (last);
2057 delete_insns_since (entry_last);
2061 /* Expand a binary operator which has both signed and unsigned forms.
2062 UOPTAB is the optab for unsigned operations, and SOPTAB is for
2065 If we widen unsigned operands, we may use a signed wider operation instead
2066 of an unsigned wider operation, since the result would be the same. */
2069 sign_expand_binop (enum machine_mode mode, optab uoptab, optab soptab,
2070 rtx op0, rtx op1, rtx target, int unsignedp,
2071 enum optab_methods methods)
2074 optab direct_optab = unsignedp ? uoptab : soptab;
2075 struct optab wide_soptab;
2077 /* Do it without widening, if possible. */
2078 temp = expand_binop (mode, direct_optab, op0, op1, target,
2079 unsignedp, OPTAB_DIRECT);
2080 if (temp || methods == OPTAB_DIRECT)
2083 /* Try widening to a signed int. Make a fake signed optab that
2084 hides any signed insn for direct use. */
2085 wide_soptab = *soptab;
2086 wide_soptab.handlers[(int) mode].insn_code = CODE_FOR_nothing;
2087 wide_soptab.handlers[(int) mode].libfunc = 0;
2089 temp = expand_binop (mode, &wide_soptab, op0, op1, target,
2090 unsignedp, OPTAB_WIDEN);
2092 /* For unsigned operands, try widening to an unsigned int. */
2093 if (temp == 0 && unsignedp)
2094 temp = expand_binop (mode, uoptab, op0, op1, target,
2095 unsignedp, OPTAB_WIDEN);
2096 if (temp || methods == OPTAB_WIDEN)
2099 /* Use the right width lib call if that exists. */
2100 temp = expand_binop (mode, direct_optab, op0, op1, target, unsignedp, OPTAB_LIB);
2101 if (temp || methods == OPTAB_LIB)
2104 /* Must widen and use a lib call, use either signed or unsigned. */
2105 temp = expand_binop (mode, &wide_soptab, op0, op1, target,
2106 unsignedp, methods);
2110 return expand_binop (mode, uoptab, op0, op1, target,
2111 unsignedp, methods);
2115 /* Generate code to perform an operation specified by UNOPPTAB
2116 on operand OP0, with two results to TARG0 and TARG1.
2117 We assume that the order of the operands for the instruction
2118 is TARG0, TARG1, OP0.
2120 Either TARG0 or TARG1 may be zero, but what that means is that
2121 the result is not actually wanted. We will generate it into
2122 a dummy pseudo-reg and discard it. They may not both be zero.
2124 Returns 1 if this operation can be performed; 0 if not. */
2127 expand_twoval_unop (optab unoptab, rtx op0, rtx targ0, rtx targ1,
2130 enum machine_mode mode = GET_MODE (targ0 ? targ0 : targ1);
2131 enum mode_class class;
2132 enum machine_mode wider_mode;
2133 rtx entry_last = get_last_insn ();
2136 class = GET_MODE_CLASS (mode);
2139 targ0 = gen_reg_rtx (mode);
2141 targ1 = gen_reg_rtx (mode);
2143 /* Record where to go back to if we fail. */
2144 last = get_last_insn ();
2146 if (unoptab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
2148 int icode = (int) unoptab->handlers[(int) mode].insn_code;
2149 enum machine_mode mode0 = insn_data[icode].operand[2].mode;
2153 if (GET_MODE (xop0) != VOIDmode
2154 && GET_MODE (xop0) != mode0)
2155 xop0 = convert_to_mode (mode0, xop0, unsignedp);
2157 /* Now, if insn doesn't accept these operands, put them into pseudos. */
2158 if (!insn_data[icode].operand[2].predicate (xop0, mode0))
2159 xop0 = copy_to_mode_reg (mode0, xop0);
2161 /* We could handle this, but we should always be called with a pseudo
2162 for our targets and all insns should take them as outputs. */
2163 gcc_assert (insn_data[icode].operand[0].predicate (targ0, mode));
2164 gcc_assert (insn_data[icode].operand[1].predicate (targ1, mode));
2166 pat = GEN_FCN (icode) (targ0, targ1, xop0);
2173 delete_insns_since (last);
2176 /* It can't be done in this mode. Can we do it in a wider mode? */
2178 if (CLASS_HAS_WIDER_MODES_P (class))
2180 for (wider_mode = GET_MODE_WIDER_MODE (mode);
2181 wider_mode != VOIDmode;
2182 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
2184 if (unoptab->handlers[(int) wider_mode].insn_code
2185 != CODE_FOR_nothing)
2187 rtx t0 = gen_reg_rtx (wider_mode);
2188 rtx t1 = gen_reg_rtx (wider_mode);
2189 rtx cop0 = convert_modes (wider_mode, mode, op0, unsignedp);
2191 if (expand_twoval_unop (unoptab, cop0, t0, t1, unsignedp))
2193 convert_move (targ0, t0, unsignedp);
2194 convert_move (targ1, t1, unsignedp);
2198 delete_insns_since (last);
2203 delete_insns_since (entry_last);
2207 /* Generate code to perform an operation specified by BINOPTAB
2208 on operands OP0 and OP1, with two results to TARG1 and TARG2.
2209 We assume that the order of the operands for the instruction
2210 is TARG0, OP0, OP1, TARG1, which would fit a pattern like
2211 [(set TARG0 (operate OP0 OP1)) (set TARG1 (operate ...))].
2213 Either TARG0 or TARG1 may be zero, but what that means is that
2214 the result is not actually wanted. We will generate it into
2215 a dummy pseudo-reg and discard it. They may not both be zero.
2217 Returns 1 if this operation can be performed; 0 if not. */
2220 expand_twoval_binop (optab binoptab, rtx op0, rtx op1, rtx targ0, rtx targ1,
2223 enum machine_mode mode = GET_MODE (targ0 ? targ0 : targ1);
2224 enum mode_class class;
2225 enum machine_mode wider_mode;
2226 rtx entry_last = get_last_insn ();
2229 class = GET_MODE_CLASS (mode);
2231 /* If we are inside an appropriately-short loop and we are optimizing,
2232 force expensive constants into a register. */
2233 if (CONSTANT_P (op0) && optimize
2234 && rtx_cost (op0, binoptab->code) > COSTS_N_INSNS (1))
2235 op0 = force_reg (mode, op0);
2237 if (CONSTANT_P (op1) && optimize
2238 && rtx_cost (op1, binoptab->code) > COSTS_N_INSNS (1))
2239 op1 = force_reg (mode, op1);
2242 targ0 = gen_reg_rtx (mode);
2244 targ1 = gen_reg_rtx (mode);
2246 /* Record where to go back to if we fail. */
2247 last = get_last_insn ();
2249 if (binoptab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
2251 int icode = (int) binoptab->handlers[(int) mode].insn_code;
2252 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
2253 enum machine_mode mode1 = insn_data[icode].operand[2].mode;
2255 rtx xop0 = op0, xop1 = op1;
2257 /* In case the insn wants input operands in modes different from
2258 those of the actual operands, convert the operands. It would
2259 seem that we don't need to convert CONST_INTs, but we do, so
2260 that they're properly zero-extended, sign-extended or truncated
2263 if (GET_MODE (op0) != mode0 && mode0 != VOIDmode)
2264 xop0 = convert_modes (mode0,
2265 GET_MODE (op0) != VOIDmode
2270 if (GET_MODE (op1) != mode1 && mode1 != VOIDmode)
2271 xop1 = convert_modes (mode1,
2272 GET_MODE (op1) != VOIDmode
2277 /* Now, if insn doesn't accept these operands, put them into pseudos. */
2278 if (!insn_data[icode].operand[1].predicate (xop0, mode0))
2279 xop0 = copy_to_mode_reg (mode0, xop0);
2281 if (!insn_data[icode].operand[2].predicate (xop1, mode1))
2282 xop1 = copy_to_mode_reg (mode1, xop1);
2284 /* We could handle this, but we should always be called with a pseudo
2285 for our targets and all insns should take them as outputs. */
2286 gcc_assert (insn_data[icode].operand[0].predicate (targ0, mode));
2287 gcc_assert (insn_data[icode].operand[3].predicate (targ1, mode));
2289 pat = GEN_FCN (icode) (targ0, xop0, xop1, targ1);
2296 delete_insns_since (last);
2299 /* It can't be done in this mode. Can we do it in a wider mode? */
2301 if (CLASS_HAS_WIDER_MODES_P (class))
2303 for (wider_mode = GET_MODE_WIDER_MODE (mode);
2304 wider_mode != VOIDmode;
2305 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
2307 if (binoptab->handlers[(int) wider_mode].insn_code
2308 != CODE_FOR_nothing)
2310 rtx t0 = gen_reg_rtx (wider_mode);
2311 rtx t1 = gen_reg_rtx (wider_mode);
2312 rtx cop0 = convert_modes (wider_mode, mode, op0, unsignedp);
2313 rtx cop1 = convert_modes (wider_mode, mode, op1, unsignedp);
2315 if (expand_twoval_binop (binoptab, cop0, cop1,
2318 convert_move (targ0, t0, unsignedp);
2319 convert_move (targ1, t1, unsignedp);
2323 delete_insns_since (last);
2328 delete_insns_since (entry_last);
2332 /* Expand the two-valued library call indicated by BINOPTAB, but
2333 preserve only one of the values. If TARG0 is non-NULL, the first
2334 value is placed into TARG0; otherwise the second value is placed
2335 into TARG1. Exactly one of TARG0 and TARG1 must be non-NULL. The
2336 value stored into TARG0 or TARG1 is equivalent to (CODE OP0 OP1).
2337 This routine assumes that the value returned by the library call is
2338 as if the return value was of an integral mode twice as wide as the
2339 mode of OP0. Returns 1 if the call was successful. */
2342 expand_twoval_binop_libfunc (optab binoptab, rtx op0, rtx op1,
2343 rtx targ0, rtx targ1, enum rtx_code code)
2345 enum machine_mode mode;
2346 enum machine_mode libval_mode;
2350 /* Exactly one of TARG0 or TARG1 should be non-NULL. */
2351 gcc_assert (!targ0 != !targ1);
2353 mode = GET_MODE (op0);
2354 if (!binoptab->handlers[(int) mode].libfunc)
2357 /* The value returned by the library function will have twice as
2358 many bits as the nominal MODE. */
2359 libval_mode = smallest_mode_for_size (2 * GET_MODE_BITSIZE (mode),
2362 libval = emit_library_call_value (binoptab->handlers[(int) mode].libfunc,
2363 NULL_RTX, LCT_CONST,
2367 /* Get the part of VAL containing the value that we want. */
2368 libval = simplify_gen_subreg (mode, libval, libval_mode,
2369 targ0 ? 0 : GET_MODE_SIZE (mode));
2370 insns = get_insns ();
2372 /* Move the into the desired location. */
2373 emit_libcall_block (insns, targ0 ? targ0 : targ1, libval,
2374 gen_rtx_fmt_ee (code, mode, op0, op1));
2380 /* Wrapper around expand_unop which takes an rtx code to specify
2381 the operation to perform, not an optab pointer. All other
2382 arguments are the same. */
2384 expand_simple_unop (enum machine_mode mode, enum rtx_code code, rtx op0,
2385 rtx target, int unsignedp)
2387 optab unop = code_to_optab[(int) code];
2390 return expand_unop (mode, unop, op0, target, unsignedp);
2396 (clz:wide (zero_extend:wide x)) - ((width wide) - (width narrow)). */
2398 widen_clz (enum machine_mode mode, rtx op0, rtx target)
2400 enum mode_class class = GET_MODE_CLASS (mode);
2401 if (CLASS_HAS_WIDER_MODES_P (class))
2403 enum machine_mode wider_mode;
2404 for (wider_mode = GET_MODE_WIDER_MODE (mode);
2405 wider_mode != VOIDmode;
2406 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
2408 if (clz_optab->handlers[(int) wider_mode].insn_code
2409 != CODE_FOR_nothing)
2411 rtx xop0, temp, last;
2413 last = get_last_insn ();
2416 target = gen_reg_rtx (mode);
2417 xop0 = widen_operand (op0, wider_mode, mode, true, false);
2418 temp = expand_unop (wider_mode, clz_optab, xop0, NULL_RTX, true);
2420 temp = expand_binop (wider_mode, sub_optab, temp,
2421 GEN_INT (GET_MODE_BITSIZE (wider_mode)
2422 - GET_MODE_BITSIZE (mode)),
2423 target, true, OPTAB_DIRECT);
2425 delete_insns_since (last);
2437 (lshiftrt:wide (bswap:wide x) ((width wide) - (width narrow))). */
2439 widen_bswap (enum machine_mode mode, rtx op0, rtx target)
2441 enum mode_class class = GET_MODE_CLASS (mode);
2442 enum machine_mode wider_mode;
2445 if (!CLASS_HAS_WIDER_MODES_P (class))
2448 for (wider_mode = GET_MODE_WIDER_MODE (mode);
2449 wider_mode != VOIDmode;
2450 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
2451 if (bswap_optab->handlers[wider_mode].insn_code != CODE_FOR_nothing)
2456 last = get_last_insn ();
2458 x = widen_operand (op0, wider_mode, mode, true, true);
2459 x = expand_unop (wider_mode, bswap_optab, x, NULL_RTX, true);
2462 x = expand_shift (RSHIFT_EXPR, wider_mode, x,
2463 size_int (GET_MODE_BITSIZE (wider_mode)
2464 - GET_MODE_BITSIZE (mode)),
2470 target = gen_reg_rtx (mode);
2471 emit_move_insn (target, gen_lowpart (mode, x));
2474 delete_insns_since (last);
2479 /* Try calculating bswap as two bswaps of two word-sized operands. */
2482 expand_doubleword_bswap (enum machine_mode mode, rtx op, rtx target)
2486 t1 = expand_unop (word_mode, bswap_optab,
2487 operand_subword_force (op, 0, mode), NULL_RTX, true);
2488 t0 = expand_unop (word_mode, bswap_optab,
2489 operand_subword_force (op, 1, mode), NULL_RTX, true);
2492 target = gen_reg_rtx (mode);
2494 emit_insn (gen_rtx_CLOBBER (VOIDmode, target));
2495 emit_move_insn (operand_subword (target, 0, 1, mode), t0);
2496 emit_move_insn (operand_subword (target, 1, 1, mode), t1);
2501 /* Try calculating (parity x) as (and (popcount x) 1), where
2502 popcount can also be done in a wider mode. */
2504 expand_parity (enum machine_mode mode, rtx op0, rtx target)
2506 enum mode_class class = GET_MODE_CLASS (mode);
2507 if (CLASS_HAS_WIDER_MODES_P (class))
2509 enum machine_mode wider_mode;
2510 for (wider_mode = mode; wider_mode != VOIDmode;
2511 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
2513 if (popcount_optab->handlers[(int) wider_mode].insn_code
2514 != CODE_FOR_nothing)
2516 rtx xop0, temp, last;
2518 last = get_last_insn ();
2521 target = gen_reg_rtx (mode);
2522 xop0 = widen_operand (op0, wider_mode, mode, true, false);
2523 temp = expand_unop (wider_mode, popcount_optab, xop0, NULL_RTX,
2526 temp = expand_binop (wider_mode, and_optab, temp, const1_rtx,
2527 target, true, OPTAB_DIRECT);
2529 delete_insns_since (last);
2538 /* Extract the OMODE lowpart from VAL, which has IMODE. Under certain
2539 conditions, VAL may already be a SUBREG against which we cannot generate
2540 a further SUBREG. In this case, we expect forcing the value into a
2541 register will work around the situation. */
2544 lowpart_subreg_maybe_copy (enum machine_mode omode, rtx val,
2545 enum machine_mode imode)
2548 ret = lowpart_subreg (omode, val, imode);
2551 val = force_reg (imode, val);
2552 ret = lowpart_subreg (omode, val, imode);
2553 gcc_assert (ret != NULL);
2558 /* Expand a floating point absolute value or negation operation via a
2559 logical operation on the sign bit. */
2562 expand_absneg_bit (enum rtx_code code, enum machine_mode mode,
2563 rtx op0, rtx target)
2565 const struct real_format *fmt;
2566 int bitpos, word, nwords, i;
2567 enum machine_mode imode;
2568 HOST_WIDE_INT hi, lo;
2571 /* The format has to have a simple sign bit. */
2572 fmt = REAL_MODE_FORMAT (mode);
2576 bitpos = fmt->signbit_rw;
2580 /* Don't create negative zeros if the format doesn't support them. */
2581 if (code == NEG && !fmt->has_signed_zero)
2584 if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
2586 imode = int_mode_for_mode (mode);
2587 if (imode == BLKmode)
2596 if (FLOAT_WORDS_BIG_ENDIAN)
2597 word = (GET_MODE_BITSIZE (mode) - bitpos) / BITS_PER_WORD;
2599 word = bitpos / BITS_PER_WORD;
2600 bitpos = bitpos % BITS_PER_WORD;
2601 nwords = (GET_MODE_BITSIZE (mode) + BITS_PER_WORD - 1) / BITS_PER_WORD;
2604 if (bitpos < HOST_BITS_PER_WIDE_INT)
2607 lo = (HOST_WIDE_INT) 1 << bitpos;
2611 hi = (HOST_WIDE_INT) 1 << (bitpos - HOST_BITS_PER_WIDE_INT);
2617 if (target == 0 || target == op0)
2618 target = gen_reg_rtx (mode);
2624 for (i = 0; i < nwords; ++i)
2626 rtx targ_piece = operand_subword (target, i, 1, mode);
2627 rtx op0_piece = operand_subword_force (op0, i, mode);
2631 temp = expand_binop (imode, code == ABS ? and_optab : xor_optab,
2633 immed_double_const (lo, hi, imode),
2634 targ_piece, 1, OPTAB_LIB_WIDEN);
2635 if (temp != targ_piece)
2636 emit_move_insn (targ_piece, temp);
2639 emit_move_insn (targ_piece, op0_piece);
2642 insns = get_insns ();
2645 temp = gen_rtx_fmt_e (code, mode, copy_rtx (op0));
2646 emit_no_conflict_block (insns, target, op0, NULL_RTX, temp);
2650 temp = expand_binop (imode, code == ABS ? and_optab : xor_optab,
2651 gen_lowpart (imode, op0),
2652 immed_double_const (lo, hi, imode),
2653 gen_lowpart (imode, target), 1, OPTAB_LIB_WIDEN);
2654 target = lowpart_subreg_maybe_copy (mode, temp, imode);
2656 set_unique_reg_note (get_last_insn (), REG_EQUAL,
2657 gen_rtx_fmt_e (code, mode, copy_rtx (op0)));
2663 /* Generate code to perform an operation specified by UNOPTAB
2664 on operand OP0, with result having machine-mode MODE.
2666 UNSIGNEDP is for the case where we have to widen the operands
2667 to perform the operation. It says to use zero-extension.
2669 If TARGET is nonzero, the value
2670 is generated there, if it is convenient to do so.
2671 In all cases an rtx is returned for the locus of the value;
2672 this may or may not be TARGET. */
2675 expand_unop (enum machine_mode mode, optab unoptab, rtx op0, rtx target,
2678 enum mode_class class;
2679 enum machine_mode wider_mode;
2681 rtx last = get_last_insn ();
2684 class = GET_MODE_CLASS (mode);
2686 if (unoptab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
2688 int icode = (int) unoptab->handlers[(int) mode].insn_code;
2689 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
2695 temp = gen_reg_rtx (mode);
2697 if (GET_MODE (xop0) != VOIDmode
2698 && GET_MODE (xop0) != mode0)
2699 xop0 = convert_to_mode (mode0, xop0, unsignedp);
2701 /* Now, if insn doesn't accept our operand, put it into a pseudo. */
2703 if (!insn_data[icode].operand[1].predicate (xop0, mode0))
2704 xop0 = copy_to_mode_reg (mode0, xop0);
2706 if (!insn_data[icode].operand[0].predicate (temp, mode))
2707 temp = gen_reg_rtx (mode);
2709 pat = GEN_FCN (icode) (temp, xop0);
2712 if (INSN_P (pat) && NEXT_INSN (pat) != NULL_RTX
2713 && ! add_equal_note (pat, temp, unoptab->code, xop0, NULL_RTX))
2715 delete_insns_since (last);
2716 return expand_unop (mode, unoptab, op0, NULL_RTX, unsignedp);
2724 delete_insns_since (last);
2727 /* It can't be done in this mode. Can we open-code it in a wider mode? */
2729 /* Widening clz needs special treatment. */
2730 if (unoptab == clz_optab)
2732 temp = widen_clz (mode, op0, target);
2739 /* Widening (or narrowing) bswap needs special treatment. */
2740 if (unoptab == bswap_optab)
2742 temp = widen_bswap (mode, op0, target);
2746 if (GET_MODE_SIZE (mode) == 2 * UNITS_PER_WORD
2747 && unoptab->handlers[word_mode].insn_code != CODE_FOR_nothing)
2749 temp = expand_doubleword_bswap (mode, op0, target);
2757 if (CLASS_HAS_WIDER_MODES_P (class))
2758 for (wider_mode = GET_MODE_WIDER_MODE (mode);
2759 wider_mode != VOIDmode;
2760 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
2762 if (unoptab->handlers[(int) wider_mode].insn_code != CODE_FOR_nothing)
2766 /* For certain operations, we need not actually extend
2767 the narrow operand, as long as we will truncate the
2768 results to the same narrowness. */
2770 xop0 = widen_operand (xop0, wider_mode, mode, unsignedp,
2771 (unoptab == neg_optab
2772 || unoptab == one_cmpl_optab)
2773 && class == MODE_INT);
2775 temp = expand_unop (wider_mode, unoptab, xop0, NULL_RTX,
2780 if (class != MODE_INT
2781 || !TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode),
2782 GET_MODE_BITSIZE (wider_mode)))
2785 target = gen_reg_rtx (mode);
2786 convert_move (target, temp, 0);
2790 return gen_lowpart (mode, temp);
2793 delete_insns_since (last);
2797 /* These can be done a word at a time. */
2798 if (unoptab == one_cmpl_optab
2799 && class == MODE_INT
2800 && GET_MODE_SIZE (mode) > UNITS_PER_WORD
2801 && unoptab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing)
2806 if (target == 0 || target == op0)
2807 target = gen_reg_rtx (mode);
2811 /* Do the actual arithmetic. */
2812 for (i = 0; i < GET_MODE_BITSIZE (mode) / BITS_PER_WORD; i++)
2814 rtx target_piece = operand_subword (target, i, 1, mode);
2815 rtx x = expand_unop (word_mode, unoptab,
2816 operand_subword_force (op0, i, mode),
2817 target_piece, unsignedp);
2819 if (target_piece != x)
2820 emit_move_insn (target_piece, x);
2823 insns = get_insns ();
2826 emit_no_conflict_block (insns, target, op0, NULL_RTX,
2827 gen_rtx_fmt_e (unoptab->code, mode,
2832 if (unoptab->code == NEG)
2834 /* Try negating floating point values by flipping the sign bit. */
2835 if (SCALAR_FLOAT_MODE_P (mode))
2837 temp = expand_absneg_bit (NEG, mode, op0, target);
2842 /* If there is no negation pattern, and we have no negative zero,
2843 try subtracting from zero. */
2844 if (!HONOR_SIGNED_ZEROS (mode))
2846 temp = expand_binop (mode, (unoptab == negv_optab
2847 ? subv_optab : sub_optab),
2848 CONST0_RTX (mode), op0, target,
2849 unsignedp, OPTAB_DIRECT);
2855 /* Try calculating parity (x) as popcount (x) % 2. */
2856 if (unoptab == parity_optab)
2858 temp = expand_parity (mode, op0, target);
2864 /* Now try a library call in this mode. */
2865 if (unoptab->handlers[(int) mode].libfunc)
2869 enum machine_mode outmode = mode;
2871 /* All of these functions return small values. Thus we choose to
2872 have them return something that isn't a double-word. */
2873 if (unoptab == ffs_optab || unoptab == clz_optab || unoptab == ctz_optab
2874 || unoptab == popcount_optab || unoptab == parity_optab)
2876 = GET_MODE (hard_libcall_value (TYPE_MODE (integer_type_node)));
2880 /* Pass 1 for NO_QUEUE so we don't lose any increments
2881 if the libcall is cse'd or moved. */
2882 value = emit_library_call_value (unoptab->handlers[(int) mode].libfunc,
2883 NULL_RTX, LCT_CONST, outmode,
2885 insns = get_insns ();
2888 target = gen_reg_rtx (outmode);
2889 emit_libcall_block (insns, target, value,
2890 gen_rtx_fmt_e (unoptab->code, outmode, op0));
2895 /* It can't be done in this mode. Can we do it in a wider mode? */
2897 if (CLASS_HAS_WIDER_MODES_P (class))
2899 for (wider_mode = GET_MODE_WIDER_MODE (mode);
2900 wider_mode != VOIDmode;
2901 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
2903 if ((unoptab->handlers[(int) wider_mode].insn_code
2904 != CODE_FOR_nothing)
2905 || unoptab->handlers[(int) wider_mode].libfunc)
2909 /* For certain operations, we need not actually extend
2910 the narrow operand, as long as we will truncate the
2911 results to the same narrowness. */
2913 xop0 = widen_operand (xop0, wider_mode, mode, unsignedp,
2914 (unoptab == neg_optab
2915 || unoptab == one_cmpl_optab)
2916 && class == MODE_INT);
2918 temp = expand_unop (wider_mode, unoptab, xop0, NULL_RTX,
2921 /* If we are generating clz using wider mode, adjust the
2923 if (unoptab == clz_optab && temp != 0)
2924 temp = expand_binop (wider_mode, sub_optab, temp,
2925 GEN_INT (GET_MODE_BITSIZE (wider_mode)
2926 - GET_MODE_BITSIZE (mode)),
2927 target, true, OPTAB_DIRECT);
2931 if (class != MODE_INT)
2934 target = gen_reg_rtx (mode);
2935 convert_move (target, temp, 0);
2939 return gen_lowpart (mode, temp);
2942 delete_insns_since (last);
2947 /* One final attempt at implementing negation via subtraction,
2948 this time allowing widening of the operand. */
2949 if (unoptab->code == NEG && !HONOR_SIGNED_ZEROS (mode))
2952 temp = expand_binop (mode,
2953 unoptab == negv_optab ? subv_optab : sub_optab,
2954 CONST0_RTX (mode), op0,
2955 target, unsignedp, OPTAB_LIB_WIDEN);
2963 /* Emit code to compute the absolute value of OP0, with result to
2964 TARGET if convenient. (TARGET may be 0.) The return value says
2965 where the result actually is to be found.
2967 MODE is the mode of the operand; the mode of the result is
2968 different but can be deduced from MODE.
2973 expand_abs_nojump (enum machine_mode mode, rtx op0, rtx target,
2974 int result_unsignedp)
2979 result_unsignedp = 1;
2981 /* First try to do it with a special abs instruction. */
2982 temp = expand_unop (mode, result_unsignedp ? abs_optab : absv_optab,
2987 /* For floating point modes, try clearing the sign bit. */
2988 if (SCALAR_FLOAT_MODE_P (mode))
2990 temp = expand_absneg_bit (ABS, mode, op0, target);
2995 /* If we have a MAX insn, we can do this as MAX (x, -x). */
2996 if (smax_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing
2997 && !HONOR_SIGNED_ZEROS (mode))
2999 rtx last = get_last_insn ();
3001 temp = expand_unop (mode, neg_optab, op0, NULL_RTX, 0);
3003 temp = expand_binop (mode, smax_optab, op0, temp, target, 0,
3009 delete_insns_since (last);
3012 /* If this machine has expensive jumps, we can do integer absolute
3013 value of X as (((signed) x >> (W-1)) ^ x) - ((signed) x >> (W-1)),
3014 where W is the width of MODE. */
3016 if (GET_MODE_CLASS (mode) == MODE_INT && BRANCH_COST >= 2)
3018 rtx extended = expand_shift (RSHIFT_EXPR, mode, op0,
3019 size_int (GET_MODE_BITSIZE (mode) - 1),
3022 temp = expand_binop (mode, xor_optab, extended, op0, target, 0,
3025 temp = expand_binop (mode, result_unsignedp ? sub_optab : subv_optab,
3026 temp, extended, target, 0, OPTAB_LIB_WIDEN);
3036 expand_abs (enum machine_mode mode, rtx op0, rtx target,
3037 int result_unsignedp, int safe)
3042 result_unsignedp = 1;
3044 temp = expand_abs_nojump (mode, op0, target, result_unsignedp);
3048 /* If that does not win, use conditional jump and negate. */
3050 /* It is safe to use the target if it is the same
3051 as the source if this is also a pseudo register */
3052 if (op0 == target && REG_P (op0)
3053 && REGNO (op0) >= FIRST_PSEUDO_REGISTER)
3056 op1 = gen_label_rtx ();
3057 if (target == 0 || ! safe
3058 || GET_MODE (target) != mode
3059 || (MEM_P (target) && MEM_VOLATILE_P (target))
3061 && REGNO (target) < FIRST_PSEUDO_REGISTER))
3062 target = gen_reg_rtx (mode);
3064 emit_move_insn (target, op0);
3067 do_compare_rtx_and_jump (target, CONST0_RTX (mode), GE, 0, mode,
3068 NULL_RTX, NULL_RTX, op1);
3070 op0 = expand_unop (mode, result_unsignedp ? neg_optab : negv_optab,
3073 emit_move_insn (target, op0);
3079 /* A subroutine of expand_copysign, perform the copysign operation using the
3080 abs and neg primitives advertised to exist on the target. The assumption
3081 is that we have a split register file, and leaving op0 in fp registers,
3082 and not playing with subregs so much, will help the register allocator. */
3085 expand_copysign_absneg (enum machine_mode mode, rtx op0, rtx op1, rtx target,
3086 int bitpos, bool op0_is_abs)
3088 enum machine_mode imode;
3089 HOST_WIDE_INT hi, lo;
3098 op0 = expand_unop (mode, abs_optab, op0, target, 0);
3105 if (target == NULL_RTX)
3106 target = copy_to_reg (op0);
3108 emit_move_insn (target, op0);
3111 if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
3113 imode = int_mode_for_mode (mode);
3114 if (imode == BLKmode)
3116 op1 = gen_lowpart (imode, op1);
3121 if (FLOAT_WORDS_BIG_ENDIAN)
3122 word = (GET_MODE_BITSIZE (mode) - bitpos) / BITS_PER_WORD;
3124 word = bitpos / BITS_PER_WORD;
3125 bitpos = bitpos % BITS_PER_WORD;
3126 op1 = operand_subword_force (op1, word, mode);
3129 if (bitpos < HOST_BITS_PER_WIDE_INT)
3132 lo = (HOST_WIDE_INT) 1 << bitpos;
3136 hi = (HOST_WIDE_INT) 1 << (bitpos - HOST_BITS_PER_WIDE_INT);
3140 op1 = expand_binop (imode, and_optab, op1,
3141 immed_double_const (lo, hi, imode),
3142 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3144 label = gen_label_rtx ();
3145 emit_cmp_and_jump_insns (op1, const0_rtx, EQ, NULL_RTX, imode, 1, label);
3147 if (GET_CODE (op0) == CONST_DOUBLE)
3148 op0 = simplify_unary_operation (NEG, mode, op0, mode);
3150 op0 = expand_unop (mode, neg_optab, op0, target, 0);
3152 emit_move_insn (target, op0);
3160 /* A subroutine of expand_copysign, perform the entire copysign operation
3161 with integer bitmasks. BITPOS is the position of the sign bit; OP0_IS_ABS
3162 is true if op0 is known to have its sign bit clear. */
3165 expand_copysign_bit (enum machine_mode mode, rtx op0, rtx op1, rtx target,
3166 int bitpos, bool op0_is_abs)
3168 enum machine_mode imode;
3169 HOST_WIDE_INT hi, lo;
3170 int word, nwords, i;
3173 if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
3175 imode = int_mode_for_mode (mode);
3176 if (imode == BLKmode)
3185 if (FLOAT_WORDS_BIG_ENDIAN)
3186 word = (GET_MODE_BITSIZE (mode) - bitpos) / BITS_PER_WORD;
3188 word = bitpos / BITS_PER_WORD;
3189 bitpos = bitpos % BITS_PER_WORD;
3190 nwords = (GET_MODE_BITSIZE (mode) + BITS_PER_WORD - 1) / BITS_PER_WORD;
3193 if (bitpos < HOST_BITS_PER_WIDE_INT)
3196 lo = (HOST_WIDE_INT) 1 << bitpos;
3200 hi = (HOST_WIDE_INT) 1 << (bitpos - HOST_BITS_PER_WIDE_INT);
3204 if (target == 0 || target == op0 || target == op1)
3205 target = gen_reg_rtx (mode);
3211 for (i = 0; i < nwords; ++i)
3213 rtx targ_piece = operand_subword (target, i, 1, mode);
3214 rtx op0_piece = operand_subword_force (op0, i, mode);
3219 op0_piece = expand_binop (imode, and_optab, op0_piece,
3220 immed_double_const (~lo, ~hi, imode),
3221 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3223 op1 = expand_binop (imode, and_optab,
3224 operand_subword_force (op1, i, mode),
3225 immed_double_const (lo, hi, imode),
3226 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3228 temp = expand_binop (imode, ior_optab, op0_piece, op1,
3229 targ_piece, 1, OPTAB_LIB_WIDEN);
3230 if (temp != targ_piece)
3231 emit_move_insn (targ_piece, temp);
3234 emit_move_insn (targ_piece, op0_piece);
3237 insns = get_insns ();
3240 emit_no_conflict_block (insns, target, op0, op1, NULL_RTX);
3244 op1 = expand_binop (imode, and_optab, gen_lowpart (imode, op1),
3245 immed_double_const (lo, hi, imode),
3246 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3248 op0 = gen_lowpart (imode, op0);
3250 op0 = expand_binop (imode, and_optab, op0,
3251 immed_double_const (~lo, ~hi, imode),
3252 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3254 temp = expand_binop (imode, ior_optab, op0, op1,
3255 gen_lowpart (imode, target), 1, OPTAB_LIB_WIDEN);
3256 target = lowpart_subreg_maybe_copy (mode, temp, imode);
3262 /* Expand the C99 copysign operation. OP0 and OP1 must be the same
3263 scalar floating point mode. Return NULL if we do not know how to
3264 expand the operation inline. */
3267 expand_copysign (rtx op0, rtx op1, rtx target)
3269 enum machine_mode mode = GET_MODE (op0);
3270 const struct real_format *fmt;
3274 gcc_assert (SCALAR_FLOAT_MODE_P (mode));
3275 gcc_assert (GET_MODE (op1) == mode);
3277 /* First try to do it with a special instruction. */
3278 temp = expand_binop (mode, copysign_optab, op0, op1,
3279 target, 0, OPTAB_DIRECT);
3283 fmt = REAL_MODE_FORMAT (mode);
3284 if (fmt == NULL || !fmt->has_signed_zero)
3288 if (GET_CODE (op0) == CONST_DOUBLE)
3290 if (real_isneg (CONST_DOUBLE_REAL_VALUE (op0)))
3291 op0 = simplify_unary_operation (ABS, mode, op0, mode);
3295 if (fmt->signbit_ro >= 0
3296 && (GET_CODE (op0) == CONST_DOUBLE
3297 || (neg_optab->handlers[mode].insn_code != CODE_FOR_nothing
3298 && abs_optab->handlers[mode].insn_code != CODE_FOR_nothing)))
3300 temp = expand_copysign_absneg (mode, op0, op1, target,
3301 fmt->signbit_ro, op0_is_abs);
3306 if (fmt->signbit_rw < 0)
3308 return expand_copysign_bit (mode, op0, op1, target,
3309 fmt->signbit_rw, op0_is_abs);
3312 /* Generate an instruction whose insn-code is INSN_CODE,
3313 with two operands: an output TARGET and an input OP0.
3314 TARGET *must* be nonzero, and the output is always stored there.
3315 CODE is an rtx code such that (CODE OP0) is an rtx that describes
3316 the value that is stored into TARGET. */
3319 emit_unop_insn (int icode, rtx target, rtx op0, enum rtx_code code)
3322 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
3327 /* Now, if insn does not accept our operands, put them into pseudos. */
3329 if (!insn_data[icode].operand[1].predicate (op0, mode0))
3330 op0 = copy_to_mode_reg (mode0, op0);
3332 if (!insn_data[icode].operand[0].predicate (temp, GET_MODE (temp)))
3333 temp = gen_reg_rtx (GET_MODE (temp));
3335 pat = GEN_FCN (icode) (temp, op0);
3337 if (INSN_P (pat) && NEXT_INSN (pat) != NULL_RTX && code != UNKNOWN)
3338 add_equal_note (pat, temp, code, op0, NULL_RTX);
3343 emit_move_insn (target, temp);
3346 struct no_conflict_data
3348 rtx target, first, insn;
3352 /* Called via note_stores by emit_no_conflict_block and emit_libcall_block.
3353 Set P->must_stay if the currently examined clobber / store has to stay
3354 in the list of insns that constitute the actual no_conflict block /
3357 no_conflict_move_test (rtx dest, rtx set, void *p0)
3359 struct no_conflict_data *p= p0;
3361 /* If this inns directly contributes to setting the target, it must stay. */
3362 if (reg_overlap_mentioned_p (p->target, dest))
3363 p->must_stay = true;
3364 /* If we haven't committed to keeping any other insns in the list yet,
3365 there is nothing more to check. */
3366 else if (p->insn == p->first)
3368 /* If this insn sets / clobbers a register that feeds one of the insns
3369 already in the list, this insn has to stay too. */
3370 else if (reg_overlap_mentioned_p (dest, PATTERN (p->first))
3371 || (CALL_P (p->first) && (find_reg_fusage (p->first, USE, dest)))
3372 || reg_used_between_p (dest, p->first, p->insn)
3373 /* Likewise if this insn depends on a register set by a previous
3374 insn in the list, or if it sets a result (presumably a hard
3375 register) that is set or clobbered by a previous insn.
3376 N.B. the modified_*_p (SET_DEST...) tests applied to a MEM
3377 SET_DEST perform the former check on the address, and the latter
3378 check on the MEM. */
3379 || (GET_CODE (set) == SET
3380 && (modified_in_p (SET_SRC (set), p->first)
3381 || modified_in_p (SET_DEST (set), p->first)
3382 || modified_between_p (SET_SRC (set), p->first, p->insn)
3383 || modified_between_p (SET_DEST (set), p->first, p->insn))))
3384 p->must_stay = true;
3387 /* Encapsulate the block starting at FIRST and ending with LAST, which is
3388 logically equivalent to EQUIV, so it gets manipulated as a unit if it
3389 is possible to do so. */
3392 maybe_encapsulate_block (rtx first, rtx last, rtx equiv)
3394 if (!flag_non_call_exceptions || !may_trap_p (equiv))
3396 /* We can't attach the REG_LIBCALL and REG_RETVAL notes when the
3397 encapsulated region would not be in one basic block, i.e. when
3398 there is a control_flow_insn_p insn between FIRST and LAST. */
3399 bool attach_libcall_retval_notes = true;
3400 rtx insn, next = NEXT_INSN (last);
3402 for (insn = first; insn != next; insn = NEXT_INSN (insn))
3403 if (control_flow_insn_p (insn))
3405 attach_libcall_retval_notes = false;
3409 if (attach_libcall_retval_notes)
3411 REG_NOTES (first) = gen_rtx_INSN_LIST (REG_LIBCALL, last,
3413 REG_NOTES (last) = gen_rtx_INSN_LIST (REG_RETVAL, first,
3415 next = NEXT_INSN (last);
3416 for (insn = first; insn != next; insn = NEXT_INSN (insn))
3417 REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_LIBCALL_ID,
3418 GEN_INT (libcall_id),
3425 /* Emit code to perform a series of operations on a multi-word quantity, one
3428 Such a block is preceded by a CLOBBER of the output, consists of multiple
3429 insns, each setting one word of the output, and followed by a SET copying
3430 the output to itself.
3432 Each of the insns setting words of the output receives a REG_NO_CONFLICT
3433 note indicating that it doesn't conflict with the (also multi-word)
3434 inputs. The entire block is surrounded by REG_LIBCALL and REG_RETVAL
3437 INSNS is a block of code generated to perform the operation, not including
3438 the CLOBBER and final copy. All insns that compute intermediate values
3439 are first emitted, followed by the block as described above.
3441 TARGET, OP0, and OP1 are the output and inputs of the operations,
3442 respectively. OP1 may be zero for a unary operation.
3444 EQUIV, if nonzero, is an expression to be placed into a REG_EQUAL note
3447 If TARGET is not a register, INSNS is simply emitted with no special
3448 processing. Likewise if anything in INSNS is not an INSN or if
3449 there is a libcall block inside INSNS.
3451 The final insn emitted is returned. */
3454 emit_no_conflict_block (rtx insns, rtx target, rtx op0, rtx op1, rtx equiv)
3456 rtx prev, next, first, last, insn;
3458 if (!REG_P (target) || reload_in_progress)
3459 return emit_insn (insns);
3461 for (insn = insns; insn; insn = NEXT_INSN (insn))
3462 if (!NONJUMP_INSN_P (insn)
3463 || find_reg_note (insn, REG_LIBCALL, NULL_RTX))
3464 return emit_insn (insns);
3466 /* First emit all insns that do not store into words of the output and remove
3467 these from the list. */
3468 for (insn = insns; insn; insn = next)
3471 struct no_conflict_data data;
3473 next = NEXT_INSN (insn);
3475 /* Some ports (cris) create a libcall regions at their own. We must
3476 avoid any potential nesting of LIBCALLs. */
3477 if ((note = find_reg_note (insn, REG_LIBCALL, NULL)) != NULL)
3478 remove_note (insn, note);
3479 if ((note = find_reg_note (insn, REG_RETVAL, NULL)) != NULL)
3480 remove_note (insn, note);
3481 if ((note = find_reg_note (insn, REG_LIBCALL_ID, NULL)) != NULL)
3482 remove_note (insn, note);
3484 data.target = target;
3488 note_stores (PATTERN (insn), no_conflict_move_test, &data);
3489 if (! data.must_stay)
3491 if (PREV_INSN (insn))
3492 NEXT_INSN (PREV_INSN (insn)) = next;
3497 PREV_INSN (next) = PREV_INSN (insn);
3503 prev = get_last_insn ();
3505 /* Now write the CLOBBER of the output, followed by the setting of each
3506 of the words, followed by the final copy. */
3507 if (target != op0 && target != op1)
3508 emit_insn (gen_rtx_CLOBBER (VOIDmode, target));
3510 for (insn = insns; insn; insn = next)
3512 next = NEXT_INSN (insn);
3515 if (op1 && REG_P (op1))
3516 REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_NO_CONFLICT, op1,
3519 if (op0 && REG_P (op0))
3520 REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_NO_CONFLICT, op0,
3524 if (mov_optab->handlers[(int) GET_MODE (target)].insn_code
3525 != CODE_FOR_nothing)
3527 last = emit_move_insn (target, target);
3529 set_unique_reg_note (last, REG_EQUAL, equiv);
3533 last = get_last_insn ();
3535 /* Remove any existing REG_EQUAL note from "last", or else it will
3536 be mistaken for a note referring to the full contents of the
3537 alleged libcall value when found together with the REG_RETVAL
3538 note added below. An existing note can come from an insn
3539 expansion at "last". */
3540 remove_note (last, find_reg_note (last, REG_EQUAL, NULL_RTX));
3544 first = get_insns ();
3546 first = NEXT_INSN (prev);
3548 maybe_encapsulate_block (first, last, equiv);
3553 /* Emit code to make a call to a constant function or a library call.
3555 INSNS is a list containing all insns emitted in the call.
3556 These insns leave the result in RESULT. Our block is to copy RESULT
3557 to TARGET, which is logically equivalent to EQUIV.
3559 We first emit any insns that set a pseudo on the assumption that these are
3560 loading constants into registers; doing so allows them to be safely cse'ed
3561 between blocks. Then we emit all the other insns in the block, followed by
3562 an insn to move RESULT to TARGET. This last insn will have a REQ_EQUAL
3563 note with an operand of EQUIV.
3565 Moving assignments to pseudos outside of the block is done to improve
3566 the generated code, but is not required to generate correct code,
3567 hence being unable to move an assignment is not grounds for not making
3568 a libcall block. There are two reasons why it is safe to leave these
3569 insns inside the block: First, we know that these pseudos cannot be
3570 used in generated RTL outside the block since they are created for
3571 temporary purposes within the block. Second, CSE will not record the
3572 values of anything set inside a libcall block, so we know they must
3573 be dead at the end of the block.
3575 Except for the first group of insns (the ones setting pseudos), the
3576 block is delimited by REG_RETVAL and REG_LIBCALL notes. */
3578 emit_libcall_block (rtx insns, rtx target, rtx result, rtx equiv)
3580 rtx final_dest = target;
3581 rtx prev, next, first, last, insn;
3583 /* If this is a reg with REG_USERVAR_P set, then it could possibly turn
3584 into a MEM later. Protect the libcall block from this change. */
3585 if (! REG_P (target) || REG_USERVAR_P (target))
3586 target = gen_reg_rtx (GET_MODE (target));
3588 /* If we're using non-call exceptions, a libcall corresponding to an
3589 operation that may trap may also trap. */
3590 if (flag_non_call_exceptions && may_trap_p (equiv))
3592 for (insn = insns; insn; insn = NEXT_INSN (insn))
3595 rtx note = find_reg_note (insn, REG_EH_REGION, NULL_RTX);
3597 if (note != 0 && INTVAL (XEXP (note, 0)) <= 0)
3598 remove_note (insn, note);
3602 /* look for any CALL_INSNs in this sequence, and attach a REG_EH_REGION
3603 reg note to indicate that this call cannot throw or execute a nonlocal
3604 goto (unless there is already a REG_EH_REGION note, in which case
3606 for (insn = insns; insn; insn = NEXT_INSN (insn))
3609 rtx note = find_reg_note (insn, REG_EH_REGION, NULL_RTX);
3612 XEXP (note, 0) = constm1_rtx;
3614 REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_EH_REGION, constm1_rtx,
3618 /* First emit all insns that set pseudos. Remove them from the list as
3619 we go. Avoid insns that set pseudos which were referenced in previous
3620 insns. These can be generated by move_by_pieces, for example,
3621 to update an address. Similarly, avoid insns that reference things
3622 set in previous insns. */
3624 for (insn = insns; insn; insn = next)
3626 rtx set = single_set (insn);
3629 /* Some ports (cris) create a libcall regions at their own. We must
3630 avoid any potential nesting of LIBCALLs. */
3631 if ((note = find_reg_note (insn, REG_LIBCALL, NULL)) != NULL)
3632 remove_note (insn, note);
3633 if ((note = find_reg_note (insn, REG_RETVAL, NULL)) != NULL)
3634 remove_note (insn, note);
3635 if ((note = find_reg_note (insn, REG_LIBCALL_ID, NULL)) != NULL)
3636 remove_note (insn, note);
3638 next = NEXT_INSN (insn);
3640 if (set != 0 && REG_P (SET_DEST (set))
3641 && REGNO (SET_DEST (set)) >= FIRST_PSEUDO_REGISTER)
3643 struct no_conflict_data data;
3645 data.target = const0_rtx;
3649 note_stores (PATTERN (insn), no_conflict_move_test, &data);
3650 if (! data.must_stay)
3652 if (PREV_INSN (insn))
3653 NEXT_INSN (PREV_INSN (insn)) = next;
3658 PREV_INSN (next) = PREV_INSN (insn);
3664 /* Some ports use a loop to copy large arguments onto the stack.
3665 Don't move anything outside such a loop. */
3670 prev = get_last_insn ();
3672 /* Write the remaining insns followed by the final copy. */
3674 for (insn = insns; insn; insn = next)
3676 next = NEXT_INSN (insn);
3681 last = emit_move_insn (target, result);
3682 if (mov_optab->handlers[(int) GET_MODE (target)].insn_code
3683 != CODE_FOR_nothing)
3684 set_unique_reg_note (last, REG_EQUAL, copy_rtx (equiv));
3687 /* Remove any existing REG_EQUAL note from "last", or else it will
3688 be mistaken for a note referring to the full contents of the
3689 libcall value when found together with the REG_RETVAL note added
3690 below. An existing note can come from an insn expansion at
3692 remove_note (last, find_reg_note (last, REG_EQUAL, NULL_RTX));
3695 if (final_dest != target)
3696 emit_move_insn (final_dest, target);
3699 first = get_insns ();
3701 first = NEXT_INSN (prev);
3703 maybe_encapsulate_block (first, last, equiv);
3706 /* Nonzero if we can perform a comparison of mode MODE straightforwardly.
3707 PURPOSE describes how this comparison will be used. CODE is the rtx
3708 comparison code we will be using.
3710 ??? Actually, CODE is slightly weaker than that. A target is still
3711 required to implement all of the normal bcc operations, but not
3712 required to implement all (or any) of the unordered bcc operations. */
3715 can_compare_p (enum rtx_code code, enum machine_mode mode,
3716 enum can_compare_purpose purpose)
3720 if (cmp_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
3722 if (purpose == ccp_jump)
3723 return bcc_gen_fctn[(int) code] != NULL;
3724 else if (purpose == ccp_store_flag)
3725 return setcc_gen_code[(int) code] != CODE_FOR_nothing;
3727 /* There's only one cmov entry point, and it's allowed to fail. */
3730 if (purpose == ccp_jump
3731 && cbranch_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
3733 if (purpose == ccp_cmov
3734 && cmov_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
3736 if (purpose == ccp_store_flag
3737 && cstore_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
3739 mode = GET_MODE_WIDER_MODE (mode);
3741 while (mode != VOIDmode);
3746 /* This function is called when we are going to emit a compare instruction that
3747 compares the values found in *PX and *PY, using the rtl operator COMPARISON.
3749 *PMODE is the mode of the inputs (in case they are const_int).
3750 *PUNSIGNEDP nonzero says that the operands are unsigned;
3751 this matters if they need to be widened.
3753 If they have mode BLKmode, then SIZE specifies the size of both operands.
3755 This function performs all the setup necessary so that the caller only has
3756 to emit a single comparison insn. This setup can involve doing a BLKmode
3757 comparison or emitting a library call to perform the comparison if no insn
3758 is available to handle it.
3759 The values which are passed in through pointers can be modified; the caller
3760 should perform the comparison on the modified values. Constant
3761 comparisons must have already been folded. */
3764 prepare_cmp_insn (rtx *px, rtx *py, enum rtx_code *pcomparison, rtx size,
3765 enum machine_mode *pmode, int *punsignedp,
3766 enum can_compare_purpose purpose)
3768 enum machine_mode mode = *pmode;
3769 rtx x = *px, y = *py;
3770 int unsignedp = *punsignedp;
3772 /* If we are inside an appropriately-short loop and we are optimizing,
3773 force expensive constants into a register. */
3774 if (CONSTANT_P (x) && optimize
3775 && rtx_cost (x, COMPARE) > COSTS_N_INSNS (1))
3776 x = force_reg (mode, x);
3778 if (CONSTANT_P (y) && optimize
3779 && rtx_cost (y, COMPARE) > COSTS_N_INSNS (1))
3780 y = force_reg (mode, y);
3783 /* Make sure if we have a canonical comparison. The RTL
3784 documentation states that canonical comparisons are required only
3785 for targets which have cc0. */
3786 gcc_assert (!CONSTANT_P (x) || CONSTANT_P (y));
3789 /* Don't let both operands fail to indicate the mode. */
3790 if (GET_MODE (x) == VOIDmode && GET_MODE (y) == VOIDmode)
3791 x = force_reg (mode, x);
3793 /* Handle all BLKmode compares. */
3795 if (mode == BLKmode)
3797 enum machine_mode cmp_mode, result_mode;
3798 enum insn_code cmp_code;
3803 = GEN_INT (MIN (MEM_ALIGN (x), MEM_ALIGN (y)) / BITS_PER_UNIT);
3807 /* Try to use a memory block compare insn - either cmpstr
3808 or cmpmem will do. */
3809 for (cmp_mode = GET_CLASS_NARROWEST_MODE (MODE_INT);
3810 cmp_mode != VOIDmode;
3811 cmp_mode = GET_MODE_WIDER_MODE (cmp_mode))
3813 cmp_code = cmpmem_optab[cmp_mode];
3814 if (cmp_code == CODE_FOR_nothing)
3815 cmp_code = cmpstr_optab[cmp_mode];
3816 if (cmp_code == CODE_FOR_nothing)
3817 cmp_code = cmpstrn_optab[cmp_mode];
3818 if (cmp_code == CODE_FOR_nothing)
3821 /* Must make sure the size fits the insn's mode. */
3822 if ((GET_CODE (size) == CONST_INT
3823 && INTVAL (size) >= (1 << GET_MODE_BITSIZE (cmp_mode)))
3824 || (GET_MODE_BITSIZE (GET_MODE (size))
3825 > GET_MODE_BITSIZE (cmp_mode)))
3828 result_mode = insn_data[cmp_code].operand[0].mode;
3829 result = gen_reg_rtx (result_mode);
3830 size = convert_to_mode (cmp_mode, size, 1);
3831 emit_insn (GEN_FCN (cmp_code) (result, x, y, size, opalign));
3835 *pmode = result_mode;
3839 /* Otherwise call a library function, memcmp. */
3840 libfunc = memcmp_libfunc;
3841 length_type = sizetype;
3842 result_mode = TYPE_MODE (integer_type_node);
3843 cmp_mode = TYPE_MODE (length_type);
3844 size = convert_to_mode (TYPE_MODE (length_type), size,
3845 TYPE_UNSIGNED (length_type));
3847 result = emit_library_call_value (libfunc, 0, LCT_PURE_MAKE_BLOCK,
3854 *pmode = result_mode;
3858 /* Don't allow operands to the compare to trap, as that can put the
3859 compare and branch in different basic blocks. */
3860 if (flag_non_call_exceptions)
3863 x = force_reg (mode, x);
3865 y = force_reg (mode, y);
3870 if (can_compare_p (*pcomparison, mode, purpose))
3873 /* Handle a lib call just for the mode we are using. */
3875 if (cmp_optab->handlers[(int) mode].libfunc && !SCALAR_FLOAT_MODE_P (mode))
3877 rtx libfunc = cmp_optab->handlers[(int) mode].libfunc;
3880 /* If we want unsigned, and this mode has a distinct unsigned
3881 comparison routine, use that. */
3882 if (unsignedp && ucmp_optab->handlers[(int) mode].libfunc)
3883 libfunc = ucmp_optab->handlers[(int) mode].libfunc;
3885 result = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST_MAKE_BLOCK,
3886 word_mode, 2, x, mode, y, mode);
3888 /* There are two kinds of comparison routines. Biased routines
3889 return 0/1/2, and unbiased routines return -1/0/1. Other parts
3890 of gcc expect that the comparison operation is equivalent
3891 to the modified comparison. For signed comparisons compare the
3892 result against 1 in the biased case, and zero in the unbiased
3893 case. For unsigned comparisons always compare against 1 after
3894 biasing the unbiased result by adding 1. This gives us a way to
3900 if (!TARGET_LIB_INT_CMP_BIASED)
3903 *px = plus_constant (result, 1);
3910 gcc_assert (SCALAR_FLOAT_MODE_P (mode));
3911 prepare_float_lib_cmp (px, py, pcomparison, pmode, punsignedp);
3914 /* Before emitting an insn with code ICODE, make sure that X, which is going
3915 to be used for operand OPNUM of the insn, is converted from mode MODE to
3916 WIDER_MODE (UNSIGNEDP determines whether it is an unsigned conversion), and
3917 that it is accepted by the operand predicate. Return the new value. */
3920 prepare_operand (int icode, rtx x, int opnum, enum machine_mode mode,
3921 enum machine_mode wider_mode, int unsignedp)
3923 if (mode != wider_mode)
3924 x = convert_modes (wider_mode, mode, x, unsignedp);
3926 if (!insn_data[icode].operand[opnum].predicate
3927 (x, insn_data[icode].operand[opnum].mode))
3931 x = copy_to_mode_reg (insn_data[icode].operand[opnum].mode, x);
3937 /* Subroutine of emit_cmp_and_jump_insns; this function is called when we know
3938 we can do the comparison.
3939 The arguments are the same as for emit_cmp_and_jump_insns; but LABEL may
3940 be NULL_RTX which indicates that only a comparison is to be generated. */
3943 emit_cmp_and_jump_insn_1 (rtx x, rtx y, enum machine_mode mode,
3944 enum rtx_code comparison, int unsignedp, rtx label)
3946 rtx test = gen_rtx_fmt_ee (comparison, mode, x, y);
3947 enum mode_class class = GET_MODE_CLASS (mode);
3948 enum machine_mode wider_mode = mode;
3950 /* Try combined insns first. */
3953 enum insn_code icode;
3954 PUT_MODE (test, wider_mode);
3958 icode = cbranch_optab->handlers[(int) wider_mode].insn_code;
3960 if (icode != CODE_FOR_nothing
3961 && insn_data[icode].operand[0].predicate (test, wider_mode))
3963 x = prepare_operand (icode, x, 1, mode, wider_mode, unsignedp);
3964 y = prepare_operand (icode, y, 2, mode, wider_mode, unsignedp);
3965 emit_jump_insn (GEN_FCN (icode) (test, x, y, label));
3970 /* Handle some compares against zero. */
3971 icode = (int) tst_optab->handlers[(int) wider_mode].insn_code;
3972 if (y == CONST0_RTX (mode) && icode != CODE_FOR_nothing)
3974 x = prepare_operand (icode, x, 0, mode, wider_mode, unsignedp);
3975 emit_insn (GEN_FCN (icode) (x));
3977 emit_jump_insn (bcc_gen_fctn[(int) comparison] (label));
3981 /* Handle compares for which there is a directly suitable insn. */
3983 icode = (int) cmp_optab->handlers[(int) wider_mode].insn_code;
3984 if (icode != CODE_FOR_nothing)
3986 x = prepare_operand (icode, x, 0, mode, wider_mode, unsignedp);
3987 y = prepare_operand (icode, y, 1, mode, wider_mode, unsignedp);
3988 emit_insn (GEN_FCN (icode) (x, y));
3990 emit_jump_insn (bcc_gen_fctn[(int) comparison] (label));
3994 if (!CLASS_HAS_WIDER_MODES_P (class))
3997 wider_mode = GET_MODE_WIDER_MODE (wider_mode);
3999 while (wider_mode != VOIDmode);
4004 /* Generate code to compare X with Y so that the condition codes are
4005 set and to jump to LABEL if the condition is true. If X is a
4006 constant and Y is not a constant, then the comparison is swapped to
4007 ensure that the comparison RTL has the canonical form.
4009 UNSIGNEDP nonzero says that X and Y are unsigned; this matters if they
4010 need to be widened by emit_cmp_insn. UNSIGNEDP is also used to select
4011 the proper branch condition code.
4013 If X and Y have mode BLKmode, then SIZE specifies the size of both X and Y.
4015 MODE is the mode of the inputs (in case they are const_int).
4017 COMPARISON is the rtl operator to compare with (EQ, NE, GT, etc.). It will
4018 be passed unchanged to emit_cmp_insn, then potentially converted into an
4019 unsigned variant based on UNSIGNEDP to select a proper jump instruction. */
4022 emit_cmp_and_jump_insns (rtx x, rtx y, enum rtx_code comparison, rtx size,
4023 enum machine_mode mode, int unsignedp, rtx label)
4025 rtx op0 = x, op1 = y;
4027 /* Swap operands and condition to ensure canonical RTL. */
4028 if (swap_commutative_operands_p (x, y))
4030 /* If we're not emitting a branch, this means some caller
4035 comparison = swap_condition (comparison);
4039 /* If OP0 is still a constant, then both X and Y must be constants.
4040 Force X into a register to create canonical RTL. */
4041 if (CONSTANT_P (op0))
4042 op0 = force_reg (mode, op0);
4046 comparison = unsigned_condition (comparison);
4048 prepare_cmp_insn (&op0, &op1, &comparison, size, &mode, &unsignedp,
4050 emit_cmp_and_jump_insn_1 (op0, op1, mode, comparison, unsignedp, label);
4053 /* Like emit_cmp_and_jump_insns, but generate only the comparison. */
4056 emit_cmp_insn (rtx x, rtx y, enum rtx_code comparison, rtx size,
4057 enum machine_mode mode, int unsignedp)
4059 emit_cmp_and_jump_insns (x, y, comparison, size, mode, unsignedp, 0);
4062 /* Emit a library call comparison between floating point X and Y.
4063 COMPARISON is the rtl operator to compare with (EQ, NE, GT, etc.). */
4066 prepare_float_lib_cmp (rtx *px, rtx *py, enum rtx_code *pcomparison,
4067 enum machine_mode *pmode, int *punsignedp)
4069 enum rtx_code comparison = *pcomparison;
4070 enum rtx_code swapped = swap_condition (comparison);
4071 enum rtx_code reversed = reverse_condition_maybe_unordered (comparison);
4074 enum machine_mode orig_mode = GET_MODE (x);
4075 enum machine_mode mode;
4076 rtx value, target, insns, equiv;
4078 bool reversed_p = false;
4080 for (mode = orig_mode;
4082 mode = GET_MODE_WIDER_MODE (mode))
4084 if ((libfunc = code_to_optab[comparison]->handlers[mode].libfunc))
4087 if ((libfunc = code_to_optab[swapped]->handlers[mode].libfunc))
4090 tmp = x; x = y; y = tmp;
4091 comparison = swapped;
4095 if ((libfunc = code_to_optab[reversed]->handlers[mode].libfunc)
4096 && FLOAT_LIB_COMPARE_RETURNS_BOOL (mode, reversed))
4098 comparison = reversed;
4104 gcc_assert (mode != VOIDmode);
4106 if (mode != orig_mode)
4108 x = convert_to_mode (mode, x, 0);
4109 y = convert_to_mode (mode, y, 0);
4112 /* Attach a REG_EQUAL note describing the semantics of the libcall to
4113 the RTL. The allows the RTL optimizers to delete the libcall if the
4114 condition can be determined at compile-time. */
4115 if (comparison == UNORDERED)
4117 rtx temp = simplify_gen_relational (NE, word_mode, mode, x, x);
4118 equiv = simplify_gen_relational (NE, word_mode, mode, y, y);
4119 equiv = simplify_gen_ternary (IF_THEN_ELSE, word_mode, word_mode,
4120 temp, const_true_rtx, equiv);
4124 equiv = simplify_gen_relational (comparison, word_mode, mode, x, y);
4125 if (! FLOAT_LIB_COMPARE_RETURNS_BOOL (mode, comparison))
4127 rtx true_rtx, false_rtx;
4132 true_rtx = const0_rtx;
4133 false_rtx = const_true_rtx;
4137 true_rtx = const_true_rtx;
4138 false_rtx = const0_rtx;
4142 true_rtx = const1_rtx;
4143 false_rtx = const0_rtx;
4147 true_rtx = const0_rtx;
4148 false_rtx = constm1_rtx;
4152 true_rtx = constm1_rtx;
4153 false_rtx = const0_rtx;
4157 true_rtx = const0_rtx;
4158 false_rtx = const1_rtx;
4164 equiv = simplify_gen_ternary (IF_THEN_ELSE, word_mode, word_mode,
4165 equiv, true_rtx, false_rtx);
4170 value = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
4171 word_mode, 2, x, mode, y, mode);
4172 insns = get_insns ();
4175 target = gen_reg_rtx (word_mode);
4176 emit_libcall_block (insns, target, value, equiv);
4178 if (comparison == UNORDERED
4179 || FLOAT_LIB_COMPARE_RETURNS_BOOL (mode, comparison))
4180 comparison = reversed_p ? EQ : NE;
4185 *pcomparison = comparison;
4189 /* Generate code to indirectly jump to a location given in the rtx LOC. */
4192 emit_indirect_jump (rtx loc)
4194 if (!insn_data[(int) CODE_FOR_indirect_jump].operand[0].predicate
4196 loc = copy_to_mode_reg (Pmode, loc);
4198 emit_jump_insn (gen_indirect_jump (loc));
4202 #ifdef HAVE_conditional_move
4204 /* Emit a conditional move instruction if the machine supports one for that
4205 condition and machine mode.
4207 OP0 and OP1 are the operands that should be compared using CODE. CMODE is
4208 the mode to use should they be constants. If it is VOIDmode, they cannot
4211 OP2 should be stored in TARGET if the comparison is true, otherwise OP3
4212 should be stored there. MODE is the mode to use should they be constants.
4213 If it is VOIDmode, they cannot both be constants.
4215 The result is either TARGET (perhaps modified) or NULL_RTX if the operation
4216 is not supported. */
4219 emit_conditional_move (rtx target, enum rtx_code code, rtx op0, rtx op1,
4220 enum machine_mode cmode, rtx op2, rtx op3,
4221 enum machine_mode mode, int unsignedp)
4223 rtx tem, subtarget, comparison, insn;
4224 enum insn_code icode;
4225 enum rtx_code reversed;
4227 /* If one operand is constant, make it the second one. Only do this
4228 if the other operand is not constant as well. */
4230 if (swap_commutative_operands_p (op0, op1))
4235 code = swap_condition (code);
4238 /* get_condition will prefer to generate LT and GT even if the old
4239 comparison was against zero, so undo that canonicalization here since
4240 comparisons against zero are cheaper. */
4241 if (code == LT && op1 == const1_rtx)
4242 code = LE, op1 = const0_rtx;
4243 else if (code == GT && op1 == constm1_rtx)
4244 code = GE, op1 = const0_rtx;
4246 if (cmode == VOIDmode)
4247 cmode = GET_MODE (op0);
4249 if (swap_commutative_operands_p (op2, op3)
4250 && ((reversed = reversed_comparison_code_parts (code, op0, op1, NULL))
4259 if (mode == VOIDmode)
4260 mode = GET_MODE (op2);
4262 icode = movcc_gen_code[mode];
4264 if (icode == CODE_FOR_nothing)
4268 target = gen_reg_rtx (mode);
4272 /* If the insn doesn't accept these operands, put them in pseudos. */
4274 if (!insn_data[icode].operand[0].predicate
4275 (subtarget, insn_data[icode].operand[0].mode))
4276 subtarget = gen_reg_rtx (insn_data[icode].operand[0].mode);
4278 if (!insn_data[icode].operand[2].predicate
4279 (op2, insn_data[icode].operand[2].mode))
4280 op2 = copy_to_mode_reg (insn_data[icode].operand[2].mode, op2);
4282 if (!insn_data[icode].operand[3].predicate
4283 (op3, insn_data[icode].operand[3].mode))
4284 op3 = copy_to_mode_reg (insn_data[icode].operand[3].mode, op3);
4286 /* Everything should now be in the suitable form, so emit the compare insn
4287 and then the conditional move. */
4290 = compare_from_rtx (op0, op1, code, unsignedp, cmode, NULL_RTX);
4292 /* ??? Watch for const0_rtx (nop) and const_true_rtx (unconditional)? */
4293 /* We can get const0_rtx or const_true_rtx in some circumstances. Just
4294 return NULL and let the caller figure out how best to deal with this
4296 if (GET_CODE (comparison) != code)
4299 insn = GEN_FCN (icode) (subtarget, comparison, op2, op3);
4301 /* If that failed, then give up. */
4307 if (subtarget != target)
4308 convert_move (target, subtarget, 0);
4313 /* Return nonzero if a conditional move of mode MODE is supported.
4315 This function is for combine so it can tell whether an insn that looks
4316 like a conditional move is actually supported by the hardware. If we
4317 guess wrong we lose a bit on optimization, but that's it. */
4318 /* ??? sparc64 supports conditionally moving integers values based on fp
4319 comparisons, and vice versa. How do we handle them? */
4322 can_conditionally_move_p (enum machine_mode mode)
4324 if (movcc_gen_code[mode] != CODE_FOR_nothing)
4330 #endif /* HAVE_conditional_move */
4332 /* Emit a conditional addition instruction if the machine supports one for that
4333 condition and machine mode.
4335 OP0 and OP1 are the operands that should be compared using CODE. CMODE is
4336 the mode to use should they be constants. If it is VOIDmode, they cannot
4339 OP2 should be stored in TARGET if the comparison is true, otherwise OP2+OP3
4340 should be stored there. MODE is the mode to use should they be constants.
4341 If it is VOIDmode, they cannot both be constants.
4343 The result is either TARGET (perhaps modified) or NULL_RTX if the operation
4344 is not supported. */
4347 emit_conditional_add (rtx target, enum rtx_code code, rtx op0, rtx op1,
4348 enum machine_mode cmode, rtx op2, rtx op3,
4349 enum machine_mode mode, int unsignedp)
4351 rtx tem, subtarget, comparison, insn;
4352 enum insn_code icode;
4353 enum rtx_code reversed;
4355 /* If one operand is constant, make it the second one. Only do this
4356 if the other operand is not constant as well. */
4358 if (swap_commutative_operands_p (op0, op1))
4363 code = swap_condition (code);
4366 /* get_condition will prefer to generate LT and GT even if the old
4367 comparison was against zero, so undo that canonicalization here since
4368 comparisons against zero are cheaper. */
4369 if (code == LT && op1 == const1_rtx)
4370 code = LE, op1 = const0_rtx;
4371 else if (code == GT && op1 == constm1_rtx)
4372 code = GE, op1 = const0_rtx;
4374 if (cmode == VOIDmode)
4375 cmode = GET_MODE (op0);
4377 if (swap_commutative_operands_p (op2, op3)
4378 && ((reversed = reversed_comparison_code_parts (code, op0, op1, NULL))
4387 if (mode == VOIDmode)
4388 mode = GET_MODE (op2);
4390 icode = addcc_optab->handlers[(int) mode].insn_code;
4392 if (icode == CODE_FOR_nothing)
4396 target = gen_reg_rtx (mode);
4398 /* If the insn doesn't accept these operands, put them in pseudos. */
4400 if (!insn_data[icode].operand[0].predicate
4401 (target, insn_data[icode].operand[0].mode))
4402 subtarget = gen_reg_rtx (insn_data[icode].operand[0].mode);
4406 if (!insn_data[icode].operand[2].predicate
4407 (op2, insn_data[icode].operand[2].mode))
4408 op2 = copy_to_mode_reg (insn_data[icode].operand[2].mode, op2);
4410 if (!insn_data[icode].operand[3].predicate
4411 (op3, insn_data[icode].operand[3].mode))
4412 op3 = copy_to_mode_reg (insn_data[icode].operand[3].mode, op3);
4414 /* Everything should now be in the suitable form, so emit the compare insn
4415 and then the conditional move. */
4418 = compare_from_rtx (op0, op1, code, unsignedp, cmode, NULL_RTX);
4420 /* ??? Watch for const0_rtx (nop) and const_true_rtx (unconditional)? */
4421 /* We can get const0_rtx or const_true_rtx in some circumstances. Just
4422 return NULL and let the caller figure out how best to deal with this
4424 if (GET_CODE (comparison) != code)
4427 insn = GEN_FCN (icode) (subtarget, comparison, op2, op3);
4429 /* If that failed, then give up. */
4435 if (subtarget != target)
4436 convert_move (target, subtarget, 0);
4441 /* These functions attempt to generate an insn body, rather than
4442 emitting the insn, but if the gen function already emits them, we
4443 make no attempt to turn them back into naked patterns. */
4445 /* Generate and return an insn body to add Y to X. */
4448 gen_add2_insn (rtx x, rtx y)
4450 int icode = (int) add_optab->handlers[(int) GET_MODE (x)].insn_code;
4452 gcc_assert (insn_data[icode].operand[0].predicate
4453 (x, insn_data[icode].operand[0].mode));
4454 gcc_assert (insn_data[icode].operand[1].predicate
4455 (x, insn_data[icode].operand[1].mode));
4456 gcc_assert (insn_data[icode].operand[2].predicate
4457 (y, insn_data[icode].operand[2].mode));
4459 return GEN_FCN (icode) (x, x, y);
4462 /* Generate and return an insn body to add r1 and c,
4463 storing the result in r0. */
4465 gen_add3_insn (rtx r0, rtx r1, rtx c)
4467 int icode = (int) add_optab->handlers[(int) GET_MODE (r0)].insn_code;
4469 if (icode == CODE_FOR_nothing
4470 || !(insn_data[icode].operand[0].predicate
4471 (r0, insn_data[icode].operand[0].mode))
4472 || !(insn_data[icode].operand[1].predicate
4473 (r1, insn_data[icode].operand[1].mode))
4474 || !(insn_data[icode].operand[2].predicate
4475 (c, insn_data[icode].operand[2].mode)))
4478 return GEN_FCN (icode) (r0, r1, c);
4482 have_add2_insn (rtx x, rtx y)
4486 gcc_assert (GET_MODE (x) != VOIDmode);
4488 icode = (int) add_optab->handlers[(int) GET_MODE (x)].insn_code;
4490 if (icode == CODE_FOR_nothing)
4493 if (!(insn_data[icode].operand[0].predicate
4494 (x, insn_data[icode].operand[0].mode))
4495 || !(insn_data[icode].operand[1].predicate
4496 (x, insn_data[icode].operand[1].mode))
4497 || !(insn_data[icode].operand[2].predicate
4498 (y, insn_data[icode].operand[2].mode)))
4504 /* Generate and return an insn body to subtract Y from X. */
4507 gen_sub2_insn (rtx x, rtx y)
4509 int icode = (int) sub_optab->handlers[(int) GET_MODE (x)].insn_code;
4511 gcc_assert (insn_data[icode].operand[0].predicate
4512 (x, insn_data[icode].operand[0].mode));
4513 gcc_assert (insn_data[icode].operand[1].predicate
4514 (x, insn_data[icode].operand[1].mode));
4515 gcc_assert (insn_data[icode].operand[2].predicate
4516 (y, insn_data[icode].operand[2].mode));
4518 return GEN_FCN (icode) (x, x, y);
4521 /* Generate and return an insn body to subtract r1 and c,
4522 storing the result in r0. */
4524 gen_sub3_insn (rtx r0, rtx r1, rtx c)
4526 int icode = (int) sub_optab->handlers[(int) GET_MODE (r0)].insn_code;
4528 if (icode == CODE_FOR_nothing
4529 || !(insn_data[icode].operand[0].predicate
4530 (r0, insn_data[icode].operand[0].mode))
4531 || !(insn_data[icode].operand[1].predicate
4532 (r1, insn_data[icode].operand[1].mode))
4533 || !(insn_data[icode].operand[2].predicate
4534 (c, insn_data[icode].operand[2].mode)))
4537 return GEN_FCN (icode) (r0, r1, c);
4541 have_sub2_insn (rtx x, rtx y)
4545 gcc_assert (GET_MODE (x) != VOIDmode);
4547 icode = (int) sub_optab->handlers[(int) GET_MODE (x)].insn_code;
4549 if (icode == CODE_FOR_nothing)
4552 if (!(insn_data[icode].operand[0].predicate
4553 (x, insn_data[icode].operand[0].mode))
4554 || !(insn_data[icode].operand[1].predicate
4555 (x, insn_data[icode].operand[1].mode))
4556 || !(insn_data[icode].operand[2].predicate
4557 (y, insn_data[icode].operand[2].mode)))
4563 /* Generate the body of an instruction to copy Y into X.
4564 It may be a list of insns, if one insn isn't enough. */
4567 gen_move_insn (rtx x, rtx y)
4572 emit_move_insn_1 (x, y);
4578 /* Return the insn code used to extend FROM_MODE to TO_MODE.
4579 UNSIGNEDP specifies zero-extension instead of sign-extension. If
4580 no such operation exists, CODE_FOR_nothing will be returned. */
4583 can_extend_p (enum machine_mode to_mode, enum machine_mode from_mode,
4587 #ifdef HAVE_ptr_extend
4589 return CODE_FOR_ptr_extend;
4592 tab = unsignedp ? zext_optab : sext_optab;
4593 return tab->handlers[to_mode][from_mode].insn_code;
4596 /* Generate the body of an insn to extend Y (with mode MFROM)
4597 into X (with mode MTO). Do zero-extension if UNSIGNEDP is nonzero. */
4600 gen_extend_insn (rtx x, rtx y, enum machine_mode mto,
4601 enum machine_mode mfrom, int unsignedp)
4603 enum insn_code icode = can_extend_p (mto, mfrom, unsignedp);
4604 return GEN_FCN (icode) (x, y);
4607 /* can_fix_p and can_float_p say whether the target machine
4608 can directly convert a given fixed point type to
4609 a given floating point type, or vice versa.
4610 The returned value is the CODE_FOR_... value to use,
4611 or CODE_FOR_nothing if these modes cannot be directly converted.
4613 *TRUNCP_PTR is set to 1 if it is necessary to output
4614 an explicit FTRUNC insn before the fix insn; otherwise 0. */
4616 static enum insn_code
4617 can_fix_p (enum machine_mode fixmode, enum machine_mode fltmode,
4618 int unsignedp, int *truncp_ptr)
4621 enum insn_code icode;
4623 tab = unsignedp ? ufixtrunc_optab : sfixtrunc_optab;
4624 icode = tab->handlers[fixmode][fltmode].insn_code;
4625 if (icode != CODE_FOR_nothing)
4631 /* FIXME: This requires a port to define both FIX and FTRUNC pattern
4632 for this to work. We need to rework the fix* and ftrunc* patterns
4633 and documentation. */
4634 tab = unsignedp ? ufix_optab : sfix_optab;
4635 icode = tab->handlers[fixmode][fltmode].insn_code;
4636 if (icode != CODE_FOR_nothing
4637 && ftrunc_optab->handlers[fltmode].insn_code != CODE_FOR_nothing)
4644 return CODE_FOR_nothing;
4647 static enum insn_code
4648 can_float_p (enum machine_mode fltmode, enum machine_mode fixmode,
4653 tab = unsignedp ? ufloat_optab : sfloat_optab;
4654 return tab->handlers[fltmode][fixmode].insn_code;
4657 /* Generate code to convert FROM to floating point
4658 and store in TO. FROM must be fixed point and not VOIDmode.
4659 UNSIGNEDP nonzero means regard FROM as unsigned.
4660 Normally this is done by correcting the final value
4661 if it is negative. */
4664 expand_float (rtx to, rtx from, int unsignedp)
4666 enum insn_code icode;
4668 enum machine_mode fmode, imode;
4669 bool can_do_signed = false;
4671 /* Crash now, because we won't be able to decide which mode to use. */
4672 gcc_assert (GET_MODE (from) != VOIDmode);
4674 /* Look for an insn to do the conversion. Do it in the specified
4675 modes if possible; otherwise convert either input, output or both to
4676 wider mode. If the integer mode is wider than the mode of FROM,
4677 we can do the conversion signed even if the input is unsigned. */
4679 for (fmode = GET_MODE (to); fmode != VOIDmode;
4680 fmode = GET_MODE_WIDER_MODE (fmode))
4681 for (imode = GET_MODE (from); imode != VOIDmode;
4682 imode = GET_MODE_WIDER_MODE (imode))
4684 int doing_unsigned = unsignedp;
4686 if (fmode != GET_MODE (to)
4687 && significand_size (fmode) < GET_MODE_BITSIZE (GET_MODE (from)))
4690 icode = can_float_p (fmode, imode, unsignedp);
4691 if (icode == CODE_FOR_nothing && unsignedp)
4693 enum insn_code scode = can_float_p (fmode, imode, 0);
4694 if (scode != CODE_FOR_nothing)
4695 can_do_signed = true;
4696 if (imode != GET_MODE (from))
4697 icode = scode, doing_unsigned = 0;
4700 if (icode != CODE_FOR_nothing)
4702 if (imode != GET_MODE (from))
4703 from = convert_to_mode (imode, from, unsignedp);
4705 if (fmode != GET_MODE (to))
4706 target = gen_reg_rtx (fmode);
4708 emit_unop_insn (icode, target, from,
4709 doing_unsigned ? UNSIGNED_FLOAT : FLOAT);
4712 convert_move (to, target, 0);
4717 /* Unsigned integer, and no way to convert directly. For binary
4718 floating point modes, convert as signed, then conditionally adjust
4720 if (unsignedp && can_do_signed && !DECIMAL_FLOAT_MODE_P (GET_MODE (to)))
4722 rtx label = gen_label_rtx ();
4724 REAL_VALUE_TYPE offset;
4726 /* Look for a usable floating mode FMODE wider than the source and at
4727 least as wide as the target. Using FMODE will avoid rounding woes
4728 with unsigned values greater than the signed maximum value. */
4730 for (fmode = GET_MODE (to); fmode != VOIDmode;
4731 fmode = GET_MODE_WIDER_MODE (fmode))
4732 if (GET_MODE_BITSIZE (GET_MODE (from)) < GET_MODE_BITSIZE (fmode)
4733 && can_float_p (fmode, GET_MODE (from), 0) != CODE_FOR_nothing)
4736 if (fmode == VOIDmode)
4738 /* There is no such mode. Pretend the target is wide enough. */
4739 fmode = GET_MODE (to);
4741 /* Avoid double-rounding when TO is narrower than FROM. */
4742 if ((significand_size (fmode) + 1)
4743 < GET_MODE_BITSIZE (GET_MODE (from)))
4746 rtx neglabel = gen_label_rtx ();
4748 /* Don't use TARGET if it isn't a register, is a hard register,
4749 or is the wrong mode. */
4751 || REGNO (target) < FIRST_PSEUDO_REGISTER
4752 || GET_MODE (target) != fmode)
4753 target = gen_reg_rtx (fmode);
4755 imode = GET_MODE (from);
4756 do_pending_stack_adjust ();
4758 /* Test whether the sign bit is set. */
4759 emit_cmp_and_jump_insns (from, const0_rtx, LT, NULL_RTX, imode,
4762 /* The sign bit is not set. Convert as signed. */
4763 expand_float (target, from, 0);
4764 emit_jump_insn (gen_jump (label));
4767 /* The sign bit is set.
4768 Convert to a usable (positive signed) value by shifting right
4769 one bit, while remembering if a nonzero bit was shifted
4770 out; i.e., compute (from & 1) | (from >> 1). */
4772 emit_label (neglabel);
4773 temp = expand_binop (imode, and_optab, from, const1_rtx,
4774 NULL_RTX, 1, OPTAB_LIB_WIDEN);
4775 temp1 = expand_shift (RSHIFT_EXPR, imode, from, integer_one_node,
4777 temp = expand_binop (imode, ior_optab, temp, temp1, temp, 1,
4779 expand_float (target, temp, 0);
4781 /* Multiply by 2 to undo the shift above. */
4782 temp = expand_binop (fmode, add_optab, target, target,
4783 target, 0, OPTAB_LIB_WIDEN);
4785 emit_move_insn (target, temp);
4787 do_pending_stack_adjust ();
4793 /* If we are about to do some arithmetic to correct for an
4794 unsigned operand, do it in a pseudo-register. */
4796 if (GET_MODE (to) != fmode
4797 || !REG_P (to) || REGNO (to) < FIRST_PSEUDO_REGISTER)
4798 target = gen_reg_rtx (fmode);
4800 /* Convert as signed integer to floating. */
4801 expand_float (target, from, 0);
4803 /* If FROM is negative (and therefore TO is negative),
4804 correct its value by 2**bitwidth. */
4806 do_pending_stack_adjust ();
4807 emit_cmp_and_jump_insns (from, const0_rtx, GE, NULL_RTX, GET_MODE (from),
4811 real_2expN (&offset, GET_MODE_BITSIZE (GET_MODE (from)));
4812 temp = expand_binop (fmode, add_optab, target,
4813 CONST_DOUBLE_FROM_REAL_VALUE (offset, fmode),
4814 target, 0, OPTAB_LIB_WIDEN);
4816 emit_move_insn (target, temp);
4818 do_pending_stack_adjust ();
4823 /* No hardware instruction available; call a library routine. */
4828 convert_optab tab = unsignedp ? ufloat_optab : sfloat_optab;
4830 if (GET_MODE_SIZE (GET_MODE (from)) < GET_MODE_SIZE (SImode))
4831 from = convert_to_mode (SImode, from, unsignedp);
4833 libfunc = tab->handlers[GET_MODE (to)][GET_MODE (from)].libfunc;
4834 gcc_assert (libfunc);
4838 value = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
4839 GET_MODE (to), 1, from,
4841 insns = get_insns ();
4844 emit_libcall_block (insns, target, value,
4845 gen_rtx_FLOAT (GET_MODE (to), from));
4850 /* Copy result to requested destination
4851 if we have been computing in a temp location. */
4855 if (GET_MODE (target) == GET_MODE (to))
4856 emit_move_insn (to, target);
4858 convert_move (to, target, 0);
4862 /* Generate code to convert FROM to fixed point and store in TO. FROM
4863 must be floating point. */
4866 expand_fix (rtx to, rtx from, int unsignedp)
4868 enum insn_code icode;
4870 enum machine_mode fmode, imode;
4873 /* We first try to find a pair of modes, one real and one integer, at
4874 least as wide as FROM and TO, respectively, in which we can open-code
4875 this conversion. If the integer mode is wider than the mode of TO,
4876 we can do the conversion either signed or unsigned. */
4878 for (fmode = GET_MODE (from); fmode != VOIDmode;
4879 fmode = GET_MODE_WIDER_MODE (fmode))
4880 for (imode = GET_MODE (to); imode != VOIDmode;
4881 imode = GET_MODE_WIDER_MODE (imode))
4883 int doing_unsigned = unsignedp;
4885 icode = can_fix_p (imode, fmode, unsignedp, &must_trunc);
4886 if (icode == CODE_FOR_nothing && imode != GET_MODE (to) && unsignedp)
4887 icode = can_fix_p (imode, fmode, 0, &must_trunc), doing_unsigned = 0;
4889 if (icode != CODE_FOR_nothing)
4891 if (fmode != GET_MODE (from))
4892 from = convert_to_mode (fmode, from, 0);
4896 rtx temp = gen_reg_rtx (GET_MODE (from));
4897 from = expand_unop (GET_MODE (from), ftrunc_optab, from,
4901 if (imode != GET_MODE (to))
4902 target = gen_reg_rtx (imode);
4904 emit_unop_insn (icode, target, from,
4905 doing_unsigned ? UNSIGNED_FIX : FIX);
4907 convert_move (to, target, unsignedp);
4912 /* For an unsigned conversion, there is one more way to do it.
4913 If we have a signed conversion, we generate code that compares
4914 the real value to the largest representable positive number. If if
4915 is smaller, the conversion is done normally. Otherwise, subtract
4916 one plus the highest signed number, convert, and add it back.
4918 We only need to check all real modes, since we know we didn't find
4919 anything with a wider integer mode.
4921 This code used to extend FP value into mode wider than the destination.
4922 This is not needed. Consider, for instance conversion from SFmode
4925 The hot path through the code is dealing with inputs smaller than 2^63
4926 and doing just the conversion, so there is no bits to lose.
4928 In the other path we know the value is positive in the range 2^63..2^64-1
4929 inclusive. (as for other imput overflow happens and result is undefined)
4930 So we know that the most important bit set in mantissa corresponds to
4931 2^63. The subtraction of 2^63 should not generate any rounding as it
4932 simply clears out that bit. The rest is trivial. */
4934 if (unsignedp && GET_MODE_BITSIZE (GET_MODE (to)) <= HOST_BITS_PER_WIDE_INT)
4935 for (fmode = GET_MODE (from); fmode != VOIDmode;
4936 fmode = GET_MODE_WIDER_MODE (fmode))
4937 if (CODE_FOR_nothing != can_fix_p (GET_MODE (to), fmode, 0,
4941 REAL_VALUE_TYPE offset;
4942 rtx limit, lab1, lab2, insn;
4944 bitsize = GET_MODE_BITSIZE (GET_MODE (to));
4945 real_2expN (&offset, bitsize - 1);
4946 limit = CONST_DOUBLE_FROM_REAL_VALUE (offset, fmode);
4947 lab1 = gen_label_rtx ();
4948 lab2 = gen_label_rtx ();
4950 if (fmode != GET_MODE (from))
4951 from = convert_to_mode (fmode, from, 0);
4953 /* See if we need to do the subtraction. */
4954 do_pending_stack_adjust ();
4955 emit_cmp_and_jump_insns (from, limit, GE, NULL_RTX, GET_MODE (from),
4958 /* If not, do the signed "fix" and branch around fixup code. */
4959 expand_fix (to, from, 0);
4960 emit_jump_insn (gen_jump (lab2));
4963 /* Otherwise, subtract 2**(N-1), convert to signed number,
4964 then add 2**(N-1). Do the addition using XOR since this
4965 will often generate better code. */
4967 target = expand_binop (GET_MODE (from), sub_optab, from, limit,
4968 NULL_RTX, 0, OPTAB_LIB_WIDEN);
4969 expand_fix (to, target, 0);
4970 target = expand_binop (GET_MODE (to), xor_optab, to,
4972 ((HOST_WIDE_INT) 1 << (bitsize - 1),
4974 to, 1, OPTAB_LIB_WIDEN);
4977 emit_move_insn (to, target);
4981 if (mov_optab->handlers[(int) GET_MODE (to)].insn_code
4982 != CODE_FOR_nothing)
4984 /* Make a place for a REG_NOTE and add it. */
4985 insn = emit_move_insn (to, to);
4986 set_unique_reg_note (insn,
4988 gen_rtx_fmt_e (UNSIGNED_FIX,
4996 /* We can't do it with an insn, so use a library call. But first ensure
4997 that the mode of TO is at least as wide as SImode, since those are the
4998 only library calls we know about. */
5000 if (GET_MODE_SIZE (GET_MODE (to)) < GET_MODE_SIZE (SImode))
5002 target = gen_reg_rtx (SImode);
5004 expand_fix (target, from, unsignedp);
5012 convert_optab tab = unsignedp ? ufix_optab : sfix_optab;
5013 libfunc = tab->handlers[GET_MODE (to)][GET_MODE (from)].libfunc;
5014 gcc_assert (libfunc);
5018 value = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
5019 GET_MODE (to), 1, from,
5021 insns = get_insns ();
5024 emit_libcall_block (insns, target, value,
5025 gen_rtx_fmt_e (unsignedp ? UNSIGNED_FIX : FIX,
5026 GET_MODE (to), from));
5031 if (GET_MODE (to) == GET_MODE (target))
5032 emit_move_insn (to, target);
5034 convert_move (to, target, 0);
5038 /* Generate code to convert FROM to fixed point and store in TO. FROM
5039 must be floating point, TO must be signed. Use the conversion optab
5040 TAB to do the conversion. */
5043 expand_sfix_optab (rtx to, rtx from, convert_optab tab)
5045 enum insn_code icode;
5047 enum machine_mode fmode, imode;
5049 /* We first try to find a pair of modes, one real and one integer, at
5050 least as wide as FROM and TO, respectively, in which we can open-code
5051 this conversion. If the integer mode is wider than the mode of TO,
5052 we can do the conversion either signed or unsigned. */
5054 for (fmode = GET_MODE (from); fmode != VOIDmode;
5055 fmode = GET_MODE_WIDER_MODE (fmode))
5056 for (imode = GET_MODE (to); imode != VOIDmode;
5057 imode = GET_MODE_WIDER_MODE (imode))
5059 icode = tab->handlers[imode][fmode].insn_code;
5060 if (icode != CODE_FOR_nothing)
5062 if (fmode != GET_MODE (from))
5063 from = convert_to_mode (fmode, from, 0);
5065 if (imode != GET_MODE (to))
5066 target = gen_reg_rtx (imode);
5068 emit_unop_insn (icode, target, from, UNKNOWN);
5070 convert_move (to, target, 0);
5078 /* Report whether we have an instruction to perform the operation
5079 specified by CODE on operands of mode MODE. */
5081 have_insn_for (enum rtx_code code, enum machine_mode mode)
5083 return (code_to_optab[(int) code] != 0
5084 && (code_to_optab[(int) code]->handlers[(int) mode].insn_code
5085 != CODE_FOR_nothing));
5088 /* Create a blank optab. */
5093 optab op = ggc_alloc (sizeof (struct optab));
5094 for (i = 0; i < NUM_MACHINE_MODES; i++)
5096 op->handlers[i].insn_code = CODE_FOR_nothing;
5097 op->handlers[i].libfunc = 0;
5103 static convert_optab
5104 new_convert_optab (void)
5107 convert_optab op = ggc_alloc (sizeof (struct convert_optab));
5108 for (i = 0; i < NUM_MACHINE_MODES; i++)
5109 for (j = 0; j < NUM_MACHINE_MODES; j++)
5111 op->handlers[i][j].insn_code = CODE_FOR_nothing;
5112 op->handlers[i][j].libfunc = 0;
5117 /* Same, but fill in its code as CODE, and write it into the
5118 code_to_optab table. */
5120 init_optab (enum rtx_code code)
5122 optab op = new_optab ();
5124 code_to_optab[(int) code] = op;
5128 /* Same, but fill in its code as CODE, and do _not_ write it into
5129 the code_to_optab table. */
5131 init_optabv (enum rtx_code code)
5133 optab op = new_optab ();
5138 /* Conversion optabs never go in the code_to_optab table. */
5139 static inline convert_optab
5140 init_convert_optab (enum rtx_code code)
5142 convert_optab op = new_convert_optab ();
5147 /* Initialize the libfunc fields of an entire group of entries in some
5148 optab. Each entry is set equal to a string consisting of a leading
5149 pair of underscores followed by a generic operation name followed by
5150 a mode name (downshifted to lowercase) followed by a single character
5151 representing the number of operands for the given operation (which is
5152 usually one of the characters '2', '3', or '4').
5154 OPTABLE is the table in which libfunc fields are to be initialized.
5155 FIRST_MODE is the first machine mode index in the given optab to
5157 LAST_MODE is the last machine mode index in the given optab to
5159 OPNAME is the generic (string) name of the operation.
5160 SUFFIX is the character which specifies the number of operands for
5161 the given generic operation.
5165 init_libfuncs (optab optable, int first_mode, int last_mode,
5166 const char *opname, int suffix)
5169 unsigned opname_len = strlen (opname);
5171 for (mode = first_mode; (int) mode <= (int) last_mode;
5172 mode = (enum machine_mode) ((int) mode + 1))
5174 const char *mname = GET_MODE_NAME (mode);
5175 unsigned mname_len = strlen (mname);
5176 char *libfunc_name = alloca (2 + opname_len + mname_len + 1 + 1);
5183 for (q = opname; *q; )
5185 for (q = mname; *q; q++)
5186 *p++ = TOLOWER (*q);
5190 optable->handlers[(int) mode].libfunc
5191 = init_one_libfunc (ggc_alloc_string (libfunc_name, p - libfunc_name));
5195 /* Initialize the libfunc fields of an entire group of entries in some
5196 optab which correspond to all integer mode operations. The parameters
5197 have the same meaning as similarly named ones for the `init_libfuncs'
5198 routine. (See above). */
5201 init_integral_libfuncs (optab optable, const char *opname, int suffix)
5203 int maxsize = 2*BITS_PER_WORD;
5204 if (maxsize < LONG_LONG_TYPE_SIZE)
5205 maxsize = LONG_LONG_TYPE_SIZE;
5206 init_libfuncs (optable, word_mode,
5207 mode_for_size (maxsize, MODE_INT, 0),
5211 /* Initialize the libfunc fields of an entire group of entries in some
5212 optab which correspond to all real mode operations. The parameters
5213 have the same meaning as similarly named ones for the `init_libfuncs'
5214 routine. (See above). */
5217 init_floating_libfuncs (optab optable, const char *opname, int suffix)
5219 char *dec_opname = alloca (sizeof (DECIMAL_PREFIX) + strlen (opname));
5221 /* For BID support, change the name to have either a bid_ or dpd_ prefix
5222 depending on the low level floating format used. */
5223 memcpy (dec_opname, DECIMAL_PREFIX, sizeof (DECIMAL_PREFIX) - 1);
5224 strcpy (dec_opname + sizeof (DECIMAL_PREFIX) - 1, opname);
5226 init_libfuncs (optable, MIN_MODE_FLOAT, MAX_MODE_FLOAT, opname, suffix);
5227 init_libfuncs (optable, MIN_MODE_DECIMAL_FLOAT, MAX_MODE_DECIMAL_FLOAT,
5228 dec_opname, suffix);
5231 /* Initialize the libfunc fields of an entire group of entries of an
5232 inter-mode-class conversion optab. The string formation rules are
5233 similar to the ones for init_libfuncs, above, but instead of having
5234 a mode name and an operand count these functions have two mode names
5235 and no operand count. */
5237 init_interclass_conv_libfuncs (convert_optab tab, const char *opname,
5238 enum mode_class from_class,
5239 enum mode_class to_class)
5241 enum machine_mode first_from_mode = GET_CLASS_NARROWEST_MODE (from_class);
5242 enum machine_mode first_to_mode = GET_CLASS_NARROWEST_MODE (to_class);
5243 size_t opname_len = strlen (opname);
5244 size_t max_mname_len = 0;
5246 enum machine_mode fmode, tmode;
5247 const char *fname, *tname;
5249 char *libfunc_name, *suffix;
5250 char *nondec_name, *dec_name, *nondec_suffix, *dec_suffix;
5253 /* If this is a decimal conversion, add the current BID vs. DPD prefix that
5254 depends on which underlying decimal floating point format is used. */
5255 const size_t dec_len = sizeof (DECIMAL_PREFIX) - 1;
5257 for (fmode = first_from_mode;
5259 fmode = GET_MODE_WIDER_MODE (fmode))
5260 max_mname_len = MAX (max_mname_len, strlen (GET_MODE_NAME (fmode)));
5262 for (tmode = first_to_mode;
5264 tmode = GET_MODE_WIDER_MODE (tmode))
5265 max_mname_len = MAX (max_mname_len, strlen (GET_MODE_NAME (tmode)));
5267 nondec_name = alloca (2 + opname_len + 2*max_mname_len + 1 + 1);
5268 nondec_name[0] = '_';
5269 nondec_name[1] = '_';
5270 memcpy (&nondec_name[2], opname, opname_len);
5271 nondec_suffix = nondec_name + opname_len + 2;
5273 dec_name = alloca (2 + dec_len + opname_len + 2*max_mname_len + 1 + 1);
5276 memcpy (&dec_name[2], DECIMAL_PREFIX, dec_len);
5277 memcpy (&dec_name[2+dec_len], opname, opname_len);
5278 dec_suffix = dec_name + dec_len + opname_len + 2;
5280 for (fmode = first_from_mode; fmode != VOIDmode;
5281 fmode = GET_MODE_WIDER_MODE (fmode))
5282 for (tmode = first_to_mode; tmode != VOIDmode;
5283 tmode = GET_MODE_WIDER_MODE (tmode))
5285 fname = GET_MODE_NAME (fmode);
5286 tname = GET_MODE_NAME (tmode);
5288 if (DECIMAL_FLOAT_MODE_P(fmode) || DECIMAL_FLOAT_MODE_P(tmode))
5290 libfunc_name = dec_name;
5291 suffix = dec_suffix;
5295 libfunc_name = nondec_name;
5296 suffix = nondec_suffix;
5300 for (q = fname; *q; p++, q++)
5302 for (q = tname; *q; p++, q++)
5307 tab->handlers[tmode][fmode].libfunc
5308 = init_one_libfunc (ggc_alloc_string (libfunc_name,
5313 /* Initialize the libfunc fields of an entire group of entries of an
5314 intra-mode-class conversion optab. The string formation rules are
5315 similar to the ones for init_libfunc, above. WIDENING says whether
5316 the optab goes from narrow to wide modes or vice versa. These functions
5317 have two mode names _and_ an operand count. */
5319 init_intraclass_conv_libfuncs (convert_optab tab, const char *opname,
5320 enum mode_class class, bool widening)
5322 enum machine_mode first_mode = GET_CLASS_NARROWEST_MODE (class);
5323 size_t opname_len = strlen (opname);
5324 size_t max_mname_len = 0;
5326 enum machine_mode nmode, wmode;
5327 const char *nname, *wname;
5329 char *nondec_name, *dec_name, *nondec_suffix, *dec_suffix;
5330 char *libfunc_name, *suffix;
5333 /* If this is a decimal conversion, add the current BID vs. DPD prefix that
5334 depends on which underlying decimal floating point format is used. */
5335 const size_t dec_len = sizeof (DECIMAL_PREFIX) - 1;
5337 for (nmode = first_mode; nmode != VOIDmode;
5338 nmode = GET_MODE_WIDER_MODE (nmode))
5339 max_mname_len = MAX (max_mname_len, strlen (GET_MODE_NAME (nmode)));
5341 nondec_name = alloca (2 + opname_len + 2*max_mname_len + 1 + 1);
5342 nondec_name[0] = '_';
5343 nondec_name[1] = '_';
5344 memcpy (&nondec_name[2], opname, opname_len);
5345 nondec_suffix = nondec_name + opname_len + 2;
5347 dec_name = alloca (2 + dec_len + opname_len + 2*max_mname_len + 1 + 1);
5350 memcpy (&dec_name[2], DECIMAL_PREFIX, dec_len);
5351 memcpy (&dec_name[2 + dec_len], opname, opname_len);
5352 dec_suffix = dec_name + dec_len + opname_len + 2;
5354 for (nmode = first_mode; nmode != VOIDmode;
5355 nmode = GET_MODE_WIDER_MODE (nmode))
5356 for (wmode = GET_MODE_WIDER_MODE (nmode); wmode != VOIDmode;
5357 wmode = GET_MODE_WIDER_MODE (wmode))
5359 nname = GET_MODE_NAME (nmode);
5360 wname = GET_MODE_NAME (wmode);
5362 if (DECIMAL_FLOAT_MODE_P(nmode) || DECIMAL_FLOAT_MODE_P(wmode))
5364 libfunc_name = dec_name;
5365 suffix = dec_suffix;
5369 libfunc_name = nondec_name;
5370 suffix = nondec_suffix;
5374 for (q = widening ? nname : wname; *q; p++, q++)
5376 for (q = widening ? wname : nname; *q; p++, q++)
5382 tab->handlers[widening ? wmode : nmode]
5383 [widening ? nmode : wmode].libfunc
5384 = init_one_libfunc (ggc_alloc_string (libfunc_name,
5391 init_one_libfunc (const char *name)
5395 /* Create a FUNCTION_DECL that can be passed to
5396 targetm.encode_section_info. */
5397 /* ??? We don't have any type information except for this is
5398 a function. Pretend this is "int foo()". */
5399 tree decl = build_decl (FUNCTION_DECL, get_identifier (name),
5400 build_function_type (integer_type_node, NULL_TREE));
5401 DECL_ARTIFICIAL (decl) = 1;
5402 DECL_EXTERNAL (decl) = 1;
5403 TREE_PUBLIC (decl) = 1;
5405 symbol = XEXP (DECL_RTL (decl), 0);
5407 /* Zap the nonsensical SYMBOL_REF_DECL for this. What we're left with
5408 are the flags assigned by targetm.encode_section_info. */
5409 SET_SYMBOL_REF_DECL (symbol, 0);
5414 /* Call this to reset the function entry for one optab (OPTABLE) in mode
5415 MODE to NAME, which should be either 0 or a string constant. */
5417 set_optab_libfunc (optab optable, enum machine_mode mode, const char *name)
5420 optable->handlers[mode].libfunc = init_one_libfunc (name);
5422 optable->handlers[mode].libfunc = 0;
5425 /* Call this to reset the function entry for one conversion optab
5426 (OPTABLE) from mode FMODE to mode TMODE to NAME, which should be
5427 either 0 or a string constant. */
5429 set_conv_libfunc (convert_optab optable, enum machine_mode tmode,
5430 enum machine_mode fmode, const char *name)
5433 optable->handlers[tmode][fmode].libfunc = init_one_libfunc (name);
5435 optable->handlers[tmode][fmode].libfunc = 0;
5438 /* Call this once to initialize the contents of the optabs
5439 appropriately for the current target machine. */
5445 enum machine_mode int_mode;
5447 /* Start by initializing all tables to contain CODE_FOR_nothing. */
5449 for (i = 0; i < NUM_RTX_CODE; i++)
5450 setcc_gen_code[i] = CODE_FOR_nothing;
5452 #ifdef HAVE_conditional_move
5453 for (i = 0; i < NUM_MACHINE_MODES; i++)
5454 movcc_gen_code[i] = CODE_FOR_nothing;
5457 for (i = 0; i < NUM_MACHINE_MODES; i++)
5459 vcond_gen_code[i] = CODE_FOR_nothing;
5460 vcondu_gen_code[i] = CODE_FOR_nothing;
5463 add_optab = init_optab (PLUS);
5464 addv_optab = init_optabv (PLUS);
5465 sub_optab = init_optab (MINUS);
5466 subv_optab = init_optabv (MINUS);
5467 smul_optab = init_optab (MULT);
5468 smulv_optab = init_optabv (MULT);
5469 smul_highpart_optab = init_optab (UNKNOWN);
5470 umul_highpart_optab = init_optab (UNKNOWN);
5471 smul_widen_optab = init_optab (UNKNOWN);
5472 umul_widen_optab = init_optab (UNKNOWN);
5473 usmul_widen_optab = init_optab (UNKNOWN);
5474 smadd_widen_optab = init_optab (UNKNOWN);
5475 umadd_widen_optab = init_optab (UNKNOWN);
5476 smsub_widen_optab = init_optab (UNKNOWN);
5477 umsub_widen_optab = init_optab (UNKNOWN);
5478 sdiv_optab = init_optab (DIV);
5479 sdivv_optab = init_optabv (DIV);
5480 sdivmod_optab = init_optab (UNKNOWN);
5481 udiv_optab = init_optab (UDIV);
5482 udivmod_optab = init_optab (UNKNOWN);
5483 smod_optab = init_optab (MOD);
5484 umod_optab = init_optab (UMOD);
5485 fmod_optab = init_optab (UNKNOWN);
5486 remainder_optab = init_optab (UNKNOWN);
5487 ftrunc_optab = init_optab (UNKNOWN);
5488 and_optab = init_optab (AND);
5489 ior_optab = init_optab (IOR);
5490 xor_optab = init_optab (XOR);
5491 ashl_optab = init_optab (ASHIFT);
5492 ashr_optab = init_optab (ASHIFTRT);
5493 lshr_optab = init_optab (LSHIFTRT);
5494 rotl_optab = init_optab (ROTATE);
5495 rotr_optab = init_optab (ROTATERT);
5496 smin_optab = init_optab (SMIN);
5497 smax_optab = init_optab (SMAX);
5498 umin_optab = init_optab (UMIN);
5499 umax_optab = init_optab (UMAX);
5500 pow_optab = init_optab (UNKNOWN);
5501 atan2_optab = init_optab (UNKNOWN);
5503 /* These three have codes assigned exclusively for the sake of
5505 mov_optab = init_optab (SET);
5506 movstrict_optab = init_optab (STRICT_LOW_PART);
5507 cmp_optab = init_optab (COMPARE);
5509 storent_optab = init_optab (UNKNOWN);
5511 ucmp_optab = init_optab (UNKNOWN);
5512 tst_optab = init_optab (UNKNOWN);
5514 eq_optab = init_optab (EQ);
5515 ne_optab = init_optab (NE);
5516 gt_optab = init_optab (GT);
5517 ge_optab = init_optab (GE);
5518 lt_optab = init_optab (LT);
5519 le_optab = init_optab (LE);
5520 unord_optab = init_optab (UNORDERED);
5522 neg_optab = init_optab (NEG);
5523 negv_optab = init_optabv (NEG);
5524 abs_optab = init_optab (ABS);
5525 absv_optab = init_optabv (ABS);
5526 addcc_optab = init_optab (UNKNOWN);
5527 one_cmpl_optab = init_optab (NOT);
5528 bswap_optab = init_optab (BSWAP);
5529 ffs_optab = init_optab (FFS);
5530 clz_optab = init_optab (CLZ);
5531 ctz_optab = init_optab (CTZ);
5532 popcount_optab = init_optab (POPCOUNT);
5533 parity_optab = init_optab (PARITY);
5534 sqrt_optab = init_optab (SQRT);
5535 floor_optab = init_optab (UNKNOWN);
5536 ceil_optab = init_optab (UNKNOWN);
5537 round_optab = init_optab (UNKNOWN);
5538 btrunc_optab = init_optab (UNKNOWN);
5539 nearbyint_optab = init_optab (UNKNOWN);
5540 rint_optab = init_optab (UNKNOWN);
5541 sincos_optab = init_optab (UNKNOWN);
5542 sin_optab = init_optab (UNKNOWN);
5543 asin_optab = init_optab (UNKNOWN);
5544 cos_optab = init_optab (UNKNOWN);
5545 acos_optab = init_optab (UNKNOWN);
5546 exp_optab = init_optab (UNKNOWN);
5547 exp10_optab = init_optab (UNKNOWN);
5548 exp2_optab = init_optab (UNKNOWN);
5549 expm1_optab = init_optab (UNKNOWN);
5550 ldexp_optab = init_optab (UNKNOWN);
5551 scalb_optab = init_optab (UNKNOWN);
5552 logb_optab = init_optab (UNKNOWN);
5553 ilogb_optab = init_optab (UNKNOWN);
5554 log_optab = init_optab (UNKNOWN);
5555 log10_optab = init_optab (UNKNOWN);
5556 log2_optab = init_optab (UNKNOWN);
5557 log1p_optab = init_optab (UNKNOWN);
5558 tan_optab = init_optab (UNKNOWN);
5559 atan_optab = init_optab (UNKNOWN);
5560 copysign_optab = init_optab (UNKNOWN);
5562 isinf_optab = init_optab (UNKNOWN);
5564 strlen_optab = init_optab (UNKNOWN);
5565 cbranch_optab = init_optab (UNKNOWN);
5566 cmov_optab = init_optab (UNKNOWN);
5567 cstore_optab = init_optab (UNKNOWN);
5568 push_optab = init_optab (UNKNOWN);
5570 reduc_smax_optab = init_optab (UNKNOWN);
5571 reduc_umax_optab = init_optab (UNKNOWN);
5572 reduc_smin_optab = init_optab (UNKNOWN);
5573 reduc_umin_optab = init_optab (UNKNOWN);
5574 reduc_splus_optab = init_optab (UNKNOWN);
5575 reduc_uplus_optab = init_optab (UNKNOWN);
5577 ssum_widen_optab = init_optab (UNKNOWN);
5578 usum_widen_optab = init_optab (UNKNOWN);
5579 sdot_prod_optab = init_optab (UNKNOWN);
5580 udot_prod_optab = init_optab (UNKNOWN);
5582 vec_extract_optab = init_optab (UNKNOWN);
5583 vec_extract_even_optab = init_optab (UNKNOWN);
5584 vec_extract_odd_optab = init_optab (UNKNOWN);
5585 vec_interleave_high_optab = init_optab (UNKNOWN);
5586 vec_interleave_low_optab = init_optab (UNKNOWN);
5587 vec_set_optab = init_optab (UNKNOWN);
5588 vec_init_optab = init_optab (UNKNOWN);
5589 vec_shl_optab = init_optab (UNKNOWN);
5590 vec_shr_optab = init_optab (UNKNOWN);
5591 vec_realign_load_optab = init_optab (UNKNOWN);
5592 movmisalign_optab = init_optab (UNKNOWN);
5593 vec_widen_umult_hi_optab = init_optab (UNKNOWN);
5594 vec_widen_umult_lo_optab = init_optab (UNKNOWN);
5595 vec_widen_smult_hi_optab = init_optab (UNKNOWN);
5596 vec_widen_smult_lo_optab = init_optab (UNKNOWN);
5597 vec_unpacks_hi_optab = init_optab (UNKNOWN);
5598 vec_unpacks_lo_optab = init_optab (UNKNOWN);
5599 vec_unpacku_hi_optab = init_optab (UNKNOWN);
5600 vec_unpacku_lo_optab = init_optab (UNKNOWN);
5601 vec_unpacks_float_hi_optab = init_optab (UNKNOWN);
5602 vec_unpacks_float_lo_optab = init_optab (UNKNOWN);
5603 vec_unpacku_float_hi_optab = init_optab (UNKNOWN);
5604 vec_unpacku_float_lo_optab = init_optab (UNKNOWN);
5605 vec_pack_trunc_optab = init_optab (UNKNOWN);
5606 vec_pack_usat_optab = init_optab (UNKNOWN);
5607 vec_pack_ssat_optab = init_optab (UNKNOWN);
5608 vec_pack_ufix_trunc_optab = init_optab (UNKNOWN);
5609 vec_pack_sfix_trunc_optab = init_optab (UNKNOWN);
5611 powi_optab = init_optab (UNKNOWN);
5614 sext_optab = init_convert_optab (SIGN_EXTEND);
5615 zext_optab = init_convert_optab (ZERO_EXTEND);
5616 trunc_optab = init_convert_optab (TRUNCATE);
5617 sfix_optab = init_convert_optab (FIX);
5618 ufix_optab = init_convert_optab (UNSIGNED_FIX);
5619 sfixtrunc_optab = init_convert_optab (UNKNOWN);
5620 ufixtrunc_optab = init_convert_optab (UNKNOWN);
5621 sfloat_optab = init_convert_optab (FLOAT);
5622 ufloat_optab = init_convert_optab (UNSIGNED_FLOAT);
5623 lrint_optab = init_convert_optab (UNKNOWN);
5624 lround_optab = init_convert_optab (UNKNOWN);
5625 lfloor_optab = init_convert_optab (UNKNOWN);
5626 lceil_optab = init_convert_optab (UNKNOWN);
5628 for (i = 0; i < NUM_MACHINE_MODES; i++)
5630 movmem_optab[i] = CODE_FOR_nothing;
5631 cmpstr_optab[i] = CODE_FOR_nothing;
5632 cmpstrn_optab[i] = CODE_FOR_nothing;
5633 cmpmem_optab[i] = CODE_FOR_nothing;
5634 setmem_optab[i] = CODE_FOR_nothing;
5636 sync_add_optab[i] = CODE_FOR_nothing;
5637 sync_sub_optab[i] = CODE_FOR_nothing;
5638 sync_ior_optab[i] = CODE_FOR_nothing;
5639 sync_and_optab[i] = CODE_FOR_nothing;
5640 sync_xor_optab[i] = CODE_FOR_nothing;
5641 sync_nand_optab[i] = CODE_FOR_nothing;
5642 sync_old_add_optab[i] = CODE_FOR_nothing;
5643 sync_old_sub_optab[i] = CODE_FOR_nothing;
5644 sync_old_ior_optab[i] = CODE_FOR_nothing;
5645 sync_old_and_optab[i] = CODE_FOR_nothing;
5646 sync_old_xor_optab[i] = CODE_FOR_nothing;
5647 sync_old_nand_optab[i] = CODE_FOR_nothing;
5648 sync_new_add_optab[i] = CODE_FOR_nothing;
5649 sync_new_sub_optab[i] = CODE_FOR_nothing;
5650 sync_new_ior_optab[i] = CODE_FOR_nothing;
5651 sync_new_and_optab[i] = CODE_FOR_nothing;
5652 sync_new_xor_optab[i] = CODE_FOR_nothing;
5653 sync_new_nand_optab[i] = CODE_FOR_nothing;
5654 sync_compare_and_swap[i] = CODE_FOR_nothing;
5655 sync_compare_and_swap_cc[i] = CODE_FOR_nothing;
5656 sync_lock_test_and_set[i] = CODE_FOR_nothing;
5657 sync_lock_release[i] = CODE_FOR_nothing;
5659 reload_in_optab[i] = reload_out_optab[i] = CODE_FOR_nothing;
5662 /* Fill in the optabs with the insns we support. */
5665 /* The ffs function operates on `int'. Fall back on it if we do not
5666 have a libgcc2 function for that width. */
5667 int_mode = mode_for_size (INT_TYPE_SIZE, MODE_INT, 0);
5668 ffs_optab->handlers[(int) int_mode].libfunc = init_one_libfunc ("ffs");
5670 /* Initialize the optabs with the names of the library functions. */
5671 init_integral_libfuncs (add_optab, "add", '3');
5672 init_floating_libfuncs (add_optab, "add", '3');
5673 init_integral_libfuncs (addv_optab, "addv", '3');
5674 init_floating_libfuncs (addv_optab, "add", '3');
5675 init_integral_libfuncs (sub_optab, "sub", '3');
5676 init_floating_libfuncs (sub_optab, "sub", '3');
5677 init_integral_libfuncs (subv_optab, "subv", '3');
5678 init_floating_libfuncs (subv_optab, "sub", '3');
5679 init_integral_libfuncs (smul_optab, "mul", '3');
5680 init_floating_libfuncs (smul_optab, "mul", '3');
5681 init_integral_libfuncs (smulv_optab, "mulv", '3');
5682 init_floating_libfuncs (smulv_optab, "mul", '3');
5683 init_integral_libfuncs (sdiv_optab, "div", '3');
5684 init_floating_libfuncs (sdiv_optab, "div", '3');
5685 init_integral_libfuncs (sdivv_optab, "divv", '3');
5686 init_integral_libfuncs (udiv_optab, "udiv", '3');
5687 init_integral_libfuncs (sdivmod_optab, "divmod", '4');
5688 init_integral_libfuncs (udivmod_optab, "udivmod", '4');
5689 init_integral_libfuncs (smod_optab, "mod", '3');
5690 init_integral_libfuncs (umod_optab, "umod", '3');
5691 init_floating_libfuncs (ftrunc_optab, "ftrunc", '2');
5692 init_integral_libfuncs (and_optab, "and", '3');
5693 init_integral_libfuncs (ior_optab, "ior", '3');
5694 init_integral_libfuncs (xor_optab, "xor", '3');
5695 init_integral_libfuncs (ashl_optab, "ashl", '3');
5696 init_integral_libfuncs (ashr_optab, "ashr", '3');
5697 init_integral_libfuncs (lshr_optab, "lshr", '3');
5698 init_integral_libfuncs (smin_optab, "min", '3');
5699 init_floating_libfuncs (smin_optab, "min", '3');
5700 init_integral_libfuncs (smax_optab, "max", '3');
5701 init_floating_libfuncs (smax_optab, "max", '3');
5702 init_integral_libfuncs (umin_optab, "umin", '3');
5703 init_integral_libfuncs (umax_optab, "umax", '3');
5704 init_integral_libfuncs (neg_optab, "neg", '2');
5705 init_floating_libfuncs (neg_optab, "neg", '2');
5706 init_integral_libfuncs (negv_optab, "negv", '2');
5707 init_floating_libfuncs (negv_optab, "neg", '2');
5708 init_integral_libfuncs (one_cmpl_optab, "one_cmpl", '2');
5709 init_integral_libfuncs (ffs_optab, "ffs", '2');
5710 init_integral_libfuncs (clz_optab, "clz", '2');
5711 init_integral_libfuncs (ctz_optab, "ctz", '2');
5712 init_integral_libfuncs (popcount_optab, "popcount", '2');
5713 init_integral_libfuncs (parity_optab, "parity", '2');
5715 /* Comparison libcalls for integers MUST come in pairs,
5717 init_integral_libfuncs (cmp_optab, "cmp", '2');
5718 init_integral_libfuncs (ucmp_optab, "ucmp", '2');
5719 init_floating_libfuncs (cmp_optab, "cmp", '2');
5721 /* EQ etc are floating point only. */
5722 init_floating_libfuncs (eq_optab, "eq", '2');
5723 init_floating_libfuncs (ne_optab, "ne", '2');
5724 init_floating_libfuncs (gt_optab, "gt", '2');
5725 init_floating_libfuncs (ge_optab, "ge", '2');
5726 init_floating_libfuncs (lt_optab, "lt", '2');
5727 init_floating_libfuncs (le_optab, "le", '2');
5728 init_floating_libfuncs (unord_optab, "unord", '2');
5730 init_floating_libfuncs (powi_optab, "powi", '2');
5733 init_interclass_conv_libfuncs (sfloat_optab, "float",
5734 MODE_INT, MODE_FLOAT);
5735 init_interclass_conv_libfuncs (sfloat_optab, "float",
5736 MODE_INT, MODE_DECIMAL_FLOAT);
5737 init_interclass_conv_libfuncs (ufloat_optab, "floatun",
5738 MODE_INT, MODE_FLOAT);
5739 init_interclass_conv_libfuncs (ufloat_optab, "floatun",
5740 MODE_INT, MODE_DECIMAL_FLOAT);
5741 init_interclass_conv_libfuncs (sfix_optab, "fix",
5742 MODE_FLOAT, MODE_INT);
5743 init_interclass_conv_libfuncs (sfix_optab, "fix",
5744 MODE_DECIMAL_FLOAT, MODE_INT);
5745 init_interclass_conv_libfuncs (ufix_optab, "fixuns",
5746 MODE_FLOAT, MODE_INT);
5747 init_interclass_conv_libfuncs (ufix_optab, "fixuns",
5748 MODE_DECIMAL_FLOAT, MODE_INT);
5749 init_interclass_conv_libfuncs (ufloat_optab, "floatuns",
5750 MODE_INT, MODE_DECIMAL_FLOAT);
5751 init_interclass_conv_libfuncs (lrint_optab, "lrint",
5752 MODE_INT, MODE_FLOAT);
5753 init_interclass_conv_libfuncs (lround_optab, "lround",
5754 MODE_INT, MODE_FLOAT);
5755 init_interclass_conv_libfuncs (lfloor_optab, "lfloor",
5756 MODE_INT, MODE_FLOAT);
5757 init_interclass_conv_libfuncs (lceil_optab, "lceil",
5758 MODE_INT, MODE_FLOAT);
5760 /* sext_optab is also used for FLOAT_EXTEND. */
5761 init_intraclass_conv_libfuncs (sext_optab, "extend", MODE_FLOAT, true);
5762 init_intraclass_conv_libfuncs (sext_optab, "extend", MODE_DECIMAL_FLOAT, true);
5763 init_interclass_conv_libfuncs (sext_optab, "extend", MODE_FLOAT, MODE_DECIMAL_FLOAT);
5764 init_interclass_conv_libfuncs (sext_optab, "extend", MODE_DECIMAL_FLOAT, MODE_FLOAT);
5765 init_intraclass_conv_libfuncs (trunc_optab, "trunc", MODE_FLOAT, false);
5766 init_intraclass_conv_libfuncs (trunc_optab, "trunc", MODE_DECIMAL_FLOAT, false);
5767 init_interclass_conv_libfuncs (trunc_optab, "trunc", MODE_FLOAT, MODE_DECIMAL_FLOAT);
5768 init_interclass_conv_libfuncs (trunc_optab, "trunc", MODE_DECIMAL_FLOAT, MODE_FLOAT);
5770 /* Explicitly initialize the bswap libfuncs since we need them to be
5771 valid for things other than word_mode. */
5772 set_optab_libfunc (bswap_optab, SImode, "__bswapsi2");
5773 set_optab_libfunc (bswap_optab, DImode, "__bswapdi2");
5775 /* Use cabs for double complex abs, since systems generally have cabs.
5776 Don't define any libcall for float complex, so that cabs will be used. */
5777 if (complex_double_type_node)
5778 abs_optab->handlers[TYPE_MODE (complex_double_type_node)].libfunc
5779 = init_one_libfunc ("cabs");
5781 abort_libfunc = init_one_libfunc ("abort");
5782 memcpy_libfunc = init_one_libfunc ("memcpy");
5783 memmove_libfunc = init_one_libfunc ("memmove");
5784 memcmp_libfunc = init_one_libfunc ("memcmp");
5785 memset_libfunc = init_one_libfunc ("memset");
5786 setbits_libfunc = init_one_libfunc ("__setbits");
5788 #ifndef DONT_USE_BUILTIN_SETJMP
5789 setjmp_libfunc = init_one_libfunc ("__builtin_setjmp");
5790 longjmp_libfunc = init_one_libfunc ("__builtin_longjmp");
5792 setjmp_libfunc = init_one_libfunc ("setjmp");
5793 longjmp_libfunc = init_one_libfunc ("longjmp");
5795 unwind_sjlj_register_libfunc = init_one_libfunc ("_Unwind_SjLj_Register");
5796 unwind_sjlj_unregister_libfunc
5797 = init_one_libfunc ("_Unwind_SjLj_Unregister");
5799 /* For function entry/exit instrumentation. */
5800 profile_function_entry_libfunc
5801 = init_one_libfunc ("__cyg_profile_func_enter");
5802 profile_function_exit_libfunc
5803 = init_one_libfunc ("__cyg_profile_func_exit");
5805 gcov_flush_libfunc = init_one_libfunc ("__gcov_flush");
5807 if (HAVE_conditional_trap)
5808 trap_rtx = gen_rtx_fmt_ee (EQ, VOIDmode, NULL_RTX, NULL_RTX);
5810 /* Allow the target to add more libcalls or rename some, etc. */
5811 targetm.init_libfuncs ();
5816 /* Print information about the current contents of the optabs on
5820 debug_optab_libfuncs (void)
5826 /* Dump the arithmetic optabs. */
5827 for (i = 0; i != (int) OTI_MAX; i++)
5828 for (j = 0; j < NUM_MACHINE_MODES; ++j)
5831 struct optab_handlers *h;
5834 h = &o->handlers[j];
5837 gcc_assert (GET_CODE (h->libfunc) == SYMBOL_REF);
5838 fprintf (stderr, "%s\t%s:\t%s\n",
5839 GET_RTX_NAME (o->code),
5841 XSTR (h->libfunc, 0));
5845 /* Dump the conversion optabs. */
5846 for (i = 0; i < (int) COI_MAX; ++i)
5847 for (j = 0; j < NUM_MACHINE_MODES; ++j)
5848 for (k = 0; k < NUM_MACHINE_MODES; ++k)
5851 struct optab_handlers *h;
5853 o = &convert_optab_table[i];
5854 h = &o->handlers[j][k];
5857 gcc_assert (GET_CODE (h->libfunc) == SYMBOL_REF);
5858 fprintf (stderr, "%s\t%s\t%s:\t%s\n",
5859 GET_RTX_NAME (o->code),
5862 XSTR (h->libfunc, 0));
5870 /* Generate insns to trap with code TCODE if OP1 and OP2 satisfy condition
5871 CODE. Return 0 on failure. */
5874 gen_cond_trap (enum rtx_code code ATTRIBUTE_UNUSED, rtx op1,
5875 rtx op2 ATTRIBUTE_UNUSED, rtx tcode ATTRIBUTE_UNUSED)
5877 enum machine_mode mode = GET_MODE (op1);
5878 enum insn_code icode;
5881 if (!HAVE_conditional_trap)
5884 if (mode == VOIDmode)
5887 icode = cmp_optab->handlers[(int) mode].insn_code;
5888 if (icode == CODE_FOR_nothing)
5892 op1 = prepare_operand (icode, op1, 0, mode, mode, 0);
5893 op2 = prepare_operand (icode, op2, 1, mode, mode, 0);
5899 emit_insn (GEN_FCN (icode) (op1, op2));
5901 PUT_CODE (trap_rtx, code);
5902 gcc_assert (HAVE_conditional_trap);
5903 insn = gen_conditional_trap (trap_rtx, tcode);
5907 insn = get_insns ();
5914 /* Return rtx code for TCODE. Use UNSIGNEDP to select signed
5915 or unsigned operation code. */
5917 static enum rtx_code
5918 get_rtx_code (enum tree_code tcode, bool unsignedp)
5930 code = unsignedp ? LTU : LT;
5933 code = unsignedp ? LEU : LE;
5936 code = unsignedp ? GTU : GT;
5939 code = unsignedp ? GEU : GE;
5942 case UNORDERED_EXPR:
5973 /* Return comparison rtx for COND. Use UNSIGNEDP to select signed or
5974 unsigned operators. Do not generate compare instruction. */
5977 vector_compare_rtx (tree cond, bool unsignedp, enum insn_code icode)
5979 enum rtx_code rcode;
5981 rtx rtx_op0, rtx_op1;
5983 /* This is unlikely. While generating VEC_COND_EXPR, auto vectorizer
5984 ensures that condition is a relational operation. */
5985 gcc_assert (COMPARISON_CLASS_P (cond));
5987 rcode = get_rtx_code (TREE_CODE (cond), unsignedp);
5988 t_op0 = TREE_OPERAND (cond, 0);
5989 t_op1 = TREE_OPERAND (cond, 1);
5991 /* Expand operands. */
5992 rtx_op0 = expand_expr (t_op0, NULL_RTX, TYPE_MODE (TREE_TYPE (t_op0)),
5994 rtx_op1 = expand_expr (t_op1, NULL_RTX, TYPE_MODE (TREE_TYPE (t_op1)),
5997 if (!insn_data[icode].operand[4].predicate (rtx_op0, GET_MODE (rtx_op0))
5998 && GET_MODE (rtx_op0) != VOIDmode)
5999 rtx_op0 = force_reg (GET_MODE (rtx_op0), rtx_op0);
6001 if (!insn_data[icode].operand[5].predicate (rtx_op1, GET_MODE (rtx_op1))
6002 && GET_MODE (rtx_op1) != VOIDmode)
6003 rtx_op1 = force_reg (GET_MODE (rtx_op1), rtx_op1);
6005 return gen_rtx_fmt_ee (rcode, VOIDmode, rtx_op0, rtx_op1);
6008 /* Return insn code for VEC_COND_EXPR EXPR. */
6010 static inline enum insn_code
6011 get_vcond_icode (tree expr, enum machine_mode mode)
6013 enum insn_code icode = CODE_FOR_nothing;
6015 if (TYPE_UNSIGNED (TREE_TYPE (expr)))
6016 icode = vcondu_gen_code[mode];
6018 icode = vcond_gen_code[mode];
6022 /* Return TRUE iff, appropriate vector insns are available
6023 for vector cond expr expr in VMODE mode. */
6026 expand_vec_cond_expr_p (tree expr, enum machine_mode vmode)
6028 if (get_vcond_icode (expr, vmode) == CODE_FOR_nothing)
6033 /* Generate insns for VEC_COND_EXPR. */
6036 expand_vec_cond_expr (tree vec_cond_expr, rtx target)
6038 enum insn_code icode;
6039 rtx comparison, rtx_op1, rtx_op2, cc_op0, cc_op1;
6040 enum machine_mode mode = TYPE_MODE (TREE_TYPE (vec_cond_expr));
6041 bool unsignedp = TYPE_UNSIGNED (TREE_TYPE (vec_cond_expr));
6043 icode = get_vcond_icode (vec_cond_expr, mode);
6044 if (icode == CODE_FOR_nothing)
6047 if (!target || !insn_data[icode].operand[0].predicate (target, mode))
6048 target = gen_reg_rtx (mode);
6050 /* Get comparison rtx. First expand both cond expr operands. */
6051 comparison = vector_compare_rtx (TREE_OPERAND (vec_cond_expr, 0),
6053 cc_op0 = XEXP (comparison, 0);
6054 cc_op1 = XEXP (comparison, 1);
6055 /* Expand both operands and force them in reg, if required. */
6056 rtx_op1 = expand_normal (TREE_OPERAND (vec_cond_expr, 1));
6057 if (!insn_data[icode].operand[1].predicate (rtx_op1, mode)
6058 && mode != VOIDmode)
6059 rtx_op1 = force_reg (mode, rtx_op1);
6061 rtx_op2 = expand_normal (TREE_OPERAND (vec_cond_expr, 2));
6062 if (!insn_data[icode].operand[2].predicate (rtx_op2, mode)
6063 && mode != VOIDmode)
6064 rtx_op2 = force_reg (mode, rtx_op2);
6066 /* Emit instruction! */
6067 emit_insn (GEN_FCN (icode) (target, rtx_op1, rtx_op2,
6068 comparison, cc_op0, cc_op1));
6074 /* This is an internal subroutine of the other compare_and_swap expanders.
6075 MEM, OLD_VAL and NEW_VAL are as you'd expect for a compare-and-swap
6076 operation. TARGET is an optional place to store the value result of
6077 the operation. ICODE is the particular instruction to expand. Return
6078 the result of the operation. */
6081 expand_val_compare_and_swap_1 (rtx mem, rtx old_val, rtx new_val,
6082 rtx target, enum insn_code icode)
6084 enum machine_mode mode = GET_MODE (mem);
6087 if (!target || !insn_data[icode].operand[0].predicate (target, mode))
6088 target = gen_reg_rtx (mode);
6090 if (GET_MODE (old_val) != VOIDmode && GET_MODE (old_val) != mode)
6091 old_val = convert_modes (mode, GET_MODE (old_val), old_val, 1);
6092 if (!insn_data[icode].operand[2].predicate (old_val, mode))
6093 old_val = force_reg (mode, old_val);
6095 if (GET_MODE (new_val) != VOIDmode && GET_MODE (new_val) != mode)
6096 new_val = convert_modes (mode, GET_MODE (new_val), new_val, 1);
6097 if (!insn_data[icode].operand[3].predicate (new_val, mode))
6098 new_val = force_reg (mode, new_val);
6100 insn = GEN_FCN (icode) (target, mem, old_val, new_val);
6101 if (insn == NULL_RTX)
6108 /* Expand a compare-and-swap operation and return its value. */
6111 expand_val_compare_and_swap (rtx mem, rtx old_val, rtx new_val, rtx target)
6113 enum machine_mode mode = GET_MODE (mem);
6114 enum insn_code icode = sync_compare_and_swap[mode];
6116 if (icode == CODE_FOR_nothing)
6119 return expand_val_compare_and_swap_1 (mem, old_val, new_val, target, icode);
6122 /* Expand a compare-and-swap operation and store true into the result if
6123 the operation was successful and false otherwise. Return the result.
6124 Unlike other routines, TARGET is not optional. */
6127 expand_bool_compare_and_swap (rtx mem, rtx old_val, rtx new_val, rtx target)
6129 enum machine_mode mode = GET_MODE (mem);
6130 enum insn_code icode;
6131 rtx subtarget, label0, label1;
6133 /* If the target supports a compare-and-swap pattern that simultaneously
6134 sets some flag for success, then use it. Otherwise use the regular
6135 compare-and-swap and follow that immediately with a compare insn. */
6136 icode = sync_compare_and_swap_cc[mode];
6140 subtarget = expand_val_compare_and_swap_1 (mem, old_val, new_val,
6142 if (subtarget != NULL_RTX)
6146 case CODE_FOR_nothing:
6147 icode = sync_compare_and_swap[mode];
6148 if (icode == CODE_FOR_nothing)
6151 /* Ensure that if old_val == mem, that we're not comparing
6152 against an old value. */
6153 if (MEM_P (old_val))
6154 old_val = force_reg (mode, old_val);
6156 subtarget = expand_val_compare_and_swap_1 (mem, old_val, new_val,
6158 if (subtarget == NULL_RTX)
6161 emit_cmp_insn (subtarget, old_val, EQ, const0_rtx, mode, true);
6164 /* If the target has a sane STORE_FLAG_VALUE, then go ahead and use a
6165 setcc instruction from the beginning. We don't work too hard here,
6166 but it's nice to not be stupid about initial code gen either. */
6167 if (STORE_FLAG_VALUE == 1)
6169 icode = setcc_gen_code[EQ];
6170 if (icode != CODE_FOR_nothing)
6172 enum machine_mode cmode = insn_data[icode].operand[0].mode;
6176 if (!insn_data[icode].operand[0].predicate (target, cmode))
6177 subtarget = gen_reg_rtx (cmode);
6179 insn = GEN_FCN (icode) (subtarget);
6183 if (GET_MODE (target) != GET_MODE (subtarget))
6185 convert_move (target, subtarget, 1);
6193 /* Without an appropriate setcc instruction, use a set of branches to
6194 get 1 and 0 stored into target. Presumably if the target has a
6195 STORE_FLAG_VALUE that isn't 1, then this will get cleaned up by ifcvt. */
6197 label0 = gen_label_rtx ();
6198 label1 = gen_label_rtx ();
6200 emit_jump_insn (bcc_gen_fctn[EQ] (label0));
6201 emit_move_insn (target, const0_rtx);
6202 emit_jump_insn (gen_jump (label1));
6204 emit_label (label0);
6205 emit_move_insn (target, const1_rtx);
6206 emit_label (label1);
6211 /* This is a helper function for the other atomic operations. This function
6212 emits a loop that contains SEQ that iterates until a compare-and-swap
6213 operation at the end succeeds. MEM is the memory to be modified. SEQ is
6214 a set of instructions that takes a value from OLD_REG as an input and
6215 produces a value in NEW_REG as an output. Before SEQ, OLD_REG will be
6216 set to the current contents of MEM. After SEQ, a compare-and-swap will
6217 attempt to update MEM with NEW_REG. The function returns true when the
6218 loop was generated successfully. */
6221 expand_compare_and_swap_loop (rtx mem, rtx old_reg, rtx new_reg, rtx seq)
6223 enum machine_mode mode = GET_MODE (mem);
6224 enum insn_code icode;
6225 rtx label, cmp_reg, subtarget;
6227 /* The loop we want to generate looks like
6233 cmp_reg = compare-and-swap(mem, old_reg, new_reg)
6234 if (cmp_reg != old_reg)
6237 Note that we only do the plain load from memory once. Subsequent
6238 iterations use the value loaded by the compare-and-swap pattern. */
6240 label = gen_label_rtx ();
6241 cmp_reg = gen_reg_rtx (mode);
6243 emit_move_insn (cmp_reg, mem);
6245 emit_move_insn (old_reg, cmp_reg);
6249 /* If the target supports a compare-and-swap pattern that simultaneously
6250 sets some flag for success, then use it. Otherwise use the regular
6251 compare-and-swap and follow that immediately with a compare insn. */
6252 icode = sync_compare_and_swap_cc[mode];
6256 subtarget = expand_val_compare_and_swap_1 (mem, old_reg, new_reg,
6258 if (subtarget != NULL_RTX)
6260 gcc_assert (subtarget == cmp_reg);
6265 case CODE_FOR_nothing:
6266 icode = sync_compare_and_swap[mode];
6267 if (icode == CODE_FOR_nothing)
6270 subtarget = expand_val_compare_and_swap_1 (mem, old_reg, new_reg,
6272 if (subtarget == NULL_RTX)
6274 if (subtarget != cmp_reg)
6275 emit_move_insn (cmp_reg, subtarget);
6277 emit_cmp_insn (cmp_reg, old_reg, EQ, const0_rtx, mode, true);
6280 /* ??? Mark this jump predicted not taken? */
6281 emit_jump_insn (bcc_gen_fctn[NE] (label));
6286 /* This function generates the atomic operation MEM CODE= VAL. In this
6287 case, we do not care about any resulting value. Returns NULL if we
6288 cannot generate the operation. */
6291 expand_sync_operation (rtx mem, rtx val, enum rtx_code code)
6293 enum machine_mode mode = GET_MODE (mem);
6294 enum insn_code icode;
6297 /* Look to see if the target supports the operation directly. */
6301 icode = sync_add_optab[mode];
6304 icode = sync_ior_optab[mode];
6307 icode = sync_xor_optab[mode];
6310 icode = sync_and_optab[mode];
6313 icode = sync_nand_optab[mode];
6317 icode = sync_sub_optab[mode];
6318 if (icode == CODE_FOR_nothing)
6320 icode = sync_add_optab[mode];
6321 if (icode != CODE_FOR_nothing)
6323 val = expand_simple_unop (mode, NEG, val, NULL_RTX, 1);
6333 /* Generate the direct operation, if present. */
6334 if (icode != CODE_FOR_nothing)
6336 if (GET_MODE (val) != VOIDmode && GET_MODE (val) != mode)
6337 val = convert_modes (mode, GET_MODE (val), val, 1);
6338 if (!insn_data[icode].operand[1].predicate (val, mode))
6339 val = force_reg (mode, val);
6341 insn = GEN_FCN (icode) (mem, val);
6349 /* Failing that, generate a compare-and-swap loop in which we perform the
6350 operation with normal arithmetic instructions. */
6351 if (sync_compare_and_swap[mode] != CODE_FOR_nothing)
6353 rtx t0 = gen_reg_rtx (mode), t1;
6360 t1 = expand_simple_unop (mode, NOT, t1, NULL_RTX, true);
6363 t1 = expand_simple_binop (mode, code, t1, val, NULL_RTX,
6364 true, OPTAB_LIB_WIDEN);
6366 insn = get_insns ();
6369 if (t1 != NULL && expand_compare_and_swap_loop (mem, t0, t1, insn))
6376 /* This function generates the atomic operation MEM CODE= VAL. In this
6377 case, we do care about the resulting value: if AFTER is true then
6378 return the value MEM holds after the operation, if AFTER is false
6379 then return the value MEM holds before the operation. TARGET is an
6380 optional place for the result value to be stored. */
6383 expand_sync_fetch_operation (rtx mem, rtx val, enum rtx_code code,
6384 bool after, rtx target)
6386 enum machine_mode mode = GET_MODE (mem);
6387 enum insn_code old_code, new_code, icode;
6391 /* Look to see if the target supports the operation directly. */
6395 old_code = sync_old_add_optab[mode];
6396 new_code = sync_new_add_optab[mode];
6399 old_code = sync_old_ior_optab[mode];
6400 new_code = sync_new_ior_optab[mode];
6403 old_code = sync_old_xor_optab[mode];
6404 new_code = sync_new_xor_optab[mode];
6407 old_code = sync_old_and_optab[mode];
6408 new_code = sync_new_and_optab[mode];
6411 old_code = sync_old_nand_optab[mode];
6412 new_code = sync_new_nand_optab[mode];
6416 old_code = sync_old_sub_optab[mode];
6417 new_code = sync_new_sub_optab[mode];
6418 if (old_code == CODE_FOR_nothing && new_code == CODE_FOR_nothing)
6420 old_code = sync_old_add_optab[mode];
6421 new_code = sync_new_add_optab[mode];
6422 if (old_code != CODE_FOR_nothing || new_code != CODE_FOR_nothing)
6424 val = expand_simple_unop (mode, NEG, val, NULL_RTX, 1);
6434 /* If the target does supports the proper new/old operation, great. But
6435 if we only support the opposite old/new operation, check to see if we
6436 can compensate. In the case in which the old value is supported, then
6437 we can always perform the operation again with normal arithmetic. In
6438 the case in which the new value is supported, then we can only handle
6439 this in the case the operation is reversible. */
6444 if (icode == CODE_FOR_nothing)
6447 if (icode != CODE_FOR_nothing)
6454 if (icode == CODE_FOR_nothing
6455 && (code == PLUS || code == MINUS || code == XOR))
6458 if (icode != CODE_FOR_nothing)
6463 /* If we found something supported, great. */
6464 if (icode != CODE_FOR_nothing)
6466 if (!target || !insn_data[icode].operand[0].predicate (target, mode))
6467 target = gen_reg_rtx (mode);
6469 if (GET_MODE (val) != VOIDmode && GET_MODE (val) != mode)
6470 val = convert_modes (mode, GET_MODE (val), val, 1);
6471 if (!insn_data[icode].operand[2].predicate (val, mode))
6472 val = force_reg (mode, val);
6474 insn = GEN_FCN (icode) (target, mem, val);
6479 /* If we need to compensate for using an operation with the
6480 wrong return value, do so now. */
6487 else if (code == MINUS)
6492 target = expand_simple_unop (mode, NOT, target, NULL_RTX, true);
6493 target = expand_simple_binop (mode, code, target, val, NULL_RTX,
6494 true, OPTAB_LIB_WIDEN);
6501 /* Failing that, generate a compare-and-swap loop in which we perform the
6502 operation with normal arithmetic instructions. */
6503 if (sync_compare_and_swap[mode] != CODE_FOR_nothing)
6505 rtx t0 = gen_reg_rtx (mode), t1;
6507 if (!target || !register_operand (target, mode))
6508 target = gen_reg_rtx (mode);
6513 emit_move_insn (target, t0);
6517 t1 = expand_simple_unop (mode, NOT, t1, NULL_RTX, true);
6520 t1 = expand_simple_binop (mode, code, t1, val, NULL_RTX,
6521 true, OPTAB_LIB_WIDEN);
6523 emit_move_insn (target, t1);
6525 insn = get_insns ();
6528 if (t1 != NULL && expand_compare_and_swap_loop (mem, t0, t1, insn))
6535 /* This function expands a test-and-set operation. Ideally we atomically
6536 store VAL in MEM and return the previous value in MEM. Some targets
6537 may not support this operation and only support VAL with the constant 1;
6538 in this case while the return value will be 0/1, but the exact value
6539 stored in MEM is target defined. TARGET is an option place to stick
6540 the return value. */
6543 expand_sync_lock_test_and_set (rtx mem, rtx val, rtx target)
6545 enum machine_mode mode = GET_MODE (mem);
6546 enum insn_code icode;
6549 /* If the target supports the test-and-set directly, great. */
6550 icode = sync_lock_test_and_set[mode];
6551 if (icode != CODE_FOR_nothing)
6553 if (!target || !insn_data[icode].operand[0].predicate (target, mode))
6554 target = gen_reg_rtx (mode);
6556 if (GET_MODE (val) != VOIDmode && GET_MODE (val) != mode)
6557 val = convert_modes (mode, GET_MODE (val), val, 1);
6558 if (!insn_data[icode].operand[2].predicate (val, mode))
6559 val = force_reg (mode, val);
6561 insn = GEN_FCN (icode) (target, mem, val);
6569 /* Otherwise, use a compare-and-swap loop for the exchange. */
6570 if (sync_compare_and_swap[mode] != CODE_FOR_nothing)
6572 if (!target || !register_operand (target, mode))
6573 target = gen_reg_rtx (mode);
6574 if (GET_MODE (val) != VOIDmode && GET_MODE (val) != mode)
6575 val = convert_modes (mode, GET_MODE (val), val, 1);
6576 if (expand_compare_and_swap_loop (mem, target, val, NULL_RTX))
6583 #include "gt-optabs.h"