1 /* Expand the basic unary and binary arithmetic operations, for GNU compiler.
2 Copyright (C) 1987, 1988, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
3 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007
4 Free Software Foundation, Inc.
6 This file is part of GCC.
8 GCC is free software; you can redistribute it and/or modify it under
9 the terms of the GNU General Public License as published by the Free
10 Software Foundation; either version 3, or (at your option) any later
13 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
14 WARRANTY; without even the implied warranty of MERCHANTABILITY or
15 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING3. If not see
20 <http://www.gnu.org/licenses/>. */
25 #include "coretypes.h"
29 /* Include insn-config.h before expr.h so that HAVE_conditional_move
30 is properly defined. */
31 #include "insn-config.h"
45 #include "basic-block.h"
48 /* Each optab contains info on how this target machine
49 can perform a particular operation
50 for all sizes and kinds of operands.
52 The operation to be performed is often specified
53 by passing one of these optabs as an argument.
55 See expr.h for documentation of these optabs. */
57 optab optab_table[OTI_MAX];
59 rtx libfunc_table[LTI_MAX];
61 /* Tables of patterns for converting one mode to another. */
62 convert_optab convert_optab_table[COI_MAX];
64 /* Contains the optab used for each rtx code. */
65 optab code_to_optab[NUM_RTX_CODE + 1];
67 /* Indexed by the rtx-code for a conditional (eg. EQ, LT,...)
68 gives the gen_function to make a branch to test that condition. */
70 rtxfun bcc_gen_fctn[NUM_RTX_CODE];
72 /* Indexed by the rtx-code for a conditional (eg. EQ, LT,...)
73 gives the insn code to make a store-condition insn
74 to test that condition. */
76 enum insn_code setcc_gen_code[NUM_RTX_CODE];
78 #ifdef HAVE_conditional_move
79 /* Indexed by the machine mode, gives the insn code to make a conditional
80 move insn. This is not indexed by the rtx-code like bcc_gen_fctn and
81 setcc_gen_code to cut down on the number of named patterns. Consider a day
82 when a lot more rtx codes are conditional (eg: for the ARM). */
84 enum insn_code movcc_gen_code[NUM_MACHINE_MODES];
87 /* Indexed by the machine mode, gives the insn code for vector conditional
90 enum insn_code vcond_gen_code[NUM_MACHINE_MODES];
91 enum insn_code vcondu_gen_code[NUM_MACHINE_MODES];
93 /* The insn generating function can not take an rtx_code argument.
94 TRAP_RTX is used as an rtx argument. Its code is replaced with
95 the code to be used in the trap insn and all other fields are ignored. */
96 static GTY(()) rtx trap_rtx;
98 static int add_equal_note (rtx, rtx, enum rtx_code, rtx, rtx);
99 static rtx widen_operand (rtx, enum machine_mode, enum machine_mode, int,
101 static void prepare_cmp_insn (rtx *, rtx *, enum rtx_code *, rtx,
102 enum machine_mode *, int *,
103 enum can_compare_purpose);
104 static enum insn_code can_fix_p (enum machine_mode, enum machine_mode, int,
106 static enum insn_code can_float_p (enum machine_mode, enum machine_mode, int);
107 static optab new_optab (void);
108 static convert_optab new_convert_optab (void);
109 static inline optab init_optab (enum rtx_code);
110 static inline optab init_optabv (enum rtx_code);
111 static inline convert_optab init_convert_optab (enum rtx_code);
112 static void init_libfuncs (optab, int, int, const char *, int);
113 static void init_integral_libfuncs (optab, const char *, int);
114 static void init_floating_libfuncs (optab, const char *, int);
115 static void init_interclass_conv_libfuncs (convert_optab, const char *,
116 enum mode_class, enum mode_class);
117 static void init_intraclass_conv_libfuncs (convert_optab, const char *,
118 enum mode_class, bool);
119 static void emit_cmp_and_jump_insn_1 (rtx, rtx, enum machine_mode,
120 enum rtx_code, int, rtx);
121 static void prepare_float_lib_cmp (rtx *, rtx *, enum rtx_code *,
122 enum machine_mode *, int *);
123 static rtx widen_clz (enum machine_mode, rtx, rtx);
124 static rtx expand_parity (enum machine_mode, rtx, rtx);
125 static enum rtx_code get_rtx_code (enum tree_code, bool);
126 static rtx vector_compare_rtx (tree, bool, enum insn_code);
128 /* Current libcall id. It doesn't matter what these are, as long
129 as they are unique to each libcall that is emitted. */
130 static HOST_WIDE_INT libcall_id = 0;
132 #ifndef HAVE_conditional_trap
133 #define HAVE_conditional_trap 0
134 #define gen_conditional_trap(a,b) (gcc_unreachable (), NULL_RTX)
137 /* Prefixes for the current version of decimal floating point (BID vs. DPD) */
138 #if ENABLE_DECIMAL_BID_FORMAT
139 #define DECIMAL_PREFIX "bid_"
141 #define DECIMAL_PREFIX "dpd_"
145 /* Add a REG_EQUAL note to the last insn in INSNS. TARGET is being set to
146 the result of operation CODE applied to OP0 (and OP1 if it is a binary
149 If the last insn does not set TARGET, don't do anything, but return 1.
151 If a previous insn sets TARGET and TARGET is one of OP0 or OP1,
152 don't add the REG_EQUAL note but return 0. Our caller can then try
153 again, ensuring that TARGET is not one of the operands. */
156 add_equal_note (rtx insns, rtx target, enum rtx_code code, rtx op0, rtx op1)
158 rtx last_insn, insn, set;
161 gcc_assert (insns && INSN_P (insns) && NEXT_INSN (insns));
163 if (GET_RTX_CLASS (code) != RTX_COMM_ARITH
164 && GET_RTX_CLASS (code) != RTX_BIN_ARITH
165 && GET_RTX_CLASS (code) != RTX_COMM_COMPARE
166 && GET_RTX_CLASS (code) != RTX_COMPARE
167 && GET_RTX_CLASS (code) != RTX_UNARY)
170 if (GET_CODE (target) == ZERO_EXTRACT)
173 for (last_insn = insns;
174 NEXT_INSN (last_insn) != NULL_RTX;
175 last_insn = NEXT_INSN (last_insn))
178 set = single_set (last_insn);
182 if (! rtx_equal_p (SET_DEST (set), target)
183 /* For a STRICT_LOW_PART, the REG_NOTE applies to what is inside it. */
184 && (GET_CODE (SET_DEST (set)) != STRICT_LOW_PART
185 || ! rtx_equal_p (XEXP (SET_DEST (set), 0), target)))
188 /* If TARGET is in OP0 or OP1, check if anything in SEQ sets TARGET
189 besides the last insn. */
190 if (reg_overlap_mentioned_p (target, op0)
191 || (op1 && reg_overlap_mentioned_p (target, op1)))
193 insn = PREV_INSN (last_insn);
194 while (insn != NULL_RTX)
196 if (reg_set_p (target, insn))
199 insn = PREV_INSN (insn);
203 if (GET_RTX_CLASS (code) == RTX_UNARY)
204 note = gen_rtx_fmt_e (code, GET_MODE (target), copy_rtx (op0));
206 note = gen_rtx_fmt_ee (code, GET_MODE (target), copy_rtx (op0), copy_rtx (op1));
208 set_unique_reg_note (last_insn, REG_EQUAL, note);
213 /* Widen OP to MODE and return the rtx for the widened operand. UNSIGNEDP
214 says whether OP is signed or unsigned. NO_EXTEND is nonzero if we need
215 not actually do a sign-extend or zero-extend, but can leave the
216 higher-order bits of the result rtx undefined, for example, in the case
217 of logical operations, but not right shifts. */
220 widen_operand (rtx op, enum machine_mode mode, enum machine_mode oldmode,
221 int unsignedp, int no_extend)
225 /* If we don't have to extend and this is a constant, return it. */
226 if (no_extend && GET_MODE (op) == VOIDmode)
229 /* If we must extend do so. If OP is a SUBREG for a promoted object, also
230 extend since it will be more efficient to do so unless the signedness of
231 a promoted object differs from our extension. */
233 || (GET_CODE (op) == SUBREG && SUBREG_PROMOTED_VAR_P (op)
234 && SUBREG_PROMOTED_UNSIGNED_P (op) == unsignedp))
235 return convert_modes (mode, oldmode, op, unsignedp);
237 /* If MODE is no wider than a single word, we return a paradoxical
239 if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
240 return gen_rtx_SUBREG (mode, force_reg (GET_MODE (op), op), 0);
242 /* Otherwise, get an object of MODE, clobber it, and set the low-order
245 result = gen_reg_rtx (mode);
246 emit_insn (gen_rtx_CLOBBER (VOIDmode, result));
247 emit_move_insn (gen_lowpart (GET_MODE (op), result), op);
251 /* Return the optab used for computing the operation given by
252 the tree code, CODE. This function is not always usable (for
253 example, it cannot give complete results for multiplication
254 or division) but probably ought to be relied on more widely
255 throughout the expander. */
257 optab_for_tree_code (enum tree_code code, tree type)
269 return one_cmpl_optab;
278 return TYPE_UNSIGNED (type) ? umod_optab : smod_optab;
286 return TYPE_UNSIGNED (type) ? udiv_optab : sdiv_optab;
292 return TYPE_UNSIGNED (type) ? lshr_optab : ashr_optab;
301 return TYPE_UNSIGNED (type) ? umax_optab : smax_optab;
304 return TYPE_UNSIGNED (type) ? umin_optab : smin_optab;
306 case REALIGN_LOAD_EXPR:
307 return vec_realign_load_optab;
310 return TYPE_UNSIGNED (type) ? usum_widen_optab : ssum_widen_optab;
313 return TYPE_UNSIGNED (type) ? udot_prod_optab : sdot_prod_optab;
316 return TYPE_UNSIGNED (type) ? reduc_umax_optab : reduc_smax_optab;
319 return TYPE_UNSIGNED (type) ? reduc_umin_optab : reduc_smin_optab;
321 case REDUC_PLUS_EXPR:
322 return TYPE_UNSIGNED (type) ? reduc_uplus_optab : reduc_splus_optab;
324 case VEC_LSHIFT_EXPR:
325 return vec_shl_optab;
327 case VEC_RSHIFT_EXPR:
328 return vec_shr_optab;
330 case VEC_WIDEN_MULT_HI_EXPR:
331 return TYPE_UNSIGNED (type) ?
332 vec_widen_umult_hi_optab : vec_widen_smult_hi_optab;
334 case VEC_WIDEN_MULT_LO_EXPR:
335 return TYPE_UNSIGNED (type) ?
336 vec_widen_umult_lo_optab : vec_widen_smult_lo_optab;
338 case VEC_UNPACK_HI_EXPR:
339 return TYPE_UNSIGNED (type) ?
340 vec_unpacku_hi_optab : vec_unpacks_hi_optab;
342 case VEC_UNPACK_LO_EXPR:
343 return TYPE_UNSIGNED (type) ?
344 vec_unpacku_lo_optab : vec_unpacks_lo_optab;
346 case VEC_UNPACK_FLOAT_HI_EXPR:
347 /* The signedness is determined from input operand. */
348 return TYPE_UNSIGNED (type) ?
349 vec_unpacku_float_hi_optab : vec_unpacks_float_hi_optab;
351 case VEC_UNPACK_FLOAT_LO_EXPR:
352 /* The signedness is determined from input operand. */
353 return TYPE_UNSIGNED (type) ?
354 vec_unpacku_float_lo_optab : vec_unpacks_float_lo_optab;
356 case VEC_PACK_TRUNC_EXPR:
357 return vec_pack_trunc_optab;
359 case VEC_PACK_SAT_EXPR:
360 return TYPE_UNSIGNED (type) ? vec_pack_usat_optab : vec_pack_ssat_optab;
362 case VEC_PACK_FIX_TRUNC_EXPR:
363 /* The signedness is determined from output operand. */
364 return TYPE_UNSIGNED (type) ?
365 vec_pack_ufix_trunc_optab : vec_pack_sfix_trunc_optab;
371 trapv = INTEGRAL_TYPE_P (type) && TYPE_OVERFLOW_TRAPS (type);
374 case POINTER_PLUS_EXPR:
376 return trapv ? addv_optab : add_optab;
379 return trapv ? subv_optab : sub_optab;
382 return trapv ? smulv_optab : smul_optab;
385 return trapv ? negv_optab : neg_optab;
388 return trapv ? absv_optab : abs_optab;
390 case VEC_EXTRACT_EVEN_EXPR:
391 return vec_extract_even_optab;
393 case VEC_EXTRACT_ODD_EXPR:
394 return vec_extract_odd_optab;
396 case VEC_INTERLEAVE_HIGH_EXPR:
397 return vec_interleave_high_optab;
399 case VEC_INTERLEAVE_LOW_EXPR:
400 return vec_interleave_low_optab;
408 /* Expand vector widening operations.
410 There are two different classes of operations handled here:
411 1) Operations whose result is wider than all the arguments to the operation.
412 Examples: VEC_UNPACK_HI/LO_EXPR, VEC_WIDEN_MULT_HI/LO_EXPR
413 In this case OP0 and optionally OP1 would be initialized,
414 but WIDE_OP wouldn't (not relevant for this case).
415 2) Operations whose result is of the same size as the last argument to the
416 operation, but wider than all the other arguments to the operation.
417 Examples: WIDEN_SUM_EXPR, VEC_DOT_PROD_EXPR.
418 In the case WIDE_OP, OP0 and optionally OP1 would be initialized.
420 E.g, when called to expand the following operations, this is how
421 the arguments will be initialized:
423 widening-sum 2 oprnd0 - oprnd1
424 widening-dot-product 3 oprnd0 oprnd1 oprnd2
425 widening-mult 2 oprnd0 oprnd1 -
426 type-promotion (vec-unpack) 1 oprnd0 - - */
429 expand_widen_pattern_expr (tree exp, rtx op0, rtx op1, rtx wide_op, rtx target,
432 tree oprnd0, oprnd1, oprnd2;
433 enum machine_mode wmode = 0, tmode0, tmode1 = 0;
434 optab widen_pattern_optab;
436 enum machine_mode xmode0, xmode1 = 0, wxmode = 0;
439 rtx xop0, xop1, wxop;
440 int nops = TREE_OPERAND_LENGTH (exp);
442 oprnd0 = TREE_OPERAND (exp, 0);
443 tmode0 = TYPE_MODE (TREE_TYPE (oprnd0));
444 widen_pattern_optab =
445 optab_for_tree_code (TREE_CODE (exp), TREE_TYPE (oprnd0));
446 icode = (int) widen_pattern_optab->handlers[(int) tmode0].insn_code;
447 gcc_assert (icode != CODE_FOR_nothing);
448 xmode0 = insn_data[icode].operand[1].mode;
452 oprnd1 = TREE_OPERAND (exp, 1);
453 tmode1 = TYPE_MODE (TREE_TYPE (oprnd1));
454 xmode1 = insn_data[icode].operand[2].mode;
457 /* The last operand is of a wider mode than the rest of the operands. */
465 gcc_assert (tmode1 == tmode0);
467 oprnd2 = TREE_OPERAND (exp, 2);
468 wmode = TYPE_MODE (TREE_TYPE (oprnd2));
469 wxmode = insn_data[icode].operand[3].mode;
473 wmode = wxmode = insn_data[icode].operand[0].mode;
476 || ! (*insn_data[icode].operand[0].predicate) (target, wmode))
477 temp = gen_reg_rtx (wmode);
485 /* In case the insn wants input operands in modes different from
486 those of the actual operands, convert the operands. It would
487 seem that we don't need to convert CONST_INTs, but we do, so
488 that they're properly zero-extended, sign-extended or truncated
491 if (GET_MODE (op0) != xmode0 && xmode0 != VOIDmode)
492 xop0 = convert_modes (xmode0,
493 GET_MODE (op0) != VOIDmode
499 if (GET_MODE (op1) != xmode1 && xmode1 != VOIDmode)
500 xop1 = convert_modes (xmode1,
501 GET_MODE (op1) != VOIDmode
507 if (GET_MODE (wide_op) != wxmode && wxmode != VOIDmode)
508 wxop = convert_modes (wxmode,
509 GET_MODE (wide_op) != VOIDmode
514 /* Now, if insn's predicates don't allow our operands, put them into
517 if (! (*insn_data[icode].operand[1].predicate) (xop0, xmode0)
518 && xmode0 != VOIDmode)
519 xop0 = copy_to_mode_reg (xmode0, xop0);
523 if (! (*insn_data[icode].operand[2].predicate) (xop1, xmode1)
524 && xmode1 != VOIDmode)
525 xop1 = copy_to_mode_reg (xmode1, xop1);
529 if (! (*insn_data[icode].operand[3].predicate) (wxop, wxmode)
530 && wxmode != VOIDmode)
531 wxop = copy_to_mode_reg (wxmode, wxop);
533 pat = GEN_FCN (icode) (temp, xop0, xop1, wxop);
536 pat = GEN_FCN (icode) (temp, xop0, xop1);
542 if (! (*insn_data[icode].operand[2].predicate) (wxop, wxmode)
543 && wxmode != VOIDmode)
544 wxop = copy_to_mode_reg (wxmode, wxop);
546 pat = GEN_FCN (icode) (temp, xop0, wxop);
549 pat = GEN_FCN (icode) (temp, xop0);
556 /* Generate code to perform an operation specified by TERNARY_OPTAB
557 on operands OP0, OP1 and OP2, with result having machine-mode MODE.
559 UNSIGNEDP is for the case where we have to widen the operands
560 to perform the operation. It says to use zero-extension.
562 If TARGET is nonzero, the value
563 is generated there, if it is convenient to do so.
564 In all cases an rtx is returned for the locus of the value;
565 this may or may not be TARGET. */
568 expand_ternary_op (enum machine_mode mode, optab ternary_optab, rtx op0,
569 rtx op1, rtx op2, rtx target, int unsignedp)
571 int icode = (int) ternary_optab->handlers[(int) mode].insn_code;
572 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
573 enum machine_mode mode1 = insn_data[icode].operand[2].mode;
574 enum machine_mode mode2 = insn_data[icode].operand[3].mode;
577 rtx xop0 = op0, xop1 = op1, xop2 = op2;
579 gcc_assert (ternary_optab->handlers[(int) mode].insn_code
580 != CODE_FOR_nothing);
582 if (!target || !insn_data[icode].operand[0].predicate (target, mode))
583 temp = gen_reg_rtx (mode);
587 /* In case the insn wants input operands in modes different from
588 those of the actual operands, convert the operands. It would
589 seem that we don't need to convert CONST_INTs, but we do, so
590 that they're properly zero-extended, sign-extended or truncated
593 if (GET_MODE (op0) != mode0 && mode0 != VOIDmode)
594 xop0 = convert_modes (mode0,
595 GET_MODE (op0) != VOIDmode
600 if (GET_MODE (op1) != mode1 && mode1 != VOIDmode)
601 xop1 = convert_modes (mode1,
602 GET_MODE (op1) != VOIDmode
607 if (GET_MODE (op2) != mode2 && mode2 != VOIDmode)
608 xop2 = convert_modes (mode2,
609 GET_MODE (op2) != VOIDmode
614 /* Now, if insn's predicates don't allow our operands, put them into
617 if (!insn_data[icode].operand[1].predicate (xop0, mode0)
618 && mode0 != VOIDmode)
619 xop0 = copy_to_mode_reg (mode0, xop0);
621 if (!insn_data[icode].operand[2].predicate (xop1, mode1)
622 && mode1 != VOIDmode)
623 xop1 = copy_to_mode_reg (mode1, xop1);
625 if (!insn_data[icode].operand[3].predicate (xop2, mode2)
626 && mode2 != VOIDmode)
627 xop2 = copy_to_mode_reg (mode2, xop2);
629 pat = GEN_FCN (icode) (temp, xop0, xop1, xop2);
636 /* Like expand_binop, but return a constant rtx if the result can be
637 calculated at compile time. The arguments and return value are
638 otherwise the same as for expand_binop. */
641 simplify_expand_binop (enum machine_mode mode, optab binoptab,
642 rtx op0, rtx op1, rtx target, int unsignedp,
643 enum optab_methods methods)
645 if (CONSTANT_P (op0) && CONSTANT_P (op1))
647 rtx x = simplify_binary_operation (binoptab->code, mode, op0, op1);
653 return expand_binop (mode, binoptab, op0, op1, target, unsignedp, methods);
656 /* Like simplify_expand_binop, but always put the result in TARGET.
657 Return true if the expansion succeeded. */
660 force_expand_binop (enum machine_mode mode, optab binoptab,
661 rtx op0, rtx op1, rtx target, int unsignedp,
662 enum optab_methods methods)
664 rtx x = simplify_expand_binop (mode, binoptab, op0, op1,
665 target, unsignedp, methods);
669 emit_move_insn (target, x);
673 /* Generate insns for VEC_LSHIFT_EXPR, VEC_RSHIFT_EXPR. */
676 expand_vec_shift_expr (tree vec_shift_expr, rtx target)
678 enum insn_code icode;
679 rtx rtx_op1, rtx_op2;
680 enum machine_mode mode1;
681 enum machine_mode mode2;
682 enum machine_mode mode = TYPE_MODE (TREE_TYPE (vec_shift_expr));
683 tree vec_oprnd = TREE_OPERAND (vec_shift_expr, 0);
684 tree shift_oprnd = TREE_OPERAND (vec_shift_expr, 1);
688 switch (TREE_CODE (vec_shift_expr))
690 case VEC_RSHIFT_EXPR:
691 shift_optab = vec_shr_optab;
693 case VEC_LSHIFT_EXPR:
694 shift_optab = vec_shl_optab;
700 icode = (int) shift_optab->handlers[(int) mode].insn_code;
701 gcc_assert (icode != CODE_FOR_nothing);
703 mode1 = insn_data[icode].operand[1].mode;
704 mode2 = insn_data[icode].operand[2].mode;
706 rtx_op1 = expand_normal (vec_oprnd);
707 if (!(*insn_data[icode].operand[1].predicate) (rtx_op1, mode1)
708 && mode1 != VOIDmode)
709 rtx_op1 = force_reg (mode1, rtx_op1);
711 rtx_op2 = expand_normal (shift_oprnd);
712 if (!(*insn_data[icode].operand[2].predicate) (rtx_op2, mode2)
713 && mode2 != VOIDmode)
714 rtx_op2 = force_reg (mode2, rtx_op2);
717 || ! (*insn_data[icode].operand[0].predicate) (target, mode))
718 target = gen_reg_rtx (mode);
720 /* Emit instruction */
721 pat = GEN_FCN (icode) (target, rtx_op1, rtx_op2);
728 /* This subroutine of expand_doubleword_shift handles the cases in which
729 the effective shift value is >= BITS_PER_WORD. The arguments and return
730 value are the same as for the parent routine, except that SUPERWORD_OP1
731 is the shift count to use when shifting OUTOF_INPUT into INTO_TARGET.
732 INTO_TARGET may be null if the caller has decided to calculate it. */
735 expand_superword_shift (optab binoptab, rtx outof_input, rtx superword_op1,
736 rtx outof_target, rtx into_target,
737 int unsignedp, enum optab_methods methods)
739 if (into_target != 0)
740 if (!force_expand_binop (word_mode, binoptab, outof_input, superword_op1,
741 into_target, unsignedp, methods))
744 if (outof_target != 0)
746 /* For a signed right shift, we must fill OUTOF_TARGET with copies
747 of the sign bit, otherwise we must fill it with zeros. */
748 if (binoptab != ashr_optab)
749 emit_move_insn (outof_target, CONST0_RTX (word_mode));
751 if (!force_expand_binop (word_mode, binoptab,
752 outof_input, GEN_INT (BITS_PER_WORD - 1),
753 outof_target, unsignedp, methods))
759 /* This subroutine of expand_doubleword_shift handles the cases in which
760 the effective shift value is < BITS_PER_WORD. The arguments and return
761 value are the same as for the parent routine. */
764 expand_subword_shift (enum machine_mode op1_mode, optab binoptab,
765 rtx outof_input, rtx into_input, rtx op1,
766 rtx outof_target, rtx into_target,
767 int unsignedp, enum optab_methods methods,
768 unsigned HOST_WIDE_INT shift_mask)
770 optab reverse_unsigned_shift, unsigned_shift;
773 reverse_unsigned_shift = (binoptab == ashl_optab ? lshr_optab : ashl_optab);
774 unsigned_shift = (binoptab == ashl_optab ? ashl_optab : lshr_optab);
776 /* The low OP1 bits of INTO_TARGET come from the high bits of OUTOF_INPUT.
777 We therefore need to shift OUTOF_INPUT by (BITS_PER_WORD - OP1) bits in
778 the opposite direction to BINOPTAB. */
779 if (CONSTANT_P (op1) || shift_mask >= BITS_PER_WORD)
781 carries = outof_input;
782 tmp = immed_double_const (BITS_PER_WORD, 0, op1_mode);
783 tmp = simplify_expand_binop (op1_mode, sub_optab, tmp, op1,
788 /* We must avoid shifting by BITS_PER_WORD bits since that is either
789 the same as a zero shift (if shift_mask == BITS_PER_WORD - 1) or
790 has unknown behavior. Do a single shift first, then shift by the
791 remainder. It's OK to use ~OP1 as the remainder if shift counts
792 are truncated to the mode size. */
793 carries = expand_binop (word_mode, reverse_unsigned_shift,
794 outof_input, const1_rtx, 0, unsignedp, methods);
795 if (shift_mask == BITS_PER_WORD - 1)
797 tmp = immed_double_const (-1, -1, op1_mode);
798 tmp = simplify_expand_binop (op1_mode, xor_optab, op1, tmp,
803 tmp = immed_double_const (BITS_PER_WORD - 1, 0, op1_mode);
804 tmp = simplify_expand_binop (op1_mode, sub_optab, tmp, op1,
808 if (tmp == 0 || carries == 0)
810 carries = expand_binop (word_mode, reverse_unsigned_shift,
811 carries, tmp, 0, unsignedp, methods);
815 /* Shift INTO_INPUT logically by OP1. This is the last use of INTO_INPUT
816 so the result can go directly into INTO_TARGET if convenient. */
817 tmp = expand_binop (word_mode, unsigned_shift, into_input, op1,
818 into_target, unsignedp, methods);
822 /* Now OR in the bits carried over from OUTOF_INPUT. */
823 if (!force_expand_binop (word_mode, ior_optab, tmp, carries,
824 into_target, unsignedp, methods))
827 /* Use a standard word_mode shift for the out-of half. */
828 if (outof_target != 0)
829 if (!force_expand_binop (word_mode, binoptab, outof_input, op1,
830 outof_target, unsignedp, methods))
837 #ifdef HAVE_conditional_move
838 /* Try implementing expand_doubleword_shift using conditional moves.
839 The shift is by < BITS_PER_WORD if (CMP_CODE CMP1 CMP2) is true,
840 otherwise it is by >= BITS_PER_WORD. SUBWORD_OP1 and SUPERWORD_OP1
841 are the shift counts to use in the former and latter case. All other
842 arguments are the same as the parent routine. */
845 expand_doubleword_shift_condmove (enum machine_mode op1_mode, optab binoptab,
846 enum rtx_code cmp_code, rtx cmp1, rtx cmp2,
847 rtx outof_input, rtx into_input,
848 rtx subword_op1, rtx superword_op1,
849 rtx outof_target, rtx into_target,
850 int unsignedp, enum optab_methods methods,
851 unsigned HOST_WIDE_INT shift_mask)
853 rtx outof_superword, into_superword;
855 /* Put the superword version of the output into OUTOF_SUPERWORD and
857 outof_superword = outof_target != 0 ? gen_reg_rtx (word_mode) : 0;
858 if (outof_target != 0 && subword_op1 == superword_op1)
860 /* The value INTO_TARGET >> SUBWORD_OP1, which we later store in
861 OUTOF_TARGET, is the same as the value of INTO_SUPERWORD. */
862 into_superword = outof_target;
863 if (!expand_superword_shift (binoptab, outof_input, superword_op1,
864 outof_superword, 0, unsignedp, methods))
869 into_superword = gen_reg_rtx (word_mode);
870 if (!expand_superword_shift (binoptab, outof_input, superword_op1,
871 outof_superword, into_superword,
876 /* Put the subword version directly in OUTOF_TARGET and INTO_TARGET. */
877 if (!expand_subword_shift (op1_mode, binoptab,
878 outof_input, into_input, subword_op1,
879 outof_target, into_target,
880 unsignedp, methods, shift_mask))
883 /* Select between them. Do the INTO half first because INTO_SUPERWORD
884 might be the current value of OUTOF_TARGET. */
885 if (!emit_conditional_move (into_target, cmp_code, cmp1, cmp2, op1_mode,
886 into_target, into_superword, word_mode, false))
889 if (outof_target != 0)
890 if (!emit_conditional_move (outof_target, cmp_code, cmp1, cmp2, op1_mode,
891 outof_target, outof_superword,
899 /* Expand a doubleword shift (ashl, ashr or lshr) using word-mode shifts.
900 OUTOF_INPUT and INTO_INPUT are the two word-sized halves of the first
901 input operand; the shift moves bits in the direction OUTOF_INPUT->
902 INTO_TARGET. OUTOF_TARGET and INTO_TARGET are the equivalent words
903 of the target. OP1 is the shift count and OP1_MODE is its mode.
904 If OP1 is constant, it will have been truncated as appropriate
905 and is known to be nonzero.
907 If SHIFT_MASK is zero, the result of word shifts is undefined when the
908 shift count is outside the range [0, BITS_PER_WORD). This routine must
909 avoid generating such shifts for OP1s in the range [0, BITS_PER_WORD * 2).
911 If SHIFT_MASK is nonzero, all word-mode shift counts are effectively
912 masked by it and shifts in the range [BITS_PER_WORD, SHIFT_MASK) will
913 fill with zeros or sign bits as appropriate.
915 If SHIFT_MASK is BITS_PER_WORD - 1, this routine will synthesize
916 a doubleword shift whose equivalent mask is BITS_PER_WORD * 2 - 1.
917 Doing this preserves semantics required by SHIFT_COUNT_TRUNCATED.
918 In all other cases, shifts by values outside [0, BITS_PER_UNIT * 2)
921 BINOPTAB, UNSIGNEDP and METHODS are as for expand_binop. This function
922 may not use INTO_INPUT after modifying INTO_TARGET, and similarly for
923 OUTOF_INPUT and OUTOF_TARGET. OUTOF_TARGET can be null if the parent
924 function wants to calculate it itself.
926 Return true if the shift could be successfully synthesized. */
929 expand_doubleword_shift (enum machine_mode op1_mode, optab binoptab,
930 rtx outof_input, rtx into_input, rtx op1,
931 rtx outof_target, rtx into_target,
932 int unsignedp, enum optab_methods methods,
933 unsigned HOST_WIDE_INT shift_mask)
935 rtx superword_op1, tmp, cmp1, cmp2;
936 rtx subword_label, done_label;
937 enum rtx_code cmp_code;
939 /* See if word-mode shifts by BITS_PER_WORD...BITS_PER_WORD * 2 - 1 will
940 fill the result with sign or zero bits as appropriate. If so, the value
941 of OUTOF_TARGET will always be (SHIFT OUTOF_INPUT OP1). Recursively call
942 this routine to calculate INTO_TARGET (which depends on both OUTOF_INPUT
943 and INTO_INPUT), then emit code to set up OUTOF_TARGET.
945 This isn't worthwhile for constant shifts since the optimizers will
946 cope better with in-range shift counts. */
947 if (shift_mask >= BITS_PER_WORD
949 && !CONSTANT_P (op1))
951 if (!expand_doubleword_shift (op1_mode, binoptab,
952 outof_input, into_input, op1,
954 unsignedp, methods, shift_mask))
956 if (!force_expand_binop (word_mode, binoptab, outof_input, op1,
957 outof_target, unsignedp, methods))
962 /* Set CMP_CODE, CMP1 and CMP2 so that the rtx (CMP_CODE CMP1 CMP2)
963 is true when the effective shift value is less than BITS_PER_WORD.
964 Set SUPERWORD_OP1 to the shift count that should be used to shift
965 OUTOF_INPUT into INTO_TARGET when the condition is false. */
966 tmp = immed_double_const (BITS_PER_WORD, 0, op1_mode);
967 if (!CONSTANT_P (op1) && shift_mask == BITS_PER_WORD - 1)
969 /* Set CMP1 to OP1 & BITS_PER_WORD. The result is zero iff OP1
970 is a subword shift count. */
971 cmp1 = simplify_expand_binop (op1_mode, and_optab, op1, tmp,
973 cmp2 = CONST0_RTX (op1_mode);
979 /* Set CMP1 to OP1 - BITS_PER_WORD. */
980 cmp1 = simplify_expand_binop (op1_mode, sub_optab, op1, tmp,
982 cmp2 = CONST0_RTX (op1_mode);
984 superword_op1 = cmp1;
989 /* If we can compute the condition at compile time, pick the
990 appropriate subroutine. */
991 tmp = simplify_relational_operation (cmp_code, SImode, op1_mode, cmp1, cmp2);
992 if (tmp != 0 && GET_CODE (tmp) == CONST_INT)
994 if (tmp == const0_rtx)
995 return expand_superword_shift (binoptab, outof_input, superword_op1,
996 outof_target, into_target,
999 return expand_subword_shift (op1_mode, binoptab,
1000 outof_input, into_input, op1,
1001 outof_target, into_target,
1002 unsignedp, methods, shift_mask);
1005 #ifdef HAVE_conditional_move
1006 /* Try using conditional moves to generate straight-line code. */
1008 rtx start = get_last_insn ();
1009 if (expand_doubleword_shift_condmove (op1_mode, binoptab,
1010 cmp_code, cmp1, cmp2,
1011 outof_input, into_input,
1013 outof_target, into_target,
1014 unsignedp, methods, shift_mask))
1016 delete_insns_since (start);
1020 /* As a last resort, use branches to select the correct alternative. */
1021 subword_label = gen_label_rtx ();
1022 done_label = gen_label_rtx ();
1025 do_compare_rtx_and_jump (cmp1, cmp2, cmp_code, false, op1_mode,
1026 0, 0, subword_label);
1029 if (!expand_superword_shift (binoptab, outof_input, superword_op1,
1030 outof_target, into_target,
1031 unsignedp, methods))
1034 emit_jump_insn (gen_jump (done_label));
1036 emit_label (subword_label);
1038 if (!expand_subword_shift (op1_mode, binoptab,
1039 outof_input, into_input, op1,
1040 outof_target, into_target,
1041 unsignedp, methods, shift_mask))
1044 emit_label (done_label);
1048 /* Subroutine of expand_binop. Perform a double word multiplication of
1049 operands OP0 and OP1 both of mode MODE, which is exactly twice as wide
1050 as the target's word_mode. This function return NULL_RTX if anything
1051 goes wrong, in which case it may have already emitted instructions
1052 which need to be deleted.
1054 If we want to multiply two two-word values and have normal and widening
1055 multiplies of single-word values, we can do this with three smaller
1056 multiplications. Note that we do not make a REG_NO_CONFLICT block here
1057 because we are not operating on one word at a time.
1059 The multiplication proceeds as follows:
1060 _______________________
1061 [__op0_high_|__op0_low__]
1062 _______________________
1063 * [__op1_high_|__op1_low__]
1064 _______________________________________________
1065 _______________________
1066 (1) [__op0_low__*__op1_low__]
1067 _______________________
1068 (2a) [__op0_low__*__op1_high_]
1069 _______________________
1070 (2b) [__op0_high_*__op1_low__]
1071 _______________________
1072 (3) [__op0_high_*__op1_high_]
1075 This gives a 4-word result. Since we are only interested in the
1076 lower 2 words, partial result (3) and the upper words of (2a) and
1077 (2b) don't need to be calculated. Hence (2a) and (2b) can be
1078 calculated using non-widening multiplication.
1080 (1), however, needs to be calculated with an unsigned widening
1081 multiplication. If this operation is not directly supported we
1082 try using a signed widening multiplication and adjust the result.
1083 This adjustment works as follows:
1085 If both operands are positive then no adjustment is needed.
1087 If the operands have different signs, for example op0_low < 0 and
1088 op1_low >= 0, the instruction treats the most significant bit of
1089 op0_low as a sign bit instead of a bit with significance
1090 2**(BITS_PER_WORD-1), i.e. the instruction multiplies op1_low
1091 with 2**BITS_PER_WORD - op0_low, and two's complements the
1092 result. Conclusion: We need to add op1_low * 2**BITS_PER_WORD to
1095 Similarly, if both operands are negative, we need to add
1096 (op0_low + op1_low) * 2**BITS_PER_WORD.
1098 We use a trick to adjust quickly. We logically shift op0_low right
1099 (op1_low) BITS_PER_WORD-1 steps to get 0 or 1, and add this to
1100 op0_high (op1_high) before it is used to calculate 2b (2a). If no
1101 logical shift exists, we do an arithmetic right shift and subtract
1105 expand_doubleword_mult (enum machine_mode mode, rtx op0, rtx op1, rtx target,
1106 bool umulp, enum optab_methods methods)
1108 int low = (WORDS_BIG_ENDIAN ? 1 : 0);
1109 int high = (WORDS_BIG_ENDIAN ? 0 : 1);
1110 rtx wordm1 = umulp ? NULL_RTX : GEN_INT (BITS_PER_WORD - 1);
1111 rtx product, adjust, product_high, temp;
1113 rtx op0_high = operand_subword_force (op0, high, mode);
1114 rtx op0_low = operand_subword_force (op0, low, mode);
1115 rtx op1_high = operand_subword_force (op1, high, mode);
1116 rtx op1_low = operand_subword_force (op1, low, mode);
1118 /* If we're using an unsigned multiply to directly compute the product
1119 of the low-order words of the operands and perform any required
1120 adjustments of the operands, we begin by trying two more multiplications
1121 and then computing the appropriate sum.
1123 We have checked above that the required addition is provided.
1124 Full-word addition will normally always succeed, especially if
1125 it is provided at all, so we don't worry about its failure. The
1126 multiplication may well fail, however, so we do handle that. */
1130 /* ??? This could be done with emit_store_flag where available. */
1131 temp = expand_binop (word_mode, lshr_optab, op0_low, wordm1,
1132 NULL_RTX, 1, methods);
1134 op0_high = expand_binop (word_mode, add_optab, op0_high, temp,
1135 NULL_RTX, 0, OPTAB_DIRECT);
1138 temp = expand_binop (word_mode, ashr_optab, op0_low, wordm1,
1139 NULL_RTX, 0, methods);
1142 op0_high = expand_binop (word_mode, sub_optab, op0_high, temp,
1143 NULL_RTX, 0, OPTAB_DIRECT);
1150 adjust = expand_binop (word_mode, smul_optab, op0_high, op1_low,
1151 NULL_RTX, 0, OPTAB_DIRECT);
1155 /* OP0_HIGH should now be dead. */
1159 /* ??? This could be done with emit_store_flag where available. */
1160 temp = expand_binop (word_mode, lshr_optab, op1_low, wordm1,
1161 NULL_RTX, 1, methods);
1163 op1_high = expand_binop (word_mode, add_optab, op1_high, temp,
1164 NULL_RTX, 0, OPTAB_DIRECT);
1167 temp = expand_binop (word_mode, ashr_optab, op1_low, wordm1,
1168 NULL_RTX, 0, methods);
1171 op1_high = expand_binop (word_mode, sub_optab, op1_high, temp,
1172 NULL_RTX, 0, OPTAB_DIRECT);
1179 temp = expand_binop (word_mode, smul_optab, op1_high, op0_low,
1180 NULL_RTX, 0, OPTAB_DIRECT);
1184 /* OP1_HIGH should now be dead. */
1186 adjust = expand_binop (word_mode, add_optab, adjust, temp,
1187 adjust, 0, OPTAB_DIRECT);
1189 if (target && !REG_P (target))
1193 product = expand_binop (mode, umul_widen_optab, op0_low, op1_low,
1194 target, 1, OPTAB_DIRECT);
1196 product = expand_binop (mode, smul_widen_optab, op0_low, op1_low,
1197 target, 1, OPTAB_DIRECT);
1202 product_high = operand_subword (product, high, 1, mode);
1203 adjust = expand_binop (word_mode, add_optab, product_high, adjust,
1204 REG_P (product_high) ? product_high : adjust,
1206 emit_move_insn (product_high, adjust);
1210 /* Wrapper around expand_binop which takes an rtx code to specify
1211 the operation to perform, not an optab pointer. All other
1212 arguments are the same. */
1214 expand_simple_binop (enum machine_mode mode, enum rtx_code code, rtx op0,
1215 rtx op1, rtx target, int unsignedp,
1216 enum optab_methods methods)
1218 optab binop = code_to_optab[(int) code];
1221 return expand_binop (mode, binop, op0, op1, target, unsignedp, methods);
1224 /* Return whether OP0 and OP1 should be swapped when expanding a commutative
1225 binop. Order them according to commutative_operand_precedence and, if
1226 possible, try to put TARGET or a pseudo first. */
1228 swap_commutative_operands_with_target (rtx target, rtx op0, rtx op1)
1230 int op0_prec = commutative_operand_precedence (op0);
1231 int op1_prec = commutative_operand_precedence (op1);
1233 if (op0_prec < op1_prec)
1236 if (op0_prec > op1_prec)
1239 /* With equal precedence, both orders are ok, but it is better if the
1240 first operand is TARGET, or if both TARGET and OP0 are pseudos. */
1241 if (target == 0 || REG_P (target))
1242 return (REG_P (op1) && !REG_P (op0)) || target == op1;
1244 return rtx_equal_p (op1, target);
1248 /* Helper function for expand_binop: handle the case where there
1249 is an insn that directly implements the indicated operation.
1250 Returns null if this is not possible. */
1252 expand_binop_directly (enum machine_mode mode, optab binoptab,
1254 rtx target, int unsignedp, enum optab_methods methods,
1255 int commutative_op, rtx last)
1257 int icode = (int) binoptab->handlers[(int) mode].insn_code;
1258 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
1259 enum machine_mode mode1 = insn_data[icode].operand[2].mode;
1260 enum machine_mode tmp_mode;
1262 rtx xop0 = op0, xop1 = op1;
1268 temp = gen_reg_rtx (mode);
1270 /* If it is a commutative operator and the modes would match
1271 if we would swap the operands, we can save the conversions. */
1274 if (GET_MODE (op0) != mode0 && GET_MODE (op1) != mode1
1275 && GET_MODE (op0) == mode1 && GET_MODE (op1) == mode0)
1279 tmp = op0; op0 = op1; op1 = tmp;
1280 tmp = xop0; xop0 = xop1; xop1 = tmp;
1284 /* In case the insn wants input operands in modes different from
1285 those of the actual operands, convert the operands. It would
1286 seem that we don't need to convert CONST_INTs, but we do, so
1287 that they're properly zero-extended, sign-extended or truncated
1290 if (GET_MODE (op0) != mode0 && mode0 != VOIDmode)
1291 xop0 = convert_modes (mode0,
1292 GET_MODE (op0) != VOIDmode
1297 if (GET_MODE (op1) != mode1 && mode1 != VOIDmode)
1298 xop1 = convert_modes (mode1,
1299 GET_MODE (op1) != VOIDmode
1304 /* Now, if insn's predicates don't allow our operands, put them into
1307 if (!insn_data[icode].operand[1].predicate (xop0, mode0)
1308 && mode0 != VOIDmode)
1309 xop0 = copy_to_mode_reg (mode0, xop0);
1311 if (!insn_data[icode].operand[2].predicate (xop1, mode1)
1312 && mode1 != VOIDmode)
1313 xop1 = copy_to_mode_reg (mode1, xop1);
1315 if (binoptab == vec_pack_trunc_optab
1316 || binoptab == vec_pack_usat_optab
1317 || binoptab == vec_pack_ssat_optab
1318 || binoptab == vec_pack_ufix_trunc_optab
1319 || binoptab == vec_pack_sfix_trunc_optab)
1321 /* The mode of the result is different then the mode of the
1323 tmp_mode = insn_data[icode].operand[0].mode;
1324 if (GET_MODE_NUNITS (tmp_mode) != 2 * GET_MODE_NUNITS (mode))
1330 if (!insn_data[icode].operand[0].predicate (temp, tmp_mode))
1331 temp = gen_reg_rtx (tmp_mode);
1333 pat = GEN_FCN (icode) (temp, xop0, xop1);
1336 /* If PAT is composed of more than one insn, try to add an appropriate
1337 REG_EQUAL note to it. If we can't because TEMP conflicts with an
1338 operand, call expand_binop again, this time without a target. */
1339 if (INSN_P (pat) && NEXT_INSN (pat) != NULL_RTX
1340 && ! add_equal_note (pat, temp, binoptab->code, xop0, xop1))
1342 delete_insns_since (last);
1343 return expand_binop (mode, binoptab, op0, op1, NULL_RTX,
1344 unsignedp, methods);
1351 delete_insns_since (last);
1355 /* Generate code to perform an operation specified by BINOPTAB
1356 on operands OP0 and OP1, with result having machine-mode MODE.
1358 UNSIGNEDP is for the case where we have to widen the operands
1359 to perform the operation. It says to use zero-extension.
1361 If TARGET is nonzero, the value
1362 is generated there, if it is convenient to do so.
1363 In all cases an rtx is returned for the locus of the value;
1364 this may or may not be TARGET. */
1367 expand_binop (enum machine_mode mode, optab binoptab, rtx op0, rtx op1,
1368 rtx target, int unsignedp, enum optab_methods methods)
1370 enum optab_methods next_methods
1371 = (methods == OPTAB_LIB || methods == OPTAB_LIB_WIDEN
1372 ? OPTAB_WIDEN : methods);
1373 enum mode_class class;
1374 enum machine_mode wider_mode;
1376 int commutative_op = 0;
1377 int shift_op = (binoptab->code == ASHIFT
1378 || binoptab->code == ASHIFTRT
1379 || binoptab->code == LSHIFTRT
1380 || binoptab->code == ROTATE
1381 || binoptab->code == ROTATERT);
1382 rtx entry_last = get_last_insn ();
1385 class = GET_MODE_CLASS (mode);
1387 /* If subtracting an integer constant, convert this into an addition of
1388 the negated constant. */
1390 if (binoptab == sub_optab && GET_CODE (op1) == CONST_INT)
1392 op1 = negate_rtx (mode, op1);
1393 binoptab = add_optab;
1396 /* If we are inside an appropriately-short loop and we are optimizing,
1397 force expensive constants into a register. */
1398 if (CONSTANT_P (op0) && optimize
1399 && rtx_cost (op0, binoptab->code) > COSTS_N_INSNS (1))
1401 if (GET_MODE (op0) != VOIDmode)
1402 op0 = convert_modes (mode, VOIDmode, op0, unsignedp);
1403 op0 = force_reg (mode, op0);
1406 if (CONSTANT_P (op1) && optimize
1407 && ! shift_op && rtx_cost (op1, binoptab->code) > COSTS_N_INSNS (1))
1409 if (GET_MODE (op1) != VOIDmode)
1410 op1 = convert_modes (mode, VOIDmode, op1, unsignedp);
1411 op1 = force_reg (mode, op1);
1414 /* Record where to delete back to if we backtrack. */
1415 last = get_last_insn ();
1417 /* If operation is commutative,
1418 try to make the first operand a register.
1419 Even better, try to make it the same as the target.
1420 Also try to make the last operand a constant. */
1421 if (GET_RTX_CLASS (binoptab->code) == RTX_COMM_ARITH
1422 || binoptab == smul_widen_optab
1423 || binoptab == umul_widen_optab
1424 || binoptab == smul_highpart_optab
1425 || binoptab == umul_highpart_optab)
1429 if (swap_commutative_operands_with_target (target, op0, op1))
1437 /* If we can do it with a three-operand insn, do so. */
1439 if (methods != OPTAB_MUST_WIDEN
1440 && binoptab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
1442 temp = expand_binop_directly (mode, binoptab, op0, op1, target,
1443 unsignedp, methods, commutative_op, last);
1448 /* If we were trying to rotate, and that didn't work, try rotating
1449 the other direction before falling back to shifts and bitwise-or. */
1450 if (((binoptab == rotl_optab
1451 && rotr_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
1452 || (binoptab == rotr_optab
1453 && rotl_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing))
1454 && class == MODE_INT)
1456 optab otheroptab = (binoptab == rotl_optab ? rotr_optab : rotl_optab);
1458 unsigned int bits = GET_MODE_BITSIZE (mode);
1460 if (GET_CODE (op1) == CONST_INT)
1461 newop1 = GEN_INT (bits - INTVAL (op1));
1462 else if (targetm.shift_truncation_mask (mode) == bits - 1)
1463 newop1 = negate_rtx (mode, op1);
1465 newop1 = expand_binop (mode, sub_optab,
1466 GEN_INT (bits), op1,
1467 NULL_RTX, unsignedp, OPTAB_DIRECT);
1469 temp = expand_binop_directly (mode, otheroptab, op0, newop1,
1470 target, unsignedp, methods,
1471 commutative_op, last);
1476 /* If this is a multiply, see if we can do a widening operation that
1477 takes operands of this mode and makes a wider mode. */
1479 if (binoptab == smul_optab
1480 && GET_MODE_WIDER_MODE (mode) != VOIDmode
1481 && (((unsignedp ? umul_widen_optab : smul_widen_optab)
1482 ->handlers[(int) GET_MODE_WIDER_MODE (mode)].insn_code)
1483 != CODE_FOR_nothing))
1485 temp = expand_binop (GET_MODE_WIDER_MODE (mode),
1486 unsignedp ? umul_widen_optab : smul_widen_optab,
1487 op0, op1, NULL_RTX, unsignedp, OPTAB_DIRECT);
1491 if (GET_MODE_CLASS (mode) == MODE_INT
1492 && TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode),
1493 GET_MODE_BITSIZE (GET_MODE (temp))))
1494 return gen_lowpart (mode, temp);
1496 return convert_to_mode (mode, temp, unsignedp);
1500 /* Look for a wider mode of the same class for which we think we
1501 can open-code the operation. Check for a widening multiply at the
1502 wider mode as well. */
1504 if (CLASS_HAS_WIDER_MODES_P (class)
1505 && methods != OPTAB_DIRECT && methods != OPTAB_LIB)
1506 for (wider_mode = GET_MODE_WIDER_MODE (mode);
1507 wider_mode != VOIDmode;
1508 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
1510 if (binoptab->handlers[(int) wider_mode].insn_code != CODE_FOR_nothing
1511 || (binoptab == smul_optab
1512 && GET_MODE_WIDER_MODE (wider_mode) != VOIDmode
1513 && (((unsignedp ? umul_widen_optab : smul_widen_optab)
1514 ->handlers[(int) GET_MODE_WIDER_MODE (wider_mode)].insn_code)
1515 != CODE_FOR_nothing)))
1517 rtx xop0 = op0, xop1 = op1;
1520 /* For certain integer operations, we need not actually extend
1521 the narrow operands, as long as we will truncate
1522 the results to the same narrowness. */
1524 if ((binoptab == ior_optab || binoptab == and_optab
1525 || binoptab == xor_optab
1526 || binoptab == add_optab || binoptab == sub_optab
1527 || binoptab == smul_optab || binoptab == ashl_optab)
1528 && class == MODE_INT)
1531 xop0 = widen_operand (xop0, wider_mode, mode, unsignedp, no_extend);
1533 /* The second operand of a shift must always be extended. */
1534 xop1 = widen_operand (xop1, wider_mode, mode, unsignedp,
1535 no_extend && binoptab != ashl_optab);
1537 temp = expand_binop (wider_mode, binoptab, xop0, xop1, NULL_RTX,
1538 unsignedp, OPTAB_DIRECT);
1541 if (class != MODE_INT
1542 || !TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode),
1543 GET_MODE_BITSIZE (wider_mode)))
1546 target = gen_reg_rtx (mode);
1547 convert_move (target, temp, 0);
1551 return gen_lowpart (mode, temp);
1554 delete_insns_since (last);
1558 /* These can be done a word at a time. */
1559 if ((binoptab == and_optab || binoptab == ior_optab || binoptab == xor_optab)
1560 && class == MODE_INT
1561 && GET_MODE_SIZE (mode) > UNITS_PER_WORD
1562 && binoptab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing)
1568 /* If TARGET is the same as one of the operands, the REG_EQUAL note
1569 won't be accurate, so use a new target. */
1570 if (target == 0 || target == op0 || target == op1)
1571 target = gen_reg_rtx (mode);
1575 /* Do the actual arithmetic. */
1576 for (i = 0; i < GET_MODE_BITSIZE (mode) / BITS_PER_WORD; i++)
1578 rtx target_piece = operand_subword (target, i, 1, mode);
1579 rtx x = expand_binop (word_mode, binoptab,
1580 operand_subword_force (op0, i, mode),
1581 operand_subword_force (op1, i, mode),
1582 target_piece, unsignedp, next_methods);
1587 if (target_piece != x)
1588 emit_move_insn (target_piece, x);
1591 insns = get_insns ();
1594 if (i == GET_MODE_BITSIZE (mode) / BITS_PER_WORD)
1596 if (binoptab->code != UNKNOWN)
1598 = gen_rtx_fmt_ee (binoptab->code, mode,
1599 copy_rtx (op0), copy_rtx (op1));
1603 emit_no_conflict_block (insns, target, op0, op1, equiv_value);
1608 /* Synthesize double word shifts from single word shifts. */
1609 if ((binoptab == lshr_optab || binoptab == ashl_optab
1610 || binoptab == ashr_optab)
1611 && class == MODE_INT
1612 && (GET_CODE (op1) == CONST_INT || !optimize_size)
1613 && GET_MODE_SIZE (mode) == 2 * UNITS_PER_WORD
1614 && binoptab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing
1615 && ashl_optab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing
1616 && lshr_optab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing)
1618 unsigned HOST_WIDE_INT shift_mask, double_shift_mask;
1619 enum machine_mode op1_mode;
1621 double_shift_mask = targetm.shift_truncation_mask (mode);
1622 shift_mask = targetm.shift_truncation_mask (word_mode);
1623 op1_mode = GET_MODE (op1) != VOIDmode ? GET_MODE (op1) : word_mode;
1625 /* Apply the truncation to constant shifts. */
1626 if (double_shift_mask > 0 && GET_CODE (op1) == CONST_INT)
1627 op1 = GEN_INT (INTVAL (op1) & double_shift_mask);
1629 if (op1 == CONST0_RTX (op1_mode))
1632 /* Make sure that this is a combination that expand_doubleword_shift
1633 can handle. See the comments there for details. */
1634 if (double_shift_mask == 0
1635 || (shift_mask == BITS_PER_WORD - 1
1636 && double_shift_mask == BITS_PER_WORD * 2 - 1))
1638 rtx insns, equiv_value;
1639 rtx into_target, outof_target;
1640 rtx into_input, outof_input;
1641 int left_shift, outof_word;
1643 /* If TARGET is the same as one of the operands, the REG_EQUAL note
1644 won't be accurate, so use a new target. */
1645 if (target == 0 || target == op0 || target == op1)
1646 target = gen_reg_rtx (mode);
1650 /* OUTOF_* is the word we are shifting bits away from, and
1651 INTO_* is the word that we are shifting bits towards, thus
1652 they differ depending on the direction of the shift and
1653 WORDS_BIG_ENDIAN. */
1655 left_shift = binoptab == ashl_optab;
1656 outof_word = left_shift ^ ! WORDS_BIG_ENDIAN;
1658 outof_target = operand_subword (target, outof_word, 1, mode);
1659 into_target = operand_subword (target, 1 - outof_word, 1, mode);
1661 outof_input = operand_subword_force (op0, outof_word, mode);
1662 into_input = operand_subword_force (op0, 1 - outof_word, mode);
1664 if (expand_doubleword_shift (op1_mode, binoptab,
1665 outof_input, into_input, op1,
1666 outof_target, into_target,
1667 unsignedp, next_methods, shift_mask))
1669 insns = get_insns ();
1672 equiv_value = gen_rtx_fmt_ee (binoptab->code, mode, op0, op1);
1673 emit_no_conflict_block (insns, target, op0, op1, equiv_value);
1680 /* Synthesize double word rotates from single word shifts. */
1681 if ((binoptab == rotl_optab || binoptab == rotr_optab)
1682 && class == MODE_INT
1683 && GET_CODE (op1) == CONST_INT
1684 && GET_MODE_SIZE (mode) == 2 * UNITS_PER_WORD
1685 && ashl_optab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing
1686 && lshr_optab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing)
1689 rtx into_target, outof_target;
1690 rtx into_input, outof_input;
1692 int shift_count, left_shift, outof_word;
1694 /* If TARGET is the same as one of the operands, the REG_EQUAL note
1695 won't be accurate, so use a new target. Do this also if target is not
1696 a REG, first because having a register instead may open optimization
1697 opportunities, and second because if target and op0 happen to be MEMs
1698 designating the same location, we would risk clobbering it too early
1699 in the code sequence we generate below. */
1700 if (target == 0 || target == op0 || target == op1 || ! REG_P (target))
1701 target = gen_reg_rtx (mode);
1705 shift_count = INTVAL (op1);
1707 /* OUTOF_* is the word we are shifting bits away from, and
1708 INTO_* is the word that we are shifting bits towards, thus
1709 they differ depending on the direction of the shift and
1710 WORDS_BIG_ENDIAN. */
1712 left_shift = (binoptab == rotl_optab);
1713 outof_word = left_shift ^ ! WORDS_BIG_ENDIAN;
1715 outof_target = operand_subword (target, outof_word, 1, mode);
1716 into_target = operand_subword (target, 1 - outof_word, 1, mode);
1718 outof_input = operand_subword_force (op0, outof_word, mode);
1719 into_input = operand_subword_force (op0, 1 - outof_word, mode);
1721 if (shift_count == BITS_PER_WORD)
1723 /* This is just a word swap. */
1724 emit_move_insn (outof_target, into_input);
1725 emit_move_insn (into_target, outof_input);
1730 rtx into_temp1, into_temp2, outof_temp1, outof_temp2;
1731 rtx first_shift_count, second_shift_count;
1732 optab reverse_unsigned_shift, unsigned_shift;
1734 reverse_unsigned_shift = (left_shift ^ (shift_count < BITS_PER_WORD)
1735 ? lshr_optab : ashl_optab);
1737 unsigned_shift = (left_shift ^ (shift_count < BITS_PER_WORD)
1738 ? ashl_optab : lshr_optab);
1740 if (shift_count > BITS_PER_WORD)
1742 first_shift_count = GEN_INT (shift_count - BITS_PER_WORD);
1743 second_shift_count = GEN_INT (2 * BITS_PER_WORD - shift_count);
1747 first_shift_count = GEN_INT (BITS_PER_WORD - shift_count);
1748 second_shift_count = GEN_INT (shift_count);
1751 into_temp1 = expand_binop (word_mode, unsigned_shift,
1752 outof_input, first_shift_count,
1753 NULL_RTX, unsignedp, next_methods);
1754 into_temp2 = expand_binop (word_mode, reverse_unsigned_shift,
1755 into_input, second_shift_count,
1756 NULL_RTX, unsignedp, next_methods);
1758 if (into_temp1 != 0 && into_temp2 != 0)
1759 inter = expand_binop (word_mode, ior_optab, into_temp1, into_temp2,
1760 into_target, unsignedp, next_methods);
1764 if (inter != 0 && inter != into_target)
1765 emit_move_insn (into_target, inter);
1767 outof_temp1 = expand_binop (word_mode, unsigned_shift,
1768 into_input, first_shift_count,
1769 NULL_RTX, unsignedp, next_methods);
1770 outof_temp2 = expand_binop (word_mode, reverse_unsigned_shift,
1771 outof_input, second_shift_count,
1772 NULL_RTX, unsignedp, next_methods);
1774 if (inter != 0 && outof_temp1 != 0 && outof_temp2 != 0)
1775 inter = expand_binop (word_mode, ior_optab,
1776 outof_temp1, outof_temp2,
1777 outof_target, unsignedp, next_methods);
1779 if (inter != 0 && inter != outof_target)
1780 emit_move_insn (outof_target, inter);
1783 insns = get_insns ();
1788 /* One may be tempted to wrap the insns in a REG_NO_CONFLICT
1789 block to help the register allocator a bit. But a multi-word
1790 rotate will need all the input bits when setting the output
1791 bits, so there clearly is a conflict between the input and
1792 output registers. So we can't use a no-conflict block here. */
1798 /* These can be done a word at a time by propagating carries. */
1799 if ((binoptab == add_optab || binoptab == sub_optab)
1800 && class == MODE_INT
1801 && GET_MODE_SIZE (mode) >= 2 * UNITS_PER_WORD
1802 && binoptab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing)
1805 optab otheroptab = binoptab == add_optab ? sub_optab : add_optab;
1806 const unsigned int nwords = GET_MODE_BITSIZE (mode) / BITS_PER_WORD;
1807 rtx carry_in = NULL_RTX, carry_out = NULL_RTX;
1808 rtx xop0, xop1, xtarget;
1810 /* We can handle either a 1 or -1 value for the carry. If STORE_FLAG
1811 value is one of those, use it. Otherwise, use 1 since it is the
1812 one easiest to get. */
1813 #if STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1
1814 int normalizep = STORE_FLAG_VALUE;
1819 /* Prepare the operands. */
1820 xop0 = force_reg (mode, op0);
1821 xop1 = force_reg (mode, op1);
1823 xtarget = gen_reg_rtx (mode);
1825 if (target == 0 || !REG_P (target))
1828 /* Indicate for flow that the entire target reg is being set. */
1830 emit_insn (gen_rtx_CLOBBER (VOIDmode, xtarget));
1832 /* Do the actual arithmetic. */
1833 for (i = 0; i < nwords; i++)
1835 int index = (WORDS_BIG_ENDIAN ? nwords - i - 1 : i);
1836 rtx target_piece = operand_subword (xtarget, index, 1, mode);
1837 rtx op0_piece = operand_subword_force (xop0, index, mode);
1838 rtx op1_piece = operand_subword_force (xop1, index, mode);
1841 /* Main add/subtract of the input operands. */
1842 x = expand_binop (word_mode, binoptab,
1843 op0_piece, op1_piece,
1844 target_piece, unsignedp, next_methods);
1850 /* Store carry from main add/subtract. */
1851 carry_out = gen_reg_rtx (word_mode);
1852 carry_out = emit_store_flag_force (carry_out,
1853 (binoptab == add_optab
1856 word_mode, 1, normalizep);
1863 /* Add/subtract previous carry to main result. */
1864 newx = expand_binop (word_mode,
1865 normalizep == 1 ? binoptab : otheroptab,
1867 NULL_RTX, 1, next_methods);
1871 /* Get out carry from adding/subtracting carry in. */
1872 rtx carry_tmp = gen_reg_rtx (word_mode);
1873 carry_tmp = emit_store_flag_force (carry_tmp,
1874 (binoptab == add_optab
1877 word_mode, 1, normalizep);
1879 /* Logical-ior the two poss. carry together. */
1880 carry_out = expand_binop (word_mode, ior_optab,
1881 carry_out, carry_tmp,
1882 carry_out, 0, next_methods);
1886 emit_move_insn (target_piece, newx);
1890 if (x != target_piece)
1891 emit_move_insn (target_piece, x);
1894 carry_in = carry_out;
1897 if (i == GET_MODE_BITSIZE (mode) / (unsigned) BITS_PER_WORD)
1899 if (mov_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing
1900 || ! rtx_equal_p (target, xtarget))
1902 rtx temp = emit_move_insn (target, xtarget);
1904 set_unique_reg_note (temp,
1906 gen_rtx_fmt_ee (binoptab->code, mode,
1917 delete_insns_since (last);
1920 /* Attempt to synthesize double word multiplies using a sequence of word
1921 mode multiplications. We first attempt to generate a sequence using a
1922 more efficient unsigned widening multiply, and if that fails we then
1923 try using a signed widening multiply. */
1925 if (binoptab == smul_optab
1926 && class == MODE_INT
1927 && GET_MODE_SIZE (mode) == 2 * UNITS_PER_WORD
1928 && smul_optab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing
1929 && add_optab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing)
1931 rtx product = NULL_RTX;
1933 if (umul_widen_optab->handlers[(int) mode].insn_code
1934 != CODE_FOR_nothing)
1936 product = expand_doubleword_mult (mode, op0, op1, target,
1939 delete_insns_since (last);
1942 if (product == NULL_RTX
1943 && smul_widen_optab->handlers[(int) mode].insn_code
1944 != CODE_FOR_nothing)
1946 product = expand_doubleword_mult (mode, op0, op1, target,
1949 delete_insns_since (last);
1952 if (product != NULL_RTX)
1954 if (mov_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
1956 temp = emit_move_insn (target ? target : product, product);
1957 set_unique_reg_note (temp,
1959 gen_rtx_fmt_ee (MULT, mode,
1967 /* It can't be open-coded in this mode.
1968 Use a library call if one is available and caller says that's ok. */
1970 if (binoptab->handlers[(int) mode].libfunc
1971 && (methods == OPTAB_LIB || methods == OPTAB_LIB_WIDEN))
1975 enum machine_mode op1_mode = mode;
1982 op1_mode = targetm.libgcc_shift_count_mode ();
1983 /* Specify unsigned here,
1984 since negative shift counts are meaningless. */
1985 op1x = convert_to_mode (op1_mode, op1, 1);
1988 if (GET_MODE (op0) != VOIDmode
1989 && GET_MODE (op0) != mode)
1990 op0 = convert_to_mode (mode, op0, unsignedp);
1992 /* Pass 1 for NO_QUEUE so we don't lose any increments
1993 if the libcall is cse'd or moved. */
1994 value = emit_library_call_value (binoptab->handlers[(int) mode].libfunc,
1995 NULL_RTX, LCT_CONST, mode, 2,
1996 op0, mode, op1x, op1_mode);
1998 insns = get_insns ();
2001 target = gen_reg_rtx (mode);
2002 emit_libcall_block (insns, target, value,
2003 gen_rtx_fmt_ee (binoptab->code, mode, op0, op1));
2008 delete_insns_since (last);
2010 /* It can't be done in this mode. Can we do it in a wider mode? */
2012 if (! (methods == OPTAB_WIDEN || methods == OPTAB_LIB_WIDEN
2013 || methods == OPTAB_MUST_WIDEN))
2015 /* Caller says, don't even try. */
2016 delete_insns_since (entry_last);
2020 /* Compute the value of METHODS to pass to recursive calls.
2021 Don't allow widening to be tried recursively. */
2023 methods = (methods == OPTAB_LIB_WIDEN ? OPTAB_LIB : OPTAB_DIRECT);
2025 /* Look for a wider mode of the same class for which it appears we can do
2028 if (CLASS_HAS_WIDER_MODES_P (class))
2030 for (wider_mode = GET_MODE_WIDER_MODE (mode);
2031 wider_mode != VOIDmode;
2032 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
2034 if ((binoptab->handlers[(int) wider_mode].insn_code
2035 != CODE_FOR_nothing)
2036 || (methods == OPTAB_LIB
2037 && binoptab->handlers[(int) wider_mode].libfunc))
2039 rtx xop0 = op0, xop1 = op1;
2042 /* For certain integer operations, we need not actually extend
2043 the narrow operands, as long as we will truncate
2044 the results to the same narrowness. */
2046 if ((binoptab == ior_optab || binoptab == and_optab
2047 || binoptab == xor_optab
2048 || binoptab == add_optab || binoptab == sub_optab
2049 || binoptab == smul_optab || binoptab == ashl_optab)
2050 && class == MODE_INT)
2053 xop0 = widen_operand (xop0, wider_mode, mode,
2054 unsignedp, no_extend);
2056 /* The second operand of a shift must always be extended. */
2057 xop1 = widen_operand (xop1, wider_mode, mode, unsignedp,
2058 no_extend && binoptab != ashl_optab);
2060 temp = expand_binop (wider_mode, binoptab, xop0, xop1, NULL_RTX,
2061 unsignedp, methods);
2064 if (class != MODE_INT
2065 || !TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode),
2066 GET_MODE_BITSIZE (wider_mode)))
2069 target = gen_reg_rtx (mode);
2070 convert_move (target, temp, 0);
2074 return gen_lowpart (mode, temp);
2077 delete_insns_since (last);
2082 delete_insns_since (entry_last);
2086 /* Expand a binary operator which has both signed and unsigned forms.
2087 UOPTAB is the optab for unsigned operations, and SOPTAB is for
2090 If we widen unsigned operands, we may use a signed wider operation instead
2091 of an unsigned wider operation, since the result would be the same. */
2094 sign_expand_binop (enum machine_mode mode, optab uoptab, optab soptab,
2095 rtx op0, rtx op1, rtx target, int unsignedp,
2096 enum optab_methods methods)
2099 optab direct_optab = unsignedp ? uoptab : soptab;
2100 struct optab wide_soptab;
2102 /* Do it without widening, if possible. */
2103 temp = expand_binop (mode, direct_optab, op0, op1, target,
2104 unsignedp, OPTAB_DIRECT);
2105 if (temp || methods == OPTAB_DIRECT)
2108 /* Try widening to a signed int. Make a fake signed optab that
2109 hides any signed insn for direct use. */
2110 wide_soptab = *soptab;
2111 wide_soptab.handlers[(int) mode].insn_code = CODE_FOR_nothing;
2112 wide_soptab.handlers[(int) mode].libfunc = 0;
2114 temp = expand_binop (mode, &wide_soptab, op0, op1, target,
2115 unsignedp, OPTAB_WIDEN);
2117 /* For unsigned operands, try widening to an unsigned int. */
2118 if (temp == 0 && unsignedp)
2119 temp = expand_binop (mode, uoptab, op0, op1, target,
2120 unsignedp, OPTAB_WIDEN);
2121 if (temp || methods == OPTAB_WIDEN)
2124 /* Use the right width lib call if that exists. */
2125 temp = expand_binop (mode, direct_optab, op0, op1, target, unsignedp, OPTAB_LIB);
2126 if (temp || methods == OPTAB_LIB)
2129 /* Must widen and use a lib call, use either signed or unsigned. */
2130 temp = expand_binop (mode, &wide_soptab, op0, op1, target,
2131 unsignedp, methods);
2135 return expand_binop (mode, uoptab, op0, op1, target,
2136 unsignedp, methods);
2140 /* Generate code to perform an operation specified by UNOPPTAB
2141 on operand OP0, with two results to TARG0 and TARG1.
2142 We assume that the order of the operands for the instruction
2143 is TARG0, TARG1, OP0.
2145 Either TARG0 or TARG1 may be zero, but what that means is that
2146 the result is not actually wanted. We will generate it into
2147 a dummy pseudo-reg and discard it. They may not both be zero.
2149 Returns 1 if this operation can be performed; 0 if not. */
2152 expand_twoval_unop (optab unoptab, rtx op0, rtx targ0, rtx targ1,
2155 enum machine_mode mode = GET_MODE (targ0 ? targ0 : targ1);
2156 enum mode_class class;
2157 enum machine_mode wider_mode;
2158 rtx entry_last = get_last_insn ();
2161 class = GET_MODE_CLASS (mode);
2164 targ0 = gen_reg_rtx (mode);
2166 targ1 = gen_reg_rtx (mode);
2168 /* Record where to go back to if we fail. */
2169 last = get_last_insn ();
2171 if (unoptab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
2173 int icode = (int) unoptab->handlers[(int) mode].insn_code;
2174 enum machine_mode mode0 = insn_data[icode].operand[2].mode;
2178 if (GET_MODE (xop0) != VOIDmode
2179 && GET_MODE (xop0) != mode0)
2180 xop0 = convert_to_mode (mode0, xop0, unsignedp);
2182 /* Now, if insn doesn't accept these operands, put them into pseudos. */
2183 if (!insn_data[icode].operand[2].predicate (xop0, mode0))
2184 xop0 = copy_to_mode_reg (mode0, xop0);
2186 /* We could handle this, but we should always be called with a pseudo
2187 for our targets and all insns should take them as outputs. */
2188 gcc_assert (insn_data[icode].operand[0].predicate (targ0, mode));
2189 gcc_assert (insn_data[icode].operand[1].predicate (targ1, mode));
2191 pat = GEN_FCN (icode) (targ0, targ1, xop0);
2198 delete_insns_since (last);
2201 /* It can't be done in this mode. Can we do it in a wider mode? */
2203 if (CLASS_HAS_WIDER_MODES_P (class))
2205 for (wider_mode = GET_MODE_WIDER_MODE (mode);
2206 wider_mode != VOIDmode;
2207 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
2209 if (unoptab->handlers[(int) wider_mode].insn_code
2210 != CODE_FOR_nothing)
2212 rtx t0 = gen_reg_rtx (wider_mode);
2213 rtx t1 = gen_reg_rtx (wider_mode);
2214 rtx cop0 = convert_modes (wider_mode, mode, op0, unsignedp);
2216 if (expand_twoval_unop (unoptab, cop0, t0, t1, unsignedp))
2218 convert_move (targ0, t0, unsignedp);
2219 convert_move (targ1, t1, unsignedp);
2223 delete_insns_since (last);
2228 delete_insns_since (entry_last);
2232 /* Generate code to perform an operation specified by BINOPTAB
2233 on operands OP0 and OP1, with two results to TARG1 and TARG2.
2234 We assume that the order of the operands for the instruction
2235 is TARG0, OP0, OP1, TARG1, which would fit a pattern like
2236 [(set TARG0 (operate OP0 OP1)) (set TARG1 (operate ...))].
2238 Either TARG0 or TARG1 may be zero, but what that means is that
2239 the result is not actually wanted. We will generate it into
2240 a dummy pseudo-reg and discard it. They may not both be zero.
2242 Returns 1 if this operation can be performed; 0 if not. */
2245 expand_twoval_binop (optab binoptab, rtx op0, rtx op1, rtx targ0, rtx targ1,
2248 enum machine_mode mode = GET_MODE (targ0 ? targ0 : targ1);
2249 enum mode_class class;
2250 enum machine_mode wider_mode;
2251 rtx entry_last = get_last_insn ();
2254 class = GET_MODE_CLASS (mode);
2256 /* If we are inside an appropriately-short loop and we are optimizing,
2257 force expensive constants into a register. */
2258 if (CONSTANT_P (op0) && optimize
2259 && rtx_cost (op0, binoptab->code) > COSTS_N_INSNS (1))
2260 op0 = force_reg (mode, op0);
2262 if (CONSTANT_P (op1) && optimize
2263 && rtx_cost (op1, binoptab->code) > COSTS_N_INSNS (1))
2264 op1 = force_reg (mode, op1);
2267 targ0 = gen_reg_rtx (mode);
2269 targ1 = gen_reg_rtx (mode);
2271 /* Record where to go back to if we fail. */
2272 last = get_last_insn ();
2274 if (binoptab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
2276 int icode = (int) binoptab->handlers[(int) mode].insn_code;
2277 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
2278 enum machine_mode mode1 = insn_data[icode].operand[2].mode;
2280 rtx xop0 = op0, xop1 = op1;
2282 /* In case the insn wants input operands in modes different from
2283 those of the actual operands, convert the operands. It would
2284 seem that we don't need to convert CONST_INTs, but we do, so
2285 that they're properly zero-extended, sign-extended or truncated
2288 if (GET_MODE (op0) != mode0 && mode0 != VOIDmode)
2289 xop0 = convert_modes (mode0,
2290 GET_MODE (op0) != VOIDmode
2295 if (GET_MODE (op1) != mode1 && mode1 != VOIDmode)
2296 xop1 = convert_modes (mode1,
2297 GET_MODE (op1) != VOIDmode
2302 /* Now, if insn doesn't accept these operands, put them into pseudos. */
2303 if (!insn_data[icode].operand[1].predicate (xop0, mode0))
2304 xop0 = copy_to_mode_reg (mode0, xop0);
2306 if (!insn_data[icode].operand[2].predicate (xop1, mode1))
2307 xop1 = copy_to_mode_reg (mode1, xop1);
2309 /* We could handle this, but we should always be called with a pseudo
2310 for our targets and all insns should take them as outputs. */
2311 gcc_assert (insn_data[icode].operand[0].predicate (targ0, mode));
2312 gcc_assert (insn_data[icode].operand[3].predicate (targ1, mode));
2314 pat = GEN_FCN (icode) (targ0, xop0, xop1, targ1);
2321 delete_insns_since (last);
2324 /* It can't be done in this mode. Can we do it in a wider mode? */
2326 if (CLASS_HAS_WIDER_MODES_P (class))
2328 for (wider_mode = GET_MODE_WIDER_MODE (mode);
2329 wider_mode != VOIDmode;
2330 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
2332 if (binoptab->handlers[(int) wider_mode].insn_code
2333 != CODE_FOR_nothing)
2335 rtx t0 = gen_reg_rtx (wider_mode);
2336 rtx t1 = gen_reg_rtx (wider_mode);
2337 rtx cop0 = convert_modes (wider_mode, mode, op0, unsignedp);
2338 rtx cop1 = convert_modes (wider_mode, mode, op1, unsignedp);
2340 if (expand_twoval_binop (binoptab, cop0, cop1,
2343 convert_move (targ0, t0, unsignedp);
2344 convert_move (targ1, t1, unsignedp);
2348 delete_insns_since (last);
2353 delete_insns_since (entry_last);
2357 /* Expand the two-valued library call indicated by BINOPTAB, but
2358 preserve only one of the values. If TARG0 is non-NULL, the first
2359 value is placed into TARG0; otherwise the second value is placed
2360 into TARG1. Exactly one of TARG0 and TARG1 must be non-NULL. The
2361 value stored into TARG0 or TARG1 is equivalent to (CODE OP0 OP1).
2362 This routine assumes that the value returned by the library call is
2363 as if the return value was of an integral mode twice as wide as the
2364 mode of OP0. Returns 1 if the call was successful. */
2367 expand_twoval_binop_libfunc (optab binoptab, rtx op0, rtx op1,
2368 rtx targ0, rtx targ1, enum rtx_code code)
2370 enum machine_mode mode;
2371 enum machine_mode libval_mode;
2375 /* Exactly one of TARG0 or TARG1 should be non-NULL. */
2376 gcc_assert (!targ0 != !targ1);
2378 mode = GET_MODE (op0);
2379 if (!binoptab->handlers[(int) mode].libfunc)
2382 /* The value returned by the library function will have twice as
2383 many bits as the nominal MODE. */
2384 libval_mode = smallest_mode_for_size (2 * GET_MODE_BITSIZE (mode),
2387 libval = emit_library_call_value (binoptab->handlers[(int) mode].libfunc,
2388 NULL_RTX, LCT_CONST,
2392 /* Get the part of VAL containing the value that we want. */
2393 libval = simplify_gen_subreg (mode, libval, libval_mode,
2394 targ0 ? 0 : GET_MODE_SIZE (mode));
2395 insns = get_insns ();
2397 /* Move the into the desired location. */
2398 emit_libcall_block (insns, targ0 ? targ0 : targ1, libval,
2399 gen_rtx_fmt_ee (code, mode, op0, op1));
2405 /* Wrapper around expand_unop which takes an rtx code to specify
2406 the operation to perform, not an optab pointer. All other
2407 arguments are the same. */
2409 expand_simple_unop (enum machine_mode mode, enum rtx_code code, rtx op0,
2410 rtx target, int unsignedp)
2412 optab unop = code_to_optab[(int) code];
2415 return expand_unop (mode, unop, op0, target, unsignedp);
2421 (clz:wide (zero_extend:wide x)) - ((width wide) - (width narrow)). */
2423 widen_clz (enum machine_mode mode, rtx op0, rtx target)
2425 enum mode_class class = GET_MODE_CLASS (mode);
2426 if (CLASS_HAS_WIDER_MODES_P (class))
2428 enum machine_mode wider_mode;
2429 for (wider_mode = GET_MODE_WIDER_MODE (mode);
2430 wider_mode != VOIDmode;
2431 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
2433 if (clz_optab->handlers[(int) wider_mode].insn_code
2434 != CODE_FOR_nothing)
2436 rtx xop0, temp, last;
2438 last = get_last_insn ();
2441 target = gen_reg_rtx (mode);
2442 xop0 = widen_operand (op0, wider_mode, mode, true, false);
2443 temp = expand_unop (wider_mode, clz_optab, xop0, NULL_RTX, true);
2445 temp = expand_binop (wider_mode, sub_optab, temp,
2446 GEN_INT (GET_MODE_BITSIZE (wider_mode)
2447 - GET_MODE_BITSIZE (mode)),
2448 target, true, OPTAB_DIRECT);
2450 delete_insns_since (last);
2462 (lshiftrt:wide (bswap:wide x) ((width wide) - (width narrow))). */
2464 widen_bswap (enum machine_mode mode, rtx op0, rtx target)
2466 enum mode_class class = GET_MODE_CLASS (mode);
2467 enum machine_mode wider_mode;
2470 if (!CLASS_HAS_WIDER_MODES_P (class))
2473 for (wider_mode = GET_MODE_WIDER_MODE (mode);
2474 wider_mode != VOIDmode;
2475 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
2476 if (bswap_optab->handlers[wider_mode].insn_code != CODE_FOR_nothing)
2481 last = get_last_insn ();
2483 x = widen_operand (op0, wider_mode, mode, true, true);
2484 x = expand_unop (wider_mode, bswap_optab, x, NULL_RTX, true);
2487 x = expand_shift (RSHIFT_EXPR, wider_mode, x,
2488 size_int (GET_MODE_BITSIZE (wider_mode)
2489 - GET_MODE_BITSIZE (mode)),
2495 target = gen_reg_rtx (mode);
2496 emit_move_insn (target, gen_lowpart (mode, x));
2499 delete_insns_since (last);
2504 /* Try calculating bswap as two bswaps of two word-sized operands. */
2507 expand_doubleword_bswap (enum machine_mode mode, rtx op, rtx target)
2511 t1 = expand_unop (word_mode, bswap_optab,
2512 operand_subword_force (op, 0, mode), NULL_RTX, true);
2513 t0 = expand_unop (word_mode, bswap_optab,
2514 operand_subword_force (op, 1, mode), NULL_RTX, true);
2517 target = gen_reg_rtx (mode);
2519 emit_insn (gen_rtx_CLOBBER (VOIDmode, target));
2520 emit_move_insn (operand_subword (target, 0, 1, mode), t0);
2521 emit_move_insn (operand_subword (target, 1, 1, mode), t1);
2526 /* Try calculating (parity x) as (and (popcount x) 1), where
2527 popcount can also be done in a wider mode. */
2529 expand_parity (enum machine_mode mode, rtx op0, rtx target)
2531 enum mode_class class = GET_MODE_CLASS (mode);
2532 if (CLASS_HAS_WIDER_MODES_P (class))
2534 enum machine_mode wider_mode;
2535 for (wider_mode = mode; wider_mode != VOIDmode;
2536 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
2538 if (popcount_optab->handlers[(int) wider_mode].insn_code
2539 != CODE_FOR_nothing)
2541 rtx xop0, temp, last;
2543 last = get_last_insn ();
2546 target = gen_reg_rtx (mode);
2547 xop0 = widen_operand (op0, wider_mode, mode, true, false);
2548 temp = expand_unop (wider_mode, popcount_optab, xop0, NULL_RTX,
2551 temp = expand_binop (wider_mode, and_optab, temp, const1_rtx,
2552 target, true, OPTAB_DIRECT);
2554 delete_insns_since (last);
2563 /* Extract the OMODE lowpart from VAL, which has IMODE. Under certain
2564 conditions, VAL may already be a SUBREG against which we cannot generate
2565 a further SUBREG. In this case, we expect forcing the value into a
2566 register will work around the situation. */
2569 lowpart_subreg_maybe_copy (enum machine_mode omode, rtx val,
2570 enum machine_mode imode)
2573 ret = lowpart_subreg (omode, val, imode);
2576 val = force_reg (imode, val);
2577 ret = lowpart_subreg (omode, val, imode);
2578 gcc_assert (ret != NULL);
2583 /* Expand a floating point absolute value or negation operation via a
2584 logical operation on the sign bit. */
2587 expand_absneg_bit (enum rtx_code code, enum machine_mode mode,
2588 rtx op0, rtx target)
2590 const struct real_format *fmt;
2591 int bitpos, word, nwords, i;
2592 enum machine_mode imode;
2593 HOST_WIDE_INT hi, lo;
2596 /* The format has to have a simple sign bit. */
2597 fmt = REAL_MODE_FORMAT (mode);
2601 bitpos = fmt->signbit_rw;
2605 /* Don't create negative zeros if the format doesn't support them. */
2606 if (code == NEG && !fmt->has_signed_zero)
2609 if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
2611 imode = int_mode_for_mode (mode);
2612 if (imode == BLKmode)
2621 if (FLOAT_WORDS_BIG_ENDIAN)
2622 word = (GET_MODE_BITSIZE (mode) - bitpos) / BITS_PER_WORD;
2624 word = bitpos / BITS_PER_WORD;
2625 bitpos = bitpos % BITS_PER_WORD;
2626 nwords = (GET_MODE_BITSIZE (mode) + BITS_PER_WORD - 1) / BITS_PER_WORD;
2629 if (bitpos < HOST_BITS_PER_WIDE_INT)
2632 lo = (HOST_WIDE_INT) 1 << bitpos;
2636 hi = (HOST_WIDE_INT) 1 << (bitpos - HOST_BITS_PER_WIDE_INT);
2642 if (target == 0 || target == op0)
2643 target = gen_reg_rtx (mode);
2649 for (i = 0; i < nwords; ++i)
2651 rtx targ_piece = operand_subword (target, i, 1, mode);
2652 rtx op0_piece = operand_subword_force (op0, i, mode);
2656 temp = expand_binop (imode, code == ABS ? and_optab : xor_optab,
2658 immed_double_const (lo, hi, imode),
2659 targ_piece, 1, OPTAB_LIB_WIDEN);
2660 if (temp != targ_piece)
2661 emit_move_insn (targ_piece, temp);
2664 emit_move_insn (targ_piece, op0_piece);
2667 insns = get_insns ();
2670 temp = gen_rtx_fmt_e (code, mode, copy_rtx (op0));
2671 emit_no_conflict_block (insns, target, op0, NULL_RTX, temp);
2675 temp = expand_binop (imode, code == ABS ? and_optab : xor_optab,
2676 gen_lowpart (imode, op0),
2677 immed_double_const (lo, hi, imode),
2678 gen_lowpart (imode, target), 1, OPTAB_LIB_WIDEN);
2679 target = lowpart_subreg_maybe_copy (mode, temp, imode);
2681 set_unique_reg_note (get_last_insn (), REG_EQUAL,
2682 gen_rtx_fmt_e (code, mode, copy_rtx (op0)));
2688 /* Generate code to perform an operation specified by UNOPTAB
2689 on operand OP0, with result having machine-mode MODE.
2691 UNSIGNEDP is for the case where we have to widen the operands
2692 to perform the operation. It says to use zero-extension.
2694 If TARGET is nonzero, the value
2695 is generated there, if it is convenient to do so.
2696 In all cases an rtx is returned for the locus of the value;
2697 this may or may not be TARGET. */
2700 expand_unop (enum machine_mode mode, optab unoptab, rtx op0, rtx target,
2703 enum mode_class class;
2704 enum machine_mode wider_mode;
2706 rtx last = get_last_insn ();
2709 class = GET_MODE_CLASS (mode);
2711 if (unoptab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
2713 int icode = (int) unoptab->handlers[(int) mode].insn_code;
2714 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
2720 temp = gen_reg_rtx (mode);
2722 if (GET_MODE (xop0) != VOIDmode
2723 && GET_MODE (xop0) != mode0)
2724 xop0 = convert_to_mode (mode0, xop0, unsignedp);
2726 /* Now, if insn doesn't accept our operand, put it into a pseudo. */
2728 if (!insn_data[icode].operand[1].predicate (xop0, mode0))
2729 xop0 = copy_to_mode_reg (mode0, xop0);
2731 if (!insn_data[icode].operand[0].predicate (temp, mode))
2732 temp = gen_reg_rtx (mode);
2734 pat = GEN_FCN (icode) (temp, xop0);
2737 if (INSN_P (pat) && NEXT_INSN (pat) != NULL_RTX
2738 && ! add_equal_note (pat, temp, unoptab->code, xop0, NULL_RTX))
2740 delete_insns_since (last);
2741 return expand_unop (mode, unoptab, op0, NULL_RTX, unsignedp);
2749 delete_insns_since (last);
2752 /* It can't be done in this mode. Can we open-code it in a wider mode? */
2754 /* Widening clz needs special treatment. */
2755 if (unoptab == clz_optab)
2757 temp = widen_clz (mode, op0, target);
2764 /* Widening (or narrowing) bswap needs special treatment. */
2765 if (unoptab == bswap_optab)
2767 temp = widen_bswap (mode, op0, target);
2771 if (GET_MODE_SIZE (mode) == 2 * UNITS_PER_WORD
2772 && unoptab->handlers[word_mode].insn_code != CODE_FOR_nothing)
2774 temp = expand_doubleword_bswap (mode, op0, target);
2782 if (CLASS_HAS_WIDER_MODES_P (class))
2783 for (wider_mode = GET_MODE_WIDER_MODE (mode);
2784 wider_mode != VOIDmode;
2785 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
2787 if (unoptab->handlers[(int) wider_mode].insn_code != CODE_FOR_nothing)
2791 /* For certain operations, we need not actually extend
2792 the narrow operand, as long as we will truncate the
2793 results to the same narrowness. */
2795 xop0 = widen_operand (xop0, wider_mode, mode, unsignedp,
2796 (unoptab == neg_optab
2797 || unoptab == one_cmpl_optab)
2798 && class == MODE_INT);
2800 temp = expand_unop (wider_mode, unoptab, xop0, NULL_RTX,
2805 if (class != MODE_INT
2806 || !TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode),
2807 GET_MODE_BITSIZE (wider_mode)))
2810 target = gen_reg_rtx (mode);
2811 convert_move (target, temp, 0);
2815 return gen_lowpart (mode, temp);
2818 delete_insns_since (last);
2822 /* These can be done a word at a time. */
2823 if (unoptab == one_cmpl_optab
2824 && class == MODE_INT
2825 && GET_MODE_SIZE (mode) > UNITS_PER_WORD
2826 && unoptab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing)
2831 if (target == 0 || target == op0)
2832 target = gen_reg_rtx (mode);
2836 /* Do the actual arithmetic. */
2837 for (i = 0; i < GET_MODE_BITSIZE (mode) / BITS_PER_WORD; i++)
2839 rtx target_piece = operand_subword (target, i, 1, mode);
2840 rtx x = expand_unop (word_mode, unoptab,
2841 operand_subword_force (op0, i, mode),
2842 target_piece, unsignedp);
2844 if (target_piece != x)
2845 emit_move_insn (target_piece, x);
2848 insns = get_insns ();
2851 emit_no_conflict_block (insns, target, op0, NULL_RTX,
2852 gen_rtx_fmt_e (unoptab->code, mode,
2857 if (unoptab->code == NEG)
2859 /* Try negating floating point values by flipping the sign bit. */
2860 if (SCALAR_FLOAT_MODE_P (mode))
2862 temp = expand_absneg_bit (NEG, mode, op0, target);
2867 /* If there is no negation pattern, and we have no negative zero,
2868 try subtracting from zero. */
2869 if (!HONOR_SIGNED_ZEROS (mode))
2871 temp = expand_binop (mode, (unoptab == negv_optab
2872 ? subv_optab : sub_optab),
2873 CONST0_RTX (mode), op0, target,
2874 unsignedp, OPTAB_DIRECT);
2880 /* Try calculating parity (x) as popcount (x) % 2. */
2881 if (unoptab == parity_optab)
2883 temp = expand_parity (mode, op0, target);
2889 /* Now try a library call in this mode. */
2890 if (unoptab->handlers[(int) mode].libfunc)
2894 enum machine_mode outmode = mode;
2896 /* All of these functions return small values. Thus we choose to
2897 have them return something that isn't a double-word. */
2898 if (unoptab == ffs_optab || unoptab == clz_optab || unoptab == ctz_optab
2899 || unoptab == popcount_optab || unoptab == parity_optab)
2901 = GET_MODE (hard_libcall_value (TYPE_MODE (integer_type_node)));
2905 /* Pass 1 for NO_QUEUE so we don't lose any increments
2906 if the libcall is cse'd or moved. */
2907 value = emit_library_call_value (unoptab->handlers[(int) mode].libfunc,
2908 NULL_RTX, LCT_CONST, outmode,
2910 insns = get_insns ();
2913 target = gen_reg_rtx (outmode);
2914 emit_libcall_block (insns, target, value,
2915 gen_rtx_fmt_e (unoptab->code, outmode, op0));
2920 /* It can't be done in this mode. Can we do it in a wider mode? */
2922 if (CLASS_HAS_WIDER_MODES_P (class))
2924 for (wider_mode = GET_MODE_WIDER_MODE (mode);
2925 wider_mode != VOIDmode;
2926 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
2928 if ((unoptab->handlers[(int) wider_mode].insn_code
2929 != CODE_FOR_nothing)
2930 || unoptab->handlers[(int) wider_mode].libfunc)
2934 /* For certain operations, we need not actually extend
2935 the narrow operand, as long as we will truncate the
2936 results to the same narrowness. */
2938 xop0 = widen_operand (xop0, wider_mode, mode, unsignedp,
2939 (unoptab == neg_optab
2940 || unoptab == one_cmpl_optab)
2941 && class == MODE_INT);
2943 temp = expand_unop (wider_mode, unoptab, xop0, NULL_RTX,
2946 /* If we are generating clz using wider mode, adjust the
2948 if (unoptab == clz_optab && temp != 0)
2949 temp = expand_binop (wider_mode, sub_optab, temp,
2950 GEN_INT (GET_MODE_BITSIZE (wider_mode)
2951 - GET_MODE_BITSIZE (mode)),
2952 target, true, OPTAB_DIRECT);
2956 if (class != MODE_INT)
2959 target = gen_reg_rtx (mode);
2960 convert_move (target, temp, 0);
2964 return gen_lowpart (mode, temp);
2967 delete_insns_since (last);
2972 /* One final attempt at implementing negation via subtraction,
2973 this time allowing widening of the operand. */
2974 if (unoptab->code == NEG && !HONOR_SIGNED_ZEROS (mode))
2977 temp = expand_binop (mode,
2978 unoptab == negv_optab ? subv_optab : sub_optab,
2979 CONST0_RTX (mode), op0,
2980 target, unsignedp, OPTAB_LIB_WIDEN);
2988 /* Emit code to compute the absolute value of OP0, with result to
2989 TARGET if convenient. (TARGET may be 0.) The return value says
2990 where the result actually is to be found.
2992 MODE is the mode of the operand; the mode of the result is
2993 different but can be deduced from MODE.
2998 expand_abs_nojump (enum machine_mode mode, rtx op0, rtx target,
2999 int result_unsignedp)
3004 result_unsignedp = 1;
3006 /* First try to do it with a special abs instruction. */
3007 temp = expand_unop (mode, result_unsignedp ? abs_optab : absv_optab,
3012 /* For floating point modes, try clearing the sign bit. */
3013 if (SCALAR_FLOAT_MODE_P (mode))
3015 temp = expand_absneg_bit (ABS, mode, op0, target);
3020 /* If we have a MAX insn, we can do this as MAX (x, -x). */
3021 if (smax_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing
3022 && !HONOR_SIGNED_ZEROS (mode))
3024 rtx last = get_last_insn ();
3026 temp = expand_unop (mode, neg_optab, op0, NULL_RTX, 0);
3028 temp = expand_binop (mode, smax_optab, op0, temp, target, 0,
3034 delete_insns_since (last);
3037 /* If this machine has expensive jumps, we can do integer absolute
3038 value of X as (((signed) x >> (W-1)) ^ x) - ((signed) x >> (W-1)),
3039 where W is the width of MODE. */
3041 if (GET_MODE_CLASS (mode) == MODE_INT && BRANCH_COST >= 2)
3043 rtx extended = expand_shift (RSHIFT_EXPR, mode, op0,
3044 size_int (GET_MODE_BITSIZE (mode) - 1),
3047 temp = expand_binop (mode, xor_optab, extended, op0, target, 0,
3050 temp = expand_binop (mode, result_unsignedp ? sub_optab : subv_optab,
3051 temp, extended, target, 0, OPTAB_LIB_WIDEN);
3061 expand_abs (enum machine_mode mode, rtx op0, rtx target,
3062 int result_unsignedp, int safe)
3067 result_unsignedp = 1;
3069 temp = expand_abs_nojump (mode, op0, target, result_unsignedp);
3073 /* If that does not win, use conditional jump and negate. */
3075 /* It is safe to use the target if it is the same
3076 as the source if this is also a pseudo register */
3077 if (op0 == target && REG_P (op0)
3078 && REGNO (op0) >= FIRST_PSEUDO_REGISTER)
3081 op1 = gen_label_rtx ();
3082 if (target == 0 || ! safe
3083 || GET_MODE (target) != mode
3084 || (MEM_P (target) && MEM_VOLATILE_P (target))
3086 && REGNO (target) < FIRST_PSEUDO_REGISTER))
3087 target = gen_reg_rtx (mode);
3089 emit_move_insn (target, op0);
3092 do_compare_rtx_and_jump (target, CONST0_RTX (mode), GE, 0, mode,
3093 NULL_RTX, NULL_RTX, op1);
3095 op0 = expand_unop (mode, result_unsignedp ? neg_optab : negv_optab,
3098 emit_move_insn (target, op0);
3104 /* A subroutine of expand_copysign, perform the copysign operation using the
3105 abs and neg primitives advertised to exist on the target. The assumption
3106 is that we have a split register file, and leaving op0 in fp registers,
3107 and not playing with subregs so much, will help the register allocator. */
3110 expand_copysign_absneg (enum machine_mode mode, rtx op0, rtx op1, rtx target,
3111 int bitpos, bool op0_is_abs)
3113 enum machine_mode imode;
3120 /* Check if the back end provides an insn that handles signbit for the
3122 icode = (int) signbit_optab->handlers [(int) mode].insn_code;
3123 if (icode != CODE_FOR_nothing)
3125 imode = insn_data[icode].operand[0].mode;
3126 sign = gen_reg_rtx (imode);
3127 emit_unop_insn (icode, sign, op1, UNKNOWN);
3131 HOST_WIDE_INT hi, lo;
3133 if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
3135 imode = int_mode_for_mode (mode);
3136 if (imode == BLKmode)
3138 op1 = gen_lowpart (imode, op1);
3145 if (FLOAT_WORDS_BIG_ENDIAN)
3146 word = (GET_MODE_BITSIZE (mode) - bitpos) / BITS_PER_WORD;
3148 word = bitpos / BITS_PER_WORD;
3149 bitpos = bitpos % BITS_PER_WORD;
3150 op1 = operand_subword_force (op1, word, mode);
3153 if (bitpos < HOST_BITS_PER_WIDE_INT)
3156 lo = (HOST_WIDE_INT) 1 << bitpos;
3160 hi = (HOST_WIDE_INT) 1 << (bitpos - HOST_BITS_PER_WIDE_INT);
3164 sign = gen_reg_rtx (imode);
3165 sign = expand_binop (imode, and_optab, op1,
3166 immed_double_const (lo, hi, imode),
3167 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3172 op0 = expand_unop (mode, abs_optab, op0, target, 0);
3179 if (target == NULL_RTX)
3180 target = copy_to_reg (op0);
3182 emit_move_insn (target, op0);
3185 label = gen_label_rtx ();
3186 emit_cmp_and_jump_insns (sign, const0_rtx, EQ, NULL_RTX, imode, 1, label);
3188 if (GET_CODE (op0) == CONST_DOUBLE)
3189 op0 = simplify_unary_operation (NEG, mode, op0, mode);
3191 op0 = expand_unop (mode, neg_optab, op0, target, 0);
3193 emit_move_insn (target, op0);
3201 /* A subroutine of expand_copysign, perform the entire copysign operation
3202 with integer bitmasks. BITPOS is the position of the sign bit; OP0_IS_ABS
3203 is true if op0 is known to have its sign bit clear. */
3206 expand_copysign_bit (enum machine_mode mode, rtx op0, rtx op1, rtx target,
3207 int bitpos, bool op0_is_abs)
3209 enum machine_mode imode;
3210 HOST_WIDE_INT hi, lo;
3211 int word, nwords, i;
3214 if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
3216 imode = int_mode_for_mode (mode);
3217 if (imode == BLKmode)
3226 if (FLOAT_WORDS_BIG_ENDIAN)
3227 word = (GET_MODE_BITSIZE (mode) - bitpos) / BITS_PER_WORD;
3229 word = bitpos / BITS_PER_WORD;
3230 bitpos = bitpos % BITS_PER_WORD;
3231 nwords = (GET_MODE_BITSIZE (mode) + BITS_PER_WORD - 1) / BITS_PER_WORD;
3234 if (bitpos < HOST_BITS_PER_WIDE_INT)
3237 lo = (HOST_WIDE_INT) 1 << bitpos;
3241 hi = (HOST_WIDE_INT) 1 << (bitpos - HOST_BITS_PER_WIDE_INT);
3245 if (target == 0 || target == op0 || target == op1)
3246 target = gen_reg_rtx (mode);
3252 for (i = 0; i < nwords; ++i)
3254 rtx targ_piece = operand_subword (target, i, 1, mode);
3255 rtx op0_piece = operand_subword_force (op0, i, mode);
3260 op0_piece = expand_binop (imode, and_optab, op0_piece,
3261 immed_double_const (~lo, ~hi, imode),
3262 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3264 op1 = expand_binop (imode, and_optab,
3265 operand_subword_force (op1, i, mode),
3266 immed_double_const (lo, hi, imode),
3267 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3269 temp = expand_binop (imode, ior_optab, op0_piece, op1,
3270 targ_piece, 1, OPTAB_LIB_WIDEN);
3271 if (temp != targ_piece)
3272 emit_move_insn (targ_piece, temp);
3275 emit_move_insn (targ_piece, op0_piece);
3278 insns = get_insns ();
3281 emit_no_conflict_block (insns, target, op0, op1, NULL_RTX);
3285 op1 = expand_binop (imode, and_optab, gen_lowpart (imode, op1),
3286 immed_double_const (lo, hi, imode),
3287 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3289 op0 = gen_lowpart (imode, op0);
3291 op0 = expand_binop (imode, and_optab, op0,
3292 immed_double_const (~lo, ~hi, imode),
3293 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3295 temp = expand_binop (imode, ior_optab, op0, op1,
3296 gen_lowpart (imode, target), 1, OPTAB_LIB_WIDEN);
3297 target = lowpart_subreg_maybe_copy (mode, temp, imode);
3303 /* Expand the C99 copysign operation. OP0 and OP1 must be the same
3304 scalar floating point mode. Return NULL if we do not know how to
3305 expand the operation inline. */
3308 expand_copysign (rtx op0, rtx op1, rtx target)
3310 enum machine_mode mode = GET_MODE (op0);
3311 const struct real_format *fmt;
3315 gcc_assert (SCALAR_FLOAT_MODE_P (mode));
3316 gcc_assert (GET_MODE (op1) == mode);
3318 /* First try to do it with a special instruction. */
3319 temp = expand_binop (mode, copysign_optab, op0, op1,
3320 target, 0, OPTAB_DIRECT);
3324 fmt = REAL_MODE_FORMAT (mode);
3325 if (fmt == NULL || !fmt->has_signed_zero)
3329 if (GET_CODE (op0) == CONST_DOUBLE)
3331 if (real_isneg (CONST_DOUBLE_REAL_VALUE (op0)))
3332 op0 = simplify_unary_operation (ABS, mode, op0, mode);
3336 if (fmt->signbit_ro >= 0
3337 && (GET_CODE (op0) == CONST_DOUBLE
3338 || (neg_optab->handlers[mode].insn_code != CODE_FOR_nothing
3339 && abs_optab->handlers[mode].insn_code != CODE_FOR_nothing)))
3341 temp = expand_copysign_absneg (mode, op0, op1, target,
3342 fmt->signbit_ro, op0_is_abs);
3347 if (fmt->signbit_rw < 0)
3349 return expand_copysign_bit (mode, op0, op1, target,
3350 fmt->signbit_rw, op0_is_abs);
3353 /* Generate an instruction whose insn-code is INSN_CODE,
3354 with two operands: an output TARGET and an input OP0.
3355 TARGET *must* be nonzero, and the output is always stored there.
3356 CODE is an rtx code such that (CODE OP0) is an rtx that describes
3357 the value that is stored into TARGET. */
3360 emit_unop_insn (int icode, rtx target, rtx op0, enum rtx_code code)
3363 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
3368 /* Now, if insn does not accept our operands, put them into pseudos. */
3370 if (!insn_data[icode].operand[1].predicate (op0, mode0))
3371 op0 = copy_to_mode_reg (mode0, op0);
3373 if (!insn_data[icode].operand[0].predicate (temp, GET_MODE (temp)))
3374 temp = gen_reg_rtx (GET_MODE (temp));
3376 pat = GEN_FCN (icode) (temp, op0);
3378 if (INSN_P (pat) && NEXT_INSN (pat) != NULL_RTX && code != UNKNOWN)
3379 add_equal_note (pat, temp, code, op0, NULL_RTX);
3384 emit_move_insn (target, temp);
3387 struct no_conflict_data
3389 rtx target, first, insn;
3393 /* Called via note_stores by emit_no_conflict_block and emit_libcall_block.
3394 Set P->must_stay if the currently examined clobber / store has to stay
3395 in the list of insns that constitute the actual no_conflict block /
3398 no_conflict_move_test (rtx dest, rtx set, void *p0)
3400 struct no_conflict_data *p= p0;
3402 /* If this inns directly contributes to setting the target, it must stay. */
3403 if (reg_overlap_mentioned_p (p->target, dest))
3404 p->must_stay = true;
3405 /* If we haven't committed to keeping any other insns in the list yet,
3406 there is nothing more to check. */
3407 else if (p->insn == p->first)
3409 /* If this insn sets / clobbers a register that feeds one of the insns
3410 already in the list, this insn has to stay too. */
3411 else if (reg_overlap_mentioned_p (dest, PATTERN (p->first))
3412 || (CALL_P (p->first) && (find_reg_fusage (p->first, USE, dest)))
3413 || reg_used_between_p (dest, p->first, p->insn)
3414 /* Likewise if this insn depends on a register set by a previous
3415 insn in the list, or if it sets a result (presumably a hard
3416 register) that is set or clobbered by a previous insn.
3417 N.B. the modified_*_p (SET_DEST...) tests applied to a MEM
3418 SET_DEST perform the former check on the address, and the latter
3419 check on the MEM. */
3420 || (GET_CODE (set) == SET
3421 && (modified_in_p (SET_SRC (set), p->first)
3422 || modified_in_p (SET_DEST (set), p->first)
3423 || modified_between_p (SET_SRC (set), p->first, p->insn)
3424 || modified_between_p (SET_DEST (set), p->first, p->insn))))
3425 p->must_stay = true;
3428 /* Encapsulate the block starting at FIRST and ending with LAST, which is
3429 logically equivalent to EQUIV, so it gets manipulated as a unit if it
3430 is possible to do so. */
3433 maybe_encapsulate_block (rtx first, rtx last, rtx equiv)
3435 if (!flag_non_call_exceptions || !may_trap_p (equiv))
3437 /* We can't attach the REG_LIBCALL and REG_RETVAL notes when the
3438 encapsulated region would not be in one basic block, i.e. when
3439 there is a control_flow_insn_p insn between FIRST and LAST. */
3440 bool attach_libcall_retval_notes = true;
3441 rtx insn, next = NEXT_INSN (last);
3443 for (insn = first; insn != next; insn = NEXT_INSN (insn))
3444 if (control_flow_insn_p (insn))
3446 attach_libcall_retval_notes = false;
3450 if (attach_libcall_retval_notes)
3452 REG_NOTES (first) = gen_rtx_INSN_LIST (REG_LIBCALL, last,
3454 REG_NOTES (last) = gen_rtx_INSN_LIST (REG_RETVAL, first,
3456 next = NEXT_INSN (last);
3457 for (insn = first; insn != next; insn = NEXT_INSN (insn))
3458 REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_LIBCALL_ID,
3459 GEN_INT (libcall_id),
3466 /* Emit code to perform a series of operations on a multi-word quantity, one
3469 Such a block is preceded by a CLOBBER of the output, consists of multiple
3470 insns, each setting one word of the output, and followed by a SET copying
3471 the output to itself.
3473 Each of the insns setting words of the output receives a REG_NO_CONFLICT
3474 note indicating that it doesn't conflict with the (also multi-word)
3475 inputs. The entire block is surrounded by REG_LIBCALL and REG_RETVAL
3478 INSNS is a block of code generated to perform the operation, not including
3479 the CLOBBER and final copy. All insns that compute intermediate values
3480 are first emitted, followed by the block as described above.
3482 TARGET, OP0, and OP1 are the output and inputs of the operations,
3483 respectively. OP1 may be zero for a unary operation.
3485 EQUIV, if nonzero, is an expression to be placed into a REG_EQUAL note
3488 If TARGET is not a register, INSNS is simply emitted with no special
3489 processing. Likewise if anything in INSNS is not an INSN or if
3490 there is a libcall block inside INSNS.
3492 The final insn emitted is returned. */
3495 emit_no_conflict_block (rtx insns, rtx target, rtx op0, rtx op1, rtx equiv)
3497 rtx prev, next, first, last, insn;
3499 if (!REG_P (target) || reload_in_progress)
3500 return emit_insn (insns);
3502 for (insn = insns; insn; insn = NEXT_INSN (insn))
3503 if (!NONJUMP_INSN_P (insn)
3504 || find_reg_note (insn, REG_LIBCALL, NULL_RTX))
3505 return emit_insn (insns);
3507 /* First emit all insns that do not store into words of the output and remove
3508 these from the list. */
3509 for (insn = insns; insn; insn = next)
3512 struct no_conflict_data data;
3514 next = NEXT_INSN (insn);
3516 /* Some ports (cris) create a libcall regions at their own. We must
3517 avoid any potential nesting of LIBCALLs. */
3518 if ((note = find_reg_note (insn, REG_LIBCALL, NULL)) != NULL)
3519 remove_note (insn, note);
3520 if ((note = find_reg_note (insn, REG_RETVAL, NULL)) != NULL)
3521 remove_note (insn, note);
3522 if ((note = find_reg_note (insn, REG_LIBCALL_ID, NULL)) != NULL)
3523 remove_note (insn, note);
3525 data.target = target;
3529 note_stores (PATTERN (insn), no_conflict_move_test, &data);
3530 if (! data.must_stay)
3532 if (PREV_INSN (insn))
3533 NEXT_INSN (PREV_INSN (insn)) = next;
3538 PREV_INSN (next) = PREV_INSN (insn);
3544 prev = get_last_insn ();
3546 /* Now write the CLOBBER of the output, followed by the setting of each
3547 of the words, followed by the final copy. */
3548 if (target != op0 && target != op1)
3549 emit_insn (gen_rtx_CLOBBER (VOIDmode, target));
3551 for (insn = insns; insn; insn = next)
3553 next = NEXT_INSN (insn);
3556 if (op1 && REG_P (op1))
3557 REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_NO_CONFLICT, op1,
3560 if (op0 && REG_P (op0))
3561 REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_NO_CONFLICT, op0,
3565 if (mov_optab->handlers[(int) GET_MODE (target)].insn_code
3566 != CODE_FOR_nothing)
3568 last = emit_move_insn (target, target);
3570 set_unique_reg_note (last, REG_EQUAL, equiv);
3574 last = get_last_insn ();
3576 /* Remove any existing REG_EQUAL note from "last", or else it will
3577 be mistaken for a note referring to the full contents of the
3578 alleged libcall value when found together with the REG_RETVAL
3579 note added below. An existing note can come from an insn
3580 expansion at "last". */
3581 remove_note (last, find_reg_note (last, REG_EQUAL, NULL_RTX));
3585 first = get_insns ();
3587 first = NEXT_INSN (prev);
3589 maybe_encapsulate_block (first, last, equiv);
3594 /* Emit code to make a call to a constant function or a library call.
3596 INSNS is a list containing all insns emitted in the call.
3597 These insns leave the result in RESULT. Our block is to copy RESULT
3598 to TARGET, which is logically equivalent to EQUIV.
3600 We first emit any insns that set a pseudo on the assumption that these are
3601 loading constants into registers; doing so allows them to be safely cse'ed
3602 between blocks. Then we emit all the other insns in the block, followed by
3603 an insn to move RESULT to TARGET. This last insn will have a REQ_EQUAL
3604 note with an operand of EQUIV.
3606 Moving assignments to pseudos outside of the block is done to improve
3607 the generated code, but is not required to generate correct code,
3608 hence being unable to move an assignment is not grounds for not making
3609 a libcall block. There are two reasons why it is safe to leave these
3610 insns inside the block: First, we know that these pseudos cannot be
3611 used in generated RTL outside the block since they are created for
3612 temporary purposes within the block. Second, CSE will not record the
3613 values of anything set inside a libcall block, so we know they must
3614 be dead at the end of the block.
3616 Except for the first group of insns (the ones setting pseudos), the
3617 block is delimited by REG_RETVAL and REG_LIBCALL notes. */
3619 emit_libcall_block (rtx insns, rtx target, rtx result, rtx equiv)
3621 rtx final_dest = target;
3622 rtx prev, next, first, last, insn;
3624 /* If this is a reg with REG_USERVAR_P set, then it could possibly turn
3625 into a MEM later. Protect the libcall block from this change. */
3626 if (! REG_P (target) || REG_USERVAR_P (target))
3627 target = gen_reg_rtx (GET_MODE (target));
3629 /* If we're using non-call exceptions, a libcall corresponding to an
3630 operation that may trap may also trap. */
3631 if (flag_non_call_exceptions && may_trap_p (equiv))
3633 for (insn = insns; insn; insn = NEXT_INSN (insn))
3636 rtx note = find_reg_note (insn, REG_EH_REGION, NULL_RTX);
3638 if (note != 0 && INTVAL (XEXP (note, 0)) <= 0)
3639 remove_note (insn, note);
3643 /* look for any CALL_INSNs in this sequence, and attach a REG_EH_REGION
3644 reg note to indicate that this call cannot throw or execute a nonlocal
3645 goto (unless there is already a REG_EH_REGION note, in which case
3647 for (insn = insns; insn; insn = NEXT_INSN (insn))
3650 rtx note = find_reg_note (insn, REG_EH_REGION, NULL_RTX);
3653 XEXP (note, 0) = constm1_rtx;
3655 REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_EH_REGION, constm1_rtx,
3659 /* First emit all insns that set pseudos. Remove them from the list as
3660 we go. Avoid insns that set pseudos which were referenced in previous
3661 insns. These can be generated by move_by_pieces, for example,
3662 to update an address. Similarly, avoid insns that reference things
3663 set in previous insns. */
3665 for (insn = insns; insn; insn = next)
3667 rtx set = single_set (insn);
3670 /* Some ports (cris) create a libcall regions at their own. We must
3671 avoid any potential nesting of LIBCALLs. */
3672 if ((note = find_reg_note (insn, REG_LIBCALL, NULL)) != NULL)
3673 remove_note (insn, note);
3674 if ((note = find_reg_note (insn, REG_RETVAL, NULL)) != NULL)
3675 remove_note (insn, note);
3676 if ((note = find_reg_note (insn, REG_LIBCALL_ID, NULL)) != NULL)
3677 remove_note (insn, note);
3679 next = NEXT_INSN (insn);
3681 if (set != 0 && REG_P (SET_DEST (set))
3682 && REGNO (SET_DEST (set)) >= FIRST_PSEUDO_REGISTER)
3684 struct no_conflict_data data;
3686 data.target = const0_rtx;
3690 note_stores (PATTERN (insn), no_conflict_move_test, &data);
3691 if (! data.must_stay)
3693 if (PREV_INSN (insn))
3694 NEXT_INSN (PREV_INSN (insn)) = next;
3699 PREV_INSN (next) = PREV_INSN (insn);
3705 /* Some ports use a loop to copy large arguments onto the stack.
3706 Don't move anything outside such a loop. */
3711 prev = get_last_insn ();
3713 /* Write the remaining insns followed by the final copy. */
3715 for (insn = insns; insn; insn = next)
3717 next = NEXT_INSN (insn);
3722 last = emit_move_insn (target, result);
3723 if (mov_optab->handlers[(int) GET_MODE (target)].insn_code
3724 != CODE_FOR_nothing)
3725 set_unique_reg_note (last, REG_EQUAL, copy_rtx (equiv));
3728 /* Remove any existing REG_EQUAL note from "last", or else it will
3729 be mistaken for a note referring to the full contents of the
3730 libcall value when found together with the REG_RETVAL note added
3731 below. An existing note can come from an insn expansion at
3733 remove_note (last, find_reg_note (last, REG_EQUAL, NULL_RTX));
3736 if (final_dest != target)
3737 emit_move_insn (final_dest, target);
3740 first = get_insns ();
3742 first = NEXT_INSN (prev);
3744 maybe_encapsulate_block (first, last, equiv);
3747 /* Nonzero if we can perform a comparison of mode MODE straightforwardly.
3748 PURPOSE describes how this comparison will be used. CODE is the rtx
3749 comparison code we will be using.
3751 ??? Actually, CODE is slightly weaker than that. A target is still
3752 required to implement all of the normal bcc operations, but not
3753 required to implement all (or any) of the unordered bcc operations. */
3756 can_compare_p (enum rtx_code code, enum machine_mode mode,
3757 enum can_compare_purpose purpose)
3761 if (cmp_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
3763 if (purpose == ccp_jump)
3764 return bcc_gen_fctn[(int) code] != NULL;
3765 else if (purpose == ccp_store_flag)
3766 return setcc_gen_code[(int) code] != CODE_FOR_nothing;
3768 /* There's only one cmov entry point, and it's allowed to fail. */
3771 if (purpose == ccp_jump
3772 && cbranch_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
3774 if (purpose == ccp_cmov
3775 && cmov_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
3777 if (purpose == ccp_store_flag
3778 && cstore_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
3780 mode = GET_MODE_WIDER_MODE (mode);
3782 while (mode != VOIDmode);
3787 /* This function is called when we are going to emit a compare instruction that
3788 compares the values found in *PX and *PY, using the rtl operator COMPARISON.
3790 *PMODE is the mode of the inputs (in case they are const_int).
3791 *PUNSIGNEDP nonzero says that the operands are unsigned;
3792 this matters if they need to be widened.
3794 If they have mode BLKmode, then SIZE specifies the size of both operands.
3796 This function performs all the setup necessary so that the caller only has
3797 to emit a single comparison insn. This setup can involve doing a BLKmode
3798 comparison or emitting a library call to perform the comparison if no insn
3799 is available to handle it.
3800 The values which are passed in through pointers can be modified; the caller
3801 should perform the comparison on the modified values. Constant
3802 comparisons must have already been folded. */
3805 prepare_cmp_insn (rtx *px, rtx *py, enum rtx_code *pcomparison, rtx size,
3806 enum machine_mode *pmode, int *punsignedp,
3807 enum can_compare_purpose purpose)
3809 enum machine_mode mode = *pmode;
3810 rtx x = *px, y = *py;
3811 int unsignedp = *punsignedp;
3813 /* If we are inside an appropriately-short loop and we are optimizing,
3814 force expensive constants into a register. */
3815 if (CONSTANT_P (x) && optimize
3816 && rtx_cost (x, COMPARE) > COSTS_N_INSNS (1))
3817 x = force_reg (mode, x);
3819 if (CONSTANT_P (y) && optimize
3820 && rtx_cost (y, COMPARE) > COSTS_N_INSNS (1))
3821 y = force_reg (mode, y);
3824 /* Make sure if we have a canonical comparison. The RTL
3825 documentation states that canonical comparisons are required only
3826 for targets which have cc0. */
3827 gcc_assert (!CONSTANT_P (x) || CONSTANT_P (y));
3830 /* Don't let both operands fail to indicate the mode. */
3831 if (GET_MODE (x) == VOIDmode && GET_MODE (y) == VOIDmode)
3832 x = force_reg (mode, x);
3834 /* Handle all BLKmode compares. */
3836 if (mode == BLKmode)
3838 enum machine_mode cmp_mode, result_mode;
3839 enum insn_code cmp_code;
3844 = GEN_INT (MIN (MEM_ALIGN (x), MEM_ALIGN (y)) / BITS_PER_UNIT);
3848 /* Try to use a memory block compare insn - either cmpstr
3849 or cmpmem will do. */
3850 for (cmp_mode = GET_CLASS_NARROWEST_MODE (MODE_INT);
3851 cmp_mode != VOIDmode;
3852 cmp_mode = GET_MODE_WIDER_MODE (cmp_mode))
3854 cmp_code = cmpmem_optab[cmp_mode];
3855 if (cmp_code == CODE_FOR_nothing)
3856 cmp_code = cmpstr_optab[cmp_mode];
3857 if (cmp_code == CODE_FOR_nothing)
3858 cmp_code = cmpstrn_optab[cmp_mode];
3859 if (cmp_code == CODE_FOR_nothing)
3862 /* Must make sure the size fits the insn's mode. */
3863 if ((GET_CODE (size) == CONST_INT
3864 && INTVAL (size) >= (1 << GET_MODE_BITSIZE (cmp_mode)))
3865 || (GET_MODE_BITSIZE (GET_MODE (size))
3866 > GET_MODE_BITSIZE (cmp_mode)))
3869 result_mode = insn_data[cmp_code].operand[0].mode;
3870 result = gen_reg_rtx (result_mode);
3871 size = convert_to_mode (cmp_mode, size, 1);
3872 emit_insn (GEN_FCN (cmp_code) (result, x, y, size, opalign));
3876 *pmode = result_mode;
3880 /* Otherwise call a library function, memcmp. */
3881 libfunc = memcmp_libfunc;
3882 length_type = sizetype;
3883 result_mode = TYPE_MODE (integer_type_node);
3884 cmp_mode = TYPE_MODE (length_type);
3885 size = convert_to_mode (TYPE_MODE (length_type), size,
3886 TYPE_UNSIGNED (length_type));
3888 result = emit_library_call_value (libfunc, 0, LCT_PURE_MAKE_BLOCK,
3895 *pmode = result_mode;
3899 /* Don't allow operands to the compare to trap, as that can put the
3900 compare and branch in different basic blocks. */
3901 if (flag_non_call_exceptions)
3904 x = force_reg (mode, x);
3906 y = force_reg (mode, y);
3911 if (can_compare_p (*pcomparison, mode, purpose))
3914 /* Handle a lib call just for the mode we are using. */
3916 if (cmp_optab->handlers[(int) mode].libfunc && !SCALAR_FLOAT_MODE_P (mode))
3918 rtx libfunc = cmp_optab->handlers[(int) mode].libfunc;
3921 /* If we want unsigned, and this mode has a distinct unsigned
3922 comparison routine, use that. */
3923 if (unsignedp && ucmp_optab->handlers[(int) mode].libfunc)
3924 libfunc = ucmp_optab->handlers[(int) mode].libfunc;
3926 result = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST_MAKE_BLOCK,
3927 targetm.libgcc_cmp_return_mode (),
3928 2, x, mode, y, mode);
3930 /* There are two kinds of comparison routines. Biased routines
3931 return 0/1/2, and unbiased routines return -1/0/1. Other parts
3932 of gcc expect that the comparison operation is equivalent
3933 to the modified comparison. For signed comparisons compare the
3934 result against 1 in the biased case, and zero in the unbiased
3935 case. For unsigned comparisons always compare against 1 after
3936 biasing the unbiased result by adding 1. This gives us a way to
3942 if (!TARGET_LIB_INT_CMP_BIASED)
3945 *px = plus_constant (result, 1);
3952 gcc_assert (SCALAR_FLOAT_MODE_P (mode));
3953 prepare_float_lib_cmp (px, py, pcomparison, pmode, punsignedp);
3956 /* Before emitting an insn with code ICODE, make sure that X, which is going
3957 to be used for operand OPNUM of the insn, is converted from mode MODE to
3958 WIDER_MODE (UNSIGNEDP determines whether it is an unsigned conversion), and
3959 that it is accepted by the operand predicate. Return the new value. */
3962 prepare_operand (int icode, rtx x, int opnum, enum machine_mode mode,
3963 enum machine_mode wider_mode, int unsignedp)
3965 if (mode != wider_mode)
3966 x = convert_modes (wider_mode, mode, x, unsignedp);
3968 if (!insn_data[icode].operand[opnum].predicate
3969 (x, insn_data[icode].operand[opnum].mode))
3971 if (reload_completed)
3973 x = copy_to_mode_reg (insn_data[icode].operand[opnum].mode, x);
3979 /* Subroutine of emit_cmp_and_jump_insns; this function is called when we know
3980 we can do the comparison.
3981 The arguments are the same as for emit_cmp_and_jump_insns; but LABEL may
3982 be NULL_RTX which indicates that only a comparison is to be generated. */
3985 emit_cmp_and_jump_insn_1 (rtx x, rtx y, enum machine_mode mode,
3986 enum rtx_code comparison, int unsignedp, rtx label)
3988 rtx test = gen_rtx_fmt_ee (comparison, mode, x, y);
3989 enum mode_class class = GET_MODE_CLASS (mode);
3990 enum machine_mode wider_mode = mode;
3992 /* Try combined insns first. */
3995 enum insn_code icode;
3996 PUT_MODE (test, wider_mode);
4000 icode = cbranch_optab->handlers[(int) wider_mode].insn_code;
4002 if (icode != CODE_FOR_nothing
4003 && insn_data[icode].operand[0].predicate (test, wider_mode))
4005 x = prepare_operand (icode, x, 1, mode, wider_mode, unsignedp);
4006 y = prepare_operand (icode, y, 2, mode, wider_mode, unsignedp);
4007 emit_jump_insn (GEN_FCN (icode) (test, x, y, label));
4012 /* Handle some compares against zero. */
4013 icode = (int) tst_optab->handlers[(int) wider_mode].insn_code;
4014 if (y == CONST0_RTX (mode) && icode != CODE_FOR_nothing)
4016 x = prepare_operand (icode, x, 0, mode, wider_mode, unsignedp);
4017 emit_insn (GEN_FCN (icode) (x));
4019 emit_jump_insn (bcc_gen_fctn[(int) comparison] (label));
4023 /* Handle compares for which there is a directly suitable insn. */
4025 icode = (int) cmp_optab->handlers[(int) wider_mode].insn_code;
4026 if (icode != CODE_FOR_nothing)
4028 x = prepare_operand (icode, x, 0, mode, wider_mode, unsignedp);
4029 y = prepare_operand (icode, y, 1, mode, wider_mode, unsignedp);
4030 emit_insn (GEN_FCN (icode) (x, y));
4032 emit_jump_insn (bcc_gen_fctn[(int) comparison] (label));
4036 if (!CLASS_HAS_WIDER_MODES_P (class))
4039 wider_mode = GET_MODE_WIDER_MODE (wider_mode);
4041 while (wider_mode != VOIDmode);
4046 /* Generate code to compare X with Y so that the condition codes are
4047 set and to jump to LABEL if the condition is true. If X is a
4048 constant and Y is not a constant, then the comparison is swapped to
4049 ensure that the comparison RTL has the canonical form.
4051 UNSIGNEDP nonzero says that X and Y are unsigned; this matters if they
4052 need to be widened by emit_cmp_insn. UNSIGNEDP is also used to select
4053 the proper branch condition code.
4055 If X and Y have mode BLKmode, then SIZE specifies the size of both X and Y.
4057 MODE is the mode of the inputs (in case they are const_int).
4059 COMPARISON is the rtl operator to compare with (EQ, NE, GT, etc.). It will
4060 be passed unchanged to emit_cmp_insn, then potentially converted into an
4061 unsigned variant based on UNSIGNEDP to select a proper jump instruction. */
4064 emit_cmp_and_jump_insns (rtx x, rtx y, enum rtx_code comparison, rtx size,
4065 enum machine_mode mode, int unsignedp, rtx label)
4067 rtx op0 = x, op1 = y;
4069 /* Swap operands and condition to ensure canonical RTL. */
4070 if (swap_commutative_operands_p (x, y))
4072 /* If we're not emitting a branch, callers are required to pass
4073 operands in an order conforming to canonical RTL. We relax this
4074 for commutative comparsions so callers using EQ don't need to do
4075 swapping by hand. */
4076 gcc_assert (label || (comparison == swap_condition (comparison)));
4079 comparison = swap_condition (comparison);
4083 /* If OP0 is still a constant, then both X and Y must be constants.
4084 Force X into a register to create canonical RTL. */
4085 if (CONSTANT_P (op0))
4086 op0 = force_reg (mode, op0);
4090 comparison = unsigned_condition (comparison);
4092 prepare_cmp_insn (&op0, &op1, &comparison, size, &mode, &unsignedp,
4094 emit_cmp_and_jump_insn_1 (op0, op1, mode, comparison, unsignedp, label);
4097 /* Like emit_cmp_and_jump_insns, but generate only the comparison. */
4100 emit_cmp_insn (rtx x, rtx y, enum rtx_code comparison, rtx size,
4101 enum machine_mode mode, int unsignedp)
4103 emit_cmp_and_jump_insns (x, y, comparison, size, mode, unsignedp, 0);
4106 /* Emit a library call comparison between floating point X and Y.
4107 COMPARISON is the rtl operator to compare with (EQ, NE, GT, etc.). */
4110 prepare_float_lib_cmp (rtx *px, rtx *py, enum rtx_code *pcomparison,
4111 enum machine_mode *pmode, int *punsignedp)
4113 enum rtx_code comparison = *pcomparison;
4114 enum rtx_code swapped = swap_condition (comparison);
4115 enum rtx_code reversed = reverse_condition_maybe_unordered (comparison);
4118 enum machine_mode orig_mode = GET_MODE (x);
4119 enum machine_mode mode;
4120 rtx value, target, insns, equiv;
4122 bool reversed_p = false;
4124 for (mode = orig_mode;
4126 mode = GET_MODE_WIDER_MODE (mode))
4128 if ((libfunc = code_to_optab[comparison]->handlers[mode].libfunc))
4131 if ((libfunc = code_to_optab[swapped]->handlers[mode].libfunc))
4134 tmp = x; x = y; y = tmp;
4135 comparison = swapped;
4139 if ((libfunc = code_to_optab[reversed]->handlers[mode].libfunc)
4140 && FLOAT_LIB_COMPARE_RETURNS_BOOL (mode, reversed))
4142 comparison = reversed;
4148 gcc_assert (mode != VOIDmode);
4150 if (mode != orig_mode)
4152 x = convert_to_mode (mode, x, 0);
4153 y = convert_to_mode (mode, y, 0);
4156 /* Attach a REG_EQUAL note describing the semantics of the libcall to
4157 the RTL. The allows the RTL optimizers to delete the libcall if the
4158 condition can be determined at compile-time. */
4159 if (comparison == UNORDERED)
4161 rtx temp = simplify_gen_relational (NE, word_mode, mode, x, x);
4162 equiv = simplify_gen_relational (NE, word_mode, mode, y, y);
4163 equiv = simplify_gen_ternary (IF_THEN_ELSE, word_mode, word_mode,
4164 temp, const_true_rtx, equiv);
4168 equiv = simplify_gen_relational (comparison, word_mode, mode, x, y);
4169 if (! FLOAT_LIB_COMPARE_RETURNS_BOOL (mode, comparison))
4171 rtx true_rtx, false_rtx;
4176 true_rtx = const0_rtx;
4177 false_rtx = const_true_rtx;
4181 true_rtx = const_true_rtx;
4182 false_rtx = const0_rtx;
4186 true_rtx = const1_rtx;
4187 false_rtx = const0_rtx;
4191 true_rtx = const0_rtx;
4192 false_rtx = constm1_rtx;
4196 true_rtx = constm1_rtx;
4197 false_rtx = const0_rtx;
4201 true_rtx = const0_rtx;
4202 false_rtx = const1_rtx;
4208 equiv = simplify_gen_ternary (IF_THEN_ELSE, word_mode, word_mode,
4209 equiv, true_rtx, false_rtx);
4214 value = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
4215 word_mode, 2, x, mode, y, mode);
4216 insns = get_insns ();
4219 target = gen_reg_rtx (word_mode);
4220 emit_libcall_block (insns, target, value, equiv);
4222 if (comparison == UNORDERED
4223 || FLOAT_LIB_COMPARE_RETURNS_BOOL (mode, comparison))
4224 comparison = reversed_p ? EQ : NE;
4229 *pcomparison = comparison;
4233 /* Generate code to indirectly jump to a location given in the rtx LOC. */
4236 emit_indirect_jump (rtx loc)
4238 if (!insn_data[(int) CODE_FOR_indirect_jump].operand[0].predicate
4240 loc = copy_to_mode_reg (Pmode, loc);
4242 emit_jump_insn (gen_indirect_jump (loc));
4246 #ifdef HAVE_conditional_move
4248 /* Emit a conditional move instruction if the machine supports one for that
4249 condition and machine mode.
4251 OP0 and OP1 are the operands that should be compared using CODE. CMODE is
4252 the mode to use should they be constants. If it is VOIDmode, they cannot
4255 OP2 should be stored in TARGET if the comparison is true, otherwise OP3
4256 should be stored there. MODE is the mode to use should they be constants.
4257 If it is VOIDmode, they cannot both be constants.
4259 The result is either TARGET (perhaps modified) or NULL_RTX if the operation
4260 is not supported. */
4263 emit_conditional_move (rtx target, enum rtx_code code, rtx op0, rtx op1,
4264 enum machine_mode cmode, rtx op2, rtx op3,
4265 enum machine_mode mode, int unsignedp)
4267 rtx tem, subtarget, comparison, insn;
4268 enum insn_code icode;
4269 enum rtx_code reversed;
4271 /* If one operand is constant, make it the second one. Only do this
4272 if the other operand is not constant as well. */
4274 if (swap_commutative_operands_p (op0, op1))
4279 code = swap_condition (code);
4282 /* get_condition will prefer to generate LT and GT even if the old
4283 comparison was against zero, so undo that canonicalization here since
4284 comparisons against zero are cheaper. */
4285 if (code == LT && op1 == const1_rtx)
4286 code = LE, op1 = const0_rtx;
4287 else if (code == GT && op1 == constm1_rtx)
4288 code = GE, op1 = const0_rtx;
4290 if (cmode == VOIDmode)
4291 cmode = GET_MODE (op0);
4293 if (swap_commutative_operands_p (op2, op3)
4294 && ((reversed = reversed_comparison_code_parts (code, op0, op1, NULL))
4303 if (mode == VOIDmode)
4304 mode = GET_MODE (op2);
4306 icode = movcc_gen_code[mode];
4308 if (icode == CODE_FOR_nothing)
4312 target = gen_reg_rtx (mode);
4316 /* If the insn doesn't accept these operands, put them in pseudos. */
4318 if (!insn_data[icode].operand[0].predicate
4319 (subtarget, insn_data[icode].operand[0].mode))
4320 subtarget = gen_reg_rtx (insn_data[icode].operand[0].mode);
4322 if (!insn_data[icode].operand[2].predicate
4323 (op2, insn_data[icode].operand[2].mode))
4324 op2 = copy_to_mode_reg (insn_data[icode].operand[2].mode, op2);
4326 if (!insn_data[icode].operand[3].predicate
4327 (op3, insn_data[icode].operand[3].mode))
4328 op3 = copy_to_mode_reg (insn_data[icode].operand[3].mode, op3);
4330 /* Everything should now be in the suitable form, so emit the compare insn
4331 and then the conditional move. */
4334 = compare_from_rtx (op0, op1, code, unsignedp, cmode, NULL_RTX);
4336 /* ??? Watch for const0_rtx (nop) and const_true_rtx (unconditional)? */
4337 /* We can get const0_rtx or const_true_rtx in some circumstances. Just
4338 return NULL and let the caller figure out how best to deal with this
4340 if (GET_CODE (comparison) != code)
4343 insn = GEN_FCN (icode) (subtarget, comparison, op2, op3);
4345 /* If that failed, then give up. */
4351 if (subtarget != target)
4352 convert_move (target, subtarget, 0);
4357 /* Return nonzero if a conditional move of mode MODE is supported.
4359 This function is for combine so it can tell whether an insn that looks
4360 like a conditional move is actually supported by the hardware. If we
4361 guess wrong we lose a bit on optimization, but that's it. */
4362 /* ??? sparc64 supports conditionally moving integers values based on fp
4363 comparisons, and vice versa. How do we handle them? */
4366 can_conditionally_move_p (enum machine_mode mode)
4368 if (movcc_gen_code[mode] != CODE_FOR_nothing)
4374 #endif /* HAVE_conditional_move */
4376 /* Emit a conditional addition instruction if the machine supports one for that
4377 condition and machine mode.
4379 OP0 and OP1 are the operands that should be compared using CODE. CMODE is
4380 the mode to use should they be constants. If it is VOIDmode, they cannot
4383 OP2 should be stored in TARGET if the comparison is true, otherwise OP2+OP3
4384 should be stored there. MODE is the mode to use should they be constants.
4385 If it is VOIDmode, they cannot both be constants.
4387 The result is either TARGET (perhaps modified) or NULL_RTX if the operation
4388 is not supported. */
4391 emit_conditional_add (rtx target, enum rtx_code code, rtx op0, rtx op1,
4392 enum machine_mode cmode, rtx op2, rtx op3,
4393 enum machine_mode mode, int unsignedp)
4395 rtx tem, subtarget, comparison, insn;
4396 enum insn_code icode;
4397 enum rtx_code reversed;
4399 /* If one operand is constant, make it the second one. Only do this
4400 if the other operand is not constant as well. */
4402 if (swap_commutative_operands_p (op0, op1))
4407 code = swap_condition (code);
4410 /* get_condition will prefer to generate LT and GT even if the old
4411 comparison was against zero, so undo that canonicalization here since
4412 comparisons against zero are cheaper. */
4413 if (code == LT && op1 == const1_rtx)
4414 code = LE, op1 = const0_rtx;
4415 else if (code == GT && op1 == constm1_rtx)
4416 code = GE, op1 = const0_rtx;
4418 if (cmode == VOIDmode)
4419 cmode = GET_MODE (op0);
4421 if (swap_commutative_operands_p (op2, op3)
4422 && ((reversed = reversed_comparison_code_parts (code, op0, op1, NULL))
4431 if (mode == VOIDmode)
4432 mode = GET_MODE (op2);
4434 icode = addcc_optab->handlers[(int) mode].insn_code;
4436 if (icode == CODE_FOR_nothing)
4440 target = gen_reg_rtx (mode);
4442 /* If the insn doesn't accept these operands, put them in pseudos. */
4444 if (!insn_data[icode].operand[0].predicate
4445 (target, insn_data[icode].operand[0].mode))
4446 subtarget = gen_reg_rtx (insn_data[icode].operand[0].mode);
4450 if (!insn_data[icode].operand[2].predicate
4451 (op2, insn_data[icode].operand[2].mode))
4452 op2 = copy_to_mode_reg (insn_data[icode].operand[2].mode, op2);
4454 if (!insn_data[icode].operand[3].predicate
4455 (op3, insn_data[icode].operand[3].mode))
4456 op3 = copy_to_mode_reg (insn_data[icode].operand[3].mode, op3);
4458 /* Everything should now be in the suitable form, so emit the compare insn
4459 and then the conditional move. */
4462 = compare_from_rtx (op0, op1, code, unsignedp, cmode, NULL_RTX);
4464 /* ??? Watch for const0_rtx (nop) and const_true_rtx (unconditional)? */
4465 /* We can get const0_rtx or const_true_rtx in some circumstances. Just
4466 return NULL and let the caller figure out how best to deal with this
4468 if (GET_CODE (comparison) != code)
4471 insn = GEN_FCN (icode) (subtarget, comparison, op2, op3);
4473 /* If that failed, then give up. */
4479 if (subtarget != target)
4480 convert_move (target, subtarget, 0);
4485 /* These functions attempt to generate an insn body, rather than
4486 emitting the insn, but if the gen function already emits them, we
4487 make no attempt to turn them back into naked patterns. */
4489 /* Generate and return an insn body to add Y to X. */
4492 gen_add2_insn (rtx x, rtx y)
4494 int icode = (int) add_optab->handlers[(int) GET_MODE (x)].insn_code;
4496 gcc_assert (insn_data[icode].operand[0].predicate
4497 (x, insn_data[icode].operand[0].mode));
4498 gcc_assert (insn_data[icode].operand[1].predicate
4499 (x, insn_data[icode].operand[1].mode));
4500 gcc_assert (insn_data[icode].operand[2].predicate
4501 (y, insn_data[icode].operand[2].mode));
4503 return GEN_FCN (icode) (x, x, y);
4506 /* Generate and return an insn body to add r1 and c,
4507 storing the result in r0. */
4509 gen_add3_insn (rtx r0, rtx r1, rtx c)
4511 int icode = (int) add_optab->handlers[(int) GET_MODE (r0)].insn_code;
4513 if (icode == CODE_FOR_nothing
4514 || !(insn_data[icode].operand[0].predicate
4515 (r0, insn_data[icode].operand[0].mode))
4516 || !(insn_data[icode].operand[1].predicate
4517 (r1, insn_data[icode].operand[1].mode))
4518 || !(insn_data[icode].operand[2].predicate
4519 (c, insn_data[icode].operand[2].mode)))
4522 return GEN_FCN (icode) (r0, r1, c);
4526 have_add2_insn (rtx x, rtx y)
4530 gcc_assert (GET_MODE (x) != VOIDmode);
4532 icode = (int) add_optab->handlers[(int) GET_MODE (x)].insn_code;
4534 if (icode == CODE_FOR_nothing)
4537 if (!(insn_data[icode].operand[0].predicate
4538 (x, insn_data[icode].operand[0].mode))
4539 || !(insn_data[icode].operand[1].predicate
4540 (x, insn_data[icode].operand[1].mode))
4541 || !(insn_data[icode].operand[2].predicate
4542 (y, insn_data[icode].operand[2].mode)))
4548 /* Generate and return an insn body to subtract Y from X. */
4551 gen_sub2_insn (rtx x, rtx y)
4553 int icode = (int) sub_optab->handlers[(int) GET_MODE (x)].insn_code;
4555 gcc_assert (insn_data[icode].operand[0].predicate
4556 (x, insn_data[icode].operand[0].mode));
4557 gcc_assert (insn_data[icode].operand[1].predicate
4558 (x, insn_data[icode].operand[1].mode));
4559 gcc_assert (insn_data[icode].operand[2].predicate
4560 (y, insn_data[icode].operand[2].mode));
4562 return GEN_FCN (icode) (x, x, y);
4565 /* Generate and return an insn body to subtract r1 and c,
4566 storing the result in r0. */
4568 gen_sub3_insn (rtx r0, rtx r1, rtx c)
4570 int icode = (int) sub_optab->handlers[(int) GET_MODE (r0)].insn_code;
4572 if (icode == CODE_FOR_nothing
4573 || !(insn_data[icode].operand[0].predicate
4574 (r0, insn_data[icode].operand[0].mode))
4575 || !(insn_data[icode].operand[1].predicate
4576 (r1, insn_data[icode].operand[1].mode))
4577 || !(insn_data[icode].operand[2].predicate
4578 (c, insn_data[icode].operand[2].mode)))
4581 return GEN_FCN (icode) (r0, r1, c);
4585 have_sub2_insn (rtx x, rtx y)
4589 gcc_assert (GET_MODE (x) != VOIDmode);
4591 icode = (int) sub_optab->handlers[(int) GET_MODE (x)].insn_code;
4593 if (icode == CODE_FOR_nothing)
4596 if (!(insn_data[icode].operand[0].predicate
4597 (x, insn_data[icode].operand[0].mode))
4598 || !(insn_data[icode].operand[1].predicate
4599 (x, insn_data[icode].operand[1].mode))
4600 || !(insn_data[icode].operand[2].predicate
4601 (y, insn_data[icode].operand[2].mode)))
4607 /* Generate the body of an instruction to copy Y into X.
4608 It may be a list of insns, if one insn isn't enough. */
4611 gen_move_insn (rtx x, rtx y)
4616 emit_move_insn_1 (x, y);
4622 /* Return the insn code used to extend FROM_MODE to TO_MODE.
4623 UNSIGNEDP specifies zero-extension instead of sign-extension. If
4624 no such operation exists, CODE_FOR_nothing will be returned. */
4627 can_extend_p (enum machine_mode to_mode, enum machine_mode from_mode,
4631 #ifdef HAVE_ptr_extend
4633 return CODE_FOR_ptr_extend;
4636 tab = unsignedp ? zext_optab : sext_optab;
4637 return tab->handlers[to_mode][from_mode].insn_code;
4640 /* Generate the body of an insn to extend Y (with mode MFROM)
4641 into X (with mode MTO). Do zero-extension if UNSIGNEDP is nonzero. */
4644 gen_extend_insn (rtx x, rtx y, enum machine_mode mto,
4645 enum machine_mode mfrom, int unsignedp)
4647 enum insn_code icode = can_extend_p (mto, mfrom, unsignedp);
4648 return GEN_FCN (icode) (x, y);
4651 /* can_fix_p and can_float_p say whether the target machine
4652 can directly convert a given fixed point type to
4653 a given floating point type, or vice versa.
4654 The returned value is the CODE_FOR_... value to use,
4655 or CODE_FOR_nothing if these modes cannot be directly converted.
4657 *TRUNCP_PTR is set to 1 if it is necessary to output
4658 an explicit FTRUNC insn before the fix insn; otherwise 0. */
4660 static enum insn_code
4661 can_fix_p (enum machine_mode fixmode, enum machine_mode fltmode,
4662 int unsignedp, int *truncp_ptr)
4665 enum insn_code icode;
4667 tab = unsignedp ? ufixtrunc_optab : sfixtrunc_optab;
4668 icode = tab->handlers[fixmode][fltmode].insn_code;
4669 if (icode != CODE_FOR_nothing)
4675 /* FIXME: This requires a port to define both FIX and FTRUNC pattern
4676 for this to work. We need to rework the fix* and ftrunc* patterns
4677 and documentation. */
4678 tab = unsignedp ? ufix_optab : sfix_optab;
4679 icode = tab->handlers[fixmode][fltmode].insn_code;
4680 if (icode != CODE_FOR_nothing
4681 && ftrunc_optab->handlers[fltmode].insn_code != CODE_FOR_nothing)
4688 return CODE_FOR_nothing;
4691 static enum insn_code
4692 can_float_p (enum machine_mode fltmode, enum machine_mode fixmode,
4697 tab = unsignedp ? ufloat_optab : sfloat_optab;
4698 return tab->handlers[fltmode][fixmode].insn_code;
4701 /* Generate code to convert FROM to floating point
4702 and store in TO. FROM must be fixed point and not VOIDmode.
4703 UNSIGNEDP nonzero means regard FROM as unsigned.
4704 Normally this is done by correcting the final value
4705 if it is negative. */
4708 expand_float (rtx to, rtx from, int unsignedp)
4710 enum insn_code icode;
4712 enum machine_mode fmode, imode;
4713 bool can_do_signed = false;
4715 /* Crash now, because we won't be able to decide which mode to use. */
4716 gcc_assert (GET_MODE (from) != VOIDmode);
4718 /* Look for an insn to do the conversion. Do it in the specified
4719 modes if possible; otherwise convert either input, output or both to
4720 wider mode. If the integer mode is wider than the mode of FROM,
4721 we can do the conversion signed even if the input is unsigned. */
4723 for (fmode = GET_MODE (to); fmode != VOIDmode;
4724 fmode = GET_MODE_WIDER_MODE (fmode))
4725 for (imode = GET_MODE (from); imode != VOIDmode;
4726 imode = GET_MODE_WIDER_MODE (imode))
4728 int doing_unsigned = unsignedp;
4730 if (fmode != GET_MODE (to)
4731 && significand_size (fmode) < GET_MODE_BITSIZE (GET_MODE (from)))
4734 icode = can_float_p (fmode, imode, unsignedp);
4735 if (icode == CODE_FOR_nothing && unsignedp)
4737 enum insn_code scode = can_float_p (fmode, imode, 0);
4738 if (scode != CODE_FOR_nothing)
4739 can_do_signed = true;
4740 if (imode != GET_MODE (from))
4741 icode = scode, doing_unsigned = 0;
4744 if (icode != CODE_FOR_nothing)
4746 if (imode != GET_MODE (from))
4747 from = convert_to_mode (imode, from, unsignedp);
4749 if (fmode != GET_MODE (to))
4750 target = gen_reg_rtx (fmode);
4752 emit_unop_insn (icode, target, from,
4753 doing_unsigned ? UNSIGNED_FLOAT : FLOAT);
4756 convert_move (to, target, 0);
4761 /* Unsigned integer, and no way to convert directly. For binary
4762 floating point modes, convert as signed, then conditionally adjust
4764 if (unsignedp && can_do_signed && !DECIMAL_FLOAT_MODE_P (GET_MODE (to)))
4766 rtx label = gen_label_rtx ();
4768 REAL_VALUE_TYPE offset;
4770 /* Look for a usable floating mode FMODE wider than the source and at
4771 least as wide as the target. Using FMODE will avoid rounding woes
4772 with unsigned values greater than the signed maximum value. */
4774 for (fmode = GET_MODE (to); fmode != VOIDmode;
4775 fmode = GET_MODE_WIDER_MODE (fmode))
4776 if (GET_MODE_BITSIZE (GET_MODE (from)) < GET_MODE_BITSIZE (fmode)
4777 && can_float_p (fmode, GET_MODE (from), 0) != CODE_FOR_nothing)
4780 if (fmode == VOIDmode)
4782 /* There is no such mode. Pretend the target is wide enough. */
4783 fmode = GET_MODE (to);
4785 /* Avoid double-rounding when TO is narrower than FROM. */
4786 if ((significand_size (fmode) + 1)
4787 < GET_MODE_BITSIZE (GET_MODE (from)))
4790 rtx neglabel = gen_label_rtx ();
4792 /* Don't use TARGET if it isn't a register, is a hard register,
4793 or is the wrong mode. */
4795 || REGNO (target) < FIRST_PSEUDO_REGISTER
4796 || GET_MODE (target) != fmode)
4797 target = gen_reg_rtx (fmode);
4799 imode = GET_MODE (from);
4800 do_pending_stack_adjust ();
4802 /* Test whether the sign bit is set. */
4803 emit_cmp_and_jump_insns (from, const0_rtx, LT, NULL_RTX, imode,
4806 /* The sign bit is not set. Convert as signed. */
4807 expand_float (target, from, 0);
4808 emit_jump_insn (gen_jump (label));
4811 /* The sign bit is set.
4812 Convert to a usable (positive signed) value by shifting right
4813 one bit, while remembering if a nonzero bit was shifted
4814 out; i.e., compute (from & 1) | (from >> 1). */
4816 emit_label (neglabel);
4817 temp = expand_binop (imode, and_optab, from, const1_rtx,
4818 NULL_RTX, 1, OPTAB_LIB_WIDEN);
4819 temp1 = expand_shift (RSHIFT_EXPR, imode, from, integer_one_node,
4821 temp = expand_binop (imode, ior_optab, temp, temp1, temp, 1,
4823 expand_float (target, temp, 0);
4825 /* Multiply by 2 to undo the shift above. */
4826 temp = expand_binop (fmode, add_optab, target, target,
4827 target, 0, OPTAB_LIB_WIDEN);
4829 emit_move_insn (target, temp);
4831 do_pending_stack_adjust ();
4837 /* If we are about to do some arithmetic to correct for an
4838 unsigned operand, do it in a pseudo-register. */
4840 if (GET_MODE (to) != fmode
4841 || !REG_P (to) || REGNO (to) < FIRST_PSEUDO_REGISTER)
4842 target = gen_reg_rtx (fmode);
4844 /* Convert as signed integer to floating. */
4845 expand_float (target, from, 0);
4847 /* If FROM is negative (and therefore TO is negative),
4848 correct its value by 2**bitwidth. */
4850 do_pending_stack_adjust ();
4851 emit_cmp_and_jump_insns (from, const0_rtx, GE, NULL_RTX, GET_MODE (from),
4855 real_2expN (&offset, GET_MODE_BITSIZE (GET_MODE (from)));
4856 temp = expand_binop (fmode, add_optab, target,
4857 CONST_DOUBLE_FROM_REAL_VALUE (offset, fmode),
4858 target, 0, OPTAB_LIB_WIDEN);
4860 emit_move_insn (target, temp);
4862 do_pending_stack_adjust ();
4867 /* No hardware instruction available; call a library routine. */
4872 convert_optab tab = unsignedp ? ufloat_optab : sfloat_optab;
4874 if (GET_MODE_SIZE (GET_MODE (from)) < GET_MODE_SIZE (SImode))
4875 from = convert_to_mode (SImode, from, unsignedp);
4877 libfunc = tab->handlers[GET_MODE (to)][GET_MODE (from)].libfunc;
4878 gcc_assert (libfunc);
4882 value = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
4883 GET_MODE (to), 1, from,
4885 insns = get_insns ();
4888 emit_libcall_block (insns, target, value,
4889 gen_rtx_FLOAT (GET_MODE (to), from));
4894 /* Copy result to requested destination
4895 if we have been computing in a temp location. */
4899 if (GET_MODE (target) == GET_MODE (to))
4900 emit_move_insn (to, target);
4902 convert_move (to, target, 0);
4906 /* Generate code to convert FROM to fixed point and store in TO. FROM
4907 must be floating point. */
4910 expand_fix (rtx to, rtx from, int unsignedp)
4912 enum insn_code icode;
4914 enum machine_mode fmode, imode;
4917 /* We first try to find a pair of modes, one real and one integer, at
4918 least as wide as FROM and TO, respectively, in which we can open-code
4919 this conversion. If the integer mode is wider than the mode of TO,
4920 we can do the conversion either signed or unsigned. */
4922 for (fmode = GET_MODE (from); fmode != VOIDmode;
4923 fmode = GET_MODE_WIDER_MODE (fmode))
4924 for (imode = GET_MODE (to); imode != VOIDmode;
4925 imode = GET_MODE_WIDER_MODE (imode))
4927 int doing_unsigned = unsignedp;
4929 icode = can_fix_p (imode, fmode, unsignedp, &must_trunc);
4930 if (icode == CODE_FOR_nothing && imode != GET_MODE (to) && unsignedp)
4931 icode = can_fix_p (imode, fmode, 0, &must_trunc), doing_unsigned = 0;
4933 if (icode != CODE_FOR_nothing)
4935 if (fmode != GET_MODE (from))
4936 from = convert_to_mode (fmode, from, 0);
4940 rtx temp = gen_reg_rtx (GET_MODE (from));
4941 from = expand_unop (GET_MODE (from), ftrunc_optab, from,
4945 if (imode != GET_MODE (to))
4946 target = gen_reg_rtx (imode);
4948 emit_unop_insn (icode, target, from,
4949 doing_unsigned ? UNSIGNED_FIX : FIX);
4951 convert_move (to, target, unsignedp);
4956 /* For an unsigned conversion, there is one more way to do it.
4957 If we have a signed conversion, we generate code that compares
4958 the real value to the largest representable positive number. If if
4959 is smaller, the conversion is done normally. Otherwise, subtract
4960 one plus the highest signed number, convert, and add it back.
4962 We only need to check all real modes, since we know we didn't find
4963 anything with a wider integer mode.
4965 This code used to extend FP value into mode wider than the destination.
4966 This is not needed. Consider, for instance conversion from SFmode
4969 The hot path through the code is dealing with inputs smaller than 2^63
4970 and doing just the conversion, so there is no bits to lose.
4972 In the other path we know the value is positive in the range 2^63..2^64-1
4973 inclusive. (as for other imput overflow happens and result is undefined)
4974 So we know that the most important bit set in mantissa corresponds to
4975 2^63. The subtraction of 2^63 should not generate any rounding as it
4976 simply clears out that bit. The rest is trivial. */
4978 if (unsignedp && GET_MODE_BITSIZE (GET_MODE (to)) <= HOST_BITS_PER_WIDE_INT)
4979 for (fmode = GET_MODE (from); fmode != VOIDmode;
4980 fmode = GET_MODE_WIDER_MODE (fmode))
4981 if (CODE_FOR_nothing != can_fix_p (GET_MODE (to), fmode, 0,
4985 REAL_VALUE_TYPE offset;
4986 rtx limit, lab1, lab2, insn;
4988 bitsize = GET_MODE_BITSIZE (GET_MODE (to));
4989 real_2expN (&offset, bitsize - 1);
4990 limit = CONST_DOUBLE_FROM_REAL_VALUE (offset, fmode);
4991 lab1 = gen_label_rtx ();
4992 lab2 = gen_label_rtx ();
4994 if (fmode != GET_MODE (from))
4995 from = convert_to_mode (fmode, from, 0);
4997 /* See if we need to do the subtraction. */
4998 do_pending_stack_adjust ();
4999 emit_cmp_and_jump_insns (from, limit, GE, NULL_RTX, GET_MODE (from),
5002 /* If not, do the signed "fix" and branch around fixup code. */
5003 expand_fix (to, from, 0);
5004 emit_jump_insn (gen_jump (lab2));
5007 /* Otherwise, subtract 2**(N-1), convert to signed number,
5008 then add 2**(N-1). Do the addition using XOR since this
5009 will often generate better code. */
5011 target = expand_binop (GET_MODE (from), sub_optab, from, limit,
5012 NULL_RTX, 0, OPTAB_LIB_WIDEN);
5013 expand_fix (to, target, 0);
5014 target = expand_binop (GET_MODE (to), xor_optab, to,
5016 ((HOST_WIDE_INT) 1 << (bitsize - 1),
5018 to, 1, OPTAB_LIB_WIDEN);
5021 emit_move_insn (to, target);
5025 if (mov_optab->handlers[(int) GET_MODE (to)].insn_code
5026 != CODE_FOR_nothing)
5028 /* Make a place for a REG_NOTE and add it. */
5029 insn = emit_move_insn (to, to);
5030 set_unique_reg_note (insn,
5032 gen_rtx_fmt_e (UNSIGNED_FIX,
5040 /* We can't do it with an insn, so use a library call. But first ensure
5041 that the mode of TO is at least as wide as SImode, since those are the
5042 only library calls we know about. */
5044 if (GET_MODE_SIZE (GET_MODE (to)) < GET_MODE_SIZE (SImode))
5046 target = gen_reg_rtx (SImode);
5048 expand_fix (target, from, unsignedp);
5056 convert_optab tab = unsignedp ? ufix_optab : sfix_optab;
5057 libfunc = tab->handlers[GET_MODE (to)][GET_MODE (from)].libfunc;
5058 gcc_assert (libfunc);
5062 value = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
5063 GET_MODE (to), 1, from,
5065 insns = get_insns ();
5068 emit_libcall_block (insns, target, value,
5069 gen_rtx_fmt_e (unsignedp ? UNSIGNED_FIX : FIX,
5070 GET_MODE (to), from));
5075 if (GET_MODE (to) == GET_MODE (target))
5076 emit_move_insn (to, target);
5078 convert_move (to, target, 0);
5082 /* Generate code to convert FROM to fixed point and store in TO. FROM
5083 must be floating point, TO must be signed. Use the conversion optab
5084 TAB to do the conversion. */
5087 expand_sfix_optab (rtx to, rtx from, convert_optab tab)
5089 enum insn_code icode;
5091 enum machine_mode fmode, imode;
5093 /* We first try to find a pair of modes, one real and one integer, at
5094 least as wide as FROM and TO, respectively, in which we can open-code
5095 this conversion. If the integer mode is wider than the mode of TO,
5096 we can do the conversion either signed or unsigned. */
5098 for (fmode = GET_MODE (from); fmode != VOIDmode;
5099 fmode = GET_MODE_WIDER_MODE (fmode))
5100 for (imode = GET_MODE (to); imode != VOIDmode;
5101 imode = GET_MODE_WIDER_MODE (imode))
5103 icode = tab->handlers[imode][fmode].insn_code;
5104 if (icode != CODE_FOR_nothing)
5106 if (fmode != GET_MODE (from))
5107 from = convert_to_mode (fmode, from, 0);
5109 if (imode != GET_MODE (to))
5110 target = gen_reg_rtx (imode);
5112 emit_unop_insn (icode, target, from, UNKNOWN);
5114 convert_move (to, target, 0);
5122 /* Report whether we have an instruction to perform the operation
5123 specified by CODE on operands of mode MODE. */
5125 have_insn_for (enum rtx_code code, enum machine_mode mode)
5127 return (code_to_optab[(int) code] != 0
5128 && (code_to_optab[(int) code]->handlers[(int) mode].insn_code
5129 != CODE_FOR_nothing));
5132 /* Create a blank optab. */
5137 optab op = ggc_alloc (sizeof (struct optab));
5138 for (i = 0; i < NUM_MACHINE_MODES; i++)
5140 op->handlers[i].insn_code = CODE_FOR_nothing;
5141 op->handlers[i].libfunc = 0;
5147 static convert_optab
5148 new_convert_optab (void)
5151 convert_optab op = ggc_alloc (sizeof (struct convert_optab));
5152 for (i = 0; i < NUM_MACHINE_MODES; i++)
5153 for (j = 0; j < NUM_MACHINE_MODES; j++)
5155 op->handlers[i][j].insn_code = CODE_FOR_nothing;
5156 op->handlers[i][j].libfunc = 0;
5161 /* Same, but fill in its code as CODE, and write it into the
5162 code_to_optab table. */
5164 init_optab (enum rtx_code code)
5166 optab op = new_optab ();
5168 code_to_optab[(int) code] = op;
5172 /* Same, but fill in its code as CODE, and do _not_ write it into
5173 the code_to_optab table. */
5175 init_optabv (enum rtx_code code)
5177 optab op = new_optab ();
5182 /* Conversion optabs never go in the code_to_optab table. */
5183 static inline convert_optab
5184 init_convert_optab (enum rtx_code code)
5186 convert_optab op = new_convert_optab ();
5191 /* Initialize the libfunc fields of an entire group of entries in some
5192 optab. Each entry is set equal to a string consisting of a leading
5193 pair of underscores followed by a generic operation name followed by
5194 a mode name (downshifted to lowercase) followed by a single character
5195 representing the number of operands for the given operation (which is
5196 usually one of the characters '2', '3', or '4').
5198 OPTABLE is the table in which libfunc fields are to be initialized.
5199 FIRST_MODE is the first machine mode index in the given optab to
5201 LAST_MODE is the last machine mode index in the given optab to
5203 OPNAME is the generic (string) name of the operation.
5204 SUFFIX is the character which specifies the number of operands for
5205 the given generic operation.
5209 init_libfuncs (optab optable, int first_mode, int last_mode,
5210 const char *opname, int suffix)
5213 unsigned opname_len = strlen (opname);
5215 for (mode = first_mode; (int) mode <= (int) last_mode;
5216 mode = (enum machine_mode) ((int) mode + 1))
5218 const char *mname = GET_MODE_NAME (mode);
5219 unsigned mname_len = strlen (mname);
5220 char *libfunc_name = alloca (2 + opname_len + mname_len + 1 + 1);
5227 for (q = opname; *q; )
5229 for (q = mname; *q; q++)
5230 *p++ = TOLOWER (*q);
5234 optable->handlers[(int) mode].libfunc
5235 = init_one_libfunc (ggc_alloc_string (libfunc_name, p - libfunc_name));
5239 /* Initialize the libfunc fields of an entire group of entries in some
5240 optab which correspond to all integer mode operations. The parameters
5241 have the same meaning as similarly named ones for the `init_libfuncs'
5242 routine. (See above). */
5245 init_integral_libfuncs (optab optable, const char *opname, int suffix)
5247 int maxsize = 2*BITS_PER_WORD;
5248 if (maxsize < LONG_LONG_TYPE_SIZE)
5249 maxsize = LONG_LONG_TYPE_SIZE;
5250 init_libfuncs (optable, word_mode,
5251 mode_for_size (maxsize, MODE_INT, 0),
5255 /* Initialize the libfunc fields of an entire group of entries in some
5256 optab which correspond to all real mode operations. The parameters
5257 have the same meaning as similarly named ones for the `init_libfuncs'
5258 routine. (See above). */
5261 init_floating_libfuncs (optab optable, const char *opname, int suffix)
5263 char *dec_opname = alloca (sizeof (DECIMAL_PREFIX) + strlen (opname));
5265 /* For BID support, change the name to have either a bid_ or dpd_ prefix
5266 depending on the low level floating format used. */
5267 memcpy (dec_opname, DECIMAL_PREFIX, sizeof (DECIMAL_PREFIX) - 1);
5268 strcpy (dec_opname + sizeof (DECIMAL_PREFIX) - 1, opname);
5270 init_libfuncs (optable, MIN_MODE_FLOAT, MAX_MODE_FLOAT, opname, suffix);
5271 init_libfuncs (optable, MIN_MODE_DECIMAL_FLOAT, MAX_MODE_DECIMAL_FLOAT,
5272 dec_opname, suffix);
5275 /* Initialize the libfunc fields of an entire group of entries of an
5276 inter-mode-class conversion optab. The string formation rules are
5277 similar to the ones for init_libfuncs, above, but instead of having
5278 a mode name and an operand count these functions have two mode names
5279 and no operand count. */
5281 init_interclass_conv_libfuncs (convert_optab tab, const char *opname,
5282 enum mode_class from_class,
5283 enum mode_class to_class)
5285 enum machine_mode first_from_mode = GET_CLASS_NARROWEST_MODE (from_class);
5286 enum machine_mode first_to_mode = GET_CLASS_NARROWEST_MODE (to_class);
5287 size_t opname_len = strlen (opname);
5288 size_t max_mname_len = 0;
5290 enum machine_mode fmode, tmode;
5291 const char *fname, *tname;
5293 char *libfunc_name, *suffix;
5294 char *nondec_name, *dec_name, *nondec_suffix, *dec_suffix;
5297 /* If this is a decimal conversion, add the current BID vs. DPD prefix that
5298 depends on which underlying decimal floating point format is used. */
5299 const size_t dec_len = sizeof (DECIMAL_PREFIX) - 1;
5301 for (fmode = first_from_mode;
5303 fmode = GET_MODE_WIDER_MODE (fmode))
5304 max_mname_len = MAX (max_mname_len, strlen (GET_MODE_NAME (fmode)));
5306 for (tmode = first_to_mode;
5308 tmode = GET_MODE_WIDER_MODE (tmode))
5309 max_mname_len = MAX (max_mname_len, strlen (GET_MODE_NAME (tmode)));
5311 nondec_name = alloca (2 + opname_len + 2*max_mname_len + 1 + 1);
5312 nondec_name[0] = '_';
5313 nondec_name[1] = '_';
5314 memcpy (&nondec_name[2], opname, opname_len);
5315 nondec_suffix = nondec_name + opname_len + 2;
5317 dec_name = alloca (2 + dec_len + opname_len + 2*max_mname_len + 1 + 1);
5320 memcpy (&dec_name[2], DECIMAL_PREFIX, dec_len);
5321 memcpy (&dec_name[2+dec_len], opname, opname_len);
5322 dec_suffix = dec_name + dec_len + opname_len + 2;
5324 for (fmode = first_from_mode; fmode != VOIDmode;
5325 fmode = GET_MODE_WIDER_MODE (fmode))
5326 for (tmode = first_to_mode; tmode != VOIDmode;
5327 tmode = GET_MODE_WIDER_MODE (tmode))
5329 fname = GET_MODE_NAME (fmode);
5330 tname = GET_MODE_NAME (tmode);
5332 if (DECIMAL_FLOAT_MODE_P(fmode) || DECIMAL_FLOAT_MODE_P(tmode))
5334 libfunc_name = dec_name;
5335 suffix = dec_suffix;
5339 libfunc_name = nondec_name;
5340 suffix = nondec_suffix;
5344 for (q = fname; *q; p++, q++)
5346 for (q = tname; *q; p++, q++)
5351 tab->handlers[tmode][fmode].libfunc
5352 = init_one_libfunc (ggc_alloc_string (libfunc_name,
5357 /* Initialize the libfunc fields of an entire group of entries of an
5358 intra-mode-class conversion optab. The string formation rules are
5359 similar to the ones for init_libfunc, above. WIDENING says whether
5360 the optab goes from narrow to wide modes or vice versa. These functions
5361 have two mode names _and_ an operand count. */
5363 init_intraclass_conv_libfuncs (convert_optab tab, const char *opname,
5364 enum mode_class class, bool widening)
5366 enum machine_mode first_mode = GET_CLASS_NARROWEST_MODE (class);
5367 size_t opname_len = strlen (opname);
5368 size_t max_mname_len = 0;
5370 enum machine_mode nmode, wmode;
5371 const char *nname, *wname;
5373 char *nondec_name, *dec_name, *nondec_suffix, *dec_suffix;
5374 char *libfunc_name, *suffix;
5377 /* If this is a decimal conversion, add the current BID vs. DPD prefix that
5378 depends on which underlying decimal floating point format is used. */
5379 const size_t dec_len = sizeof (DECIMAL_PREFIX) - 1;
5381 for (nmode = first_mode; nmode != VOIDmode;
5382 nmode = GET_MODE_WIDER_MODE (nmode))
5383 max_mname_len = MAX (max_mname_len, strlen (GET_MODE_NAME (nmode)));
5385 nondec_name = alloca (2 + opname_len + 2*max_mname_len + 1 + 1);
5386 nondec_name[0] = '_';
5387 nondec_name[1] = '_';
5388 memcpy (&nondec_name[2], opname, opname_len);
5389 nondec_suffix = nondec_name + opname_len + 2;
5391 dec_name = alloca (2 + dec_len + opname_len + 2*max_mname_len + 1 + 1);
5394 memcpy (&dec_name[2], DECIMAL_PREFIX, dec_len);
5395 memcpy (&dec_name[2 + dec_len], opname, opname_len);
5396 dec_suffix = dec_name + dec_len + opname_len + 2;
5398 for (nmode = first_mode; nmode != VOIDmode;
5399 nmode = GET_MODE_WIDER_MODE (nmode))
5400 for (wmode = GET_MODE_WIDER_MODE (nmode); wmode != VOIDmode;
5401 wmode = GET_MODE_WIDER_MODE (wmode))
5403 nname = GET_MODE_NAME (nmode);
5404 wname = GET_MODE_NAME (wmode);
5406 if (DECIMAL_FLOAT_MODE_P(nmode) || DECIMAL_FLOAT_MODE_P(wmode))
5408 libfunc_name = dec_name;
5409 suffix = dec_suffix;
5413 libfunc_name = nondec_name;
5414 suffix = nondec_suffix;
5418 for (q = widening ? nname : wname; *q; p++, q++)
5420 for (q = widening ? wname : nname; *q; p++, q++)
5426 tab->handlers[widening ? wmode : nmode]
5427 [widening ? nmode : wmode].libfunc
5428 = init_one_libfunc (ggc_alloc_string (libfunc_name,
5435 init_one_libfunc (const char *name)
5439 /* Create a FUNCTION_DECL that can be passed to
5440 targetm.encode_section_info. */
5441 /* ??? We don't have any type information except for this is
5442 a function. Pretend this is "int foo()". */
5443 tree decl = build_decl (FUNCTION_DECL, get_identifier (name),
5444 build_function_type (integer_type_node, NULL_TREE));
5445 DECL_ARTIFICIAL (decl) = 1;
5446 DECL_EXTERNAL (decl) = 1;
5447 TREE_PUBLIC (decl) = 1;
5449 symbol = XEXP (DECL_RTL (decl), 0);
5451 /* Zap the nonsensical SYMBOL_REF_DECL for this. What we're left with
5452 are the flags assigned by targetm.encode_section_info. */
5453 SET_SYMBOL_REF_DECL (symbol, 0);
5458 /* Call this to reset the function entry for one optab (OPTABLE) in mode
5459 MODE to NAME, which should be either 0 or a string constant. */
5461 set_optab_libfunc (optab optable, enum machine_mode mode, const char *name)
5464 optable->handlers[mode].libfunc = init_one_libfunc (name);
5466 optable->handlers[mode].libfunc = 0;
5469 /* Call this to reset the function entry for one conversion optab
5470 (OPTABLE) from mode FMODE to mode TMODE to NAME, which should be
5471 either 0 or a string constant. */
5473 set_conv_libfunc (convert_optab optable, enum machine_mode tmode,
5474 enum machine_mode fmode, const char *name)
5477 optable->handlers[tmode][fmode].libfunc = init_one_libfunc (name);
5479 optable->handlers[tmode][fmode].libfunc = 0;
5482 /* Call this once to initialize the contents of the optabs
5483 appropriately for the current target machine. */
5489 enum machine_mode int_mode;
5491 /* Start by initializing all tables to contain CODE_FOR_nothing. */
5493 for (i = 0; i < NUM_RTX_CODE; i++)
5494 setcc_gen_code[i] = CODE_FOR_nothing;
5496 #ifdef HAVE_conditional_move
5497 for (i = 0; i < NUM_MACHINE_MODES; i++)
5498 movcc_gen_code[i] = CODE_FOR_nothing;
5501 for (i = 0; i < NUM_MACHINE_MODES; i++)
5503 vcond_gen_code[i] = CODE_FOR_nothing;
5504 vcondu_gen_code[i] = CODE_FOR_nothing;
5507 add_optab = init_optab (PLUS);
5508 addv_optab = init_optabv (PLUS);
5509 sub_optab = init_optab (MINUS);
5510 subv_optab = init_optabv (MINUS);
5511 smul_optab = init_optab (MULT);
5512 smulv_optab = init_optabv (MULT);
5513 smul_highpart_optab = init_optab (UNKNOWN);
5514 umul_highpart_optab = init_optab (UNKNOWN);
5515 smul_widen_optab = init_optab (UNKNOWN);
5516 umul_widen_optab = init_optab (UNKNOWN);
5517 usmul_widen_optab = init_optab (UNKNOWN);
5518 smadd_widen_optab = init_optab (UNKNOWN);
5519 umadd_widen_optab = init_optab (UNKNOWN);
5520 smsub_widen_optab = init_optab (UNKNOWN);
5521 umsub_widen_optab = init_optab (UNKNOWN);
5522 sdiv_optab = init_optab (DIV);
5523 sdivv_optab = init_optabv (DIV);
5524 sdivmod_optab = init_optab (UNKNOWN);
5525 udiv_optab = init_optab (UDIV);
5526 udivmod_optab = init_optab (UNKNOWN);
5527 smod_optab = init_optab (MOD);
5528 umod_optab = init_optab (UMOD);
5529 fmod_optab = init_optab (UNKNOWN);
5530 remainder_optab = init_optab (UNKNOWN);
5531 ftrunc_optab = init_optab (UNKNOWN);
5532 and_optab = init_optab (AND);
5533 ior_optab = init_optab (IOR);
5534 xor_optab = init_optab (XOR);
5535 ashl_optab = init_optab (ASHIFT);
5536 ashr_optab = init_optab (ASHIFTRT);
5537 lshr_optab = init_optab (LSHIFTRT);
5538 rotl_optab = init_optab (ROTATE);
5539 rotr_optab = init_optab (ROTATERT);
5540 smin_optab = init_optab (SMIN);
5541 smax_optab = init_optab (SMAX);
5542 umin_optab = init_optab (UMIN);
5543 umax_optab = init_optab (UMAX);
5544 pow_optab = init_optab (UNKNOWN);
5545 atan2_optab = init_optab (UNKNOWN);
5547 /* These three have codes assigned exclusively for the sake of
5549 mov_optab = init_optab (SET);
5550 movstrict_optab = init_optab (STRICT_LOW_PART);
5551 cmp_optab = init_optab (COMPARE);
5553 storent_optab = init_optab (UNKNOWN);
5555 ucmp_optab = init_optab (UNKNOWN);
5556 tst_optab = init_optab (UNKNOWN);
5558 eq_optab = init_optab (EQ);
5559 ne_optab = init_optab (NE);
5560 gt_optab = init_optab (GT);
5561 ge_optab = init_optab (GE);
5562 lt_optab = init_optab (LT);
5563 le_optab = init_optab (LE);
5564 unord_optab = init_optab (UNORDERED);
5566 neg_optab = init_optab (NEG);
5567 negv_optab = init_optabv (NEG);
5568 abs_optab = init_optab (ABS);
5569 absv_optab = init_optabv (ABS);
5570 addcc_optab = init_optab (UNKNOWN);
5571 one_cmpl_optab = init_optab (NOT);
5572 bswap_optab = init_optab (BSWAP);
5573 ffs_optab = init_optab (FFS);
5574 clz_optab = init_optab (CLZ);
5575 ctz_optab = init_optab (CTZ);
5576 popcount_optab = init_optab (POPCOUNT);
5577 parity_optab = init_optab (PARITY);
5578 sqrt_optab = init_optab (SQRT);
5579 floor_optab = init_optab (UNKNOWN);
5580 ceil_optab = init_optab (UNKNOWN);
5581 round_optab = init_optab (UNKNOWN);
5582 btrunc_optab = init_optab (UNKNOWN);
5583 nearbyint_optab = init_optab (UNKNOWN);
5584 rint_optab = init_optab (UNKNOWN);
5585 sincos_optab = init_optab (UNKNOWN);
5586 sin_optab = init_optab (UNKNOWN);
5587 asin_optab = init_optab (UNKNOWN);
5588 cos_optab = init_optab (UNKNOWN);
5589 acos_optab = init_optab (UNKNOWN);
5590 exp_optab = init_optab (UNKNOWN);
5591 exp10_optab = init_optab (UNKNOWN);
5592 exp2_optab = init_optab (UNKNOWN);
5593 expm1_optab = init_optab (UNKNOWN);
5594 ldexp_optab = init_optab (UNKNOWN);
5595 scalb_optab = init_optab (UNKNOWN);
5596 logb_optab = init_optab (UNKNOWN);
5597 ilogb_optab = init_optab (UNKNOWN);
5598 log_optab = init_optab (UNKNOWN);
5599 log10_optab = init_optab (UNKNOWN);
5600 log2_optab = init_optab (UNKNOWN);
5601 log1p_optab = init_optab (UNKNOWN);
5602 tan_optab = init_optab (UNKNOWN);
5603 atan_optab = init_optab (UNKNOWN);
5604 copysign_optab = init_optab (UNKNOWN);
5605 signbit_optab = init_optab (UNKNOWN);
5607 isinf_optab = init_optab (UNKNOWN);
5609 strlen_optab = init_optab (UNKNOWN);
5610 cbranch_optab = init_optab (UNKNOWN);
5611 cmov_optab = init_optab (UNKNOWN);
5612 cstore_optab = init_optab (UNKNOWN);
5613 push_optab = init_optab (UNKNOWN);
5615 reduc_smax_optab = init_optab (UNKNOWN);
5616 reduc_umax_optab = init_optab (UNKNOWN);
5617 reduc_smin_optab = init_optab (UNKNOWN);
5618 reduc_umin_optab = init_optab (UNKNOWN);
5619 reduc_splus_optab = init_optab (UNKNOWN);
5620 reduc_uplus_optab = init_optab (UNKNOWN);
5622 ssum_widen_optab = init_optab (UNKNOWN);
5623 usum_widen_optab = init_optab (UNKNOWN);
5624 sdot_prod_optab = init_optab (UNKNOWN);
5625 udot_prod_optab = init_optab (UNKNOWN);
5627 vec_extract_optab = init_optab (UNKNOWN);
5628 vec_extract_even_optab = init_optab (UNKNOWN);
5629 vec_extract_odd_optab = init_optab (UNKNOWN);
5630 vec_interleave_high_optab = init_optab (UNKNOWN);
5631 vec_interleave_low_optab = init_optab (UNKNOWN);
5632 vec_set_optab = init_optab (UNKNOWN);
5633 vec_init_optab = init_optab (UNKNOWN);
5634 vec_shl_optab = init_optab (UNKNOWN);
5635 vec_shr_optab = init_optab (UNKNOWN);
5636 vec_realign_load_optab = init_optab (UNKNOWN);
5637 movmisalign_optab = init_optab (UNKNOWN);
5638 vec_widen_umult_hi_optab = init_optab (UNKNOWN);
5639 vec_widen_umult_lo_optab = init_optab (UNKNOWN);
5640 vec_widen_smult_hi_optab = init_optab (UNKNOWN);
5641 vec_widen_smult_lo_optab = init_optab (UNKNOWN);
5642 vec_unpacks_hi_optab = init_optab (UNKNOWN);
5643 vec_unpacks_lo_optab = init_optab (UNKNOWN);
5644 vec_unpacku_hi_optab = init_optab (UNKNOWN);
5645 vec_unpacku_lo_optab = init_optab (UNKNOWN);
5646 vec_unpacks_float_hi_optab = init_optab (UNKNOWN);
5647 vec_unpacks_float_lo_optab = init_optab (UNKNOWN);
5648 vec_unpacku_float_hi_optab = init_optab (UNKNOWN);
5649 vec_unpacku_float_lo_optab = init_optab (UNKNOWN);
5650 vec_pack_trunc_optab = init_optab (UNKNOWN);
5651 vec_pack_usat_optab = init_optab (UNKNOWN);
5652 vec_pack_ssat_optab = init_optab (UNKNOWN);
5653 vec_pack_ufix_trunc_optab = init_optab (UNKNOWN);
5654 vec_pack_sfix_trunc_optab = init_optab (UNKNOWN);
5656 powi_optab = init_optab (UNKNOWN);
5659 sext_optab = init_convert_optab (SIGN_EXTEND);
5660 zext_optab = init_convert_optab (ZERO_EXTEND);
5661 trunc_optab = init_convert_optab (TRUNCATE);
5662 sfix_optab = init_convert_optab (FIX);
5663 ufix_optab = init_convert_optab (UNSIGNED_FIX);
5664 sfixtrunc_optab = init_convert_optab (UNKNOWN);
5665 ufixtrunc_optab = init_convert_optab (UNKNOWN);
5666 sfloat_optab = init_convert_optab (FLOAT);
5667 ufloat_optab = init_convert_optab (UNSIGNED_FLOAT);
5668 lrint_optab = init_convert_optab (UNKNOWN);
5669 lround_optab = init_convert_optab (UNKNOWN);
5670 lfloor_optab = init_convert_optab (UNKNOWN);
5671 lceil_optab = init_convert_optab (UNKNOWN);
5673 for (i = 0; i < NUM_MACHINE_MODES; i++)
5675 movmem_optab[i] = CODE_FOR_nothing;
5676 cmpstr_optab[i] = CODE_FOR_nothing;
5677 cmpstrn_optab[i] = CODE_FOR_nothing;
5678 cmpmem_optab[i] = CODE_FOR_nothing;
5679 setmem_optab[i] = CODE_FOR_nothing;
5681 sync_add_optab[i] = CODE_FOR_nothing;
5682 sync_sub_optab[i] = CODE_FOR_nothing;
5683 sync_ior_optab[i] = CODE_FOR_nothing;
5684 sync_and_optab[i] = CODE_FOR_nothing;
5685 sync_xor_optab[i] = CODE_FOR_nothing;
5686 sync_nand_optab[i] = CODE_FOR_nothing;
5687 sync_old_add_optab[i] = CODE_FOR_nothing;
5688 sync_old_sub_optab[i] = CODE_FOR_nothing;
5689 sync_old_ior_optab[i] = CODE_FOR_nothing;
5690 sync_old_and_optab[i] = CODE_FOR_nothing;
5691 sync_old_xor_optab[i] = CODE_FOR_nothing;
5692 sync_old_nand_optab[i] = CODE_FOR_nothing;
5693 sync_new_add_optab[i] = CODE_FOR_nothing;
5694 sync_new_sub_optab[i] = CODE_FOR_nothing;
5695 sync_new_ior_optab[i] = CODE_FOR_nothing;
5696 sync_new_and_optab[i] = CODE_FOR_nothing;
5697 sync_new_xor_optab[i] = CODE_FOR_nothing;
5698 sync_new_nand_optab[i] = CODE_FOR_nothing;
5699 sync_compare_and_swap[i] = CODE_FOR_nothing;
5700 sync_compare_and_swap_cc[i] = CODE_FOR_nothing;
5701 sync_lock_test_and_set[i] = CODE_FOR_nothing;
5702 sync_lock_release[i] = CODE_FOR_nothing;
5704 reload_in_optab[i] = reload_out_optab[i] = CODE_FOR_nothing;
5707 /* Fill in the optabs with the insns we support. */
5710 /* The ffs function operates on `int'. Fall back on it if we do not
5711 have a libgcc2 function for that width. */
5712 int_mode = mode_for_size (INT_TYPE_SIZE, MODE_INT, 0);
5713 ffs_optab->handlers[(int) int_mode].libfunc = init_one_libfunc ("ffs");
5715 /* Initialize the optabs with the names of the library functions. */
5716 init_integral_libfuncs (add_optab, "add", '3');
5717 init_floating_libfuncs (add_optab, "add", '3');
5718 init_integral_libfuncs (addv_optab, "addv", '3');
5719 init_floating_libfuncs (addv_optab, "add", '3');
5720 init_integral_libfuncs (sub_optab, "sub", '3');
5721 init_floating_libfuncs (sub_optab, "sub", '3');
5722 init_integral_libfuncs (subv_optab, "subv", '3');
5723 init_floating_libfuncs (subv_optab, "sub", '3');
5724 init_integral_libfuncs (smul_optab, "mul", '3');
5725 init_floating_libfuncs (smul_optab, "mul", '3');
5726 init_integral_libfuncs (smulv_optab, "mulv", '3');
5727 init_floating_libfuncs (smulv_optab, "mul", '3');
5728 init_integral_libfuncs (sdiv_optab, "div", '3');
5729 init_floating_libfuncs (sdiv_optab, "div", '3');
5730 init_integral_libfuncs (sdivv_optab, "divv", '3');
5731 init_integral_libfuncs (udiv_optab, "udiv", '3');
5732 init_integral_libfuncs (sdivmod_optab, "divmod", '4');
5733 init_integral_libfuncs (udivmod_optab, "udivmod", '4');
5734 init_integral_libfuncs (smod_optab, "mod", '3');
5735 init_integral_libfuncs (umod_optab, "umod", '3');
5736 init_floating_libfuncs (ftrunc_optab, "ftrunc", '2');
5737 init_integral_libfuncs (and_optab, "and", '3');
5738 init_integral_libfuncs (ior_optab, "ior", '3');
5739 init_integral_libfuncs (xor_optab, "xor", '3');
5740 init_integral_libfuncs (ashl_optab, "ashl", '3');
5741 init_integral_libfuncs (ashr_optab, "ashr", '3');
5742 init_integral_libfuncs (lshr_optab, "lshr", '3');
5743 init_integral_libfuncs (smin_optab, "min", '3');
5744 init_floating_libfuncs (smin_optab, "min", '3');
5745 init_integral_libfuncs (smax_optab, "max", '3');
5746 init_floating_libfuncs (smax_optab, "max", '3');
5747 init_integral_libfuncs (umin_optab, "umin", '3');
5748 init_integral_libfuncs (umax_optab, "umax", '3');
5749 init_integral_libfuncs (neg_optab, "neg", '2');
5750 init_floating_libfuncs (neg_optab, "neg", '2');
5751 init_integral_libfuncs (negv_optab, "negv", '2');
5752 init_floating_libfuncs (negv_optab, "neg", '2');
5753 init_integral_libfuncs (one_cmpl_optab, "one_cmpl", '2');
5754 init_integral_libfuncs (ffs_optab, "ffs", '2');
5755 init_integral_libfuncs (clz_optab, "clz", '2');
5756 init_integral_libfuncs (ctz_optab, "ctz", '2');
5757 init_integral_libfuncs (popcount_optab, "popcount", '2');
5758 init_integral_libfuncs (parity_optab, "parity", '2');
5760 /* Comparison libcalls for integers MUST come in pairs,
5762 init_integral_libfuncs (cmp_optab, "cmp", '2');
5763 init_integral_libfuncs (ucmp_optab, "ucmp", '2');
5764 init_floating_libfuncs (cmp_optab, "cmp", '2');
5766 /* EQ etc are floating point only. */
5767 init_floating_libfuncs (eq_optab, "eq", '2');
5768 init_floating_libfuncs (ne_optab, "ne", '2');
5769 init_floating_libfuncs (gt_optab, "gt", '2');
5770 init_floating_libfuncs (ge_optab, "ge", '2');
5771 init_floating_libfuncs (lt_optab, "lt", '2');
5772 init_floating_libfuncs (le_optab, "le", '2');
5773 init_floating_libfuncs (unord_optab, "unord", '2');
5775 init_floating_libfuncs (powi_optab, "powi", '2');
5778 init_interclass_conv_libfuncs (sfloat_optab, "float",
5779 MODE_INT, MODE_FLOAT);
5780 init_interclass_conv_libfuncs (sfloat_optab, "float",
5781 MODE_INT, MODE_DECIMAL_FLOAT);
5782 init_interclass_conv_libfuncs (ufloat_optab, "floatun",
5783 MODE_INT, MODE_FLOAT);
5784 init_interclass_conv_libfuncs (ufloat_optab, "floatun",
5785 MODE_INT, MODE_DECIMAL_FLOAT);
5786 init_interclass_conv_libfuncs (sfix_optab, "fix",
5787 MODE_FLOAT, MODE_INT);
5788 init_interclass_conv_libfuncs (sfix_optab, "fix",
5789 MODE_DECIMAL_FLOAT, MODE_INT);
5790 init_interclass_conv_libfuncs (ufix_optab, "fixuns",
5791 MODE_FLOAT, MODE_INT);
5792 init_interclass_conv_libfuncs (ufix_optab, "fixuns",
5793 MODE_DECIMAL_FLOAT, MODE_INT);
5794 init_interclass_conv_libfuncs (ufloat_optab, "floatuns",
5795 MODE_INT, MODE_DECIMAL_FLOAT);
5796 init_interclass_conv_libfuncs (lrint_optab, "lrint",
5797 MODE_INT, MODE_FLOAT);
5798 init_interclass_conv_libfuncs (lround_optab, "lround",
5799 MODE_INT, MODE_FLOAT);
5800 init_interclass_conv_libfuncs (lfloor_optab, "lfloor",
5801 MODE_INT, MODE_FLOAT);
5802 init_interclass_conv_libfuncs (lceil_optab, "lceil",
5803 MODE_INT, MODE_FLOAT);
5805 /* sext_optab is also used for FLOAT_EXTEND. */
5806 init_intraclass_conv_libfuncs (sext_optab, "extend", MODE_FLOAT, true);
5807 init_intraclass_conv_libfuncs (sext_optab, "extend", MODE_DECIMAL_FLOAT, true);
5808 init_interclass_conv_libfuncs (sext_optab, "extend", MODE_FLOAT, MODE_DECIMAL_FLOAT);
5809 init_interclass_conv_libfuncs (sext_optab, "extend", MODE_DECIMAL_FLOAT, MODE_FLOAT);
5810 init_intraclass_conv_libfuncs (trunc_optab, "trunc", MODE_FLOAT, false);
5811 init_intraclass_conv_libfuncs (trunc_optab, "trunc", MODE_DECIMAL_FLOAT, false);
5812 init_interclass_conv_libfuncs (trunc_optab, "trunc", MODE_FLOAT, MODE_DECIMAL_FLOAT);
5813 init_interclass_conv_libfuncs (trunc_optab, "trunc", MODE_DECIMAL_FLOAT, MODE_FLOAT);
5815 /* Explicitly initialize the bswap libfuncs since we need them to be
5816 valid for things other than word_mode. */
5817 set_optab_libfunc (bswap_optab, SImode, "__bswapsi2");
5818 set_optab_libfunc (bswap_optab, DImode, "__bswapdi2");
5820 /* Use cabs for double complex abs, since systems generally have cabs.
5821 Don't define any libcall for float complex, so that cabs will be used. */
5822 if (complex_double_type_node)
5823 abs_optab->handlers[TYPE_MODE (complex_double_type_node)].libfunc
5824 = init_one_libfunc ("cabs");
5826 abort_libfunc = init_one_libfunc ("abort");
5827 memcpy_libfunc = init_one_libfunc ("memcpy");
5828 memmove_libfunc = init_one_libfunc ("memmove");
5829 memcmp_libfunc = init_one_libfunc ("memcmp");
5830 memset_libfunc = init_one_libfunc ("memset");
5831 setbits_libfunc = init_one_libfunc ("__setbits");
5833 #ifndef DONT_USE_BUILTIN_SETJMP
5834 setjmp_libfunc = init_one_libfunc ("__builtin_setjmp");
5835 longjmp_libfunc = init_one_libfunc ("__builtin_longjmp");
5837 setjmp_libfunc = init_one_libfunc ("setjmp");
5838 longjmp_libfunc = init_one_libfunc ("longjmp");
5840 unwind_sjlj_register_libfunc = init_one_libfunc ("_Unwind_SjLj_Register");
5841 unwind_sjlj_unregister_libfunc
5842 = init_one_libfunc ("_Unwind_SjLj_Unregister");
5844 /* For function entry/exit instrumentation. */
5845 profile_function_entry_libfunc
5846 = init_one_libfunc ("__cyg_profile_func_enter");
5847 profile_function_exit_libfunc
5848 = init_one_libfunc ("__cyg_profile_func_exit");
5850 gcov_flush_libfunc = init_one_libfunc ("__gcov_flush");
5852 if (HAVE_conditional_trap)
5853 trap_rtx = gen_rtx_fmt_ee (EQ, VOIDmode, NULL_RTX, NULL_RTX);
5855 /* Allow the target to add more libcalls or rename some, etc. */
5856 targetm.init_libfuncs ();
5861 /* Print information about the current contents of the optabs on
5865 debug_optab_libfuncs (void)
5871 /* Dump the arithmetic optabs. */
5872 for (i = 0; i != (int) OTI_MAX; i++)
5873 for (j = 0; j < NUM_MACHINE_MODES; ++j)
5876 struct optab_handlers *h;
5879 h = &o->handlers[j];
5882 gcc_assert (GET_CODE (h->libfunc) == SYMBOL_REF);
5883 fprintf (stderr, "%s\t%s:\t%s\n",
5884 GET_RTX_NAME (o->code),
5886 XSTR (h->libfunc, 0));
5890 /* Dump the conversion optabs. */
5891 for (i = 0; i < (int) COI_MAX; ++i)
5892 for (j = 0; j < NUM_MACHINE_MODES; ++j)
5893 for (k = 0; k < NUM_MACHINE_MODES; ++k)
5896 struct optab_handlers *h;
5898 o = &convert_optab_table[i];
5899 h = &o->handlers[j][k];
5902 gcc_assert (GET_CODE (h->libfunc) == SYMBOL_REF);
5903 fprintf (stderr, "%s\t%s\t%s:\t%s\n",
5904 GET_RTX_NAME (o->code),
5907 XSTR (h->libfunc, 0));
5915 /* Generate insns to trap with code TCODE if OP1 and OP2 satisfy condition
5916 CODE. Return 0 on failure. */
5919 gen_cond_trap (enum rtx_code code ATTRIBUTE_UNUSED, rtx op1,
5920 rtx op2 ATTRIBUTE_UNUSED, rtx tcode ATTRIBUTE_UNUSED)
5922 enum machine_mode mode = GET_MODE (op1);
5923 enum insn_code icode;
5926 if (!HAVE_conditional_trap)
5929 if (mode == VOIDmode)
5932 icode = cmp_optab->handlers[(int) mode].insn_code;
5933 if (icode == CODE_FOR_nothing)
5937 op1 = prepare_operand (icode, op1, 0, mode, mode, 0);
5938 op2 = prepare_operand (icode, op2, 1, mode, mode, 0);
5944 emit_insn (GEN_FCN (icode) (op1, op2));
5946 PUT_CODE (trap_rtx, code);
5947 gcc_assert (HAVE_conditional_trap);
5948 insn = gen_conditional_trap (trap_rtx, tcode);
5952 insn = get_insns ();
5959 /* Return rtx code for TCODE. Use UNSIGNEDP to select signed
5960 or unsigned operation code. */
5962 static enum rtx_code
5963 get_rtx_code (enum tree_code tcode, bool unsignedp)
5975 code = unsignedp ? LTU : LT;
5978 code = unsignedp ? LEU : LE;
5981 code = unsignedp ? GTU : GT;
5984 code = unsignedp ? GEU : GE;
5987 case UNORDERED_EXPR:
6018 /* Return comparison rtx for COND. Use UNSIGNEDP to select signed or
6019 unsigned operators. Do not generate compare instruction. */
6022 vector_compare_rtx (tree cond, bool unsignedp, enum insn_code icode)
6024 enum rtx_code rcode;
6026 rtx rtx_op0, rtx_op1;
6028 /* This is unlikely. While generating VEC_COND_EXPR, auto vectorizer
6029 ensures that condition is a relational operation. */
6030 gcc_assert (COMPARISON_CLASS_P (cond));
6032 rcode = get_rtx_code (TREE_CODE (cond), unsignedp);
6033 t_op0 = TREE_OPERAND (cond, 0);
6034 t_op1 = TREE_OPERAND (cond, 1);
6036 /* Expand operands. */
6037 rtx_op0 = expand_expr (t_op0, NULL_RTX, TYPE_MODE (TREE_TYPE (t_op0)),
6039 rtx_op1 = expand_expr (t_op1, NULL_RTX, TYPE_MODE (TREE_TYPE (t_op1)),
6042 if (!insn_data[icode].operand[4].predicate (rtx_op0, GET_MODE (rtx_op0))
6043 && GET_MODE (rtx_op0) != VOIDmode)
6044 rtx_op0 = force_reg (GET_MODE (rtx_op0), rtx_op0);
6046 if (!insn_data[icode].operand[5].predicate (rtx_op1, GET_MODE (rtx_op1))
6047 && GET_MODE (rtx_op1) != VOIDmode)
6048 rtx_op1 = force_reg (GET_MODE (rtx_op1), rtx_op1);
6050 return gen_rtx_fmt_ee (rcode, VOIDmode, rtx_op0, rtx_op1);
6053 /* Return insn code for VEC_COND_EXPR EXPR. */
6055 static inline enum insn_code
6056 get_vcond_icode (tree expr, enum machine_mode mode)
6058 enum insn_code icode = CODE_FOR_nothing;
6060 if (TYPE_UNSIGNED (TREE_TYPE (expr)))
6061 icode = vcondu_gen_code[mode];
6063 icode = vcond_gen_code[mode];
6067 /* Return TRUE iff, appropriate vector insns are available
6068 for vector cond expr expr in VMODE mode. */
6071 expand_vec_cond_expr_p (tree expr, enum machine_mode vmode)
6073 if (get_vcond_icode (expr, vmode) == CODE_FOR_nothing)
6078 /* Generate insns for VEC_COND_EXPR. */
6081 expand_vec_cond_expr (tree vec_cond_expr, rtx target)
6083 enum insn_code icode;
6084 rtx comparison, rtx_op1, rtx_op2, cc_op0, cc_op1;
6085 enum machine_mode mode = TYPE_MODE (TREE_TYPE (vec_cond_expr));
6086 bool unsignedp = TYPE_UNSIGNED (TREE_TYPE (vec_cond_expr));
6088 icode = get_vcond_icode (vec_cond_expr, mode);
6089 if (icode == CODE_FOR_nothing)
6092 if (!target || !insn_data[icode].operand[0].predicate (target, mode))
6093 target = gen_reg_rtx (mode);
6095 /* Get comparison rtx. First expand both cond expr operands. */
6096 comparison = vector_compare_rtx (TREE_OPERAND (vec_cond_expr, 0),
6098 cc_op0 = XEXP (comparison, 0);
6099 cc_op1 = XEXP (comparison, 1);
6100 /* Expand both operands and force them in reg, if required. */
6101 rtx_op1 = expand_normal (TREE_OPERAND (vec_cond_expr, 1));
6102 if (!insn_data[icode].operand[1].predicate (rtx_op1, mode)
6103 && mode != VOIDmode)
6104 rtx_op1 = force_reg (mode, rtx_op1);
6106 rtx_op2 = expand_normal (TREE_OPERAND (vec_cond_expr, 2));
6107 if (!insn_data[icode].operand[2].predicate (rtx_op2, mode)
6108 && mode != VOIDmode)
6109 rtx_op2 = force_reg (mode, rtx_op2);
6111 /* Emit instruction! */
6112 emit_insn (GEN_FCN (icode) (target, rtx_op1, rtx_op2,
6113 comparison, cc_op0, cc_op1));
6119 /* This is an internal subroutine of the other compare_and_swap expanders.
6120 MEM, OLD_VAL and NEW_VAL are as you'd expect for a compare-and-swap
6121 operation. TARGET is an optional place to store the value result of
6122 the operation. ICODE is the particular instruction to expand. Return
6123 the result of the operation. */
6126 expand_val_compare_and_swap_1 (rtx mem, rtx old_val, rtx new_val,
6127 rtx target, enum insn_code icode)
6129 enum machine_mode mode = GET_MODE (mem);
6132 if (!target || !insn_data[icode].operand[0].predicate (target, mode))
6133 target = gen_reg_rtx (mode);
6135 if (GET_MODE (old_val) != VOIDmode && GET_MODE (old_val) != mode)
6136 old_val = convert_modes (mode, GET_MODE (old_val), old_val, 1);
6137 if (!insn_data[icode].operand[2].predicate (old_val, mode))
6138 old_val = force_reg (mode, old_val);
6140 if (GET_MODE (new_val) != VOIDmode && GET_MODE (new_val) != mode)
6141 new_val = convert_modes (mode, GET_MODE (new_val), new_val, 1);
6142 if (!insn_data[icode].operand[3].predicate (new_val, mode))
6143 new_val = force_reg (mode, new_val);
6145 insn = GEN_FCN (icode) (target, mem, old_val, new_val);
6146 if (insn == NULL_RTX)
6153 /* Expand a compare-and-swap operation and return its value. */
6156 expand_val_compare_and_swap (rtx mem, rtx old_val, rtx new_val, rtx target)
6158 enum machine_mode mode = GET_MODE (mem);
6159 enum insn_code icode = sync_compare_and_swap[mode];
6161 if (icode == CODE_FOR_nothing)
6164 return expand_val_compare_and_swap_1 (mem, old_val, new_val, target, icode);
6167 /* Expand a compare-and-swap operation and store true into the result if
6168 the operation was successful and false otherwise. Return the result.
6169 Unlike other routines, TARGET is not optional. */
6172 expand_bool_compare_and_swap (rtx mem, rtx old_val, rtx new_val, rtx target)
6174 enum machine_mode mode = GET_MODE (mem);
6175 enum insn_code icode;
6176 rtx subtarget, label0, label1;
6178 /* If the target supports a compare-and-swap pattern that simultaneously
6179 sets some flag for success, then use it. Otherwise use the regular
6180 compare-and-swap and follow that immediately with a compare insn. */
6181 icode = sync_compare_and_swap_cc[mode];
6185 subtarget = expand_val_compare_and_swap_1 (mem, old_val, new_val,
6187 if (subtarget != NULL_RTX)
6191 case CODE_FOR_nothing:
6192 icode = sync_compare_and_swap[mode];
6193 if (icode == CODE_FOR_nothing)
6196 /* Ensure that if old_val == mem, that we're not comparing
6197 against an old value. */
6198 if (MEM_P (old_val))
6199 old_val = force_reg (mode, old_val);
6201 subtarget = expand_val_compare_and_swap_1 (mem, old_val, new_val,
6203 if (subtarget == NULL_RTX)
6206 emit_cmp_insn (subtarget, old_val, EQ, const0_rtx, mode, true);
6209 /* If the target has a sane STORE_FLAG_VALUE, then go ahead and use a
6210 setcc instruction from the beginning. We don't work too hard here,
6211 but it's nice to not be stupid about initial code gen either. */
6212 if (STORE_FLAG_VALUE == 1)
6214 icode = setcc_gen_code[EQ];
6215 if (icode != CODE_FOR_nothing)
6217 enum machine_mode cmode = insn_data[icode].operand[0].mode;
6221 if (!insn_data[icode].operand[0].predicate (target, cmode))
6222 subtarget = gen_reg_rtx (cmode);
6224 insn = GEN_FCN (icode) (subtarget);
6228 if (GET_MODE (target) != GET_MODE (subtarget))
6230 convert_move (target, subtarget, 1);
6238 /* Without an appropriate setcc instruction, use a set of branches to
6239 get 1 and 0 stored into target. Presumably if the target has a
6240 STORE_FLAG_VALUE that isn't 1, then this will get cleaned up by ifcvt. */
6242 label0 = gen_label_rtx ();
6243 label1 = gen_label_rtx ();
6245 emit_jump_insn (bcc_gen_fctn[EQ] (label0));
6246 emit_move_insn (target, const0_rtx);
6247 emit_jump_insn (gen_jump (label1));
6249 emit_label (label0);
6250 emit_move_insn (target, const1_rtx);
6251 emit_label (label1);
6256 /* This is a helper function for the other atomic operations. This function
6257 emits a loop that contains SEQ that iterates until a compare-and-swap
6258 operation at the end succeeds. MEM is the memory to be modified. SEQ is
6259 a set of instructions that takes a value from OLD_REG as an input and
6260 produces a value in NEW_REG as an output. Before SEQ, OLD_REG will be
6261 set to the current contents of MEM. After SEQ, a compare-and-swap will
6262 attempt to update MEM with NEW_REG. The function returns true when the
6263 loop was generated successfully. */
6266 expand_compare_and_swap_loop (rtx mem, rtx old_reg, rtx new_reg, rtx seq)
6268 enum machine_mode mode = GET_MODE (mem);
6269 enum insn_code icode;
6270 rtx label, cmp_reg, subtarget;
6272 /* The loop we want to generate looks like
6278 cmp_reg = compare-and-swap(mem, old_reg, new_reg)
6279 if (cmp_reg != old_reg)
6282 Note that we only do the plain load from memory once. Subsequent
6283 iterations use the value loaded by the compare-and-swap pattern. */
6285 label = gen_label_rtx ();
6286 cmp_reg = gen_reg_rtx (mode);
6288 emit_move_insn (cmp_reg, mem);
6290 emit_move_insn (old_reg, cmp_reg);
6294 /* If the target supports a compare-and-swap pattern that simultaneously
6295 sets some flag for success, then use it. Otherwise use the regular
6296 compare-and-swap and follow that immediately with a compare insn. */
6297 icode = sync_compare_and_swap_cc[mode];
6301 subtarget = expand_val_compare_and_swap_1 (mem, old_reg, new_reg,
6303 if (subtarget != NULL_RTX)
6305 gcc_assert (subtarget == cmp_reg);
6310 case CODE_FOR_nothing:
6311 icode = sync_compare_and_swap[mode];
6312 if (icode == CODE_FOR_nothing)
6315 subtarget = expand_val_compare_and_swap_1 (mem, old_reg, new_reg,
6317 if (subtarget == NULL_RTX)
6319 if (subtarget != cmp_reg)
6320 emit_move_insn (cmp_reg, subtarget);
6322 emit_cmp_insn (cmp_reg, old_reg, EQ, const0_rtx, mode, true);
6325 /* ??? Mark this jump predicted not taken? */
6326 emit_jump_insn (bcc_gen_fctn[NE] (label));
6331 /* This function generates the atomic operation MEM CODE= VAL. In this
6332 case, we do not care about any resulting value. Returns NULL if we
6333 cannot generate the operation. */
6336 expand_sync_operation (rtx mem, rtx val, enum rtx_code code)
6338 enum machine_mode mode = GET_MODE (mem);
6339 enum insn_code icode;
6342 /* Look to see if the target supports the operation directly. */
6346 icode = sync_add_optab[mode];
6349 icode = sync_ior_optab[mode];
6352 icode = sync_xor_optab[mode];
6355 icode = sync_and_optab[mode];
6358 icode = sync_nand_optab[mode];
6362 icode = sync_sub_optab[mode];
6363 if (icode == CODE_FOR_nothing)
6365 icode = sync_add_optab[mode];
6366 if (icode != CODE_FOR_nothing)
6368 val = expand_simple_unop (mode, NEG, val, NULL_RTX, 1);
6378 /* Generate the direct operation, if present. */
6379 if (icode != CODE_FOR_nothing)
6381 if (GET_MODE (val) != VOIDmode && GET_MODE (val) != mode)
6382 val = convert_modes (mode, GET_MODE (val), val, 1);
6383 if (!insn_data[icode].operand[1].predicate (val, mode))
6384 val = force_reg (mode, val);
6386 insn = GEN_FCN (icode) (mem, val);
6394 /* Failing that, generate a compare-and-swap loop in which we perform the
6395 operation with normal arithmetic instructions. */
6396 if (sync_compare_and_swap[mode] != CODE_FOR_nothing)
6398 rtx t0 = gen_reg_rtx (mode), t1;
6405 t1 = expand_simple_unop (mode, NOT, t1, NULL_RTX, true);
6408 t1 = expand_simple_binop (mode, code, t1, val, NULL_RTX,
6409 true, OPTAB_LIB_WIDEN);
6411 insn = get_insns ();
6414 if (t1 != NULL && expand_compare_and_swap_loop (mem, t0, t1, insn))
6421 /* This function generates the atomic operation MEM CODE= VAL. In this
6422 case, we do care about the resulting value: if AFTER is true then
6423 return the value MEM holds after the operation, if AFTER is false
6424 then return the value MEM holds before the operation. TARGET is an
6425 optional place for the result value to be stored. */
6428 expand_sync_fetch_operation (rtx mem, rtx val, enum rtx_code code,
6429 bool after, rtx target)
6431 enum machine_mode mode = GET_MODE (mem);
6432 enum insn_code old_code, new_code, icode;
6436 /* Look to see if the target supports the operation directly. */
6440 old_code = sync_old_add_optab[mode];
6441 new_code = sync_new_add_optab[mode];
6444 old_code = sync_old_ior_optab[mode];
6445 new_code = sync_new_ior_optab[mode];
6448 old_code = sync_old_xor_optab[mode];
6449 new_code = sync_new_xor_optab[mode];
6452 old_code = sync_old_and_optab[mode];
6453 new_code = sync_new_and_optab[mode];
6456 old_code = sync_old_nand_optab[mode];
6457 new_code = sync_new_nand_optab[mode];
6461 old_code = sync_old_sub_optab[mode];
6462 new_code = sync_new_sub_optab[mode];
6463 if (old_code == CODE_FOR_nothing && new_code == CODE_FOR_nothing)
6465 old_code = sync_old_add_optab[mode];
6466 new_code = sync_new_add_optab[mode];
6467 if (old_code != CODE_FOR_nothing || new_code != CODE_FOR_nothing)
6469 val = expand_simple_unop (mode, NEG, val, NULL_RTX, 1);
6479 /* If the target does supports the proper new/old operation, great. But
6480 if we only support the opposite old/new operation, check to see if we
6481 can compensate. In the case in which the old value is supported, then
6482 we can always perform the operation again with normal arithmetic. In
6483 the case in which the new value is supported, then we can only handle
6484 this in the case the operation is reversible. */
6489 if (icode == CODE_FOR_nothing)
6492 if (icode != CODE_FOR_nothing)
6499 if (icode == CODE_FOR_nothing
6500 && (code == PLUS || code == MINUS || code == XOR))
6503 if (icode != CODE_FOR_nothing)
6508 /* If we found something supported, great. */
6509 if (icode != CODE_FOR_nothing)
6511 if (!target || !insn_data[icode].operand[0].predicate (target, mode))
6512 target = gen_reg_rtx (mode);
6514 if (GET_MODE (val) != VOIDmode && GET_MODE (val) != mode)
6515 val = convert_modes (mode, GET_MODE (val), val, 1);
6516 if (!insn_data[icode].operand[2].predicate (val, mode))
6517 val = force_reg (mode, val);
6519 insn = GEN_FCN (icode) (target, mem, val);
6524 /* If we need to compensate for using an operation with the
6525 wrong return value, do so now. */
6532 else if (code == MINUS)
6537 target = expand_simple_unop (mode, NOT, target, NULL_RTX, true);
6538 target = expand_simple_binop (mode, code, target, val, NULL_RTX,
6539 true, OPTAB_LIB_WIDEN);
6546 /* Failing that, generate a compare-and-swap loop in which we perform the
6547 operation with normal arithmetic instructions. */
6548 if (sync_compare_and_swap[mode] != CODE_FOR_nothing)
6550 rtx t0 = gen_reg_rtx (mode), t1;
6552 if (!target || !register_operand (target, mode))
6553 target = gen_reg_rtx (mode);
6558 emit_move_insn (target, t0);
6562 t1 = expand_simple_unop (mode, NOT, t1, NULL_RTX, true);
6565 t1 = expand_simple_binop (mode, code, t1, val, NULL_RTX,
6566 true, OPTAB_LIB_WIDEN);
6568 emit_move_insn (target, t1);
6570 insn = get_insns ();
6573 if (t1 != NULL && expand_compare_and_swap_loop (mem, t0, t1, insn))
6580 /* This function expands a test-and-set operation. Ideally we atomically
6581 store VAL in MEM and return the previous value in MEM. Some targets
6582 may not support this operation and only support VAL with the constant 1;
6583 in this case while the return value will be 0/1, but the exact value
6584 stored in MEM is target defined. TARGET is an option place to stick
6585 the return value. */
6588 expand_sync_lock_test_and_set (rtx mem, rtx val, rtx target)
6590 enum machine_mode mode = GET_MODE (mem);
6591 enum insn_code icode;
6594 /* If the target supports the test-and-set directly, great. */
6595 icode = sync_lock_test_and_set[mode];
6596 if (icode != CODE_FOR_nothing)
6598 if (!target || !insn_data[icode].operand[0].predicate (target, mode))
6599 target = gen_reg_rtx (mode);
6601 if (GET_MODE (val) != VOIDmode && GET_MODE (val) != mode)
6602 val = convert_modes (mode, GET_MODE (val), val, 1);
6603 if (!insn_data[icode].operand[2].predicate (val, mode))
6604 val = force_reg (mode, val);
6606 insn = GEN_FCN (icode) (target, mem, val);
6614 /* Otherwise, use a compare-and-swap loop for the exchange. */
6615 if (sync_compare_and_swap[mode] != CODE_FOR_nothing)
6617 if (!target || !register_operand (target, mode))
6618 target = gen_reg_rtx (mode);
6619 if (GET_MODE (val) != VOIDmode && GET_MODE (val) != mode)
6620 val = convert_modes (mode, GET_MODE (val), val, 1);
6621 if (expand_compare_and_swap_loop (mem, target, val, NULL_RTX))
6628 #include "gt-optabs.h"