1 /* Expand the basic unary and binary arithmetic operations, for GNU compiler.
2 Copyright (C) 1987, 1988, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
3 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010
4 Free Software Foundation, Inc.
6 This file is part of GCC.
8 GCC is free software; you can redistribute it and/or modify it under
9 the terms of the GNU General Public License as published by the Free
10 Software Foundation; either version 3, or (at your option) any later version.
13 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
14 WARRANTY; without even the implied warranty of MERCHANTABILITY or
15 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING3. If not see
20 <http://www.gnu.org/licenses/>. */
25 #include "coretypes.h"
29 /* Include insn-config.h before expr.h so that HAVE_conditional_move
30 is properly defined. */
31 #include "insn-config.h"
45 #include "basic-block.h"
48 /* Each optab contains info on how this target machine
49 can perform a particular operation
50 for all sizes and kinds of operands.
52 The operation to be performed is often specified
53 by passing one of these optabs as an argument.
55 See expr.h for documentation of these optabs. */
57 #if GCC_VERSION >= 4000 && HAVE_DESIGNATED_INITIALIZERS
58 __extension__ struct optab_d optab_table[OTI_MAX]
59 = { [0 ... OTI_MAX - 1].handlers[0 ... NUM_MACHINE_MODES - 1].insn_code
60 = CODE_FOR_nothing };
61 #else
62 /* init_insn_codes will do runtime initialization otherwise. */
63 struct optab_d optab_table[OTI_MAX];
64 #endif
66 rtx libfunc_table[LTI_MAX];
68 /* Tables of patterns for converting one mode to another. */
69 #if GCC_VERSION >= 4000 && HAVE_DESIGNATED_INITIALIZERS
70 __extension__ struct convert_optab_d convert_optab_table[COI_MAX]
71 = { [0 ... COI_MAX - 1].handlers[0 ... NUM_MACHINE_MODES - 1]
72 [0 ... NUM_MACHINE_MODES - 1].insn_code
73 = CODE_FOR_nothing };
74 #else
75 /* init_convert_optab will do runtime initialization otherwise. */
76 struct convert_optab_d convert_optab_table[COI_MAX];
77 #endif
79 /* Contains the optab used for each rtx code. */
80 optab code_to_optab[NUM_RTX_CODE + 1];
82 #ifdef HAVE_conditional_move
83 /* Indexed by the machine mode, gives the insn code to make a conditional
84 move insn. This is not indexed by the rtx-code like bcc_gen_fctn and
85 setcc_gen_code to cut down on the number of named patterns. Consider a day
86 when a lot more rtx codes are conditional (e.g. for the ARM). */
88 enum insn_code movcc_gen_code[NUM_MACHINE_MODES];
91 /* Indexed by the machine mode, gives the insn code for vector conditional operation. */
94 enum insn_code vcond_gen_code[NUM_MACHINE_MODES];
95 enum insn_code vcondu_gen_code[NUM_MACHINE_MODES];
97 static void prepare_float_lib_cmp (rtx, rtx, enum rtx_code, rtx *, rtx *);
99 static rtx expand_unop_direct (enum machine_mode, optab, rtx, rtx, int);
101 /* Debug facility for use in GDB. */
102 void debug_optab_libfuncs (void);
104 /* Prefixes for the current version of decimal floating point (BID vs. DPD) */
105 #if ENABLE_DECIMAL_BID_FORMAT
106 #define DECIMAL_PREFIX "bid_"
107 #else
108 #define DECIMAL_PREFIX "dpd_"
109 #endif
112 /* Info about libfuncs. We use the same hashtable for normal optabs and
113 conversion optabs; in the first case mode2 is unused. */
114 struct GTY(()) libfunc_entry {
115 size_t optab;
116 enum machine_mode mode1, mode2;
117 rtx libfunc; };
120 /* Hash table used to convert declarations into nodes. */
121 static GTY((param_is (struct libfunc_entry))) htab_t libfunc_hash;
123 /* Hash function used for libfunc_hash. */
126 hash_libfunc (const void *p)
128 const struct libfunc_entry *const e = (const struct libfunc_entry *) p;
130 return (((int) e->mode1 + (int) e->mode2 * NUM_MACHINE_MODES) ^ e->optab);
134 /* Equality function used for libfunc_hash. */
137 eq_libfunc (const void *p, const void *q)
139 const struct libfunc_entry *const e1 = (const struct libfunc_entry *) p;
140 const struct libfunc_entry *const e2 = (const struct libfunc_entry *) q;
142 return (e1->optab == e2->optab
143 && e1->mode1 == e2->mode1
144 && e1->mode2 == e2->mode2);
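/* Illustrative sketch (not from the GCC sources; the function name is
   hypothetical): how a lookup key for the libfunc hash table above is
   combined into a hash value.  A conversion optab uses both modes; a
   normal optab leaves M2 as VOIDmode.  */

static hashval_t
example_libfunc_key_hash (size_t optab_index, enum machine_mode m1,
                          enum machine_mode m2)
{
  /* Mirrors hash_libfunc: fold the two modes into one integer and mix
     in the optab index.  */
  return ((int) m1 + (int) m2 * NUM_MACHINE_MODES) ^ optab_index;
}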
147 /* Return the libfunc corresponding to the operation defined by OPTAB,
148 converting from MODE2 to MODE1. Trigger lazy initialization if needed;
149 return NULL if no libfunc is available. */
151 convert_optab_libfunc (convert_optab optab, enum machine_mode mode1,
152 enum machine_mode mode2)
154 struct libfunc_entry e;
155 struct libfunc_entry **slot;
157 e.optab = (size_t) (optab - &convert_optab_table[0]);
160 slot = (struct libfunc_entry **) htab_find_slot (libfunc_hash, &e, NO_INSERT);
163 if (optab->libcall_gen)
165 optab->libcall_gen (optab, optab->libcall_basename, mode1, mode2);
166 slot = (struct libfunc_entry **) htab_find_slot (libfunc_hash, &e, NO_INSERT);
168 return (*slot)->libfunc;
174 return (*slot)->libfunc;
177 /* Return the libfunc corresponding to the operation defined by OPTAB in MODE.
178 Trigger lazy initialization if needed; return NULL if no libfunc is available. */
181 optab_libfunc (optab optab, enum machine_mode mode)
183 struct libfunc_entry e;
184 struct libfunc_entry **slot;
186 e.optab = (size_t) (optab - &optab_table[0]);
189 slot = (struct libfunc_entry **) htab_find_slot (libfunc_hash, &e, NO_INSERT);
192 if (optab->libcall_gen)
194 optab->libcall_gen (optab, optab->libcall_basename,
195 optab->libcall_suffix, mode);
196 slot = (struct libfunc_entry **) htab_find_slot (libfunc_hash,
199 return (*slot)->libfunc;
205 return (*slot)->libfunc;
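/* Illustrative sketch (not from the GCC sources; the function name is
   hypothetical): typical use of the lazy lookup above.  Ask for the
   library routine implementing an optab in a given mode; the result may
   be NULL_RTX if the target provides none.  */

static rtx
example_lookup_adddi3_libfunc (void)
{
  /* Looks up (and, via libcall_gen, lazily creates) the libfunc entry
     for double-word addition.  */
  return optab_libfunc (add_optab, DImode);
}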
209 /* Add a REG_EQUAL note to the last insn in INSNS. TARGET is being set to
210 the result of operation CODE applied to OP0 (and OP1 if it is a binary operation).
213 If the last insn does not set TARGET, don't do anything, but return 1.
215 If a previous insn sets TARGET and TARGET is one of OP0 or OP1,
216 don't add the REG_EQUAL note but return 0. Our caller can then try
217 again, ensuring that TARGET is not one of the operands. */
220 add_equal_note (rtx insns, rtx target, enum rtx_code code, rtx op0, rtx op1)
222 rtx last_insn, insn, set;
225 gcc_assert (insns && INSN_P (insns) && NEXT_INSN (insns));
227 if (GET_RTX_CLASS (code) != RTX_COMM_ARITH
228 && GET_RTX_CLASS (code) != RTX_BIN_ARITH
229 && GET_RTX_CLASS (code) != RTX_COMM_COMPARE
230 && GET_RTX_CLASS (code) != RTX_COMPARE
231 && GET_RTX_CLASS (code) != RTX_UNARY)
234 if (GET_CODE (target) == ZERO_EXTRACT)
237 for (last_insn = insns;
238 NEXT_INSN (last_insn) != NULL_RTX;
239 last_insn = NEXT_INSN (last_insn))
242 set = single_set (last_insn);
246 if (! rtx_equal_p (SET_DEST (set), target)
247 /* For a STRICT_LOW_PART, the REG_NOTE applies to what is inside it. */
248 && (GET_CODE (SET_DEST (set)) != STRICT_LOW_PART
249 || ! rtx_equal_p (XEXP (SET_DEST (set), 0), target)))
252 /* If TARGET is in OP0 or OP1, check if anything in SEQ sets TARGET
253 besides the last insn. */
254 if (reg_overlap_mentioned_p (target, op0)
255 || (op1 && reg_overlap_mentioned_p (target, op1)))
257 insn = PREV_INSN (last_insn);
258 while (insn != NULL_RTX)
260 if (reg_set_p (target, insn))
263 insn = PREV_INSN (insn);
267 if (GET_RTX_CLASS (code) == RTX_UNARY)
268 note = gen_rtx_fmt_e (code, GET_MODE (target), copy_rtx (op0));
270 note = gen_rtx_fmt_ee (code, GET_MODE (target), copy_rtx (op0), copy_rtx (op1));
272 set_unique_reg_note (last_insn, REG_EQUAL, note);
277 /* Widen OP to MODE and return the rtx for the widened operand. UNSIGNEDP
278 says whether OP is signed or unsigned. NO_EXTEND is nonzero if we need
279 not actually do a sign-extend or zero-extend, but can leave the
280 higher-order bits of the result rtx undefined, for example, in the case
281 of logical operations, but not right shifts. */
284 widen_operand (rtx op, enum machine_mode mode, enum machine_mode oldmode,
285 int unsignedp, int no_extend)
289 /* If we don't have to extend and this is a constant, return it. */
290 if (no_extend && GET_MODE (op) == VOIDmode)
293 /* If we must extend do so. If OP is a SUBREG for a promoted object, also
294 extend since it will be more efficient to do so unless the signedness of
295 a promoted object differs from our extension. */
296 if (! no_extend
297 || (GET_CODE (op) == SUBREG && SUBREG_PROMOTED_VAR_P (op)
298 && SUBREG_PROMOTED_UNSIGNED_P (op) == unsignedp))
299 return convert_modes (mode, oldmode, op, unsignedp);
301 /* If MODE is no wider than a single word, we return a paradoxical SUBREG. */
303 if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
304 return gen_rtx_SUBREG (mode, force_reg (GET_MODE (op), op), 0);
306 /* Otherwise, get an object of MODE, clobber it, and set the low-order part to OP. */
309 result = gen_reg_rtx (mode);
310 emit_clobber (result);
311 emit_move_insn (gen_lowpart (GET_MODE (op), result), op);
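/* Illustrative sketch in plain C (not from the GCC sources): why
   widen_operand may leave the high-order bits undefined when NO_EXTEND
   is set.  For logical operations only the low OLDMODE bits of the
   widened result are ever consumed, so no real extension is needed.  */

static unsigned short
example_widen_and (unsigned short a, unsigned short b)
{
  unsigned int wa = a;          /* high bits could be anything ...  */
  unsigned int wb = b;
  unsigned int wide = wa & wb;  /* ... operate in the wider mode ...  */

  return (unsigned short) wide; /* ... truncation discards them again.  */
}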
315 /* Return the optab used for computing the operation given by the tree code,
316 CODE and the tree EXP. This function is not always usable (for example, it
317 cannot give complete results for multiplication or division) but probably
318 ought to be relied on more widely throughout the expander. */
320 optab_for_tree_code (enum tree_code code, const_tree type,
321 enum optab_subtype subtype)
333 return one_cmpl_optab;
342 return TYPE_UNSIGNED (type) ? umod_optab : smod_optab;
350 if (TYPE_SATURATING(type))
351 return TYPE_UNSIGNED(type) ? usdiv_optab : ssdiv_optab;
352 return TYPE_UNSIGNED (type) ? udiv_optab : sdiv_optab;
355 if (VECTOR_MODE_P (TYPE_MODE (type)))
357 if (subtype == optab_vector)
358 return TYPE_SATURATING (type) ? NULL : vashl_optab;
360 gcc_assert (subtype == optab_scalar);
362 if (TYPE_SATURATING(type))
363 return TYPE_UNSIGNED(type) ? usashl_optab : ssashl_optab;
367 if (VECTOR_MODE_P (TYPE_MODE (type)))
369 if (subtype == optab_vector)
370 return TYPE_UNSIGNED (type) ? vlshr_optab : vashr_optab;
372 gcc_assert (subtype == optab_scalar);
374 return TYPE_UNSIGNED (type) ? lshr_optab : ashr_optab;
377 if (VECTOR_MODE_P (TYPE_MODE (type)))
379 if (subtype == optab_vector)
382 gcc_assert (subtype == optab_scalar);
387 if (VECTOR_MODE_P (TYPE_MODE (type)))
389 if (subtype == optab_vector)
392 gcc_assert (subtype == optab_scalar);
397 return TYPE_UNSIGNED (type) ? umax_optab : smax_optab;
400 return TYPE_UNSIGNED (type) ? umin_optab : smin_optab;
402 case REALIGN_LOAD_EXPR:
403 return vec_realign_load_optab;
406 return TYPE_UNSIGNED (type) ? usum_widen_optab : ssum_widen_optab;
409 return TYPE_UNSIGNED (type) ? udot_prod_optab : sdot_prod_optab;
412 return TYPE_UNSIGNED (type) ? reduc_umax_optab : reduc_smax_optab;
415 return TYPE_UNSIGNED (type) ? reduc_umin_optab : reduc_smin_optab;
417 case REDUC_PLUS_EXPR:
418 return TYPE_UNSIGNED (type) ? reduc_uplus_optab : reduc_splus_optab;
420 case VEC_LSHIFT_EXPR:
421 return vec_shl_optab;
423 case VEC_RSHIFT_EXPR:
424 return vec_shr_optab;
426 case VEC_WIDEN_MULT_HI_EXPR:
427 return TYPE_UNSIGNED (type) ?
428 vec_widen_umult_hi_optab : vec_widen_smult_hi_optab;
430 case VEC_WIDEN_MULT_LO_EXPR:
431 return TYPE_UNSIGNED (type) ?
432 vec_widen_umult_lo_optab : vec_widen_smult_lo_optab;
434 case VEC_UNPACK_HI_EXPR:
435 return TYPE_UNSIGNED (type) ?
436 vec_unpacku_hi_optab : vec_unpacks_hi_optab;
438 case VEC_UNPACK_LO_EXPR:
439 return TYPE_UNSIGNED (type) ?
440 vec_unpacku_lo_optab : vec_unpacks_lo_optab;
442 case VEC_UNPACK_FLOAT_HI_EXPR:
443 /* The signedness is determined from input operand. */
444 return TYPE_UNSIGNED (type) ?
445 vec_unpacku_float_hi_optab : vec_unpacks_float_hi_optab;
447 case VEC_UNPACK_FLOAT_LO_EXPR:
448 /* The signedness is determined from input operand. */
449 return TYPE_UNSIGNED (type) ?
450 vec_unpacku_float_lo_optab : vec_unpacks_float_lo_optab;
452 case VEC_PACK_TRUNC_EXPR:
453 return vec_pack_trunc_optab;
455 case VEC_PACK_SAT_EXPR:
456 return TYPE_UNSIGNED (type) ? vec_pack_usat_optab : vec_pack_ssat_optab;
458 case VEC_PACK_FIX_TRUNC_EXPR:
459 /* The signedness is determined from output operand. */
460 return TYPE_UNSIGNED (type) ?
461 vec_pack_ufix_trunc_optab : vec_pack_sfix_trunc_optab;
467 trapv = INTEGRAL_TYPE_P (type) && TYPE_OVERFLOW_TRAPS (type);
470 case POINTER_PLUS_EXPR:
472 if (TYPE_SATURATING(type))
473 return TYPE_UNSIGNED(type) ? usadd_optab : ssadd_optab;
474 return trapv ? addv_optab : add_optab;
477 if (TYPE_SATURATING(type))
478 return TYPE_UNSIGNED(type) ? ussub_optab : sssub_optab;
479 return trapv ? subv_optab : sub_optab;
482 if (TYPE_SATURATING(type))
483 return TYPE_UNSIGNED(type) ? usmul_optab : ssmul_optab;
484 return trapv ? smulv_optab : smul_optab;
487 if (TYPE_SATURATING(type))
488 return TYPE_UNSIGNED(type) ? usneg_optab : ssneg_optab;
489 return trapv ? negv_optab : neg_optab;
492 return trapv ? absv_optab : abs_optab;
494 case VEC_EXTRACT_EVEN_EXPR:
495 return vec_extract_even_optab;
497 case VEC_EXTRACT_ODD_EXPR:
498 return vec_extract_odd_optab;
500 case VEC_INTERLEAVE_HIGH_EXPR:
501 return vec_interleave_high_optab;
503 case VEC_INTERLEAVE_LOW_EXPR:
504 return vec_interleave_low_optab;
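/* Illustrative sketch (not from the GCC sources; example_optab_for_plus
   is a hypothetical name): how the mapping above is typically consulted.
   For a saturating fixed-point TYPE this yields usadd_optab or
   ssadd_optab, for a trapping integral TYPE addv_optab, and otherwise
   plain add_optab, exactly as the switch above selects.  */

static optab
example_optab_for_plus (const_tree type)
{
  return optab_for_tree_code (PLUS_EXPR, type, optab_default);
}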
512 /* Expand vector widening operations.
514 There are two different classes of operations handled here:
515 1) Operations whose result is wider than all the arguments to the operation.
516 Examples: VEC_UNPACK_HI/LO_EXPR, VEC_WIDEN_MULT_HI/LO_EXPR
517 In this case OP0 and optionally OP1 would be initialized,
518 but WIDE_OP wouldn't (not relevant for this case).
519 2) Operations whose result is of the same size as the last argument to the
520 operation, but wider than all the other arguments to the operation.
521 Examples: WIDEN_SUM_EXPR, VEC_DOT_PROD_EXPR.
522 In the case WIDE_OP, OP0 and optionally OP1 would be initialized.
524 E.g., when called to expand the following operations, this is how
525 the arguments will be initialized:
526                              nops    OP0     OP1     WIDE_OP
527 widening-sum                 2       oprnd0  -       oprnd1
528 widening-dot-product         3       oprnd0  oprnd1  oprnd2
529 widening-mult                2       oprnd0  oprnd1  -
530 type-promotion (vec-unpack)  1       oprnd0  -       -   */
533 expand_widen_pattern_expr (sepops ops, rtx op0, rtx op1, rtx wide_op,
534 rtx target, int unsignedp)
536 tree oprnd0, oprnd1, oprnd2;
537 enum machine_mode wmode = VOIDmode, tmode0, tmode1 = VOIDmode;
538 optab widen_pattern_optab;
540 enum machine_mode xmode0, xmode1 = VOIDmode, wxmode = VOIDmode;
543 rtx xop0, xop1, wxop;
544 int nops = TREE_CODE_LENGTH (ops->code);
547 tmode0 = TYPE_MODE (TREE_TYPE (oprnd0));
548 widen_pattern_optab =
549 optab_for_tree_code (ops->code, TREE_TYPE (oprnd0), optab_default);
550 icode = (int) optab_handler (widen_pattern_optab, tmode0)->insn_code;
551 gcc_assert (icode != CODE_FOR_nothing);
552 xmode0 = insn_data[icode].operand[1].mode;
557 tmode1 = TYPE_MODE (TREE_TYPE (oprnd1));
558 xmode1 = insn_data[icode].operand[2].mode;
561 /* The last operand is of a wider mode than the rest of the operands. */
569 gcc_assert (tmode1 == tmode0);
572 wmode = TYPE_MODE (TREE_TYPE (oprnd2));
573 wxmode = insn_data[icode].operand[3].mode;
577 wmode = wxmode = insn_data[icode].operand[0].mode;
580 || ! (*insn_data[icode].operand[0].predicate) (target, wmode))
581 temp = gen_reg_rtx (wmode);
589 /* In case the insn wants input operands in modes different from
590 those of the actual operands, convert the operands. It would
591 seem that we don't need to convert CONST_INTs, but we do, so
592 that they're properly zero-extended, sign-extended or truncated for their mode. */
595 if (GET_MODE (op0) != xmode0 && xmode0 != VOIDmode)
596 xop0 = convert_modes (xmode0,
597 GET_MODE (op0) != VOIDmode
603 if (GET_MODE (op1) != xmode1 && xmode1 != VOIDmode)
604 xop1 = convert_modes (xmode1,
605 GET_MODE (op1) != VOIDmode
611 if (GET_MODE (wide_op) != wxmode && wxmode != VOIDmode)
612 wxop = convert_modes (wxmode,
613 GET_MODE (wide_op) != VOIDmode
618 /* Now, if insn's predicates don't allow our operands, put them into pseudo regs. */
621 if (! (*insn_data[icode].operand[1].predicate) (xop0, xmode0)
622 && xmode0 != VOIDmode)
623 xop0 = copy_to_mode_reg (xmode0, xop0);
627 if (! (*insn_data[icode].operand[2].predicate) (xop1, xmode1)
628 && xmode1 != VOIDmode)
629 xop1 = copy_to_mode_reg (xmode1, xop1);
633 if (! (*insn_data[icode].operand[3].predicate) (wxop, wxmode)
634 && wxmode != VOIDmode)
635 wxop = copy_to_mode_reg (wxmode, wxop);
637 pat = GEN_FCN (icode) (temp, xop0, xop1, wxop);
640 pat = GEN_FCN (icode) (temp, xop0, xop1);
646 if (! (*insn_data[icode].operand[2].predicate) (wxop, wxmode)
647 && wxmode != VOIDmode)
648 wxop = copy_to_mode_reg (wxmode, wxop);
650 pat = GEN_FCN (icode) (temp, xop0, wxop);
653 pat = GEN_FCN (icode) (temp, xop0);
660 /* Generate code to perform an operation specified by TERNARY_OPTAB
661 on operands OP0, OP1 and OP2, with result having machine-mode MODE.
663 UNSIGNEDP is for the case where we have to widen the operands
664 to perform the operation. It says to use zero-extension.
666 If TARGET is nonzero, the value
667 is generated there, if it is convenient to do so.
668 In all cases an rtx is returned for the locus of the value;
669 this may or may not be TARGET. */
672 expand_ternary_op (enum machine_mode mode, optab ternary_optab, rtx op0,
673 rtx op1, rtx op2, rtx target, int unsignedp)
675 int icode = (int) optab_handler (ternary_optab, mode)->insn_code;
676 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
677 enum machine_mode mode1 = insn_data[icode].operand[2].mode;
678 enum machine_mode mode2 = insn_data[icode].operand[3].mode;
681 rtx xop0 = op0, xop1 = op1, xop2 = op2;
683 gcc_assert (optab_handler (ternary_optab, mode)->insn_code
684 != CODE_FOR_nothing);
686 if (!target || !insn_data[icode].operand[0].predicate (target, mode))
687 temp = gen_reg_rtx (mode);
691 /* In case the insn wants input operands in modes different from
692 those of the actual operands, convert the operands. It would
693 seem that we don't need to convert CONST_INTs, but we do, so
694 that they're properly zero-extended, sign-extended or truncated for their mode. */
697 if (GET_MODE (op0) != mode0 && mode0 != VOIDmode)
698 xop0 = convert_modes (mode0,
699 GET_MODE (op0) != VOIDmode
704 if (GET_MODE (op1) != mode1 && mode1 != VOIDmode)
705 xop1 = convert_modes (mode1,
706 GET_MODE (op1) != VOIDmode
711 if (GET_MODE (op2) != mode2 && mode2 != VOIDmode)
712 xop2 = convert_modes (mode2,
713 GET_MODE (op2) != VOIDmode
718 /* Now, if insn's predicates don't allow our operands, put them into pseudo regs. */
721 if (!insn_data[icode].operand[1].predicate (xop0, mode0)
722 && mode0 != VOIDmode)
723 xop0 = copy_to_mode_reg (mode0, xop0);
725 if (!insn_data[icode].operand[2].predicate (xop1, mode1)
726 && mode1 != VOIDmode)
727 xop1 = copy_to_mode_reg (mode1, xop1);
729 if (!insn_data[icode].operand[3].predicate (xop2, mode2)
730 && mode2 != VOIDmode)
731 xop2 = copy_to_mode_reg (mode2, xop2);
733 pat = GEN_FCN (icode) (temp, xop0, xop1, xop2);
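/* Illustrative sketch (not from the GCC sources; the name is
   hypothetical): the calling convention of expand_ternary_op above.
   TERNOP must have a handler in MODE, since expand_ternary_op asserts
   that the insn code is not CODE_FOR_nothing.  */

static rtx
example_expand_ternary (enum machine_mode mode, optab ternop,
                        rtx a, rtx b, rtx c)
{
  /* Let the expander pick a fresh pseudo for the result.  */
  return expand_ternary_op (mode, ternop, a, b, c, NULL_RTX, 0);
}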
740 /* Like expand_binop, but return a constant rtx if the result can be
741 calculated at compile time. The arguments and return value are
742 otherwise the same as for expand_binop. */
745 simplify_expand_binop (enum machine_mode mode, optab binoptab,
746 rtx op0, rtx op1, rtx target, int unsignedp,
747 enum optab_methods methods)
749 if (CONSTANT_P (op0) && CONSTANT_P (op1))
751 rtx x = simplify_binary_operation (binoptab->code, mode, op0, op1);
757 return expand_binop (mode, binoptab, op0, op1, target, unsignedp, methods);
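/* Illustrative sketch (not from the GCC sources; the name is
   hypothetical): with simplify_expand_binop, an addition of two known
   constants is folded immediately instead of emitting an add insn.  */

static rtx
example_add_with_folding (enum machine_mode mode, rtx op0, rtx op1)
{
  /* Returns a constant rtx when both operands are constant, otherwise
     expands the addition just as expand_binop would.  */
  return simplify_expand_binop (mode, add_optab, op0, op1,
                                NULL_RTX, 1, OPTAB_LIB_WIDEN);
}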
760 /* Like simplify_expand_binop, but always put the result in TARGET.
761 Return true if the expansion succeeded. */
764 force_expand_binop (enum machine_mode mode, optab binoptab,
765 rtx op0, rtx op1, rtx target, int unsignedp,
766 enum optab_methods methods)
768 rtx x = simplify_expand_binop (mode, binoptab, op0, op1,
769 target, unsignedp, methods);
773 emit_move_insn (target, x);
777 /* Generate insns for VEC_LSHIFT_EXPR, VEC_RSHIFT_EXPR. */
780 expand_vec_shift_expr (sepops ops, rtx target)
782 enum insn_code icode;
783 rtx rtx_op1, rtx_op2;
784 enum machine_mode mode1;
785 enum machine_mode mode2;
786 enum machine_mode mode = TYPE_MODE (ops->type);
787 tree vec_oprnd = ops->op0;
788 tree shift_oprnd = ops->op1;
794 case VEC_RSHIFT_EXPR:
795 shift_optab = vec_shr_optab;
797 case VEC_LSHIFT_EXPR:
798 shift_optab = vec_shl_optab;
804 icode = optab_handler (shift_optab, mode)->insn_code;
805 gcc_assert (icode != CODE_FOR_nothing);
807 mode1 = insn_data[icode].operand[1].mode;
808 mode2 = insn_data[icode].operand[2].mode;
810 rtx_op1 = expand_normal (vec_oprnd);
811 if (!(*insn_data[icode].operand[1].predicate) (rtx_op1, mode1)
812 && mode1 != VOIDmode)
813 rtx_op1 = force_reg (mode1, rtx_op1);
815 rtx_op2 = expand_normal (shift_oprnd);
816 if (!(*insn_data[icode].operand[2].predicate) (rtx_op2, mode2)
817 && mode2 != VOIDmode)
818 rtx_op2 = force_reg (mode2, rtx_op2);
821 || ! (*insn_data[icode].operand[0].predicate) (target, mode))
822 target = gen_reg_rtx (mode);
824 /* Emit instruction */
825 pat = GEN_FCN (icode) (target, rtx_op1, rtx_op2);
832 /* This subroutine of expand_doubleword_shift handles the cases in which
833 the effective shift value is >= BITS_PER_WORD. The arguments and return
834 value are the same as for the parent routine, except that SUPERWORD_OP1
835 is the shift count to use when shifting OUTOF_INPUT into INTO_TARGET.
836 INTO_TARGET may be null if the caller has decided to calculate it. */
839 expand_superword_shift (optab binoptab, rtx outof_input, rtx superword_op1,
840 rtx outof_target, rtx into_target,
841 int unsignedp, enum optab_methods methods)
843 if (into_target != 0)
844 if (!force_expand_binop (word_mode, binoptab, outof_input, superword_op1,
845 into_target, unsignedp, methods))
848 if (outof_target != 0)
850 /* For a signed right shift, we must fill OUTOF_TARGET with copies
851 of the sign bit, otherwise we must fill it with zeros. */
852 if (binoptab != ashr_optab)
853 emit_move_insn (outof_target, CONST0_RTX (word_mode));
855 if (!force_expand_binop (word_mode, binoptab,
856 outof_input, GEN_INT (BITS_PER_WORD - 1),
857 outof_target, unsignedp, methods))
863 /* This subroutine of expand_doubleword_shift handles the cases in which
864 the effective shift value is < BITS_PER_WORD. The arguments and return
865 value are the same as for the parent routine. */
868 expand_subword_shift (enum machine_mode op1_mode, optab binoptab,
869 rtx outof_input, rtx into_input, rtx op1,
870 rtx outof_target, rtx into_target,
871 int unsignedp, enum optab_methods methods,
872 unsigned HOST_WIDE_INT shift_mask)
874 optab reverse_unsigned_shift, unsigned_shift;
877 reverse_unsigned_shift = (binoptab == ashl_optab ? lshr_optab : ashl_optab);
878 unsigned_shift = (binoptab == ashl_optab ? ashl_optab : lshr_optab);
880 /* The low OP1 bits of INTO_TARGET come from the high bits of OUTOF_INPUT.
881 We therefore need to shift OUTOF_INPUT by (BITS_PER_WORD - OP1) bits in
882 the opposite direction to BINOPTAB. */
883 if (CONSTANT_P (op1) || shift_mask >= BITS_PER_WORD)
885 carries = outof_input;
886 tmp = immed_double_const (BITS_PER_WORD, 0, op1_mode);
887 tmp = simplify_expand_binop (op1_mode, sub_optab, tmp, op1,
892 /* We must avoid shifting by BITS_PER_WORD bits since that is either
893 the same as a zero shift (if shift_mask == BITS_PER_WORD - 1) or
894 has unknown behavior. Do a single shift first, then shift by the
895 remainder. It's OK to use ~OP1 as the remainder if shift counts
896 are truncated to the mode size. */
897 carries = expand_binop (word_mode, reverse_unsigned_shift,
898 outof_input, const1_rtx, 0, unsignedp, methods);
899 if (shift_mask == BITS_PER_WORD - 1)
901 tmp = immed_double_const (-1, -1, op1_mode);
902 tmp = simplify_expand_binop (op1_mode, xor_optab, op1, tmp,
907 tmp = immed_double_const (BITS_PER_WORD - 1, 0, op1_mode);
908 tmp = simplify_expand_binop (op1_mode, sub_optab, tmp, op1,
912 if (tmp == 0 || carries == 0)
914 carries = expand_binop (word_mode, reverse_unsigned_shift,
915 carries, tmp, 0, unsignedp, methods);
919 /* Shift INTO_INPUT logically by OP1. This is the last use of INTO_INPUT
920 so the result can go directly into INTO_TARGET if convenient. */
921 tmp = expand_binop (word_mode, unsigned_shift, into_input, op1,
922 into_target, unsignedp, methods);
926 /* Now OR in the bits carried over from OUTOF_INPUT. */
927 if (!force_expand_binop (word_mode, ior_optab, tmp, carries,
928 into_target, unsignedp, methods))
931 /* Use a standard word_mode shift for the out-of half. */
932 if (outof_target != 0)
933 if (!force_expand_binop (word_mode, binoptab, outof_input, op1,
934 outof_target, unsignedp, methods))
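/* Illustrative sketch in plain C, assuming 32-bit words (not from the
   GCC sources): the decomposition performed by expand_subword_shift for
   a left shift of the pair (HI,LO) by 0 < N < 32.  The "out-of" half is
   a plain word shift; the "into" half combines its own shifted bits with
   the carries taken from the top of the other word.  */

static void
example_subword_shl (unsigned int hi, unsigned int lo, unsigned int n,
                     unsigned int *out_hi, unsigned int *out_lo)
{
  unsigned int carries = lo >> (32 - n); /* reverse shift by BITS_PER_WORD - N */

  *out_hi = (hi << n) | carries;        /* the INTO half */
  *out_lo = lo << n;                    /* the OUT-OF half */
}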
941 #ifdef HAVE_conditional_move
942 /* Try implementing expand_doubleword_shift using conditional moves.
943 The shift is by < BITS_PER_WORD if (CMP_CODE CMP1 CMP2) is true,
944 otherwise it is by >= BITS_PER_WORD. SUBWORD_OP1 and SUPERWORD_OP1
945 are the shift counts to use in the former and latter case. All other
946 arguments are the same as the parent routine. */
949 expand_doubleword_shift_condmove (enum machine_mode op1_mode, optab binoptab,
950 enum rtx_code cmp_code, rtx cmp1, rtx cmp2,
951 rtx outof_input, rtx into_input,
952 rtx subword_op1, rtx superword_op1,
953 rtx outof_target, rtx into_target,
954 int unsignedp, enum optab_methods methods,
955 unsigned HOST_WIDE_INT shift_mask)
957 rtx outof_superword, into_superword;
959 /* Put the superword version of the output into OUTOF_SUPERWORD and INTO_SUPERWORD. */
961 outof_superword = outof_target != 0 ? gen_reg_rtx (word_mode) : 0;
962 if (outof_target != 0 && subword_op1 == superword_op1)
964 /* The value INTO_TARGET >> SUBWORD_OP1, which we later store in
965 OUTOF_TARGET, is the same as the value of INTO_SUPERWORD. */
966 into_superword = outof_target;
967 if (!expand_superword_shift (binoptab, outof_input, superword_op1,
968 outof_superword, 0, unsignedp, methods))
973 into_superword = gen_reg_rtx (word_mode);
974 if (!expand_superword_shift (binoptab, outof_input, superword_op1,
975 outof_superword, into_superword,
980 /* Put the subword version directly in OUTOF_TARGET and INTO_TARGET. */
981 if (!expand_subword_shift (op1_mode, binoptab,
982 outof_input, into_input, subword_op1,
983 outof_target, into_target,
984 unsignedp, methods, shift_mask))
987 /* Select between them. Do the INTO half first because INTO_SUPERWORD
988 might be the current value of OUTOF_TARGET. */
989 if (!emit_conditional_move (into_target, cmp_code, cmp1, cmp2, op1_mode,
990 into_target, into_superword, word_mode, false))
993 if (outof_target != 0)
994 if (!emit_conditional_move (outof_target, cmp_code, cmp1, cmp2, op1_mode,
995 outof_target, outof_superword,
1003 /* Expand a doubleword shift (ashl, ashr or lshr) using word-mode shifts.
1004 OUTOF_INPUT and INTO_INPUT are the two word-sized halves of the first
1005 input operand; the shift moves bits in the direction OUTOF_INPUT->
1006 INTO_TARGET. OUTOF_TARGET and INTO_TARGET are the equivalent words
1007 of the target. OP1 is the shift count and OP1_MODE is its mode.
1008 If OP1 is constant, it will have been truncated as appropriate
1009 and is known to be nonzero.
1011 If SHIFT_MASK is zero, the result of word shifts is undefined when the
1012 shift count is outside the range [0, BITS_PER_WORD). This routine must
1013 avoid generating such shifts for OP1s in the range [0, BITS_PER_WORD * 2).
1015 If SHIFT_MASK is nonzero, all word-mode shift counts are effectively
1016 masked by it and shifts in the range [BITS_PER_WORD, SHIFT_MASK) will
1017 fill with zeros or sign bits as appropriate.
1019 If SHIFT_MASK is BITS_PER_WORD - 1, this routine will synthesize
1020 a doubleword shift whose equivalent mask is BITS_PER_WORD * 2 - 1.
1021 Doing this preserves semantics required by SHIFT_COUNT_TRUNCATED.
1022 In all other cases, shifts by values outside [0, BITS_PER_WORD * 2) are undefined.
1025 BINOPTAB, UNSIGNEDP and METHODS are as for expand_binop. This function
1026 may not use INTO_INPUT after modifying INTO_TARGET, and similarly for
1027 OUTOF_INPUT and OUTOF_TARGET. OUTOF_TARGET can be null if the parent
1028 function wants to calculate it itself.
1030 Return true if the shift could be successfully synthesized. */
1033 expand_doubleword_shift (enum machine_mode op1_mode, optab binoptab,
1034 rtx outof_input, rtx into_input, rtx op1,
1035 rtx outof_target, rtx into_target,
1036 int unsignedp, enum optab_methods methods,
1037 unsigned HOST_WIDE_INT shift_mask)
1039 rtx superword_op1, tmp, cmp1, cmp2;
1040 rtx subword_label, done_label;
1041 enum rtx_code cmp_code;
1043 /* See if word-mode shifts by BITS_PER_WORD...BITS_PER_WORD * 2 - 1 will
1044 fill the result with sign or zero bits as appropriate. If so, the value
1045 of OUTOF_TARGET will always be (SHIFT OUTOF_INPUT OP1). Recursively call
1046 this routine to calculate INTO_TARGET (which depends on both OUTOF_INPUT
1047 and INTO_INPUT), then emit code to set up OUTOF_TARGET.
1049 This isn't worthwhile for constant shifts since the optimizers will
1050 cope better with in-range shift counts. */
1051 if (shift_mask >= BITS_PER_WORD
1052 && outof_target != 0
1053 && !CONSTANT_P (op1))
1055 if (!expand_doubleword_shift (op1_mode, binoptab,
1056 outof_input, into_input, op1,
1058 unsignedp, methods, shift_mask))
1060 if (!force_expand_binop (word_mode, binoptab, outof_input, op1,
1061 outof_target, unsignedp, methods))
1066 /* Set CMP_CODE, CMP1 and CMP2 so that the rtx (CMP_CODE CMP1 CMP2)
1067 is true when the effective shift value is less than BITS_PER_WORD.
1068 Set SUPERWORD_OP1 to the shift count that should be used to shift
1069 OUTOF_INPUT into INTO_TARGET when the condition is false. */
1070 tmp = immed_double_const (BITS_PER_WORD, 0, op1_mode);
1071 if (!CONSTANT_P (op1) && shift_mask == BITS_PER_WORD - 1)
1073 /* Set CMP1 to OP1 & BITS_PER_WORD. The result is zero iff OP1
1074 is a subword shift count. */
1075 cmp1 = simplify_expand_binop (op1_mode, and_optab, op1, tmp,
1077 cmp2 = CONST0_RTX (op1_mode);
1079 superword_op1 = op1;
1083 /* Set CMP1 to OP1 - BITS_PER_WORD. */
1084 cmp1 = simplify_expand_binop (op1_mode, sub_optab, op1, tmp,
1086 cmp2 = CONST0_RTX (op1_mode);
1088 superword_op1 = cmp1;
1093 /* If we can compute the condition at compile time, pick the
1094 appropriate subroutine. */
1095 tmp = simplify_relational_operation (cmp_code, SImode, op1_mode, cmp1, cmp2);
1096 if (tmp != 0 && CONST_INT_P (tmp))
1098 if (tmp == const0_rtx)
1099 return expand_superword_shift (binoptab, outof_input, superword_op1,
1100 outof_target, into_target,
1101 unsignedp, methods);
1103 return expand_subword_shift (op1_mode, binoptab,
1104 outof_input, into_input, op1,
1105 outof_target, into_target,
1106 unsignedp, methods, shift_mask);
1109 #ifdef HAVE_conditional_move
1110 /* Try using conditional moves to generate straight-line code. */
1112 rtx start = get_last_insn ();
1113 if (expand_doubleword_shift_condmove (op1_mode, binoptab,
1114 cmp_code, cmp1, cmp2,
1115 outof_input, into_input,
1117 outof_target, into_target,
1118 unsignedp, methods, shift_mask))
1120 delete_insns_since (start);
1124 /* As a last resort, use branches to select the correct alternative. */
1125 subword_label = gen_label_rtx ();
1126 done_label = gen_label_rtx ();
1129 do_compare_rtx_and_jump (cmp1, cmp2, cmp_code, false, op1_mode,
1130 0, 0, subword_label, -1);
1133 if (!expand_superword_shift (binoptab, outof_input, superword_op1,
1134 outof_target, into_target,
1135 unsignedp, methods))
1138 emit_jump_insn (gen_jump (done_label));
1140 emit_label (subword_label);
1142 if (!expand_subword_shift (op1_mode, binoptab,
1143 outof_input, into_input, op1,
1144 outof_target, into_target,
1145 unsignedp, methods, shift_mask))
1148 emit_label (done_label);
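/* Illustrative sketch in plain C, assuming 32-bit words (not from the
   GCC sources): the overall strategy above for a logical right shift of
   the 64-bit pair (HI,LO) by N in [0, 64).  Shift counts of
   BITS_PER_WORD or more take the superword path, smaller nonzero counts
   the subword path.  */

static void
example_doubleword_lshr (unsigned int hi, unsigned int lo, unsigned int n,
                         unsigned int *out_hi, unsigned int *out_lo)
{
  if (n == 0)
    {
      *out_hi = hi;
      *out_lo = lo;
    }
  else if (n >= 32)
    {
      /* Superword case: a single word shift of the high word; the high
         result word is zero-filled (it would be sign-filled for an
         arithmetic right shift).  */
      *out_lo = hi >> (n - 32);
      *out_hi = 0;
    }
  else
    {
      /* Subword case: see expand_subword_shift above.  */
      *out_lo = (lo >> n) | (hi << (32 - n));
      *out_hi = hi >> n;
    }
}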
1152 /* Subroutine of expand_binop. Perform a double word multiplication of
1153 operands OP0 and OP1 both of mode MODE, which is exactly twice as wide
1154 as the target's word_mode. This function returns NULL_RTX if anything
1155 goes wrong, in which case it may have already emitted instructions
1156 which need to be deleted.
1158 If we want to multiply two two-word values and have normal and widening
1159 multiplies of single-word values, we can do this with three smaller multiplications.
1162 The multiplication proceeds as follows:
1163 _______________________
1164 [__op0_high_|__op0_low__]
1165 _______________________
1166 * [__op1_high_|__op1_low__]
1167 _______________________________________________
1168 _______________________
1169 (1) [__op0_low__*__op1_low__]
1170 _______________________
1171 (2a) [__op0_low__*__op1_high_]
1172 _______________________
1173 (2b) [__op0_high_*__op1_low__]
1174 _______________________
1175 (3) [__op0_high_*__op1_high_]
1178 This gives a 4-word result. Since we are only interested in the
1179 lower 2 words, partial result (3) and the upper words of (2a) and
1180 (2b) don't need to be calculated. Hence (2a) and (2b) can be
1181 calculated using non-widening multiplication.
1183 (1), however, needs to be calculated with an unsigned widening
1184 multiplication. If this operation is not directly supported we
1185 try using a signed widening multiplication and adjust the result.
1186 This adjustment works as follows:
1188 If both operands are positive then no adjustment is needed.
1190 If the operands have different signs, for example op0_low < 0 and
1191 op1_low >= 0, the instruction treats the most significant bit of
1192 op0_low as a sign bit instead of a bit with significance
1193 2**(BITS_PER_WORD-1), i.e. the instruction multiplies op1_low
1194 with 2**BITS_PER_WORD - op0_low, and two's complements the
1195 result. Conclusion: We need to add op1_low * 2**BITS_PER_WORD to the result.
1198 Similarly, if both operands are negative, we need to add
1199 (op0_low + op1_low) * 2**BITS_PER_WORD.
1201 We use a trick to adjust quickly. We logically shift op0_low right
1202 (op1_low) BITS_PER_WORD-1 steps to get 0 or 1, and add this to
1203 op0_high (op1_high) before it is used to calculate 2b (2a). If no
1204 logical shift exists, we do an arithmetic right shift and subtract the 0 or -1. */
1208 expand_doubleword_mult (enum machine_mode mode, rtx op0, rtx op1, rtx target,
1209 bool umulp, enum optab_methods methods)
1211 int low = (WORDS_BIG_ENDIAN ? 1 : 0);
1212 int high = (WORDS_BIG_ENDIAN ? 0 : 1);
1213 rtx wordm1 = umulp ? NULL_RTX : GEN_INT (BITS_PER_WORD - 1);
1214 rtx product, adjust, product_high, temp;
1216 rtx op0_high = operand_subword_force (op0, high, mode);
1217 rtx op0_low = operand_subword_force (op0, low, mode);
1218 rtx op1_high = operand_subword_force (op1, high, mode);
1219 rtx op1_low = operand_subword_force (op1, low, mode);
1221 /* If we're using an unsigned multiply to directly compute the product
1222 of the low-order words of the operands and perform any required
1223 adjustments of the operands, we begin by trying two more multiplications
1224 and then computing the appropriate sum.
1226 We have checked above that the required addition is provided.
1227 Full-word addition will normally always succeed, especially if
1228 it is provided at all, so we don't worry about its failure. The
1229 multiplication may well fail, however, so we do handle that. */
1233 /* ??? This could be done with emit_store_flag where available. */
1234 temp = expand_binop (word_mode, lshr_optab, op0_low, wordm1,
1235 NULL_RTX, 1, methods);
1237 op0_high = expand_binop (word_mode, add_optab, op0_high, temp,
1238 NULL_RTX, 0, OPTAB_DIRECT);
1241 temp = expand_binop (word_mode, ashr_optab, op0_low, wordm1,
1242 NULL_RTX, 0, methods);
1245 op0_high = expand_binop (word_mode, sub_optab, op0_high, temp,
1246 NULL_RTX, 0, OPTAB_DIRECT);
1253 adjust = expand_binop (word_mode, smul_optab, op0_high, op1_low,
1254 NULL_RTX, 0, OPTAB_DIRECT);
1258 /* OP0_HIGH should now be dead. */
1262 /* ??? This could be done with emit_store_flag where available. */
1263 temp = expand_binop (word_mode, lshr_optab, op1_low, wordm1,
1264 NULL_RTX, 1, methods);
1266 op1_high = expand_binop (word_mode, add_optab, op1_high, temp,
1267 NULL_RTX, 0, OPTAB_DIRECT);
1270 temp = expand_binop (word_mode, ashr_optab, op1_low, wordm1,
1271 NULL_RTX, 0, methods);
1274 op1_high = expand_binop (word_mode, sub_optab, op1_high, temp,
1275 NULL_RTX, 0, OPTAB_DIRECT);
1282 temp = expand_binop (word_mode, smul_optab, op1_high, op0_low,
1283 NULL_RTX, 0, OPTAB_DIRECT);
1287 /* OP1_HIGH should now be dead. */
1289 adjust = expand_binop (word_mode, add_optab, adjust, temp,
1290 adjust, 0, OPTAB_DIRECT);
1292 if (target && !REG_P (target))
1296 product = expand_binop (mode, umul_widen_optab, op0_low, op1_low,
1297 target, 1, OPTAB_DIRECT);
1299 product = expand_binop (mode, smul_widen_optab, op0_low, op1_low,
1300 target, 1, OPTAB_DIRECT);
1305 product_high = operand_subword (product, high, 1, mode);
1306 adjust = expand_binop (word_mode, add_optab, product_high, adjust,
1307 REG_P (product_high) ? product_high : adjust,
1309 emit_move_insn (product_high, adjust);
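/* Illustrative sketch in plain C, assuming a 32-bit unsigned int and a
   64-bit unsigned long long (not from the GCC sources): the low 64 bits
   of a 64x64 multiplication computed from word-sized multiplies,
   mirroring partial products (1), (2a) and (2b) in the comment above.
   Partial product (3) only affects bits that are discarded, so it is
   never formed.  */

static unsigned long long
example_doubleword_mult_low (unsigned long long op0, unsigned long long op1)
{
  unsigned int op0_low = (unsigned int) op0;
  unsigned int op0_high = (unsigned int) (op0 >> 32);
  unsigned int op1_low = (unsigned int) op1;
  unsigned int op1_high = (unsigned int) (op1 >> 32);
  /* (1): unsigned widening multiply of the two low words.  */
  unsigned long long product = (unsigned long long) op0_low * op1_low;
  /* (2a) + (2b): non-widening multiplies; only their low words matter.  */
  unsigned int adjust = op0_high * op1_low + op0_low * op1_high;

  return product + ((unsigned long long) adjust << 32);
}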
1313 /* Wrapper around expand_binop which takes an rtx code to specify
1314 the operation to perform, not an optab pointer. All other
1315 arguments are the same. */
1317 expand_simple_binop (enum machine_mode mode, enum rtx_code code, rtx op0,
1318 rtx op1, rtx target, int unsignedp,
1319 enum optab_methods methods)
1321 optab binop = code_to_optab[(int) code];
1324 return expand_binop (mode, binop, op0, op1, target, unsignedp, methods);
1327 /* Return whether OP0 and OP1 should be swapped when expanding a commutative
1328 binop. Order them according to commutative_operand_precedence and, if
1329 possible, try to put TARGET or a pseudo first. */
1331 swap_commutative_operands_with_target (rtx target, rtx op0, rtx op1)
1333 int op0_prec = commutative_operand_precedence (op0);
1334 int op1_prec = commutative_operand_precedence (op1);
1336 if (op0_prec < op1_prec)
1339 if (op0_prec > op1_prec)
1342 /* With equal precedence, both orders are ok, but it is better if the
1343 first operand is TARGET, or if both TARGET and OP0 are pseudos. */
1344 if (target == 0 || REG_P (target))
1345 return (REG_P (op1) && !REG_P (op0)) || target == op1;
1347 return rtx_equal_p (op1, target);
1350 /* Return true if BINOPTAB implements a shift operation. */
1353 shift_optab_p (optab binoptab)
1355 switch (binoptab->code)
1371 /* Return true if BINOPTAB implements a commutative binary operation. */
1374 commutative_optab_p (optab binoptab)
1376 return (GET_RTX_CLASS (binoptab->code) == RTX_COMM_ARITH
1377 || binoptab == smul_widen_optab
1378 || binoptab == umul_widen_optab
1379 || binoptab == smul_highpart_optab
1380 || binoptab == umul_highpart_optab);
1383 /* X is to be used in mode MODE as an operand to BINOPTAB. If we're
1384 optimizing, and if the operand is a constant that costs more than
1385 1 instruction, force the constant into a register and return that
1386 register. Return X otherwise. UNSIGNEDP says whether X is unsigned. */
1389 avoid_expensive_constant (enum machine_mode mode, optab binoptab,
1390 rtx x, bool unsignedp)
1392 bool speed = optimize_insn_for_speed_p ();
1394 if (mode != VOIDmode
1397 && rtx_cost (x, binoptab->code, speed) > rtx_cost (x, SET, speed))
1399 if (CONST_INT_P (x))
1401 HOST_WIDE_INT intval = trunc_int_for_mode (INTVAL (x), mode);
1402 if (intval != INTVAL (x))
1403 x = GEN_INT (intval);
1406 x = convert_modes (mode, VOIDmode, x, unsignedp);
1407 x = force_reg (mode, x);
1412 /* Helper function for expand_binop: handle the case where there
1413 is an insn that directly implements the indicated operation.
1414 Returns null if this is not possible. */
1416 expand_binop_directly (enum machine_mode mode, optab binoptab,
1418 rtx target, int unsignedp, enum optab_methods methods,
1421 int icode = (int) optab_handler (binoptab, mode)->insn_code;
1422 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
1423 enum machine_mode mode1 = insn_data[icode].operand[2].mode;
1424 enum machine_mode tmp_mode;
1427 rtx xop0 = op0, xop1 = op1;
1434 temp = gen_reg_rtx (mode);
1436 /* If it is a commutative operator and the modes would match
1437 if we would swap the operands, we can save the conversions. */
1438 commutative_p = commutative_optab_p (binoptab);
1440 && GET_MODE (xop0) != mode0 && GET_MODE (xop1) != mode1
1441 && GET_MODE (xop0) == mode1 && GET_MODE (xop1) == mode0)
1448 /* If we are optimizing, force expensive constants into a register. */
1449 xop0 = avoid_expensive_constant (mode0, binoptab, xop0, unsignedp);
1450 if (!shift_optab_p (binoptab))
1451 xop1 = avoid_expensive_constant (mode1, binoptab, xop1, unsignedp);
1453 /* In case the insn wants input operands in modes different from
1454 those of the actual operands, convert the operands. It would
1455 seem that we don't need to convert CONST_INTs, but we do, so
1456 that they're properly zero-extended, sign-extended or truncated for their mode. */
1459 if (GET_MODE (xop0) != mode0 && mode0 != VOIDmode)
1460 xop0 = convert_modes (mode0,
1461 GET_MODE (xop0) != VOIDmode
1466 if (GET_MODE (xop1) != mode1 && mode1 != VOIDmode)
1467 xop1 = convert_modes (mode1,
1468 GET_MODE (xop1) != VOIDmode
1473 /* If operation is commutative,
1474 try to make the first operand a register.
1475 Even better, try to make it the same as the target.
1476 Also try to make the last operand a constant. */
1478 && swap_commutative_operands_with_target (target, xop0, xop1))
1485 /* Now, if insn's predicates don't allow our operands, put them into pseudo regs. */
1488 if (!insn_data[icode].operand[1].predicate (xop0, mode0)
1489 && mode0 != VOIDmode)
1490 xop0 = copy_to_mode_reg (mode0, xop0);
1492 if (!insn_data[icode].operand[2].predicate (xop1, mode1)
1493 && mode1 != VOIDmode)
1494 xop1 = copy_to_mode_reg (mode1, xop1);
1496 if (binoptab == vec_pack_trunc_optab
1497 || binoptab == vec_pack_usat_optab
1498 || binoptab == vec_pack_ssat_optab
1499 || binoptab == vec_pack_ufix_trunc_optab
1500 || binoptab == vec_pack_sfix_trunc_optab)
1502 /* The mode of the result is different from the mode of the arguments. */
1504 tmp_mode = insn_data[icode].operand[0].mode;
1505 if (GET_MODE_NUNITS (tmp_mode) != 2 * GET_MODE_NUNITS (mode))
1511 if (!insn_data[icode].operand[0].predicate (temp, tmp_mode))
1512 temp = gen_reg_rtx (tmp_mode);
1514 pat = GEN_FCN (icode) (temp, xop0, xop1);
1517 /* If PAT is composed of more than one insn, try to add an appropriate
1518 REG_EQUAL note to it. If we can't because TEMP conflicts with an
1519 operand, call expand_binop again, this time without a target. */
1520 if (INSN_P (pat) && NEXT_INSN (pat) != NULL_RTX
1521 && ! add_equal_note (pat, temp, binoptab->code, xop0, xop1))
1523 delete_insns_since (last);
1524 return expand_binop (mode, binoptab, op0, op1, NULL_RTX,
1525 unsignedp, methods);
1532 delete_insns_since (last);
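/* Illustrative sketch (not from the GCC sources; the name is
   hypothetical): typical use of the expand_binop entry point documented
   below: emit code computing X + Y in MODE, allowing fallback to wider
   modes or to a library call.  */

static rtx
example_emit_plus (enum machine_mode mode, rtx x, rtx y, rtx target)
{
  return expand_binop (mode, add_optab, x, y, target,
                       0 /* signed */, OPTAB_LIB_WIDEN);
}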
1536 /* Generate code to perform an operation specified by BINOPTAB
1537 on operands OP0 and OP1, with result having machine-mode MODE.
1539 UNSIGNEDP is for the case where we have to widen the operands
1540 to perform the operation. It says to use zero-extension.
1542 If TARGET is nonzero, the value
1543 is generated there, if it is convenient to do so.
1544 In all cases an rtx is returned for the locus of the value;
1545 this may or may not be TARGET. */
1548 expand_binop (enum machine_mode mode, optab binoptab, rtx op0, rtx op1,
1549 rtx target, int unsignedp, enum optab_methods methods)
1551 enum optab_methods next_methods
1552 = (methods == OPTAB_LIB || methods == OPTAB_LIB_WIDEN
1553 ? OPTAB_WIDEN : methods);
1554 enum mode_class mclass;
1555 enum machine_mode wider_mode;
1558 rtx entry_last = get_last_insn ();
1561 mclass = GET_MODE_CLASS (mode);
1563 /* If subtracting an integer constant, convert this into an addition of
1564 the negated constant. */
1566 if (binoptab == sub_optab && CONST_INT_P (op1))
1568 op1 = negate_rtx (mode, op1);
1569 binoptab = add_optab;
1572 /* Record where to delete back to if we backtrack. */
1573 last = get_last_insn ();
1575 /* If we can do it with a three-operand insn, do so. */
1577 if (methods != OPTAB_MUST_WIDEN
1578 && optab_handler (binoptab, mode)->insn_code != CODE_FOR_nothing)
1580 temp = expand_binop_directly (mode, binoptab, op0, op1, target,
1581 unsignedp, methods, last);
1586 /* If we were trying to rotate, and that didn't work, try rotating
1587 the other direction before falling back to shifts and bitwise-or. */
1588 if (((binoptab == rotl_optab
1589 && optab_handler (rotr_optab, mode)->insn_code != CODE_FOR_nothing)
1590 || (binoptab == rotr_optab
1591 && optab_handler (rotl_optab, mode)->insn_code != CODE_FOR_nothing))
1592 && mclass == MODE_INT)
1594 optab otheroptab = (binoptab == rotl_optab ? rotr_optab : rotl_optab);
1596 unsigned int bits = GET_MODE_BITSIZE (mode);
1598 if (CONST_INT_P (op1))
1599 newop1 = GEN_INT (bits - INTVAL (op1));
1600 else if (targetm.shift_truncation_mask (mode) == bits - 1)
1601 newop1 = negate_rtx (GET_MODE (op1), op1);
1603 newop1 = expand_binop (GET_MODE (op1), sub_optab,
1604 GEN_INT (bits), op1,
1605 NULL_RTX, unsignedp, OPTAB_DIRECT);
1607 temp = expand_binop_directly (mode, otheroptab, op0, newop1,
1608 target, unsignedp, methods, last);
1613 /* If this is a multiply, see if we can do a widening operation that
1614 takes operands of this mode and makes a wider mode. */
1616 if (binoptab == smul_optab
1617 && GET_MODE_WIDER_MODE (mode) != VOIDmode
1618 && ((optab_handler ((unsignedp ? umul_widen_optab : smul_widen_optab),
1619 GET_MODE_WIDER_MODE (mode))->insn_code)
1620 != CODE_FOR_nothing))
1622 temp = expand_binop (GET_MODE_WIDER_MODE (mode),
1623 unsignedp ? umul_widen_optab : smul_widen_optab,
1624 op0, op1, NULL_RTX, unsignedp, OPTAB_DIRECT);
1628 if (GET_MODE_CLASS (mode) == MODE_INT
1629 && TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode),
1630 GET_MODE_BITSIZE (GET_MODE (temp))))
1631 return gen_lowpart (mode, temp);
1633 return convert_to_mode (mode, temp, unsignedp);
1637 /* Look for a wider mode of the same class for which we think we
1638 can open-code the operation. Check for a widening multiply at the
1639 wider mode as well. */
1641 if (CLASS_HAS_WIDER_MODES_P (mclass)
1642 && methods != OPTAB_DIRECT && methods != OPTAB_LIB)
1643 for (wider_mode = GET_MODE_WIDER_MODE (mode);
1644 wider_mode != VOIDmode;
1645 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
1647 if (optab_handler (binoptab, wider_mode)->insn_code != CODE_FOR_nothing
1648 || (binoptab == smul_optab
1649 && GET_MODE_WIDER_MODE (wider_mode) != VOIDmode
1650 && ((optab_handler ((unsignedp ? umul_widen_optab
1651 : smul_widen_optab),
1652 GET_MODE_WIDER_MODE (wider_mode))->insn_code)
1653 != CODE_FOR_nothing)))
1655 rtx xop0 = op0, xop1 = op1;
1658 /* For certain integer operations, we need not actually extend
1659 the narrow operands, as long as we will truncate
1660 the results to the same narrowness. */
1662 if ((binoptab == ior_optab || binoptab == and_optab
1663 || binoptab == xor_optab
1664 || binoptab == add_optab || binoptab == sub_optab
1665 || binoptab == smul_optab || binoptab == ashl_optab)
1666 && mclass == MODE_INT)
1669 xop0 = avoid_expensive_constant (mode, binoptab,
1671 if (binoptab != ashl_optab)
1672 xop1 = avoid_expensive_constant (mode, binoptab,
1676 xop0 = widen_operand (xop0, wider_mode, mode, unsignedp, no_extend);
1678 /* The second operand of a shift must always be extended. */
1679 xop1 = widen_operand (xop1, wider_mode, mode, unsignedp,
1680 no_extend && binoptab != ashl_optab);
1682 temp = expand_binop (wider_mode, binoptab, xop0, xop1, NULL_RTX,
1683 unsignedp, OPTAB_DIRECT);
1686 if (mclass != MODE_INT
1687 || !TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode),
1688 GET_MODE_BITSIZE (wider_mode)))
1691 target = gen_reg_rtx (mode);
1692 convert_move (target, temp, 0);
1696 return gen_lowpart (mode, temp);
1699 delete_insns_since (last);
1703 /* If operation is commutative,
1704 try to make the first operand a register.
1705 Even better, try to make it the same as the target.
1706 Also try to make the last operand a constant. */
1707 if (commutative_optab_p (binoptab)
1708 && swap_commutative_operands_with_target (target, op0, op1))
1715 /* These can be done a word at a time. */
1716 if ((binoptab == and_optab || binoptab == ior_optab || binoptab == xor_optab)
1717 && mclass == MODE_INT
1718 && GET_MODE_SIZE (mode) > UNITS_PER_WORD
1719 && optab_handler (binoptab, word_mode)->insn_code != CODE_FOR_nothing)
1724 /* If TARGET is the same as one of the operands, the REG_EQUAL note
1725 won't be accurate, so use a new target. */
1726 if (target == 0 || target == op0 || target == op1)
1727 target = gen_reg_rtx (mode);
1731 /* Do the actual arithmetic. */
1732 for (i = 0; i < GET_MODE_BITSIZE (mode) / BITS_PER_WORD; i++)
1734 rtx target_piece = operand_subword (target, i, 1, mode);
1735 rtx x = expand_binop (word_mode, binoptab,
1736 operand_subword_force (op0, i, mode),
1737 operand_subword_force (op1, i, mode),
1738 target_piece, unsignedp, next_methods);
1743 if (target_piece != x)
1744 emit_move_insn (target_piece, x);
1747 insns = get_insns ();
1750 if (i == GET_MODE_BITSIZE (mode) / BITS_PER_WORD)
1757 /* Synthesize double word shifts from single word shifts. */
1758 if ((binoptab == lshr_optab || binoptab == ashl_optab
1759 || binoptab == ashr_optab)
1760 && mclass == MODE_INT
1761 && (CONST_INT_P (op1) || optimize_insn_for_speed_p ())
1762 && GET_MODE_SIZE (mode) == 2 * UNITS_PER_WORD
1763 && optab_handler (binoptab, word_mode)->insn_code != CODE_FOR_nothing
1764 && optab_handler (ashl_optab, word_mode)->insn_code != CODE_FOR_nothing
1765 && optab_handler (lshr_optab, word_mode)->insn_code != CODE_FOR_nothing)
1767 unsigned HOST_WIDE_INT shift_mask, double_shift_mask;
1768 enum machine_mode op1_mode;
1770 double_shift_mask = targetm.shift_truncation_mask (mode);
1771 shift_mask = targetm.shift_truncation_mask (word_mode);
1772 op1_mode = GET_MODE (op1) != VOIDmode ? GET_MODE (op1) : word_mode;
1774 /* Apply the truncation to constant shifts. */
1775 if (double_shift_mask > 0 && CONST_INT_P (op1))
1776 op1 = GEN_INT (INTVAL (op1) & double_shift_mask);
1778 if (op1 == CONST0_RTX (op1_mode))
1781 /* Make sure that this is a combination that expand_doubleword_shift
1782 can handle. See the comments there for details. */
1783 if (double_shift_mask == 0
1784 || (shift_mask == BITS_PER_WORD - 1
1785 && double_shift_mask == BITS_PER_WORD * 2 - 1))
1788 rtx into_target, outof_target;
1789 rtx into_input, outof_input;
1790 int left_shift, outof_word;
1792 /* If TARGET is the same as one of the operands, the REG_EQUAL note
1793 won't be accurate, so use a new target. */
1794 if (target == 0 || target == op0 || target == op1)
1795 target = gen_reg_rtx (mode);
1799 /* OUTOF_* is the word we are shifting bits away from, and
1800 INTO_* is the word that we are shifting bits towards, thus
1801 they differ depending on the direction of the shift and
1802 WORDS_BIG_ENDIAN. */
1804 left_shift = binoptab == ashl_optab;
1805 outof_word = left_shift ^ ! WORDS_BIG_ENDIAN;
1807 outof_target = operand_subword (target, outof_word, 1, mode);
1808 into_target = operand_subword (target, 1 - outof_word, 1, mode);
1810 outof_input = operand_subword_force (op0, outof_word, mode);
1811 into_input = operand_subword_force (op0, 1 - outof_word, mode);
1813 if (expand_doubleword_shift (op1_mode, binoptab,
1814 outof_input, into_input, op1,
1815 outof_target, into_target,
1816 unsignedp, next_methods, shift_mask))
1818 insns = get_insns ();
1828 /* Synthesize double word rotates from single word shifts. */
1829 if ((binoptab == rotl_optab || binoptab == rotr_optab)
1830 && mclass == MODE_INT
1831 && CONST_INT_P (op1)
1832 && GET_MODE_SIZE (mode) == 2 * UNITS_PER_WORD
1833 && optab_handler (ashl_optab, word_mode)->insn_code != CODE_FOR_nothing
1834 && optab_handler (lshr_optab, word_mode)->insn_code != CODE_FOR_nothing)
1837 rtx into_target, outof_target;
1838 rtx into_input, outof_input;
1840 int shift_count, left_shift, outof_word;
1842 /* If TARGET is the same as one of the operands, the REG_EQUAL note
1843 won't be accurate, so use a new target. Do this also if target is not
1844 a REG, first because having a register instead may open optimization
1845 opportunities, and second because if target and op0 happen to be MEMs
1846 designating the same location, we would risk clobbering it too early
1847 in the code sequence we generate below. */
1848 if (target == 0 || target == op0 || target == op1 || ! REG_P (target))
1849 target = gen_reg_rtx (mode);
1853 shift_count = INTVAL (op1);
1855 /* OUTOF_* is the word we are shifting bits away from, and
1856 INTO_* is the word that we are shifting bits towards, thus
1857 they differ depending on the direction of the shift and
1858 WORDS_BIG_ENDIAN. */
1860 left_shift = (binoptab == rotl_optab);
1861 outof_word = left_shift ^ ! WORDS_BIG_ENDIAN;
1863 outof_target = operand_subword (target, outof_word, 1, mode);
1864 into_target = operand_subword (target, 1 - outof_word, 1, mode);
1866 outof_input = operand_subword_force (op0, outof_word, mode);
1867 into_input = operand_subword_force (op0, 1 - outof_word, mode);
1869 if (shift_count == BITS_PER_WORD)
1871 /* This is just a word swap. */
1872 emit_move_insn (outof_target, into_input);
1873 emit_move_insn (into_target, outof_input);
1878 rtx into_temp1, into_temp2, outof_temp1, outof_temp2;
1879 rtx first_shift_count, second_shift_count;
1880 optab reverse_unsigned_shift, unsigned_shift;
1882 reverse_unsigned_shift = (left_shift ^ (shift_count < BITS_PER_WORD)
1883 ? lshr_optab : ashl_optab);
1885 unsigned_shift = (left_shift ^ (shift_count < BITS_PER_WORD)
1886 ? ashl_optab : lshr_optab);
1888 if (shift_count > BITS_PER_WORD)
1890 first_shift_count = GEN_INT (shift_count - BITS_PER_WORD);
1891 second_shift_count = GEN_INT (2 * BITS_PER_WORD - shift_count);
1895 first_shift_count = GEN_INT (BITS_PER_WORD - shift_count);
1896 second_shift_count = GEN_INT (shift_count);
1899 into_temp1 = expand_binop (word_mode, unsigned_shift,
1900 outof_input, first_shift_count,
1901 NULL_RTX, unsignedp, next_methods);
1902 into_temp2 = expand_binop (word_mode, reverse_unsigned_shift,
1903 into_input, second_shift_count,
1904 NULL_RTX, unsignedp, next_methods);
1906 if (into_temp1 != 0 && into_temp2 != 0)
1907 inter = expand_binop (word_mode, ior_optab, into_temp1, into_temp2,
1908 into_target, unsignedp, next_methods);
1912 if (inter != 0 && inter != into_target)
1913 emit_move_insn (into_target, inter);
1915 outof_temp1 = expand_binop (word_mode, unsigned_shift,
1916 into_input, first_shift_count,
1917 NULL_RTX, unsignedp, next_methods);
1918 outof_temp2 = expand_binop (word_mode, reverse_unsigned_shift,
1919 outof_input, second_shift_count,
1920 NULL_RTX, unsignedp, next_methods);
1922 if (inter != 0 && outof_temp1 != 0 && outof_temp2 != 0)
1923 inter = expand_binop (word_mode, ior_optab,
1924 outof_temp1, outof_temp2,
1925 outof_target, unsignedp, next_methods);
1927 if (inter != 0 && inter != outof_target)
1928 emit_move_insn (outof_target, inter);
1931 insns = get_insns ();
1941 /* These can be done a word at a time by propagating carries. */
1942 if ((binoptab == add_optab || binoptab == sub_optab)
1943 && mclass == MODE_INT
1944 && GET_MODE_SIZE (mode) >= 2 * UNITS_PER_WORD
1945 && optab_handler (binoptab, word_mode)->insn_code != CODE_FOR_nothing)
1948 optab otheroptab = binoptab == add_optab ? sub_optab : add_optab;
1949 const unsigned int nwords = GET_MODE_BITSIZE (mode) / BITS_PER_WORD;
1950 rtx carry_in = NULL_RTX, carry_out = NULL_RTX;
1951 rtx xop0, xop1, xtarget;
1953 /* We can handle either a 1 or -1 value for the carry. If STORE_FLAG_VALUE
1954 is one of those, use it. Otherwise, use 1 since it is the
1955 one easiest to get. */
1956 #if STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1
1957 int normalizep = STORE_FLAG_VALUE;
1962 /* Prepare the operands. */
1963 xop0 = force_reg (mode, op0);
1964 xop1 = force_reg (mode, op1);
1966 xtarget = gen_reg_rtx (mode);
1968 if (target == 0 || !REG_P (target))
1971 /* Indicate for flow that the entire target reg is being set. */
1973 emit_clobber (xtarget);
1975 /* Do the actual arithmetic. */
1976 for (i = 0; i < nwords; i++)
1978 int index = (WORDS_BIG_ENDIAN ? nwords - i - 1 : i);
1979 rtx target_piece = operand_subword (xtarget, index, 1, mode);
1980 rtx op0_piece = operand_subword_force (xop0, index, mode);
1981 rtx op1_piece = operand_subword_force (xop1, index, mode);
1984 /* Main add/subtract of the input operands. */
1985 x = expand_binop (word_mode, binoptab,
1986 op0_piece, op1_piece,
1987 target_piece, unsignedp, next_methods);
1993 /* Store carry from main add/subtract. */
1994 carry_out = gen_reg_rtx (word_mode);
1995 carry_out = emit_store_flag_force (carry_out,
1996 (binoptab == add_optab
1999 word_mode, 1, normalizep);
2006 /* Add/subtract previous carry to main result. */
2007 newx = expand_binop (word_mode,
2008 normalizep == 1 ? binoptab : otheroptab,
2010 NULL_RTX, 1, next_methods);
2014 /* Get out carry from adding/subtracting carry in. */
2015 rtx carry_tmp = gen_reg_rtx (word_mode);
2016 carry_tmp = emit_store_flag_force (carry_tmp,
2017 (binoptab == add_optab
2020 word_mode, 1, normalizep);
2022 /* Logical-ior the two possible carries together. */
2023 carry_out = expand_binop (word_mode, ior_optab,
2024 carry_out, carry_tmp,
2025 carry_out, 0, next_methods);
2029 emit_move_insn (target_piece, newx);
2033 if (x != target_piece)
2034 emit_move_insn (target_piece, x);
2037 carry_in = carry_out;
2040 if (i == GET_MODE_BITSIZE (mode) / (unsigned) BITS_PER_WORD)
2042 if (optab_handler (mov_optab, mode)->insn_code != CODE_FOR_nothing
2043 || ! rtx_equal_p (target, xtarget))
2045 rtx temp = emit_move_insn (target, xtarget);
2047 set_unique_reg_note (temp,
2049 gen_rtx_fmt_ee (binoptab->code, mode,
2060 delete_insns_since (last);
2063 /* Attempt to synthesize double word multiplies using a sequence of word
2064 mode multiplications. We first attempt to generate a sequence using a
2065 more efficient unsigned widening multiply, and if that fails we then
2066 try using a signed widening multiply. */
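/* Illustrative sketch, not part of the original source and kept out of the
   build: a double-word multiply built only from word-sized multiplies and
   adds, assuming 32-bit words and a 64-bit "unsigned long long".  The
   ahi*bhi partial product is dropped because it lies entirely above the
   low double word.  */
#if 0
unsigned long long
mul_doubleword (unsigned int alo, unsigned int ahi,
                unsigned int blo, unsigned int bhi)
{
  unsigned long long low = (unsigned long long) alo * blo; /* widening multiply */
  unsigned int cross = alo * bhi + ahi * blo;  /* only the low 32 bits matter */
  return low + ((unsigned long long) cross << 32);
}
#endif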
2068 if (binoptab == smul_optab
2069 && mclass == MODE_INT
2070 && GET_MODE_SIZE (mode) == 2 * UNITS_PER_WORD
2071 && optab_handler (smul_optab, word_mode)->insn_code != CODE_FOR_nothing
2072 && optab_handler (add_optab, word_mode)->insn_code != CODE_FOR_nothing)
2074 rtx product = NULL_RTX;
2076 if (optab_handler (umul_widen_optab, mode)->insn_code
2077 != CODE_FOR_nothing)
2079 product = expand_doubleword_mult (mode, op0, op1, target,
2082 delete_insns_since (last);
2085 if (product == NULL_RTX
2086 && optab_handler (smul_widen_optab, mode)->insn_code
2087 != CODE_FOR_nothing)
2089 product = expand_doubleword_mult (mode, op0, op1, target,
2092 delete_insns_since (last);
2095 if (product != NULL_RTX)
2097 if (optab_handler (mov_optab, mode)->insn_code != CODE_FOR_nothing)
2099 temp = emit_move_insn (target ? target : product, product);
2100 set_unique_reg_note (temp,
2102 gen_rtx_fmt_ee (MULT, mode,
2110 /* It can't be open-coded in this mode.
2111 Use a library call if one is available and caller says that's ok. */
2113 libfunc = optab_libfunc (binoptab, mode);
2115 && (methods == OPTAB_LIB || methods == OPTAB_LIB_WIDEN))
2119 enum machine_mode op1_mode = mode;
2124 if (shift_optab_p (binoptab))
2126 op1_mode = targetm.libgcc_shift_count_mode ();
2127 /* Specify unsigned here,
2128 since negative shift counts are meaningless. */
2129 op1x = convert_to_mode (op1_mode, op1, 1);
2132 if (GET_MODE (op0) != VOIDmode
2133 && GET_MODE (op0) != mode)
2134 op0 = convert_to_mode (mode, op0, unsignedp);
2136 /* Pass 1 for NO_QUEUE so we don't lose any increments
2137 if the libcall is cse'd or moved. */
2138 value = emit_library_call_value (libfunc,
2139 NULL_RTX, LCT_CONST, mode, 2,
2140 op0, mode, op1x, op1_mode);
2142 insns = get_insns ();
2145 target = gen_reg_rtx (mode);
2146 emit_libcall_block (insns, target, value,
2147 gen_rtx_fmt_ee (binoptab->code, mode, op0, op1));
2152 delete_insns_since (last);
2154 /* It can't be done in this mode. Can we do it in a wider mode? */
2156 if (! (methods == OPTAB_WIDEN || methods == OPTAB_LIB_WIDEN
2157 || methods == OPTAB_MUST_WIDEN))
2159 /* Caller says, don't even try. */
2160 delete_insns_since (entry_last);
2164 /* Compute the value of METHODS to pass to recursive calls.
2165 Don't allow widening to be tried recursively. */
2167 methods = (methods == OPTAB_LIB_WIDEN ? OPTAB_LIB : OPTAB_DIRECT);
2169 /* Look for a wider mode of the same class for which it appears we can do
2172 if (CLASS_HAS_WIDER_MODES_P (mclass))
2174 for (wider_mode = GET_MODE_WIDER_MODE (mode);
2175 wider_mode != VOIDmode;
2176 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
2178 if ((optab_handler (binoptab, wider_mode)->insn_code
2179 != CODE_FOR_nothing)
2180 || (methods == OPTAB_LIB
2181 && optab_libfunc (binoptab, wider_mode)))
2183 rtx xop0 = op0, xop1 = op1;
2186 /* For certain integer operations, we need not actually extend
2187 the narrow operands, as long as we will truncate
2188 the results to the same narrowness. */
2190 if ((binoptab == ior_optab || binoptab == and_optab
2191 || binoptab == xor_optab
2192 || binoptab == add_optab || binoptab == sub_optab
2193 || binoptab == smul_optab || binoptab == ashl_optab)
2194 && mclass == MODE_INT)
2197 xop0 = widen_operand (xop0, wider_mode, mode,
2198 unsignedp, no_extend);
2200 /* The second operand of a shift must always be extended. */
2201 xop1 = widen_operand (xop1, wider_mode, mode, unsignedp,
2202 no_extend && binoptab != ashl_optab);
2204 temp = expand_binop (wider_mode, binoptab, xop0, xop1, NULL_RTX,
2205 unsignedp, methods);
2208 if (mclass != MODE_INT
2209 || !TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode),
2210 GET_MODE_BITSIZE (wider_mode)))
2213 target = gen_reg_rtx (mode);
2214 convert_move (target, temp, 0);
2218 return gen_lowpart (mode, temp);
2221 delete_insns_since (last);
2226 delete_insns_since (entry_last);
2230 /* Expand a binary operator which has both signed and unsigned forms.
2231 UOPTAB is the optab for unsigned operations, and SOPTAB is for
2234 If we widen unsigned operands, we may use a signed wider operation instead
2235 of an unsigned wider operation, since the result would be the same. */
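/* Illustrative sketch, not part of the original source and kept out of the
   build: why a signed wider operation is acceptable once unsigned operands
   have been zero-extended.  The zero-extended values are small and
   nonnegative, so e.g. a signed 32-bit division gives the same quotient an
   unsigned 16-bit one would.  The name and fixed widths are assumptions.  */
#if 0
unsigned short
udiv_hi_via_wider_sdiv (unsigned short a, unsigned short b)
{
  int wa = a;   /* zero-extended, 0 .. 65535 */
  int wb = b;
  return (unsigned short) (wa / wb);   /* signed division, same result */
}
#endif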
2238 sign_expand_binop (enum machine_mode mode, optab uoptab, optab soptab,
2239 rtx op0, rtx op1, rtx target, int unsignedp,
2240 enum optab_methods methods)
2243 optab direct_optab = unsignedp ? uoptab : soptab;
2244 struct optab_d wide_soptab;
2246 /* Do it without widening, if possible. */
2247 temp = expand_binop (mode, direct_optab, op0, op1, target,
2248 unsignedp, OPTAB_DIRECT);
2249 if (temp || methods == OPTAB_DIRECT)
2252 /* Try widening to a signed int. Make a fake signed optab that
2253 hides any signed insn for direct use. */
2254 wide_soptab = *soptab;
2255 optab_handler (&wide_soptab, mode)->insn_code = CODE_FOR_nothing;
2256 /* We don't want to generate new hash table entries from this fake
2258 wide_soptab.libcall_gen = NULL;
2260 temp = expand_binop (mode, &wide_soptab, op0, op1, target,
2261 unsignedp, OPTAB_WIDEN);
2263 /* For unsigned operands, try widening to an unsigned int. */
2264 if (temp == 0 && unsignedp)
2265 temp = expand_binop (mode, uoptab, op0, op1, target,
2266 unsignedp, OPTAB_WIDEN);
2267 if (temp || methods == OPTAB_WIDEN)
2270 /* Use the right width libcall if that exists. */
2271 temp = expand_binop (mode, direct_optab, op0, op1, target, unsignedp, OPTAB_LIB);
2272 if (temp || methods == OPTAB_LIB)
2275 /* Must widen and use a libcall; use either signed or unsigned. */
2276 temp = expand_binop (mode, &wide_soptab, op0, op1, target,
2277 unsignedp, methods);
2281 return expand_binop (mode, uoptab, op0, op1, target,
2282 unsignedp, methods);
2286 /* Generate code to perform an operation specified by UNOPPTAB
2287 on operand OP0, with two results to TARG0 and TARG1.
2288 We assume that the order of the operands for the instruction
2289 is TARG0, TARG1, OP0.
2291 Either TARG0 or TARG1 may be zero, but what that means is that
2292 the result is not actually wanted. We will generate it into
2293 a dummy pseudo-reg and discard it. They may not both be zero.
2295 Returns 1 if this operation can be performed; 0 if not. */
2298 expand_twoval_unop (optab unoptab, rtx op0, rtx targ0, rtx targ1,
2301 enum machine_mode mode = GET_MODE (targ0 ? targ0 : targ1);
2302 enum mode_class mclass;
2303 enum machine_mode wider_mode;
2304 rtx entry_last = get_last_insn ();
2307 mclass = GET_MODE_CLASS (mode);
2310 targ0 = gen_reg_rtx (mode);
2312 targ1 = gen_reg_rtx (mode);
2314 /* Record where to go back to if we fail. */
2315 last = get_last_insn ();
2317 if (optab_handler (unoptab, mode)->insn_code != CODE_FOR_nothing)
2319 int icode = (int) optab_handler (unoptab, mode)->insn_code;
2320 enum machine_mode mode0 = insn_data[icode].operand[2].mode;
2324 if (GET_MODE (xop0) != VOIDmode
2325 && GET_MODE (xop0) != mode0)
2326 xop0 = convert_to_mode (mode0, xop0, unsignedp);
2328 /* Now, if insn doesn't accept these operands, put them into pseudos. */
2329 if (!insn_data[icode].operand[2].predicate (xop0, mode0))
2330 xop0 = copy_to_mode_reg (mode0, xop0);
2332 /* We could handle this, but we should always be called with a pseudo
2333 for our targets and all insns should take them as outputs. */
2334 gcc_assert (insn_data[icode].operand[0].predicate (targ0, mode));
2335 gcc_assert (insn_data[icode].operand[1].predicate (targ1, mode));
2337 pat = GEN_FCN (icode) (targ0, targ1, xop0);
2344 delete_insns_since (last);
2347 /* It can't be done in this mode. Can we do it in a wider mode? */
2349 if (CLASS_HAS_WIDER_MODES_P (mclass))
2351 for (wider_mode = GET_MODE_WIDER_MODE (mode);
2352 wider_mode != VOIDmode;
2353 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
2355 if (optab_handler (unoptab, wider_mode)->insn_code
2356 != CODE_FOR_nothing)
2358 rtx t0 = gen_reg_rtx (wider_mode);
2359 rtx t1 = gen_reg_rtx (wider_mode);
2360 rtx cop0 = convert_modes (wider_mode, mode, op0, unsignedp);
2362 if (expand_twoval_unop (unoptab, cop0, t0, t1, unsignedp))
2364 convert_move (targ0, t0, unsignedp);
2365 convert_move (targ1, t1, unsignedp);
2369 delete_insns_since (last);
2374 delete_insns_since (entry_last);
2378 /* Generate code to perform an operation specified by BINOPTAB
2379 on operands OP0 and OP1, with two results to TARG1 and TARG2.
2380 We assume that the order of the operands for the instruction
2381 is TARG0, OP0, OP1, TARG1, which would fit a pattern like
2382 [(set TARG0 (operate OP0 OP1)) (set TARG1 (operate ...))].
2384 Either TARG0 or TARG1 may be zero, but what that means is that
2385 the result is not actually wanted. We will generate it into
2386 a dummy pseudo-reg and discard it. They may not both be zero.
2388 Returns 1 if this operation can be performed; 0 if not. */
2391 expand_twoval_binop (optab binoptab, rtx op0, rtx op1, rtx targ0, rtx targ1,
2394 enum machine_mode mode = GET_MODE (targ0 ? targ0 : targ1);
2395 enum mode_class mclass;
2396 enum machine_mode wider_mode;
2397 rtx entry_last = get_last_insn ();
2400 mclass = GET_MODE_CLASS (mode);
2403 targ0 = gen_reg_rtx (mode);
2405 targ1 = gen_reg_rtx (mode);
2407 /* Record where to go back to if we fail. */
2408 last = get_last_insn ();
2410 if (optab_handler (binoptab, mode)->insn_code != CODE_FOR_nothing)
2412 int icode = (int) optab_handler (binoptab, mode)->insn_code;
2413 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
2414 enum machine_mode mode1 = insn_data[icode].operand[2].mode;
2416 rtx xop0 = op0, xop1 = op1;
2418 /* If we are optimizing, force expensive constants into a register. */
2419 xop0 = avoid_expensive_constant (mode0, binoptab, xop0, unsignedp);
2420 xop1 = avoid_expensive_constant (mode1, binoptab, xop1, unsignedp);
2422 /* In case the insn wants input operands in modes different from
2423 those of the actual operands, convert the operands. It would
2424 seem that we don't need to convert CONST_INTs, but we do, so
2425 that they're properly zero-extended, sign-extended or truncated
2428 if (GET_MODE (op0) != mode0 && mode0 != VOIDmode)
2429 xop0 = convert_modes (mode0,
2430 GET_MODE (op0) != VOIDmode
2435 if (GET_MODE (op1) != mode1 && mode1 != VOIDmode)
2436 xop1 = convert_modes (mode1,
2437 GET_MODE (op1) != VOIDmode
2442 /* Now, if insn doesn't accept these operands, put them into pseudos. */
2443 if (!insn_data[icode].operand[1].predicate (xop0, mode0))
2444 xop0 = copy_to_mode_reg (mode0, xop0);
2446 if (!insn_data[icode].operand[2].predicate (xop1, mode1))
2447 xop1 = copy_to_mode_reg (mode1, xop1);
2449 /* We could handle this, but we should always be called with a pseudo
2450 for our targets and all insns should take them as outputs. */
2451 gcc_assert (insn_data[icode].operand[0].predicate (targ0, mode));
2452 gcc_assert (insn_data[icode].operand[3].predicate (targ1, mode));
2454 pat = GEN_FCN (icode) (targ0, xop0, xop1, targ1);
2461 delete_insns_since (last);
2464 /* It can't be done in this mode. Can we do it in a wider mode? */
2466 if (CLASS_HAS_WIDER_MODES_P (mclass))
2468 for (wider_mode = GET_MODE_WIDER_MODE (mode);
2469 wider_mode != VOIDmode;
2470 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
2472 if (optab_handler (binoptab, wider_mode)->insn_code
2473 != CODE_FOR_nothing)
2475 rtx t0 = gen_reg_rtx (wider_mode);
2476 rtx t1 = gen_reg_rtx (wider_mode);
2477 rtx cop0 = convert_modes (wider_mode, mode, op0, unsignedp);
2478 rtx cop1 = convert_modes (wider_mode, mode, op1, unsignedp);
2480 if (expand_twoval_binop (binoptab, cop0, cop1,
2483 convert_move (targ0, t0, unsignedp);
2484 convert_move (targ1, t1, unsignedp);
2488 delete_insns_since (last);
2493 delete_insns_since (entry_last);
2497 /* Expand the two-valued library call indicated by BINOPTAB, but
2498 preserve only one of the values. If TARG0 is non-NULL, the first
2499 value is placed into TARG0; otherwise the second value is placed
2500 into TARG1. Exactly one of TARG0 and TARG1 must be non-NULL. The
2501 value stored into TARG0 or TARG1 is equivalent to (CODE OP0 OP1).
2502 This routine assumes that the value returned by the library call is
2503 as if the return value was of an integral mode twice as wide as the
2504 mode of OP0. Returns 1 if the call was successful. */
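/* Illustrative sketch, not part of the original source and kept out of the
   build: a libcall of the kind assumed here returns both values packed into
   an integer twice as wide as the operand mode, and the expander keeps only
   the half that was asked for.  The helper name, the fixed widths and which
   half sits at which byte offset (cf. the subreg offset below, which is
   target-dependent) are all assumptions.  */
#if 0
unsigned long long
divmod_packed (unsigned int a, unsigned int b)
{
  unsigned long long packed = a / b;               /* one value in one half  */
  packed |= (unsigned long long) (a % b) << 32;    /* the other in the other */
  return packed;
}
/* A caller wanting only the second value would discard the other half:
     unsigned int rem = (unsigned int) (divmod_packed (x, y) >> 32);  */
#endif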
2507 expand_twoval_binop_libfunc (optab binoptab, rtx op0, rtx op1,
2508 rtx targ0, rtx targ1, enum rtx_code code)
2510 enum machine_mode mode;
2511 enum machine_mode libval_mode;
2516 /* Exactly one of TARG0 or TARG1 should be non-NULL. */
2517 gcc_assert (!targ0 != !targ1);
2519 mode = GET_MODE (op0);
2520 libfunc = optab_libfunc (binoptab, mode);
2524 /* The value returned by the library function will have twice as
2525 many bits as the nominal MODE. */
2526 libval_mode = smallest_mode_for_size (2 * GET_MODE_BITSIZE (mode),
2529 libval = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
2533 /* Get the part of VAL containing the value that we want. */
2534 libval = simplify_gen_subreg (mode, libval, libval_mode,
2535 targ0 ? 0 : GET_MODE_SIZE (mode));
2536 insns = get_insns ();
2538 /* Move the result into the desired location. */
2539 emit_libcall_block (insns, targ0 ? targ0 : targ1, libval,
2540 gen_rtx_fmt_ee (code, mode, op0, op1));
2546 /* Wrapper around expand_unop which takes an rtx code to specify
2547 the operation to perform, not an optab pointer. All other
2548 arguments are the same. */
2550 expand_simple_unop (enum machine_mode mode, enum rtx_code code, rtx op0,
2551 rtx target, int unsignedp)
2553 optab unop = code_to_optab[(int) code];
2556 return expand_unop (mode, unop, op0, target, unsignedp);
2562 (clz:wide (zero_extend:wide x)) - ((width wide) - (width narrow)). */
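/* Illustrative sketch, not part of the original source and kept out of the
   build: the same adjustment on C types, assuming a 32-bit int and a nonzero
   input (__builtin_clz, like the optab here, is undefined at zero).  */
#if 0
int
clz16_via_clz32 (unsigned short x)
{
  /* Zero-extending adds 32 - 16 leading zeros; subtract them back.  */
  return __builtin_clz ((unsigned int) x) - (32 - 16);
}
#endif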
2564 widen_clz (enum machine_mode mode, rtx op0, rtx target)
2566 enum mode_class mclass = GET_MODE_CLASS (mode);
2567 if (CLASS_HAS_WIDER_MODES_P (mclass))
2569 enum machine_mode wider_mode;
2570 for (wider_mode = GET_MODE_WIDER_MODE (mode);
2571 wider_mode != VOIDmode;
2572 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
2574 if (optab_handler (clz_optab, wider_mode)->insn_code
2575 != CODE_FOR_nothing)
2577 rtx xop0, temp, last;
2579 last = get_last_insn ();
2582 target = gen_reg_rtx (mode);
2583 xop0 = widen_operand (op0, wider_mode, mode, true, false);
2584 temp = expand_unop (wider_mode, clz_optab, xop0, NULL_RTX, true);
2586 temp = expand_binop (wider_mode, sub_optab, temp,
2587 GEN_INT (GET_MODE_BITSIZE (wider_mode)
2588 - GET_MODE_BITSIZE (mode)),
2589 target, true, OPTAB_DIRECT);
2591 delete_insns_since (last);
2600 /* Try calculating clz of a double-word quantity as two clz's of word-sized
2601 quantities, choosing which based on whether the high word is nonzero. */
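/* Illustrative sketch, not part of the original source and kept out of the
   build: the same two-branch computation on C types, assuming 32-bit words,
   a 64-bit "unsigned long long" and a nonzero input.  */
#if 0
int
clz64_via_words (unsigned long long x)
{
  unsigned int hi = (unsigned int) (x >> 32);
  if (hi != 0)
    return __builtin_clz (hi);                    /* clz of the high word */
  return 32 + __builtin_clz ((unsigned int) x);   /* plus the high word's width */
}
#endif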
2603 expand_doubleword_clz (enum machine_mode mode, rtx op0, rtx target)
2605 rtx xop0 = force_reg (mode, op0);
2606 rtx subhi = gen_highpart (word_mode, xop0);
2607 rtx sublo = gen_lowpart (word_mode, xop0);
2608 rtx hi0_label = gen_label_rtx ();
2609 rtx after_label = gen_label_rtx ();
2610 rtx seq, temp, result;
2612 /* If we were not given a target, use a word_mode register, not a
2613 'mode' register. The result will fit, and nobody is expecting
2614 anything bigger (the return type of __builtin_clz* is int). */
2616 target = gen_reg_rtx (word_mode);
2618 /* In any case, write to a word_mode scratch in both branches of the
2619 conditional, so that there is a single move insn setting
2620 'target' on which to tag a REG_EQUAL note. */
2621 result = gen_reg_rtx (word_mode);
2625 /* If the high word is not equal to zero,
2626 then clz of the full value is clz of the high word. */
2627 emit_cmp_and_jump_insns (subhi, CONST0_RTX (word_mode), EQ, 0,
2628 word_mode, true, hi0_label);
2630 temp = expand_unop_direct (word_mode, clz_optab, subhi, result, true);
2635 convert_move (result, temp, true);
2637 emit_jump_insn (gen_jump (after_label));
2640 /* Else clz of the full value is clz of the low word plus the number
2641 of bits in the high word. */
2642 emit_label (hi0_label);
2644 temp = expand_unop_direct (word_mode, clz_optab, sublo, 0, true);
2647 temp = expand_binop (word_mode, add_optab, temp,
2648 GEN_INT (GET_MODE_BITSIZE (word_mode)),
2649 result, true, OPTAB_DIRECT);
2653 convert_move (result, temp, true);
2655 emit_label (after_label);
2656 convert_move (target, result, true);
2661 add_equal_note (seq, target, CLZ, xop0, 0);
2673 (lshiftrt:wide (bswap:wide x) ((width wide) - (width narrow))). */
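/* Illustrative sketch, not part of the original source and kept out of the
   build: byte-swap a narrow value in a wider mode, then shift the bytes back
   down by the width difference, assuming 16/32-bit widths and GCC's
   __builtin_bswap32.  */
#if 0
unsigned short
bswap16_via_bswap32 (unsigned short x)
{
  /* 0x0000AABB -> bswap32 -> 0xBBAA0000 -> shift right by 32 - 16.  */
  return (unsigned short) (__builtin_bswap32 (x) >> 16);
}
#endif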
2675 widen_bswap (enum machine_mode mode, rtx op0, rtx target)
2677 enum mode_class mclass = GET_MODE_CLASS (mode);
2678 enum machine_mode wider_mode;
2681 if (!CLASS_HAS_WIDER_MODES_P (mclass))
2684 for (wider_mode = GET_MODE_WIDER_MODE (mode);
2685 wider_mode != VOIDmode;
2686 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
2687 if (optab_handler (bswap_optab, wider_mode)->insn_code != CODE_FOR_nothing)
2692 last = get_last_insn ();
2694 x = widen_operand (op0, wider_mode, mode, true, true);
2695 x = expand_unop (wider_mode, bswap_optab, x, NULL_RTX, true);
2698 x = expand_shift (RSHIFT_EXPR, wider_mode, x,
2699 size_int (GET_MODE_BITSIZE (wider_mode)
2700 - GET_MODE_BITSIZE (mode)),
2706 target = gen_reg_rtx (mode);
2707 emit_move_insn (target, gen_lowpart (mode, x));
2710 delete_insns_since (last);
2715 /* Try calculating bswap as two bswaps of two word-sized operands. */
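/* Illustrative sketch, not part of the original source and kept out of the
   build: swap the bytes within each word and also swap the two words,
   assuming 32-bit words and a 64-bit "unsigned long long".  */
#if 0
unsigned long long
bswap64_via_words (unsigned long long x)
{
  unsigned int lo = (unsigned int) x;
  unsigned int hi = (unsigned int) (x >> 32);
  return ((unsigned long long) __builtin_bswap32 (lo) << 32)
         | __builtin_bswap32 (hi);
}
#endif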
2718 expand_doubleword_bswap (enum machine_mode mode, rtx op, rtx target)
2722 t1 = expand_unop (word_mode, bswap_optab,
2723 operand_subword_force (op, 0, mode), NULL_RTX, true);
2724 t0 = expand_unop (word_mode, bswap_optab,
2725 operand_subword_force (op, 1, mode), NULL_RTX, true);
2728 target = gen_reg_rtx (mode);
2730 emit_clobber (target);
2731 emit_move_insn (operand_subword (target, 0, 1, mode), t0);
2732 emit_move_insn (operand_subword (target, 1, 1, mode), t1);
2737 /* Try calculating (parity x) as (and (popcount x) 1), where
2738 popcount can also be done in a wider mode. */
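/* Illustrative sketch, not part of the original source and kept out of the
   build: parity is just the low bit of the population count.  */
#if 0
int
parity32 (unsigned int x)
{
  return __builtin_popcount (x) & 1;
}
#endif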
2740 expand_parity (enum machine_mode mode, rtx op0, rtx target)
2742 enum mode_class mclass = GET_MODE_CLASS (mode);
2743 if (CLASS_HAS_WIDER_MODES_P (mclass))
2745 enum machine_mode wider_mode;
2746 for (wider_mode = mode; wider_mode != VOIDmode;
2747 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
2749 if (optab_handler (popcount_optab, wider_mode)->insn_code
2750 != CODE_FOR_nothing)
2752 rtx xop0, temp, last;
2754 last = get_last_insn ();
2757 target = gen_reg_rtx (mode);
2758 xop0 = widen_operand (op0, wider_mode, mode, true, false);
2759 temp = expand_unop (wider_mode, popcount_optab, xop0, NULL_RTX,
2762 temp = expand_binop (wider_mode, and_optab, temp, const1_rtx,
2763 target, true, OPTAB_DIRECT);
2765 delete_insns_since (last);
2774 /* Try calculating ctz(x) as K - clz(x & -x),
2775 where K is GET_MODE_BITSIZE(mode) - 1.
2777 Both __builtin_ctz and __builtin_clz are undefined at zero, so we
2778 don't have to worry about what the hardware does in that case. (If
2779 the clz instruction produces the usual value at 0, which is K, the
2780 result of this code sequence will be -1; expand_ffs, below, relies
2781 on this. It might be nice to have it be K instead, for consistency
2782 with the (very few) processors that provide a ctz with a defined
2783 value, but that would take one more instruction, and it would be
2784 less convenient for expand_ffs anyway.) */
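/* Illustrative sketch, not part of the original source and kept out of the
   build: the same K - clz (x & -x) sequence on C types, assuming a 32-bit
   int and a nonzero input.  */
#if 0
int
ctz32_via_clz (unsigned int x)
{
  /* x & -x isolates the lowest set bit; clz gives its distance from the
     top, and 31 minus that is its distance from the bottom.  */
  return 31 - __builtin_clz (x & -x);
}
#endif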
2787 expand_ctz (enum machine_mode mode, rtx op0, rtx target)
2791 if (optab_handler (clz_optab, mode)->insn_code == CODE_FOR_nothing)
2796 temp = expand_unop_direct (mode, neg_optab, op0, NULL_RTX, true);
2798 temp = expand_binop (mode, and_optab, op0, temp, NULL_RTX,
2799 true, OPTAB_DIRECT);
2801 temp = expand_unop_direct (mode, clz_optab, temp, NULL_RTX, true);
2803 temp = expand_binop (mode, sub_optab, GEN_INT (GET_MODE_BITSIZE (mode) - 1),
2805 true, OPTAB_DIRECT);
2815 add_equal_note (seq, temp, CTZ, op0, 0);
2821 /* Try calculating ffs(x) using ctz(x) if we have that instruction, or
2822 else with the sequence used by expand_clz.
2824 The ffs builtin promises to return zero for a zero value and ctz/clz
2825 may have an undefined value in that case. If they do not give us a
2826 convenient value, we have to generate a test and branch. */
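/* Illustrative sketch, not part of the original source and kept out of the
   build: ffs from ctz with the explicit zero test described above, assuming
   a 32-bit int.  */
#if 0
int
ffs32_via_ctz (unsigned int x)
{
  if (x == 0)
    return 0;                    /* ffs is defined to return 0 here */
  return __builtin_ctz (x) + 1;  /* ctz is 0-based, ffs is 1-based  */
}
#endif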
2828 expand_ffs (enum machine_mode mode, rtx op0, rtx target)
2830 HOST_WIDE_INT val = 0;
2831 bool defined_at_zero = false;
2834 if (optab_handler (ctz_optab, mode)->insn_code != CODE_FOR_nothing)
2838 temp = expand_unop_direct (mode, ctz_optab, op0, 0, true);
2842 defined_at_zero = (CTZ_DEFINED_VALUE_AT_ZERO (mode, val) == 2);
2844 else if (optab_handler (clz_optab, mode)->insn_code != CODE_FOR_nothing)
2847 temp = expand_ctz (mode, op0, 0);
2851 if (CLZ_DEFINED_VALUE_AT_ZERO (mode, val) == 2)
2853 defined_at_zero = true;
2854 val = (GET_MODE_BITSIZE (mode) - 1) - val;
2860 if (defined_at_zero && val == -1)
2861 /* No correction needed at zero. */;
2864 /* We don't try to do anything clever with the situation found
2865 on some processors (e.g. Alpha) where ctz(0:mode) ==
2866 bitsize(mode). If someone can think of a way to send N to -1
2867 and leave alone all values in the range 0..N-1 (where N is a
2868 power of two), cheaper than this test-and-branch, please add it.
2870 The test-and-branch is done after the operation itself, in case
2871 the operation sets condition codes that can be recycled for this.
2872 (This is true on i386, for instance.) */
2874 rtx nonzero_label = gen_label_rtx ();
2875 emit_cmp_and_jump_insns (op0, CONST0_RTX (mode), NE, 0,
2876 mode, true, nonzero_label);
2878 convert_move (temp, GEN_INT (-1), false);
2879 emit_label (nonzero_label);
2882 /* temp now has a value in the range -1..bitsize-1. ffs is supposed
2883 to produce a value in the range 0..bitsize. */
2884 temp = expand_binop (mode, add_optab, temp, GEN_INT (1),
2885 target, false, OPTAB_DIRECT);
2892 add_equal_note (seq, temp, FFS, op0, 0);
2901 /* Extract the OMODE lowpart from VAL, which has IMODE. Under certain
2902 conditions, VAL may already be a SUBREG against which we cannot generate
2903 a further SUBREG. In this case, we expect forcing the value into a
2904 register will work around the situation. */
2907 lowpart_subreg_maybe_copy (enum machine_mode omode, rtx val,
2908 enum machine_mode imode)
2911 ret = lowpart_subreg (omode, val, imode);
2914 val = force_reg (imode, val);
2915 ret = lowpart_subreg (omode, val, imode);
2916 gcc_assert (ret != NULL);
2921 /* Expand a floating point absolute value or negation operation via a
2922 logical operation on the sign bit. */
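/* Illustrative sketch, not part of the original source and kept out of the
   build: the same sign-bit manipulation on a C float, assuming IEEE single
   precision, a 32-bit unsigned int and memcpy from <string.h>.  */
#if 0
float
fabs_via_signbit (float x)
{
  unsigned int bits;
  memcpy (&bits, &x, sizeof bits);  /* view the float's representation */
  bits &= 0x7fffffffu;              /* ABS clears the sign bit;         */
  /* bits ^= 0x80000000u;              NEG would toggle it instead.     */
  memcpy (&x, &bits, sizeof x);
  return x;
}
#endif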
2925 expand_absneg_bit (enum rtx_code code, enum machine_mode mode,
2926 rtx op0, rtx target)
2928 const struct real_format *fmt;
2929 int bitpos, word, nwords, i;
2930 enum machine_mode imode;
2931 HOST_WIDE_INT hi, lo;
2934 /* The format has to have a simple sign bit. */
2935 fmt = REAL_MODE_FORMAT (mode);
2939 bitpos = fmt->signbit_rw;
2943 /* Don't create negative zeros if the format doesn't support them. */
2944 if (code == NEG && !fmt->has_signed_zero)
2947 if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
2949 imode = int_mode_for_mode (mode);
2950 if (imode == BLKmode)
2959 if (FLOAT_WORDS_BIG_ENDIAN)
2960 word = (GET_MODE_BITSIZE (mode) - bitpos) / BITS_PER_WORD;
2962 word = bitpos / BITS_PER_WORD;
2963 bitpos = bitpos % BITS_PER_WORD;
2964 nwords = (GET_MODE_BITSIZE (mode) + BITS_PER_WORD - 1) / BITS_PER_WORD;
2967 if (bitpos < HOST_BITS_PER_WIDE_INT)
2970 lo = (HOST_WIDE_INT) 1 << bitpos;
2974 hi = (HOST_WIDE_INT) 1 << (bitpos - HOST_BITS_PER_WIDE_INT);
2980 if (target == 0 || target == op0)
2981 target = gen_reg_rtx (mode);
2987 for (i = 0; i < nwords; ++i)
2989 rtx targ_piece = operand_subword (target, i, 1, mode);
2990 rtx op0_piece = operand_subword_force (op0, i, mode);
2994 temp = expand_binop (imode, code == ABS ? and_optab : xor_optab,
2996 immed_double_const (lo, hi, imode),
2997 targ_piece, 1, OPTAB_LIB_WIDEN);
2998 if (temp != targ_piece)
2999 emit_move_insn (targ_piece, temp);
3002 emit_move_insn (targ_piece, op0_piece);
3005 insns = get_insns ();
3012 temp = expand_binop (imode, code == ABS ? and_optab : xor_optab,
3013 gen_lowpart (imode, op0),
3014 immed_double_const (lo, hi, imode),
3015 gen_lowpart (imode, target), 1, OPTAB_LIB_WIDEN);
3016 target = lowpart_subreg_maybe_copy (mode, temp, imode);
3018 set_unique_reg_note (get_last_insn (), REG_EQUAL,
3019 gen_rtx_fmt_e (code, mode, copy_rtx (op0)));
3025 /* As expand_unop, but will fail rather than attempt the operation in a
3026 different mode or with a libcall. */
3028 expand_unop_direct (enum machine_mode mode, optab unoptab, rtx op0, rtx target,
3031 if (optab_handler (unoptab, mode)->insn_code != CODE_FOR_nothing)
3033 int icode = (int) optab_handler (unoptab, mode)->insn_code;
3034 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
3036 rtx last = get_last_insn ();
3042 temp = gen_reg_rtx (mode);
3044 if (GET_MODE (xop0) != VOIDmode
3045 && GET_MODE (xop0) != mode0)
3046 xop0 = convert_to_mode (mode0, xop0, unsignedp);
3048 /* Now, if insn doesn't accept our operand, put it into a pseudo. */
3050 if (!insn_data[icode].operand[1].predicate (xop0, mode0))
3051 xop0 = copy_to_mode_reg (mode0, xop0);
3053 if (!insn_data[icode].operand[0].predicate (temp, mode))
3054 temp = gen_reg_rtx (mode);
3056 pat = GEN_FCN (icode) (temp, xop0);
3059 if (INSN_P (pat) && NEXT_INSN (pat) != NULL_RTX
3060 && ! add_equal_note (pat, temp, unoptab->code, xop0, NULL_RTX))
3062 delete_insns_since (last);
3063 return expand_unop (mode, unoptab, op0, NULL_RTX, unsignedp);
3071 delete_insns_since (last);
3076 /* Generate code to perform an operation specified by UNOPTAB
3077 on operand OP0, with result having machine-mode MODE.
3079 UNSIGNEDP is for the case where we have to widen the operands
3080 to perform the operation. It says to use zero-extension.
3082 If TARGET is nonzero, the value
3083 is generated there, if it is convenient to do so.
3084 In all cases an rtx is returned for the locus of the value;
3085 this may or may not be TARGET. */
3088 expand_unop (enum machine_mode mode, optab unoptab, rtx op0, rtx target,
3091 enum mode_class mclass = GET_MODE_CLASS (mode);
3092 enum machine_mode wider_mode;
3096 temp = expand_unop_direct (mode, unoptab, op0, target, unsignedp);
3100 /* It can't be done in this mode. Can we open-code it in a wider mode? */
3102 /* Widening (or narrowing) clz needs special treatment. */
3103 if (unoptab == clz_optab)
3105 temp = widen_clz (mode, op0, target);
3109 if (GET_MODE_SIZE (mode) == 2 * UNITS_PER_WORD
3110 && optab_handler (unoptab, word_mode)->insn_code != CODE_FOR_nothing)
3112 temp = expand_doubleword_clz (mode, op0, target);
3120 /* Widening (or narrowing) bswap needs special treatment. */
3121 if (unoptab == bswap_optab)
3123 temp = widen_bswap (mode, op0, target);
3127 if (GET_MODE_SIZE (mode) == 2 * UNITS_PER_WORD
3128 && optab_handler (unoptab, word_mode)->insn_code != CODE_FOR_nothing)
3130 temp = expand_doubleword_bswap (mode, op0, target);
3138 if (CLASS_HAS_WIDER_MODES_P (mclass))
3139 for (wider_mode = GET_MODE_WIDER_MODE (mode);
3140 wider_mode != VOIDmode;
3141 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
3143 if (optab_handler (unoptab, wider_mode)->insn_code != CODE_FOR_nothing)
3146 rtx last = get_last_insn ();
3148 /* For certain operations, we need not actually extend
3149 the narrow operand, as long as we will truncate the
3150 results to the same narrowness. */
3152 xop0 = widen_operand (xop0, wider_mode, mode, unsignedp,
3153 (unoptab == neg_optab
3154 || unoptab == one_cmpl_optab)
3155 && mclass == MODE_INT);
3157 temp = expand_unop (wider_mode, unoptab, xop0, NULL_RTX,
3162 if (mclass != MODE_INT
3163 || !TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode),
3164 GET_MODE_BITSIZE (wider_mode)))
3167 target = gen_reg_rtx (mode);
3168 convert_move (target, temp, 0);
3172 return gen_lowpart (mode, temp);
3175 delete_insns_since (last);
3179 /* These can be done a word at a time. */
3180 if (unoptab == one_cmpl_optab
3181 && mclass == MODE_INT
3182 && GET_MODE_SIZE (mode) > UNITS_PER_WORD
3183 && optab_handler (unoptab, word_mode)->insn_code != CODE_FOR_nothing)
3188 if (target == 0 || target == op0)
3189 target = gen_reg_rtx (mode);
3193 /* Do the actual arithmetic. */
3194 for (i = 0; i < GET_MODE_BITSIZE (mode) / BITS_PER_WORD; i++)
3196 rtx target_piece = operand_subword (target, i, 1, mode);
3197 rtx x = expand_unop (word_mode, unoptab,
3198 operand_subword_force (op0, i, mode),
3199 target_piece, unsignedp);
3201 if (target_piece != x)
3202 emit_move_insn (target_piece, x);
3205 insns = get_insns ();
3212 if (unoptab->code == NEG)
3214 /* Try negating floating point values by flipping the sign bit. */
3215 if (SCALAR_FLOAT_MODE_P (mode))
3217 temp = expand_absneg_bit (NEG, mode, op0, target);
3222 /* If there is no negation pattern, and we have no negative zero,
3223 try subtracting from zero. */
3224 if (!HONOR_SIGNED_ZEROS (mode))
3226 temp = expand_binop (mode, (unoptab == negv_optab
3227 ? subv_optab : sub_optab),
3228 CONST0_RTX (mode), op0, target,
3229 unsignedp, OPTAB_DIRECT);
3235 /* Try calculating parity (x) as popcount (x) % 2. */
3236 if (unoptab == parity_optab)
3238 temp = expand_parity (mode, op0, target);
3243 /* Try implementing ffs (x) in terms of clz (x). */
3244 if (unoptab == ffs_optab)
3246 temp = expand_ffs (mode, op0, target);
3251 /* Try implementing ctz (x) in terms of clz (x). */
3252 if (unoptab == ctz_optab)
3254 temp = expand_ctz (mode, op0, target);
3260 /* Now try a library call in this mode. */
3261 libfunc = optab_libfunc (unoptab, mode);
3267 enum machine_mode outmode = mode;
3269 /* All of these functions return small values. Thus we choose to
3270 have them return something that isn't a double-word. */
3271 if (unoptab == ffs_optab || unoptab == clz_optab || unoptab == ctz_optab
3272 || unoptab == popcount_optab || unoptab == parity_optab)
3274 = GET_MODE (hard_libcall_value (TYPE_MODE (integer_type_node),
3275 optab_libfunc (unoptab, mode)));
3279 /* Pass 1 for NO_QUEUE so we don't lose any increments
3280 if the libcall is cse'd or moved. */
3281 value = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST, outmode,
3283 insns = get_insns ();
3286 target = gen_reg_rtx (outmode);
3287 eq_value = gen_rtx_fmt_e (unoptab->code, mode, op0);
3288 if (GET_MODE_SIZE (outmode) < GET_MODE_SIZE (mode))
3289 eq_value = simplify_gen_unary (TRUNCATE, outmode, eq_value, mode);
3290 else if (GET_MODE_SIZE (outmode) > GET_MODE_SIZE (mode))
3291 eq_value = simplify_gen_unary (ZERO_EXTEND, outmode, eq_value, mode);
3292 emit_libcall_block (insns, target, value, eq_value);
3297 /* It can't be done in this mode. Can we do it in a wider mode? */
3299 if (CLASS_HAS_WIDER_MODES_P (mclass))
3301 for (wider_mode = GET_MODE_WIDER_MODE (mode);
3302 wider_mode != VOIDmode;
3303 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
3305 if ((optab_handler (unoptab, wider_mode)->insn_code
3306 != CODE_FOR_nothing)
3307 || optab_libfunc (unoptab, wider_mode))
3310 rtx last = get_last_insn ();
3312 /* For certain operations, we need not actually extend
3313 the narrow operand, as long as we will truncate the
3314 results to the same narrowness. */
3316 xop0 = widen_operand (xop0, wider_mode, mode, unsignedp,
3317 (unoptab == neg_optab
3318 || unoptab == one_cmpl_optab)
3319 && mclass == MODE_INT);
3321 temp = expand_unop (wider_mode, unoptab, xop0, NULL_RTX,
3324 /* If we are generating clz using wider mode, adjust the
3326 if (unoptab == clz_optab && temp != 0)
3327 temp = expand_binop (wider_mode, sub_optab, temp,
3328 GEN_INT (GET_MODE_BITSIZE (wider_mode)
3329 - GET_MODE_BITSIZE (mode)),
3330 target, true, OPTAB_DIRECT);
3334 if (mclass != MODE_INT)
3337 target = gen_reg_rtx (mode);
3338 convert_move (target, temp, 0);
3342 return gen_lowpart (mode, temp);
3345 delete_insns_since (last);
3350 /* One final attempt at implementing negation via subtraction,
3351 this time allowing widening of the operand. */
3352 if (unoptab->code == NEG && !HONOR_SIGNED_ZEROS (mode))
3355 temp = expand_binop (mode,
3356 unoptab == negv_optab ? subv_optab : sub_optab,
3357 CONST0_RTX (mode), op0,
3358 target, unsignedp, OPTAB_LIB_WIDEN);
3366 /* Emit code to compute the absolute value of OP0, with result to
3367 TARGET if convenient. (TARGET may be 0.) The return value says
3368 where the result actually is to be found.
3370 MODE is the mode of the operand; the mode of the result is
3371 different but can be deduced from MODE.
3376 expand_abs_nojump (enum machine_mode mode, rtx op0, rtx target,
3377 int result_unsignedp)
3382 result_unsignedp = 1;
3384 /* First try to do it with a special abs instruction. */
3385 temp = expand_unop (mode, result_unsignedp ? abs_optab : absv_optab,
3390 /* For floating point modes, try clearing the sign bit. */
3391 if (SCALAR_FLOAT_MODE_P (mode))
3393 temp = expand_absneg_bit (ABS, mode, op0, target);
3398 /* If we have a MAX insn, we can do this as MAX (x, -x). */
3399 if (optab_handler (smax_optab, mode)->insn_code != CODE_FOR_nothing
3400 && !HONOR_SIGNED_ZEROS (mode))
3402 rtx last = get_last_insn ();
3404 temp = expand_unop (mode, neg_optab, op0, NULL_RTX, 0);
3406 temp = expand_binop (mode, smax_optab, op0, temp, target, 0,
3412 delete_insns_since (last);
3415 /* If this machine has expensive jumps, we can do integer absolute
3416 value of X as (((signed) x >> (W-1)) ^ x) - ((signed) x >> (W-1)),
3417 where W is the width of MODE. */
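/* Illustrative sketch, not part of the original source and kept out of the
   build: the shift/xor/subtract sequence above with W = 32.  The arithmetic
   right shift of a negative value is implementation-defined in ISO C but
   behaves this way with GCC.  */
#if 0
int
abs_branchless (int x)
{
  int mask = x >> 31;         /* 0 for x >= 0, -1 for x < 0 */
  return (x ^ mask) - mask;   /* identity, or ~x + 1 == -x  */
}
#endif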
3419 if (GET_MODE_CLASS (mode) == MODE_INT
3420 && BRANCH_COST (optimize_insn_for_speed_p (),
3423 rtx extended = expand_shift (RSHIFT_EXPR, mode, op0,
3424 size_int (GET_MODE_BITSIZE (mode) - 1),
3427 temp = expand_binop (mode, xor_optab, extended, op0, target, 0,
3430 temp = expand_binop (mode, result_unsignedp ? sub_optab : subv_optab,
3431 temp, extended, target, 0, OPTAB_LIB_WIDEN);
3441 expand_abs (enum machine_mode mode, rtx op0, rtx target,
3442 int result_unsignedp, int safe)
3447 result_unsignedp = 1;
3449 temp = expand_abs_nojump (mode, op0, target, result_unsignedp);
3453 /* If that does not win, use conditional jump and negate. */
3455 /* It is safe to use the target if it is the same
3456 as the source, provided it is also a pseudo register. */
3457 if (op0 == target && REG_P (op0)
3458 && REGNO (op0) >= FIRST_PSEUDO_REGISTER)
3461 op1 = gen_label_rtx ();
3462 if (target == 0 || ! safe
3463 || GET_MODE (target) != mode
3464 || (MEM_P (target) && MEM_VOLATILE_P (target))
3466 && REGNO (target) < FIRST_PSEUDO_REGISTER))
3467 target = gen_reg_rtx (mode);
3469 emit_move_insn (target, op0);
3472 do_compare_rtx_and_jump (target, CONST0_RTX (mode), GE, 0, mode,
3473 NULL_RTX, NULL_RTX, op1, -1);
3475 op0 = expand_unop (mode, result_unsignedp ? neg_optab : negv_optab,
3478 emit_move_insn (target, op0);
3484 /* Emit code to compute the one's complement absolute value of OP0
3485 (if (OP0 < 0) OP0 = ~OP0), with result to TARGET if convenient.
3486 (TARGET may be NULL_RTX.) The return value says where the result
3487 actually is to be found.
3489 MODE is the mode of the operand; the mode of the result is
3490 different but can be deduced from MODE. */
3493 expand_one_cmpl_abs_nojump (enum machine_mode mode, rtx op0, rtx target)
3497 /* Not applicable for floating point modes. */
3498 if (FLOAT_MODE_P (mode))
3501 /* If we have a MAX insn, we can do this as MAX (x, ~x). */
3502 if (optab_handler (smax_optab, mode)->insn_code != CODE_FOR_nothing)
3504 rtx last = get_last_insn ();
3506 temp = expand_unop (mode, one_cmpl_optab, op0, NULL_RTX, 0);
3508 temp = expand_binop (mode, smax_optab, op0, temp, target, 0,
3514 delete_insns_since (last);
3517 /* If this machine has expensive jumps, we can do one's complement
3518 absolute value of X as (((signed) x >> (W-1)) ^ x). */
3520 if (GET_MODE_CLASS (mode) == MODE_INT
3521 && BRANCH_COST (optimize_insn_for_speed_p (),
3524 rtx extended = expand_shift (RSHIFT_EXPR, mode, op0,
3525 size_int (GET_MODE_BITSIZE (mode) - 1),
3528 temp = expand_binop (mode, xor_optab, extended, op0, target, 0,
3538 /* A subroutine of expand_copysign, perform the copysign operation using the
3539 abs and neg primitives advertised to exist on the target. The assumption
3540 is that we have a split register file, and leaving op0 in fp registers,
3541 and not playing with subregs so much, will help the register allocator. */
3544 expand_copysign_absneg (enum machine_mode mode, rtx op0, rtx op1, rtx target,
3545 int bitpos, bool op0_is_abs)
3547 enum machine_mode imode;
3554 /* Check if the back end provides an insn that handles signbit for the
3556 icode = (int) signbit_optab->handlers [(int) mode].insn_code;
3557 if (icode != CODE_FOR_nothing)
3559 imode = insn_data[icode].operand[0].mode;
3560 sign = gen_reg_rtx (imode);
3561 emit_unop_insn (icode, sign, op1, UNKNOWN);
3565 HOST_WIDE_INT hi, lo;
3567 if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
3569 imode = int_mode_for_mode (mode);
3570 if (imode == BLKmode)
3572 op1 = gen_lowpart (imode, op1);
3579 if (FLOAT_WORDS_BIG_ENDIAN)
3580 word = (GET_MODE_BITSIZE (mode) - bitpos) / BITS_PER_WORD;
3582 word = bitpos / BITS_PER_WORD;
3583 bitpos = bitpos % BITS_PER_WORD;
3584 op1 = operand_subword_force (op1, word, mode);
3587 if (bitpos < HOST_BITS_PER_WIDE_INT)
3590 lo = (HOST_WIDE_INT) 1 << bitpos;
3594 hi = (HOST_WIDE_INT) 1 << (bitpos - HOST_BITS_PER_WIDE_INT);
3598 sign = gen_reg_rtx (imode);
3599 sign = expand_binop (imode, and_optab, op1,
3600 immed_double_const (lo, hi, imode),
3601 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3606 op0 = expand_unop (mode, abs_optab, op0, target, 0);
3613 if (target == NULL_RTX)
3614 target = copy_to_reg (op0);
3616 emit_move_insn (target, op0);
3619 label = gen_label_rtx ();
3620 emit_cmp_and_jump_insns (sign, const0_rtx, EQ, NULL_RTX, imode, 1, label);
3622 if (GET_CODE (op0) == CONST_DOUBLE)
3623 op0 = simplify_unary_operation (NEG, mode, op0, mode);
3625 op0 = expand_unop (mode, neg_optab, op0, target, 0);
3627 emit_move_insn (target, op0);
3635 /* A subroutine of expand_copysign, perform the entire copysign operation
3636 with integer bitmasks. BITPOS is the position of the sign bit; OP0_IS_ABS
3637 is true if op0 is known to have its sign bit clear. */
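/* Illustrative sketch, not part of the original source and kept out of the
   build: the same mask-and-merge on C floats, assuming IEEE single
   precision, a 32-bit unsigned int and memcpy from <string.h>.  */
#if 0
float
copysign_via_bits (float mag, float sgn)
{
  unsigned int m, s;
  memcpy (&m, &mag, sizeof m);
  memcpy (&s, &sgn, sizeof s);
  m = (m & 0x7fffffffu)         /* clear MAG's sign bit ...       */
      | (s & 0x80000000u);      /* ... and copy SGN's sign bit in */
  memcpy (&mag, &m, sizeof mag);
  return mag;
}
#endif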
3640 expand_copysign_bit (enum machine_mode mode, rtx op0, rtx op1, rtx target,
3641 int bitpos, bool op0_is_abs)
3643 enum machine_mode imode;
3644 HOST_WIDE_INT hi, lo;
3645 int word, nwords, i;
3648 if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
3650 imode = int_mode_for_mode (mode);
3651 if (imode == BLKmode)
3660 if (FLOAT_WORDS_BIG_ENDIAN)
3661 word = (GET_MODE_BITSIZE (mode) - bitpos) / BITS_PER_WORD;
3663 word = bitpos / BITS_PER_WORD;
3664 bitpos = bitpos % BITS_PER_WORD;
3665 nwords = (GET_MODE_BITSIZE (mode) + BITS_PER_WORD - 1) / BITS_PER_WORD;
3668 if (bitpos < HOST_BITS_PER_WIDE_INT)
3671 lo = (HOST_WIDE_INT) 1 << bitpos;
3675 hi = (HOST_WIDE_INT) 1 << (bitpos - HOST_BITS_PER_WIDE_INT);
3679 if (target == 0 || target == op0 || target == op1)
3680 target = gen_reg_rtx (mode);
3686 for (i = 0; i < nwords; ++i)
3688 rtx targ_piece = operand_subword (target, i, 1, mode);
3689 rtx op0_piece = operand_subword_force (op0, i, mode);
3694 op0_piece = expand_binop (imode, and_optab, op0_piece,
3695 immed_double_const (~lo, ~hi, imode),
3696 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3698 op1 = expand_binop (imode, and_optab,
3699 operand_subword_force (op1, i, mode),
3700 immed_double_const (lo, hi, imode),
3701 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3703 temp = expand_binop (imode, ior_optab, op0_piece, op1,
3704 targ_piece, 1, OPTAB_LIB_WIDEN);
3705 if (temp != targ_piece)
3706 emit_move_insn (targ_piece, temp);
3709 emit_move_insn (targ_piece, op0_piece);
3712 insns = get_insns ();
3719 op1 = expand_binop (imode, and_optab, gen_lowpart (imode, op1),
3720 immed_double_const (lo, hi, imode),
3721 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3723 op0 = gen_lowpart (imode, op0);
3725 op0 = expand_binop (imode, and_optab, op0,
3726 immed_double_const (~lo, ~hi, imode),
3727 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3729 temp = expand_binop (imode, ior_optab, op0, op1,
3730 gen_lowpart (imode, target), 1, OPTAB_LIB_WIDEN);
3731 target = lowpart_subreg_maybe_copy (mode, temp, imode);
3737 /* Expand the C99 copysign operation. OP0 and OP1 must be the same
3738 scalar floating point mode. Return NULL if we do not know how to
3739 expand the operation inline. */
3742 expand_copysign (rtx op0, rtx op1, rtx target)
3744 enum machine_mode mode = GET_MODE (op0);
3745 const struct real_format *fmt;
3749 gcc_assert (SCALAR_FLOAT_MODE_P (mode));
3750 gcc_assert (GET_MODE (op1) == mode);
3752 /* First try to do it with a special instruction. */
3753 temp = expand_binop (mode, copysign_optab, op0, op1,
3754 target, 0, OPTAB_DIRECT);
3758 fmt = REAL_MODE_FORMAT (mode);
3759 if (fmt == NULL || !fmt->has_signed_zero)
3763 if (GET_CODE (op0) == CONST_DOUBLE)
3765 if (real_isneg (CONST_DOUBLE_REAL_VALUE (op0)))
3766 op0 = simplify_unary_operation (ABS, mode, op0, mode);
3770 if (fmt->signbit_ro >= 0
3771 && (GET_CODE (op0) == CONST_DOUBLE
3772 || (optab_handler (neg_optab, mode)->insn_code != CODE_FOR_nothing
3773 && optab_handler (abs_optab, mode)->insn_code != CODE_FOR_nothing)))
3775 temp = expand_copysign_absneg (mode, op0, op1, target,
3776 fmt->signbit_ro, op0_is_abs);
3781 if (fmt->signbit_rw < 0)
3783 return expand_copysign_bit (mode, op0, op1, target,
3784 fmt->signbit_rw, op0_is_abs);
3787 /* Generate an instruction whose insn-code is INSN_CODE,
3788 with two operands: an output TARGET and an input OP0.
3789 TARGET *must* be nonzero, and the output is always stored there.
3790 CODE is an rtx code such that (CODE OP0) is an rtx that describes
3791 the value that is stored into TARGET.
3793 Return false if expansion failed. */
3796 maybe_emit_unop_insn (int icode, rtx target, rtx op0, enum rtx_code code)
3799 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
3801 rtx last = get_last_insn ();
3805 /* Now, if insn does not accept our operands, put them into pseudos. */
3807 if (!insn_data[icode].operand[1].predicate (op0, mode0))
3808 op0 = copy_to_mode_reg (mode0, op0);
3810 if (!insn_data[icode].operand[0].predicate (temp, GET_MODE (temp)))
3811 temp = gen_reg_rtx (GET_MODE (temp));
3813 pat = GEN_FCN (icode) (temp, op0);
3816 delete_insns_since (last);
3820 if (INSN_P (pat) && NEXT_INSN (pat) != NULL_RTX && code != UNKNOWN)
3821 add_equal_note (pat, temp, code, op0, NULL_RTX);
3826 emit_move_insn (target, temp);
3829 /* Generate an instruction whose insn-code is INSN_CODE,
3830 with two operands: an output TARGET and an input OP0.
3831 TARGET *must* be nonzero, and the output is always stored there.
3832 CODE is an rtx code such that (CODE OP0) is an rtx that describes
3833 the value that is stored into TARGET. */
3836 emit_unop_insn (int icode, rtx target, rtx op0, enum rtx_code code)
3838 bool ok = maybe_emit_unop_insn (icode, target, op0, code);
3842 struct no_conflict_data
3844 rtx target, first, insn;
3848 /* Called via note_stores by emit_libcall_block. Set P->must_stay if
3849 the currently examined clobber / store has to stay in the list of
3850 insns that constitute the actual libcall block. */
3852 no_conflict_move_test (rtx dest, const_rtx set, void *p0)
3854 struct no_conflict_data *p= (struct no_conflict_data *) p0;
3856 /* If this insn directly contributes to setting the target, it must stay. */
3857 if (reg_overlap_mentioned_p (p->target, dest))
3858 p->must_stay = true;
3859 /* If we haven't committed to keeping any other insns in the list yet,
3860 there is nothing more to check. */
3861 else if (p->insn == p->first)
3863 /* If this insn sets / clobbers a register that feeds one of the insns
3864 already in the list, this insn has to stay too. */
3865 else if (reg_overlap_mentioned_p (dest, PATTERN (p->first))
3866 || (CALL_P (p->first) && (find_reg_fusage (p->first, USE, dest)))
3867 || reg_used_between_p (dest, p->first, p->insn)
3868 /* Likewise if this insn depends on a register set by a previous
3869 insn in the list, or if it sets a result (presumably a hard
3870 register) that is set or clobbered by a previous insn.
3871 N.B. the modified_*_p (SET_DEST...) tests applied to a MEM
3872 SET_DEST perform the former check on the address, and the latter
3873 check on the MEM. */
3874 || (GET_CODE (set) == SET
3875 && (modified_in_p (SET_SRC (set), p->first)
3876 || modified_in_p (SET_DEST (set), p->first)
3877 || modified_between_p (SET_SRC (set), p->first, p->insn)
3878 || modified_between_p (SET_DEST (set), p->first, p->insn))))
3879 p->must_stay = true;
3883 /* Emit code to make a call to a constant function or a library call.
3885 INSNS is a list containing all insns emitted in the call.
3886 These insns leave the result in RESULT. Our job is to copy RESULT
3887 to TARGET, which is logically equivalent to EQUIV.
3889 We first emit any insns that set a pseudo on the assumption that these are
3890 loading constants into registers; doing so allows them to be safely cse'ed
3891 between blocks. Then we emit all the other insns in the block, followed by
3892 an insn to move RESULT to TARGET. This last insn will have a REG_EQUAL
3893 note with an operand of EQUIV. */
3896 emit_libcall_block (rtx insns, rtx target, rtx result, rtx equiv)
3898 rtx final_dest = target;
3899 rtx next, last, insn;
3901 /* If this is a reg with REG_USERVAR_P set, then it could possibly turn
3902 into a MEM later. Protect the libcall block from this change. */
3903 if (! REG_P (target) || REG_USERVAR_P (target))
3904 target = gen_reg_rtx (GET_MODE (target));
3906 /* If we're using non-call exceptions, a libcall corresponding to an
3907 operation that may trap may also trap. */
3908 /* ??? See the comment in front of make_reg_eh_region_note. */
3909 if (flag_non_call_exceptions && may_trap_p (equiv))
3911 for (insn = insns; insn; insn = NEXT_INSN (insn))
3914 rtx note = find_reg_note (insn, REG_EH_REGION, NULL_RTX);
3917 int lp_nr = INTVAL (XEXP (note, 0));
3918 if (lp_nr == 0 || lp_nr == INT_MIN)
3919 remove_note (insn, note);
3925 /* Look for any CALL_INSNs in this sequence, and attach a REG_EH_REGION
3926 reg note to indicate that this call cannot throw or execute a nonlocal
3927 goto (unless there is already a REG_EH_REGION note, in which case
3929 for (insn = insns; insn; insn = NEXT_INSN (insn))
3931 make_reg_eh_region_note_nothrow_nononlocal (insn);
3934 /* First emit all insns that set pseudos. Remove them from the list as
3935 we go. Avoid insns that set pseudos which were referenced in previous
3936 insns. These can be generated by move_by_pieces, for example,
3937 to update an address. Similarly, avoid insns that reference things
3938 set in previous insns. */
3940 for (insn = insns; insn; insn = next)
3942 rtx set = single_set (insn);
3944 next = NEXT_INSN (insn);
3946 if (set != 0 && REG_P (SET_DEST (set))
3947 && REGNO (SET_DEST (set)) >= FIRST_PSEUDO_REGISTER)
3949 struct no_conflict_data data;
3951 data.target = const0_rtx;
3955 note_stores (PATTERN (insn), no_conflict_move_test, &data);
3956 if (! data.must_stay)
3958 if (PREV_INSN (insn))
3959 NEXT_INSN (PREV_INSN (insn)) = next;
3964 PREV_INSN (next) = PREV_INSN (insn);
3970 /* Some ports use a loop to copy large arguments onto the stack.
3971 Don't move anything outside such a loop. */
3976 /* Write the remaining insns followed by the final copy. */
3977 for (insn = insns; insn; insn = next)
3979 next = NEXT_INSN (insn);
3984 last = emit_move_insn (target, result);
3985 if (optab_handler (mov_optab, GET_MODE (target))->insn_code
3986 != CODE_FOR_nothing)
3987 set_unique_reg_note (last, REG_EQUAL, copy_rtx (equiv));
3989 if (final_dest != target)
3990 emit_move_insn (final_dest, target);
3993 /* Nonzero if we can perform a comparison of mode MODE straightforwardly.
3994 PURPOSE describes how this comparison will be used. CODE is the rtx
3995 comparison code we will be using.
3997 ??? Actually, CODE is slightly weaker than that. A target is still
3998 required to implement all of the normal bcc operations, but not
3999 required to implement all (or any) of the unordered bcc operations. */
4002 can_compare_p (enum rtx_code code, enum machine_mode mode,
4003 enum can_compare_purpose purpose)
4006 test = gen_rtx_fmt_ee (code, mode, const0_rtx, const0_rtx);
4011 if (purpose == ccp_jump
4012 && (icode = optab_handler (cbranch_optab, mode)->insn_code) != CODE_FOR_nothing
4013 && insn_data[icode].operand[0].predicate (test, mode))
4015 if (purpose == ccp_store_flag
4016 && (icode = optab_handler (cstore_optab, mode)->insn_code) != CODE_FOR_nothing
4017 && insn_data[icode].operand[1].predicate (test, mode))
4019 if (purpose == ccp_cmov
4020 && optab_handler (cmov_optab, mode)->insn_code != CODE_FOR_nothing)
4023 mode = GET_MODE_WIDER_MODE (mode);
4024 PUT_MODE (test, mode);
4026 while (mode != VOIDmode);
4031 /* This function is called when we are going to emit a compare instruction that
4032 compares the values found in *PX and *PY, using the rtl operator COMPARISON.
4034 *PMODE is the mode of the inputs (in case they are const_int).
4035 *PUNSIGNEDP nonzero says that the operands are unsigned;
4036 this matters if they need to be widened (as given by METHODS).
4038 If they have mode BLKmode, then SIZE specifies the size of both operands.
4040 This function performs all the setup necessary so that the caller only has
4041 to emit a single comparison insn. This setup can involve doing a BLKmode
4042 comparison or emitting a library call to perform the comparison if no insn
4043 is available to handle it.
4044 The values which are passed in through pointers can be modified; the caller
4045 should perform the comparison on the modified values. Constant
4046 comparisons must have already been folded. */
4049 prepare_cmp_insn (rtx x, rtx y, enum rtx_code comparison, rtx size,
4050 int unsignedp, enum optab_methods methods,
4051 rtx *ptest, enum machine_mode *pmode)
4053 enum machine_mode mode = *pmode;
4055 enum machine_mode cmp_mode;
4056 enum mode_class mclass;
4058 /* The other methods are not needed. */
4059 gcc_assert (methods == OPTAB_DIRECT || methods == OPTAB_WIDEN
4060 || methods == OPTAB_LIB_WIDEN);
4062 /* If we are optimizing, force expensive constants into a register. */
4063 if (CONSTANT_P (x) && optimize
4064 && (rtx_cost (x, COMPARE, optimize_insn_for_speed_p ())
4065 > COSTS_N_INSNS (1)))
4066 x = force_reg (mode, x);
4068 if (CONSTANT_P (y) && optimize
4069 && (rtx_cost (y, COMPARE, optimize_insn_for_speed_p ())
4070 > COSTS_N_INSNS (1)))
4071 y = force_reg (mode, y);
4074 /* Make sure we have a canonical comparison. The RTL
4075 documentation states that canonical comparisons are required only
4076 for targets which have cc0. */
4077 gcc_assert (!CONSTANT_P (x) || CONSTANT_P (y));
4080 /* Don't let both operands fail to indicate the mode. */
4081 if (GET_MODE (x) == VOIDmode && GET_MODE (y) == VOIDmode)
4082 x = force_reg (mode, x);
4083 if (mode == VOIDmode)
4084 mode = GET_MODE (x) != VOIDmode ? GET_MODE (x) : GET_MODE (y);
4086 /* Handle all BLKmode compares. */
4088 if (mode == BLKmode)
4090 enum machine_mode result_mode;
4091 enum insn_code cmp_code;
4096 = GEN_INT (MIN (MEM_ALIGN (x), MEM_ALIGN (y)) / BITS_PER_UNIT);