/* Expand the basic unary and binary arithmetic operations, for GNU compiler.
   Copyright (C) 1987, 1988, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
   1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007
   Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "coretypes.h"

/* Include insn-config.h before expr.h so that HAVE_conditional_move
   is properly defined.  */
#include "insn-config.h"

#include "basic-block.h"
/* Each optab contains info on how this target machine
   can perform a particular operation
   for all sizes and kinds of operands.

   The operation to be performed is often specified
   by passing one of these optabs as an argument.

   See expr.h for documentation of these optabs.  */

optab optab_table[OTI_MAX];

rtx libfunc_table[LTI_MAX];

/* Tables of patterns for converting one mode to another.  */
convert_optab convert_optab_table[COI_MAX];

/* Contains the optab used for each rtx code.  */
optab code_to_optab[NUM_RTX_CODE + 1];

/* Indexed by the rtx-code for a conditional (eg. EQ, LT,...)
   gives the gen_function to make a branch to test that condition.  */

rtxfun bcc_gen_fctn[NUM_RTX_CODE];

/* Indexed by the rtx-code for a conditional (eg. EQ, LT,...)
   gives the insn code to make a store-condition insn
   to test that condition.  */

enum insn_code setcc_gen_code[NUM_RTX_CODE];
#ifdef HAVE_conditional_move
/* Indexed by the machine mode, gives the insn code to make a conditional
   move insn.  This is not indexed by the rtx-code like bcc_gen_fctn and
   setcc_gen_code to cut down on the number of named patterns.  Consider a day
   when a lot more rtx codes are conditional (eg: for the ARM).  */

enum insn_code movcc_gen_code[NUM_MACHINE_MODES];
#endif

/* Indexed by the machine mode, gives the insn code for vector conditional
   operation.  */

enum insn_code vcond_gen_code[NUM_MACHINE_MODES];
enum insn_code vcondu_gen_code[NUM_MACHINE_MODES];
/* The insn generating function cannot take an rtx_code argument.
   TRAP_RTX is used as an rtx argument.  Its code is replaced with
   the code to be used in the trap insn and all other fields are ignored.  */
static GTY(()) rtx trap_rtx;

static void prepare_float_lib_cmp (rtx *, rtx *, enum rtx_code *,
                                   enum machine_mode *, int *);
static rtx expand_unop_direct (enum machine_mode, optab, rtx, rtx, int);

/* Current libcall id.  It doesn't matter what these are, as long
   as they are unique to each libcall that is emitted.  */
static HOST_WIDE_INT libcall_id = 0;

/* Debug facility for use in GDB.  */
void debug_optab_libfuncs (void);

#ifndef HAVE_conditional_trap
#define HAVE_conditional_trap 0
#define gen_conditional_trap(a,b) (gcc_unreachable (), NULL_RTX)
#endif

/* Prefixes for the current version of decimal floating point (BID vs. DPD).  */
#if ENABLE_DECIMAL_BID_FORMAT
#define DECIMAL_PREFIX "bid_"
#else
#define DECIMAL_PREFIX "dpd_"
#endif
/* Info about libfunc.  We use the same hashtable for normal optabs and
   conversion optabs.  In the first case mode2 is unused.  */
struct libfunc_entry GTY(())
{
  size_t optab;
  enum machine_mode mode1, mode2;
  rtx libfunc;
};

/* Hash table used to convert declarations into nodes.  */
static GTY((param_is (struct libfunc_entry))) htab_t libfunc_hash;
/* Used for attribute_hash.  */

static hashval_t
hash_libfunc (const void *p)
{
  const struct libfunc_entry *const e = (const struct libfunc_entry *) p;

  return (((int) e->mode1 + (int) e->mode2 * NUM_MACHINE_MODES)
          ^ e->optab);
}

/* Used for optab_hash.  */

static int
eq_libfunc (const void *p, const void *q)
{
  const struct libfunc_entry *const e1 = (const struct libfunc_entry *) p;
  const struct libfunc_entry *const e2 = (const struct libfunc_entry *) q;

  return (e1->optab == e2->optab
          && e1->mode1 == e2->mode1
          && e1->mode2 == e2->mode2);
}
/* Return the libfunc corresponding to the operation defined by OPTAB
   converting from MODE2 to MODE1.  Trigger lazy initialization if needed;
   return NULL if no libfunc is available.  */
rtx
convert_optab_libfunc (convert_optab optab, enum machine_mode mode1,
                       enum machine_mode mode2)
{
  struct libfunc_entry e;
  struct libfunc_entry **slot;

  e.optab = (size_t) (convert_optab_table[0] - optab);
  e.mode1 = mode1;
  e.mode2 = mode2;
  slot = (struct libfunc_entry **) htab_find_slot (libfunc_hash, &e, NO_INSERT);
  if (!slot)
    {
      if (optab->libcall_gen)
        {
          optab->libcall_gen (optab, optab->libcall_basename, mode1, mode2);
          slot = (struct libfunc_entry **) htab_find_slot (libfunc_hash, &e,
                                                           NO_INSERT);
          if (slot)
            return (*slot)->libfunc;
          else
            return NULL;
        }
      return NULL;
    }
  return (*slot)->libfunc;
}
/* Return the libfunc corresponding to the operation defined by OPTAB in MODE.
   Trigger lazy initialization if needed; return NULL if no libfunc is
   available.  */
rtx
optab_libfunc (optab optab, enum machine_mode mode)
{
  struct libfunc_entry e;
  struct libfunc_entry **slot;

  e.optab = (size_t) (optab_table[0] - optab);
  e.mode1 = mode;
  e.mode2 = VOIDmode;
  slot = (struct libfunc_entry **) htab_find_slot (libfunc_hash, &e, NO_INSERT);
  if (!slot)
    {
      if (optab->libcall_gen)
        {
          optab->libcall_gen (optab, optab->libcall_basename,
                              optab->libcall_suffix, mode);
          slot = (struct libfunc_entry **) htab_find_slot (libfunc_hash,
                                                           &e, NO_INSERT);
          if (slot)
            return (*slot)->libfunc;
          else
            return NULL;
        }
      return NULL;
    }
  return (*slot)->libfunc;
}
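
/* For illustration: a typical use of the lookup above is to fetch the
   soft-float routine for an operation, e.g. roughly

     rtx libfunc = optab_libfunc (add_optab, SFmode);

   which, on a target without hardware SFmode addition, yields the symbol
   for a routine such as "__addsf3" once the lazy initialization has run.  */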
/* Add a REG_EQUAL note to the last insn in INSNS.  TARGET is being set to
   the result of operation CODE applied to OP0 (and OP1 if it is a binary
   operation).

   If the last insn does not set TARGET, don't do anything, but return 1.

   If a previous insn sets TARGET and TARGET is one of OP0 or OP1,
   don't add the REG_EQUAL note but return 0.  Our caller can then try
   again, ensuring that TARGET is not one of the operands.  */
static int
add_equal_note (rtx insns, rtx target, enum rtx_code code, rtx op0, rtx op1)
{
  rtx last_insn, insn, set;
  rtx note;

  gcc_assert (insns && INSN_P (insns) && NEXT_INSN (insns));

  if (GET_RTX_CLASS (code) != RTX_COMM_ARITH
      && GET_RTX_CLASS (code) != RTX_BIN_ARITH
      && GET_RTX_CLASS (code) != RTX_COMM_COMPARE
      && GET_RTX_CLASS (code) != RTX_COMPARE
      && GET_RTX_CLASS (code) != RTX_UNARY)
    return 1;

  if (GET_CODE (target) == ZERO_EXTRACT)
    return 1;

  for (last_insn = insns;
       NEXT_INSN (last_insn) != NULL_RTX;
       last_insn = NEXT_INSN (last_insn))
    ;

  set = single_set (last_insn);
  if (set == NULL_RTX)
    return 1;

  if (! rtx_equal_p (SET_DEST (set), target)
      /* For a STRICT_LOW_PART, the REG_NOTE applies to what is inside it.  */
      && (GET_CODE (SET_DEST (set)) != STRICT_LOW_PART
          || ! rtx_equal_p (XEXP (SET_DEST (set), 0), target)))
    return 1;

  /* If TARGET is in OP0 or OP1, check if anything in SEQ sets TARGET
     besides the last insn.  */
  if (reg_overlap_mentioned_p (target, op0)
      || (op1 && reg_overlap_mentioned_p (target, op1)))
    {
      insn = PREV_INSN (last_insn);
      while (insn != NULL_RTX)
        {
          if (reg_set_p (target, insn))
            return 0;

          insn = PREV_INSN (insn);
        }
    }

  if (GET_RTX_CLASS (code) == RTX_UNARY)
    note = gen_rtx_fmt_e (code, GET_MODE (target), copy_rtx (op0));
  else
    note = gen_rtx_fmt_ee (code, GET_MODE (target), copy_rtx (op0), copy_rtx (op1));

  set_unique_reg_note (last_insn, REG_EQUAL, note);

  return 1;
}
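
/* For illustration: after expanding a multi-insn SImode addition of
   pseudos r1 and r2 into r0, the note attached above has roughly the
   form (REG_EQUAL (plus:SI (reg:SI r1) (reg:SI r2))), letting later
   passes such as CSE rediscover the value computed into r0.  */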
/* Widen OP to MODE and return the rtx for the widened operand.  UNSIGNEDP
   says whether OP is signed or unsigned.  NO_EXTEND is nonzero if we need
   not actually do a sign-extend or zero-extend, but can leave the
   higher-order bits of the result rtx undefined, for example, in the case
   of logical operations, but not right shifts.  */

static rtx
widen_operand (rtx op, enum machine_mode mode, enum machine_mode oldmode,
               int unsignedp, int no_extend)
{
  rtx result;

  /* If we don't have to extend and this is a constant, return it.  */
  if (no_extend && GET_MODE (op) == VOIDmode)
    return op;

  /* If we must extend do so.  If OP is a SUBREG for a promoted object, also
     extend since it will be more efficient to do so unless the signedness of
     a promoted object differs from our extension.  */
  if (! no_extend
      || (GET_CODE (op) == SUBREG && SUBREG_PROMOTED_VAR_P (op)
          && SUBREG_PROMOTED_UNSIGNED_P (op) == unsignedp))
    return convert_modes (mode, oldmode, op, unsignedp);

  /* If MODE is no wider than a single word, we return a paradoxical
     SUBREG.  */
  if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
    return gen_rtx_SUBREG (mode, force_reg (GET_MODE (op), op), 0);

  /* Otherwise, get an object of MODE, clobber it, and set the low-order
     part to OP.  */

  result = gen_reg_rtx (mode);
  emit_insn (gen_rtx_CLOBBER (VOIDmode, result));
  emit_move_insn (gen_lowpart (GET_MODE (op), result), op);
  return result;
}
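
/* For illustration: widening a QImode register for a bitwise AND (a case
   where NO_EXTEND is nonzero) can simply return the paradoxical subreg
   (subreg:SI (reg:QI n) 0); the high bits are don't-cares because the
   caller only uses the low QImode part of the result.  */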
326 /* Return the optab used for computing the operation given by
327 the tree code, CODE. This function is not always usable (for
328 example, it cannot give complete results for multiplication
329 or division) but probably ought to be relied on more widely
330 throughout the expander. */
332 optab_for_tree_code (enum tree_code code, const_tree type)
344 return one_cmpl_optab;
353 return TYPE_UNSIGNED (type) ? umod_optab : smod_optab;
361 if (TYPE_SATURATING(type))
362 return TYPE_UNSIGNED(type) ? usdiv_optab : ssdiv_optab;
363 return TYPE_UNSIGNED (type) ? udiv_optab : sdiv_optab;
366 if (TYPE_SATURATING(type))
367 return TYPE_UNSIGNED(type) ? usashl_optab : ssashl_optab;
371 return TYPE_UNSIGNED (type) ? lshr_optab : ashr_optab;
380 return TYPE_UNSIGNED (type) ? umax_optab : smax_optab;
383 return TYPE_UNSIGNED (type) ? umin_optab : smin_optab;
385 case REALIGN_LOAD_EXPR:
386 return vec_realign_load_optab;
389 return TYPE_UNSIGNED (type) ? usum_widen_optab : ssum_widen_optab;
392 return TYPE_UNSIGNED (type) ? udot_prod_optab : sdot_prod_optab;
395 return TYPE_UNSIGNED (type) ? reduc_umax_optab : reduc_smax_optab;
398 return TYPE_UNSIGNED (type) ? reduc_umin_optab : reduc_smin_optab;
400 case REDUC_PLUS_EXPR:
401 return TYPE_UNSIGNED (type) ? reduc_uplus_optab : reduc_splus_optab;
403 case VEC_LSHIFT_EXPR:
404 return vec_shl_optab;
406 case VEC_RSHIFT_EXPR:
407 return vec_shr_optab;
409 case VEC_WIDEN_MULT_HI_EXPR:
410 return TYPE_UNSIGNED (type) ?
411 vec_widen_umult_hi_optab : vec_widen_smult_hi_optab;
413 case VEC_WIDEN_MULT_LO_EXPR:
414 return TYPE_UNSIGNED (type) ?
415 vec_widen_umult_lo_optab : vec_widen_smult_lo_optab;
417 case VEC_UNPACK_HI_EXPR:
418 return TYPE_UNSIGNED (type) ?
419 vec_unpacku_hi_optab : vec_unpacks_hi_optab;
421 case VEC_UNPACK_LO_EXPR:
422 return TYPE_UNSIGNED (type) ?
423 vec_unpacku_lo_optab : vec_unpacks_lo_optab;
425 case VEC_UNPACK_FLOAT_HI_EXPR:
426 /* The signedness is determined from input operand. */
427 return TYPE_UNSIGNED (type) ?
428 vec_unpacku_float_hi_optab : vec_unpacks_float_hi_optab;
430 case VEC_UNPACK_FLOAT_LO_EXPR:
431 /* The signedness is determined from input operand. */
432 return TYPE_UNSIGNED (type) ?
433 vec_unpacku_float_lo_optab : vec_unpacks_float_lo_optab;
435 case VEC_PACK_TRUNC_EXPR:
436 return vec_pack_trunc_optab;
438 case VEC_PACK_SAT_EXPR:
439 return TYPE_UNSIGNED (type) ? vec_pack_usat_optab : vec_pack_ssat_optab;
441 case VEC_PACK_FIX_TRUNC_EXPR:
442 /* The signedness is determined from output operand. */
443 return TYPE_UNSIGNED (type) ?
444 vec_pack_ufix_trunc_optab : vec_pack_sfix_trunc_optab;
450 trapv = INTEGRAL_TYPE_P (type) && TYPE_OVERFLOW_TRAPS (type);
453 case POINTER_PLUS_EXPR:
455 if (TYPE_SATURATING(type))
456 return TYPE_UNSIGNED(type) ? usadd_optab : ssadd_optab;
457 return trapv ? addv_optab : add_optab;
460 if (TYPE_SATURATING(type))
461 return TYPE_UNSIGNED(type) ? ussub_optab : sssub_optab;
462 return trapv ? subv_optab : sub_optab;
465 if (TYPE_SATURATING(type))
466 return TYPE_UNSIGNED(type) ? usmul_optab : ssmul_optab;
467 return trapv ? smulv_optab : smul_optab;
470 if (TYPE_SATURATING(type))
471 return TYPE_UNSIGNED(type) ? usneg_optab : ssneg_optab;
472 return trapv ? negv_optab : neg_optab;
475 return trapv ? absv_optab : abs_optab;
477 case VEC_EXTRACT_EVEN_EXPR:
478 return vec_extract_even_optab;
480 case VEC_EXTRACT_ODD_EXPR:
481 return vec_extract_odd_optab;
483 case VEC_INTERLEAVE_HIGH_EXPR:
484 return vec_interleave_high_optab;
486 case VEC_INTERLEAVE_LOW_EXPR:
487 return vec_interleave_low_optab;
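
/* For illustration: optab_for_tree_code (PLUS_EXPR, type) returns
   usadd_optab or ssadd_optab for saturating fixed-point types,
   addv_optab for signed integers under -ftrapv, and plain add_optab
   otherwise, mirroring the PLUS_EXPR case above.  */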
/* Expand vector widening operations.

   There are two different classes of operations handled here:
   1) Operations whose result is wider than all the arguments to the operation.
      Examples: VEC_UNPACK_HI/LO_EXPR, VEC_WIDEN_MULT_HI/LO_EXPR
      In this case OP0 and optionally OP1 would be initialized,
      but WIDE_OP wouldn't (not relevant for this case).
   2) Operations whose result is of the same size as the last argument to the
      operation, but wider than all the other arguments to the operation.
      Examples: WIDEN_SUM_EXPR, VEC_DOT_PROD_EXPR.
      In this case WIDE_OP, OP0 and optionally OP1 would be initialized.

   E.g, when called to expand the following operations, this is how
   the arguments will be initialized:
                                nops    OP0     OP1     WIDE_OP
   widening-sum                 2       oprnd0  -       oprnd1
   widening-dot-product         3       oprnd0  oprnd1  oprnd2
   widening-mult                2       oprnd0  oprnd1  -
   type-promotion (vec-unpack)  1       oprnd0  -       -        */
516 expand_widen_pattern_expr (tree exp, rtx op0, rtx op1, rtx wide_op, rtx target,
519 tree oprnd0, oprnd1, oprnd2;
520 enum machine_mode wmode = 0, tmode0, tmode1 = 0;
521 optab widen_pattern_optab;
523 enum machine_mode xmode0, xmode1 = 0, wxmode = 0;
526 rtx xop0, xop1, wxop;
527 int nops = TREE_OPERAND_LENGTH (exp);
529 oprnd0 = TREE_OPERAND (exp, 0);
530 tmode0 = TYPE_MODE (TREE_TYPE (oprnd0));
531 widen_pattern_optab =
532 optab_for_tree_code (TREE_CODE (exp), TREE_TYPE (oprnd0));
533 icode = (int) optab_handler (widen_pattern_optab, tmode0)->insn_code;
534 gcc_assert (icode != CODE_FOR_nothing);
535 xmode0 = insn_data[icode].operand[1].mode;
539 oprnd1 = TREE_OPERAND (exp, 1);
540 tmode1 = TYPE_MODE (TREE_TYPE (oprnd1));
541 xmode1 = insn_data[icode].operand[2].mode;
544 /* The last operand is of a wider mode than the rest of the operands. */
552 gcc_assert (tmode1 == tmode0);
554 oprnd2 = TREE_OPERAND (exp, 2);
555 wmode = TYPE_MODE (TREE_TYPE (oprnd2));
556 wxmode = insn_data[icode].operand[3].mode;
560 wmode = wxmode = insn_data[icode].operand[0].mode;
563 || ! (*insn_data[icode].operand[0].predicate) (target, wmode))
564 temp = gen_reg_rtx (wmode);
  /* In case the insn wants input operands in modes different from
     those of the actual operands, convert the operands.  It would
     seem that we don't need to convert CONST_INTs, but we do, so
     that they're properly zero-extended, sign-extended or truncated
     for their mode.  */
578 if (GET_MODE (op0) != xmode0 && xmode0 != VOIDmode)
579 xop0 = convert_modes (xmode0,
580 GET_MODE (op0) != VOIDmode
586 if (GET_MODE (op1) != xmode1 && xmode1 != VOIDmode)
587 xop1 = convert_modes (xmode1,
588 GET_MODE (op1) != VOIDmode
594 if (GET_MODE (wide_op) != wxmode && wxmode != VOIDmode)
595 wxop = convert_modes (wxmode,
596 GET_MODE (wide_op) != VOIDmode
  /* Now, if insn's predicates don't allow our operands, put them into
     pseudo regs.  */
604 if (! (*insn_data[icode].operand[1].predicate) (xop0, xmode0)
605 && xmode0 != VOIDmode)
606 xop0 = copy_to_mode_reg (xmode0, xop0);
610 if (! (*insn_data[icode].operand[2].predicate) (xop1, xmode1)
611 && xmode1 != VOIDmode)
612 xop1 = copy_to_mode_reg (xmode1, xop1);
616 if (! (*insn_data[icode].operand[3].predicate) (wxop, wxmode)
617 && wxmode != VOIDmode)
618 wxop = copy_to_mode_reg (wxmode, wxop);
620 pat = GEN_FCN (icode) (temp, xop0, xop1, wxop);
623 pat = GEN_FCN (icode) (temp, xop0, xop1);
629 if (! (*insn_data[icode].operand[2].predicate) (wxop, wxmode)
630 && wxmode != VOIDmode)
631 wxop = copy_to_mode_reg (wxmode, wxop);
633 pat = GEN_FCN (icode) (temp, xop0, wxop);
636 pat = GEN_FCN (icode) (temp, xop0);
643 /* Generate code to perform an operation specified by TERNARY_OPTAB
644 on operands OP0, OP1 and OP2, with result having machine-mode MODE.
646 UNSIGNEDP is for the case where we have to widen the operands
647 to perform the operation. It says to use zero-extension.
649 If TARGET is nonzero, the value
650 is generated there, if it is convenient to do so.
651 In all cases an rtx is returned for the locus of the value;
652 this may or may not be TARGET. */
655 expand_ternary_op (enum machine_mode mode, optab ternary_optab, rtx op0,
656 rtx op1, rtx op2, rtx target, int unsignedp)
658 int icode = (int) optab_handler (ternary_optab, mode)->insn_code;
659 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
660 enum machine_mode mode1 = insn_data[icode].operand[2].mode;
661 enum machine_mode mode2 = insn_data[icode].operand[3].mode;
664 rtx xop0 = op0, xop1 = op1, xop2 = op2;
666 gcc_assert (optab_handler (ternary_optab, mode)->insn_code
667 != CODE_FOR_nothing);
669 if (!target || !insn_data[icode].operand[0].predicate (target, mode))
670 temp = gen_reg_rtx (mode);
  /* In case the insn wants input operands in modes different from
     those of the actual operands, convert the operands.  It would
     seem that we don't need to convert CONST_INTs, but we do, so
     that they're properly zero-extended, sign-extended or truncated
     for their mode.  */
680 if (GET_MODE (op0) != mode0 && mode0 != VOIDmode)
681 xop0 = convert_modes (mode0,
682 GET_MODE (op0) != VOIDmode
687 if (GET_MODE (op1) != mode1 && mode1 != VOIDmode)
688 xop1 = convert_modes (mode1,
689 GET_MODE (op1) != VOIDmode
694 if (GET_MODE (op2) != mode2 && mode2 != VOIDmode)
695 xop2 = convert_modes (mode2,
696 GET_MODE (op2) != VOIDmode
  /* Now, if insn's predicates don't allow our operands, put them into
     pseudo regs.  */
704 if (!insn_data[icode].operand[1].predicate (xop0, mode0)
705 && mode0 != VOIDmode)
706 xop0 = copy_to_mode_reg (mode0, xop0);
708 if (!insn_data[icode].operand[2].predicate (xop1, mode1)
709 && mode1 != VOIDmode)
710 xop1 = copy_to_mode_reg (mode1, xop1);
712 if (!insn_data[icode].operand[3].predicate (xop2, mode2)
713 && mode2 != VOIDmode)
714 xop2 = copy_to_mode_reg (mode2, xop2);
716 pat = GEN_FCN (icode) (temp, xop0, xop1, xop2);
/* Like expand_binop, but return a constant rtx if the result can be
   calculated at compile time.  The arguments and return value are
   otherwise the same as for expand_binop.  */

static rtx
simplify_expand_binop (enum machine_mode mode, optab binoptab,
                       rtx op0, rtx op1, rtx target, int unsignedp,
                       enum optab_methods methods)
{
  if (CONSTANT_P (op0) && CONSTANT_P (op1))
    {
      rtx x = simplify_binary_operation (binoptab->code, mode, op0, op1);
      if (x)
        return x;
    }

  return expand_binop (mode, binoptab, op0, op1, target, unsignedp, methods);
}
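
/* For illustration: simplify_expand_binop (SImode, add_optab, GEN_INT (2),
   GEN_INT (3), NULL_RTX, 0, OPTAB_LIB_WIDEN) folds to a const_int 5
   without emitting any insns, whereas non-constant operands fall through
   to expand_binop as usual.  */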
/* Like simplify_expand_binop, but always put the result in TARGET.
   Return true if the expansion succeeded.  */

bool
force_expand_binop (enum machine_mode mode, optab binoptab,
                    rtx op0, rtx op1, rtx target, int unsignedp,
                    enum optab_methods methods)
{
  rtx x = simplify_expand_binop (mode, binoptab, op0, op1,
                                 target, unsignedp, methods);
  if (x == 0)
    return false;
  if (x != target)
    emit_move_insn (target, x);
  return true;
}
760 /* Generate insns for VEC_LSHIFT_EXPR, VEC_RSHIFT_EXPR. */
763 expand_vec_shift_expr (tree vec_shift_expr, rtx target)
765 enum insn_code icode;
766 rtx rtx_op1, rtx_op2;
767 enum machine_mode mode1;
768 enum machine_mode mode2;
769 enum machine_mode mode = TYPE_MODE (TREE_TYPE (vec_shift_expr));
770 tree vec_oprnd = TREE_OPERAND (vec_shift_expr, 0);
771 tree shift_oprnd = TREE_OPERAND (vec_shift_expr, 1);
775 switch (TREE_CODE (vec_shift_expr))
777 case VEC_RSHIFT_EXPR:
778 shift_optab = vec_shr_optab;
780 case VEC_LSHIFT_EXPR:
781 shift_optab = vec_shl_optab;
787 icode = (int) optab_handler (shift_optab, mode)->insn_code;
788 gcc_assert (icode != CODE_FOR_nothing);
790 mode1 = insn_data[icode].operand[1].mode;
791 mode2 = insn_data[icode].operand[2].mode;
793 rtx_op1 = expand_normal (vec_oprnd);
794 if (!(*insn_data[icode].operand[1].predicate) (rtx_op1, mode1)
795 && mode1 != VOIDmode)
796 rtx_op1 = force_reg (mode1, rtx_op1);
798 rtx_op2 = expand_normal (shift_oprnd);
799 if (!(*insn_data[icode].operand[2].predicate) (rtx_op2, mode2)
800 && mode2 != VOIDmode)
801 rtx_op2 = force_reg (mode2, rtx_op2);
804 || ! (*insn_data[icode].operand[0].predicate) (target, mode))
805 target = gen_reg_rtx (mode);
807 /* Emit instruction */
808 pat = GEN_FCN (icode) (target, rtx_op1, rtx_op2);
815 /* This subroutine of expand_doubleword_shift handles the cases in which
816 the effective shift value is >= BITS_PER_WORD. The arguments and return
817 value are the same as for the parent routine, except that SUPERWORD_OP1
818 is the shift count to use when shifting OUTOF_INPUT into INTO_TARGET.
819 INTO_TARGET may be null if the caller has decided to calculate it. */
822 expand_superword_shift (optab binoptab, rtx outof_input, rtx superword_op1,
823 rtx outof_target, rtx into_target,
824 int unsignedp, enum optab_methods methods)
826 if (into_target != 0)
827 if (!force_expand_binop (word_mode, binoptab, outof_input, superword_op1,
828 into_target, unsignedp, methods))
831 if (outof_target != 0)
833 /* For a signed right shift, we must fill OUTOF_TARGET with copies
834 of the sign bit, otherwise we must fill it with zeros. */
835 if (binoptab != ashr_optab)
836 emit_move_insn (outof_target, CONST0_RTX (word_mode));
838 if (!force_expand_binop (word_mode, binoptab,
839 outof_input, GEN_INT (BITS_PER_WORD - 1),
840 outof_target, unsignedp, methods))
846 /* This subroutine of expand_doubleword_shift handles the cases in which
847 the effective shift value is < BITS_PER_WORD. The arguments and return
848 value are the same as for the parent routine. */
851 expand_subword_shift (enum machine_mode op1_mode, optab binoptab,
852 rtx outof_input, rtx into_input, rtx op1,
853 rtx outof_target, rtx into_target,
854 int unsignedp, enum optab_methods methods,
855 unsigned HOST_WIDE_INT shift_mask)
857 optab reverse_unsigned_shift, unsigned_shift;
860 reverse_unsigned_shift = (binoptab == ashl_optab ? lshr_optab : ashl_optab);
861 unsigned_shift = (binoptab == ashl_optab ? ashl_optab : lshr_optab);
863 /* The low OP1 bits of INTO_TARGET come from the high bits of OUTOF_INPUT.
864 We therefore need to shift OUTOF_INPUT by (BITS_PER_WORD - OP1) bits in
865 the opposite direction to BINOPTAB. */
866 if (CONSTANT_P (op1) || shift_mask >= BITS_PER_WORD)
868 carries = outof_input;
869 tmp = immed_double_const (BITS_PER_WORD, 0, op1_mode);
870 tmp = simplify_expand_binop (op1_mode, sub_optab, tmp, op1,
875 /* We must avoid shifting by BITS_PER_WORD bits since that is either
876 the same as a zero shift (if shift_mask == BITS_PER_WORD - 1) or
877 has unknown behavior. Do a single shift first, then shift by the
878 remainder. It's OK to use ~OP1 as the remainder if shift counts
879 are truncated to the mode size. */
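          /* For illustration, with BITS_PER_WORD == 32 and a left shift by N:
             the carries become (outof_input >> 1) >> (31 - N) instead of
             outof_input >> (32 - N), so the inner count stays in [0, 31]
             even for N == 0; and when counts are truncated to the mode size,
             31 - N equals ~N masked to 5 bits, which is why the XOR with -1
             below is a valid way to form the remainder.  */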
880 carries = expand_binop (word_mode, reverse_unsigned_shift,
881 outof_input, const1_rtx, 0, unsignedp, methods);
882 if (shift_mask == BITS_PER_WORD - 1)
884 tmp = immed_double_const (-1, -1, op1_mode);
885 tmp = simplify_expand_binop (op1_mode, xor_optab, op1, tmp,
890 tmp = immed_double_const (BITS_PER_WORD - 1, 0, op1_mode);
891 tmp = simplify_expand_binop (op1_mode, sub_optab, tmp, op1,
895 if (tmp == 0 || carries == 0)
897 carries = expand_binop (word_mode, reverse_unsigned_shift,
898 carries, tmp, 0, unsignedp, methods);
902 /* Shift INTO_INPUT logically by OP1. This is the last use of INTO_INPUT
903 so the result can go directly into INTO_TARGET if convenient. */
904 tmp = expand_binop (word_mode, unsigned_shift, into_input, op1,
905 into_target, unsignedp, methods);
909 /* Now OR in the bits carried over from OUTOF_INPUT. */
910 if (!force_expand_binop (word_mode, ior_optab, tmp, carries,
911 into_target, unsignedp, methods))
914 /* Use a standard word_mode shift for the out-of half. */
915 if (outof_target != 0)
916 if (!force_expand_binop (word_mode, binoptab, outof_input, op1,
917 outof_target, unsignedp, methods))
924 #ifdef HAVE_conditional_move
925 /* Try implementing expand_doubleword_shift using conditional moves.
926 The shift is by < BITS_PER_WORD if (CMP_CODE CMP1 CMP2) is true,
927 otherwise it is by >= BITS_PER_WORD. SUBWORD_OP1 and SUPERWORD_OP1
928 are the shift counts to use in the former and latter case. All other
929 arguments are the same as the parent routine. */
932 expand_doubleword_shift_condmove (enum machine_mode op1_mode, optab binoptab,
933 enum rtx_code cmp_code, rtx cmp1, rtx cmp2,
934 rtx outof_input, rtx into_input,
935 rtx subword_op1, rtx superword_op1,
936 rtx outof_target, rtx into_target,
937 int unsignedp, enum optab_methods methods,
938 unsigned HOST_WIDE_INT shift_mask)
940 rtx outof_superword, into_superword;
  /* Put the superword version of the output into OUTOF_SUPERWORD and
     INTO_SUPERWORD.  */
944 outof_superword = outof_target != 0 ? gen_reg_rtx (word_mode) : 0;
945 if (outof_target != 0 && subword_op1 == superword_op1)
947 /* The value INTO_TARGET >> SUBWORD_OP1, which we later store in
948 OUTOF_TARGET, is the same as the value of INTO_SUPERWORD. */
949 into_superword = outof_target;
950 if (!expand_superword_shift (binoptab, outof_input, superword_op1,
951 outof_superword, 0, unsignedp, methods))
956 into_superword = gen_reg_rtx (word_mode);
957 if (!expand_superword_shift (binoptab, outof_input, superword_op1,
958 outof_superword, into_superword,
963 /* Put the subword version directly in OUTOF_TARGET and INTO_TARGET. */
964 if (!expand_subword_shift (op1_mode, binoptab,
965 outof_input, into_input, subword_op1,
966 outof_target, into_target,
967 unsignedp, methods, shift_mask))
970 /* Select between them. Do the INTO half first because INTO_SUPERWORD
971 might be the current value of OUTOF_TARGET. */
972 if (!emit_conditional_move (into_target, cmp_code, cmp1, cmp2, op1_mode,
973 into_target, into_superword, word_mode, false))
976 if (outof_target != 0)
977 if (!emit_conditional_move (outof_target, cmp_code, cmp1, cmp2, op1_mode,
978 outof_target, outof_superword,
986 /* Expand a doubleword shift (ashl, ashr or lshr) using word-mode shifts.
987 OUTOF_INPUT and INTO_INPUT are the two word-sized halves of the first
988 input operand; the shift moves bits in the direction OUTOF_INPUT->
989 INTO_TARGET. OUTOF_TARGET and INTO_TARGET are the equivalent words
990 of the target. OP1 is the shift count and OP1_MODE is its mode.
991 If OP1 is constant, it will have been truncated as appropriate
992 and is known to be nonzero.
994 If SHIFT_MASK is zero, the result of word shifts is undefined when the
995 shift count is outside the range [0, BITS_PER_WORD). This routine must
996 avoid generating such shifts for OP1s in the range [0, BITS_PER_WORD * 2).
998 If SHIFT_MASK is nonzero, all word-mode shift counts are effectively
999 masked by it and shifts in the range [BITS_PER_WORD, SHIFT_MASK) will
1000 fill with zeros or sign bits as appropriate.
   If SHIFT_MASK is BITS_PER_WORD - 1, this routine will synthesize
   a doubleword shift whose equivalent mask is BITS_PER_WORD * 2 - 1.
   Doing this preserves semantics required by SHIFT_COUNT_TRUNCATED.
   In all other cases, shifts by values outside [0, BITS_PER_WORD * 2)
   are undefined.

   BINOPTAB, UNSIGNEDP and METHODS are as for expand_binop.  This function
   may not use INTO_INPUT after modifying INTO_TARGET, and similarly for
   OUTOF_INPUT and OUTOF_TARGET.  OUTOF_TARGET can be null if the parent
   function wants to calculate it itself.

   Return true if the shift could be successfully synthesized.  */
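
/* For illustration: on a 32-bit little-endian target, a DImode ashl by a
   constant N with 0 < N < 32 decomposes into word_mode operations as

     low_result  = low << N;
     high_result = (high << N) | (low >> (32 - N));

   while for 32 <= N < 64 the superword case applies:
   high_result = low << (N - 32) and low_result = 0.  */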
1016 expand_doubleword_shift (enum machine_mode op1_mode, optab binoptab,
1017 rtx outof_input, rtx into_input, rtx op1,
1018 rtx outof_target, rtx into_target,
1019 int unsignedp, enum optab_methods methods,
1020 unsigned HOST_WIDE_INT shift_mask)
1022 rtx superword_op1, tmp, cmp1, cmp2;
1023 rtx subword_label, done_label;
1024 enum rtx_code cmp_code;
1026 /* See if word-mode shifts by BITS_PER_WORD...BITS_PER_WORD * 2 - 1 will
1027 fill the result with sign or zero bits as appropriate. If so, the value
1028 of OUTOF_TARGET will always be (SHIFT OUTOF_INPUT OP1). Recursively call
1029 this routine to calculate INTO_TARGET (which depends on both OUTOF_INPUT
1030 and INTO_INPUT), then emit code to set up OUTOF_TARGET.
1032 This isn't worthwhile for constant shifts since the optimizers will
1033 cope better with in-range shift counts. */
1034 if (shift_mask >= BITS_PER_WORD
1035 && outof_target != 0
1036 && !CONSTANT_P (op1))
1038 if (!expand_doubleword_shift (op1_mode, binoptab,
1039 outof_input, into_input, op1,
1041 unsignedp, methods, shift_mask))
1043 if (!force_expand_binop (word_mode, binoptab, outof_input, op1,
1044 outof_target, unsignedp, methods))
1049 /* Set CMP_CODE, CMP1 and CMP2 so that the rtx (CMP_CODE CMP1 CMP2)
1050 is true when the effective shift value is less than BITS_PER_WORD.
1051 Set SUPERWORD_OP1 to the shift count that should be used to shift
1052 OUTOF_INPUT into INTO_TARGET when the condition is false. */
1053 tmp = immed_double_const (BITS_PER_WORD, 0, op1_mode);
1054 if (!CONSTANT_P (op1) && shift_mask == BITS_PER_WORD - 1)
1056 /* Set CMP1 to OP1 & BITS_PER_WORD. The result is zero iff OP1
1057 is a subword shift count. */
1058 cmp1 = simplify_expand_binop (op1_mode, and_optab, op1, tmp,
1060 cmp2 = CONST0_RTX (op1_mode);
1062 superword_op1 = op1;
1066 /* Set CMP1 to OP1 - BITS_PER_WORD. */
1067 cmp1 = simplify_expand_binop (op1_mode, sub_optab, op1, tmp,
1069 cmp2 = CONST0_RTX (op1_mode);
1071 superword_op1 = cmp1;
1076 /* If we can compute the condition at compile time, pick the
1077 appropriate subroutine. */
1078 tmp = simplify_relational_operation (cmp_code, SImode, op1_mode, cmp1, cmp2);
1079 if (tmp != 0 && GET_CODE (tmp) == CONST_INT)
1081 if (tmp == const0_rtx)
1082 return expand_superword_shift (binoptab, outof_input, superword_op1,
1083 outof_target, into_target,
1084 unsignedp, methods);
1086 return expand_subword_shift (op1_mode, binoptab,
1087 outof_input, into_input, op1,
1088 outof_target, into_target,
1089 unsignedp, methods, shift_mask);
1092 #ifdef HAVE_conditional_move
1093 /* Try using conditional moves to generate straight-line code. */
1095 rtx start = get_last_insn ();
1096 if (expand_doubleword_shift_condmove (op1_mode, binoptab,
1097 cmp_code, cmp1, cmp2,
1098 outof_input, into_input,
1100 outof_target, into_target,
1101 unsignedp, methods, shift_mask))
1103 delete_insns_since (start);
1107 /* As a last resort, use branches to select the correct alternative. */
1108 subword_label = gen_label_rtx ();
1109 done_label = gen_label_rtx ();
1112 do_compare_rtx_and_jump (cmp1, cmp2, cmp_code, false, op1_mode,
1113 0, 0, subword_label);
1116 if (!expand_superword_shift (binoptab, outof_input, superword_op1,
1117 outof_target, into_target,
1118 unsignedp, methods))
1121 emit_jump_insn (gen_jump (done_label));
1123 emit_label (subword_label);
1125 if (!expand_subword_shift (op1_mode, binoptab,
1126 outof_input, into_input, op1,
1127 outof_target, into_target,
1128 unsignedp, methods, shift_mask))
1131 emit_label (done_label);
1135 /* Subroutine of expand_binop. Perform a double word multiplication of
1136 operands OP0 and OP1 both of mode MODE, which is exactly twice as wide
1137 as the target's word_mode. This function return NULL_RTX if anything
1138 goes wrong, in which case it may have already emitted instructions
1139 which need to be deleted.
1141 If we want to multiply two two-word values and have normal and widening
1142 multiplies of single-word values, we can do this with three smaller
1143 multiplications. Note that we do not make a REG_NO_CONFLICT block here
1144 because we are not operating on one word at a time.
1146 The multiplication proceeds as follows:
1147 _______________________
1148 [__op0_high_|__op0_low__]
1149 _______________________
1150 * [__op1_high_|__op1_low__]
1151 _______________________________________________
1152 _______________________
1153 (1) [__op0_low__*__op1_low__]
1154 _______________________
1155 (2a) [__op0_low__*__op1_high_]
1156 _______________________
1157 (2b) [__op0_high_*__op1_low__]
1158 _______________________
1159 (3) [__op0_high_*__op1_high_]
1162 This gives a 4-word result. Since we are only interested in the
1163 lower 2 words, partial result (3) and the upper words of (2a) and
1164 (2b) don't need to be calculated. Hence (2a) and (2b) can be
1165 calculated using non-widening multiplication.
1167 (1), however, needs to be calculated with an unsigned widening
1168 multiplication. If this operation is not directly supported we
1169 try using a signed widening multiplication and adjust the result.
1170 This adjustment works as follows:
1172 If both operands are positive then no adjustment is needed.
   If the operands have different signs, for example op0_low < 0 and
   op1_low >= 0, the instruction treats the most significant bit of
   op0_low as a sign bit instead of a bit with significance
   2**(BITS_PER_WORD-1), i.e. the instruction multiplies op1_low
   with 2**BITS_PER_WORD - op0_low, and two's complements the
   result.  Conclusion: We need to add op1_low * 2**BITS_PER_WORD to
   the result.

   Similarly, if both operands are negative, we need to add
   (op0_low + op1_low) * 2**BITS_PER_WORD.

   We use a trick to adjust quickly.  We logically shift op0_low right
   (op1_low) BITS_PER_WORD-1 steps to get 0 or 1, and add this to
   op0_high (op1_high) before it is used to calculate 2b (2a).  If no
   logical shift exists, we do an arithmetic right shift and subtract
   the 1.  */
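
/* For illustration, writing W for 2**BITS_PER_WORD, the product modulo W*W is

     (op0_high * W + op0_low) * (op1_high * W + op1_low)
       == op0_low * op1_low                                    (1)
          + (op0_low * op1_high + op0_high * op1_low) * W      (2a + 2b)
          (mod W*W)

   which is why partial product (3) never contributes to the low two words.  */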
1192 expand_doubleword_mult (enum machine_mode mode, rtx op0, rtx op1, rtx target,
1193 bool umulp, enum optab_methods methods)
1195 int low = (WORDS_BIG_ENDIAN ? 1 : 0);
1196 int high = (WORDS_BIG_ENDIAN ? 0 : 1);
1197 rtx wordm1 = umulp ? NULL_RTX : GEN_INT (BITS_PER_WORD - 1);
1198 rtx product, adjust, product_high, temp;
1200 rtx op0_high = operand_subword_force (op0, high, mode);
1201 rtx op0_low = operand_subword_force (op0, low, mode);
1202 rtx op1_high = operand_subword_force (op1, high, mode);
1203 rtx op1_low = operand_subword_force (op1, low, mode);
1205 /* If we're using an unsigned multiply to directly compute the product
1206 of the low-order words of the operands and perform any required
1207 adjustments of the operands, we begin by trying two more multiplications
1208 and then computing the appropriate sum.
1210 We have checked above that the required addition is provided.
1211 Full-word addition will normally always succeed, especially if
1212 it is provided at all, so we don't worry about its failure. The
1213 multiplication may well fail, however, so we do handle that. */
1217 /* ??? This could be done with emit_store_flag where available. */
1218 temp = expand_binop (word_mode, lshr_optab, op0_low, wordm1,
1219 NULL_RTX, 1, methods);
1221 op0_high = expand_binop (word_mode, add_optab, op0_high, temp,
1222 NULL_RTX, 0, OPTAB_DIRECT);
1225 temp = expand_binop (word_mode, ashr_optab, op0_low, wordm1,
1226 NULL_RTX, 0, methods);
1229 op0_high = expand_binop (word_mode, sub_optab, op0_high, temp,
1230 NULL_RTX, 0, OPTAB_DIRECT);
1237 adjust = expand_binop (word_mode, smul_optab, op0_high, op1_low,
1238 NULL_RTX, 0, OPTAB_DIRECT);
1242 /* OP0_HIGH should now be dead. */
1246 /* ??? This could be done with emit_store_flag where available. */
1247 temp = expand_binop (word_mode, lshr_optab, op1_low, wordm1,
1248 NULL_RTX, 1, methods);
1250 op1_high = expand_binop (word_mode, add_optab, op1_high, temp,
1251 NULL_RTX, 0, OPTAB_DIRECT);
1254 temp = expand_binop (word_mode, ashr_optab, op1_low, wordm1,
1255 NULL_RTX, 0, methods);
1258 op1_high = expand_binop (word_mode, sub_optab, op1_high, temp,
1259 NULL_RTX, 0, OPTAB_DIRECT);
1266 temp = expand_binop (word_mode, smul_optab, op1_high, op0_low,
1267 NULL_RTX, 0, OPTAB_DIRECT);
1271 /* OP1_HIGH should now be dead. */
1273 adjust = expand_binop (word_mode, add_optab, adjust, temp,
1274 adjust, 0, OPTAB_DIRECT);
  if (target && !REG_P (target))
    target = NULL_RTX;

  if (umulp)
    product = expand_binop (mode, umul_widen_optab, op0_low, op1_low,
                            target, 1, OPTAB_DIRECT);
  else
    product = expand_binop (mode, smul_widen_optab, op0_low, op1_low,
                            target, 1, OPTAB_DIRECT);

  if (product == NULL_RTX)
    return NULL_RTX;

  product_high = operand_subword (product, high, 1, mode);
  adjust = expand_binop (word_mode, add_optab, product_high, adjust,
                         REG_P (product_high) ? product_high : adjust,
                         0, OPTAB_DIRECT);
  emit_move_insn (product_high, adjust);
  return product;
}
/* Wrapper around expand_binop which takes an rtx code to specify
   the operation to perform, not an optab pointer.  All other
   arguments are the same.  */
rtx
expand_simple_binop (enum machine_mode mode, enum rtx_code code, rtx op0,
                     rtx op1, rtx target, int unsignedp,
                     enum optab_methods methods)
{
  optab binop = code_to_optab[(int) code];
  gcc_assert (binop);

  return expand_binop (mode, binop, op0, op1, target, unsignedp, methods);
}
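
/* For illustration: most expanders reach expand_binop through this wrapper,
   e.g. roughly

     x = expand_simple_binop (SImode, AND, op0, op1,
                              target, 1, OPTAB_LIB_WIDEN);

   which maps AND to and_optab via code_to_optab and then defers to
   expand_binop.  */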
/* Return whether OP0 and OP1 should be swapped when expanding a commutative
   binop.  Order them according to commutative_operand_precedence and, if
   possible, try to put TARGET or a pseudo first.  */
static bool
swap_commutative_operands_with_target (rtx target, rtx op0, rtx op1)
{
  int op0_prec = commutative_operand_precedence (op0);
  int op1_prec = commutative_operand_precedence (op1);

  if (op0_prec < op1_prec)
    return true;

  if (op0_prec > op1_prec)
    return false;

  /* With equal precedence, both orders are ok, but it is better if the
     first operand is TARGET, or if both TARGET and OP0 are pseudos.  */
  if (target == 0 || REG_P (target))
    return (REG_P (op1) && !REG_P (op0)) || target == op1;
  else
    return rtx_equal_p (op1, target);
}
1334 /* Return true if BINOPTAB implements a shift operation. */
1337 shift_optab_p (optab binoptab)
1339 switch (binoptab->code)
/* Return true if BINOPTAB implements a commutative binary operation.  */

static bool
commutative_optab_p (optab binoptab)
{
  return (GET_RTX_CLASS (binoptab->code) == RTX_COMM_ARITH
          || binoptab == smul_widen_optab
          || binoptab == umul_widen_optab
          || binoptab == smul_highpart_optab
          || binoptab == umul_highpart_optab);
}
/* X is to be used in mode MODE as an operand to BINOPTAB.  If we're
   optimizing, and if the operand is a constant that costs more than
   1 instruction, force the constant into a register and return that
   register.  Return X otherwise.  UNSIGNEDP says whether X is unsigned.  */

static rtx
avoid_expensive_constant (enum machine_mode mode, optab binoptab,
                          rtx x, bool unsignedp)
{
  if (mode != VOIDmode
      && optimize
      && CONSTANT_P (x)
      && rtx_cost (x, binoptab->code) > COSTS_N_INSNS (1))
    {
      if (GET_CODE (x) == CONST_INT)
        {
          HOST_WIDE_INT intval = trunc_int_for_mode (INTVAL (x), mode);
          if (intval != INTVAL (x))
            x = GEN_INT (intval);
        }
      else
        x = convert_modes (mode, VOIDmode, x, unsignedp);
      x = force_reg (mode, x);
    }
  return x;
}
1394 /* Helper function for expand_binop: handle the case where there
1395 is an insn that directly implements the indicated operation.
1396 Returns null if this is not possible. */
1398 expand_binop_directly (enum machine_mode mode, optab binoptab,
1400 rtx target, int unsignedp, enum optab_methods methods,
1403 int icode = (int) optab_handler (binoptab, mode)->insn_code;
1404 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
1405 enum machine_mode mode1 = insn_data[icode].operand[2].mode;
1406 enum machine_mode tmp_mode;
1409 rtx xop0 = op0, xop1 = op1;
1416 temp = gen_reg_rtx (mode);
1418 /* If it is a commutative operator and the modes would match
1419 if we would swap the operands, we can save the conversions. */
1420 commutative_p = commutative_optab_p (binoptab);
1422 && GET_MODE (xop0) != mode0 && GET_MODE (xop1) != mode1
1423 && GET_MODE (xop0) == mode1 && GET_MODE (xop1) == mode1)
1430 /* If we are optimizing, force expensive constants into a register. */
1431 xop0 = avoid_expensive_constant (mode0, binoptab, xop0, unsignedp);
1432 if (!shift_optab_p (binoptab))
1433 xop1 = avoid_expensive_constant (mode1, binoptab, xop1, unsignedp);
  /* In case the insn wants input operands in modes different from
     those of the actual operands, convert the operands.  It would
     seem that we don't need to convert CONST_INTs, but we do, so
     that they're properly zero-extended, sign-extended or truncated
     for their mode.  */
1441 if (GET_MODE (xop0) != mode0 && mode0 != VOIDmode)
1442 xop0 = convert_modes (mode0,
1443 GET_MODE (xop0) != VOIDmode
1448 if (GET_MODE (xop1) != mode1 && mode1 != VOIDmode)
1449 xop1 = convert_modes (mode1,
1450 GET_MODE (xop1) != VOIDmode
1455 /* If operation is commutative,
1456 try to make the first operand a register.
1457 Even better, try to make it the same as the target.
1458 Also try to make the last operand a constant. */
1460 && swap_commutative_operands_with_target (target, xop0, xop1))
  /* Now, if insn's predicates don't allow our operands, put them into
     pseudo regs.  */
1470 if (!insn_data[icode].operand[1].predicate (xop0, mode0)
1471 && mode0 != VOIDmode)
1472 xop0 = copy_to_mode_reg (mode0, xop0);
1474 if (!insn_data[icode].operand[2].predicate (xop1, mode1)
1475 && mode1 != VOIDmode)
1476 xop1 = copy_to_mode_reg (mode1, xop1);
1478 if (binoptab == vec_pack_trunc_optab
1479 || binoptab == vec_pack_usat_optab
1480 || binoptab == vec_pack_ssat_optab
1481 || binoptab == vec_pack_ufix_trunc_optab
1482 || binoptab == vec_pack_sfix_trunc_optab)
      /* The mode of the result is different from the mode of the
         arguments.  */
      tmp_mode = insn_data[icode].operand[0].mode;
1487 if (GET_MODE_NUNITS (tmp_mode) != 2 * GET_MODE_NUNITS (mode))
1493 if (!insn_data[icode].operand[0].predicate (temp, tmp_mode))
1494 temp = gen_reg_rtx (tmp_mode);
1496 pat = GEN_FCN (icode) (temp, xop0, xop1);
1499 /* If PAT is composed of more than one insn, try to add an appropriate
1500 REG_EQUAL note to it. If we can't because TEMP conflicts with an
1501 operand, call expand_binop again, this time without a target. */
1502 if (INSN_P (pat) && NEXT_INSN (pat) != NULL_RTX
1503 && ! add_equal_note (pat, temp, binoptab->code, xop0, xop1))
1505 delete_insns_since (last);
1506 return expand_binop (mode, binoptab, op0, op1, NULL_RTX,
1507 unsignedp, methods);
1514 delete_insns_since (last);
1518 /* Generate code to perform an operation specified by BINOPTAB
1519 on operands OP0 and OP1, with result having machine-mode MODE.
1521 UNSIGNEDP is for the case where we have to widen the operands
1522 to perform the operation. It says to use zero-extension.
1524 If TARGET is nonzero, the value
1525 is generated there, if it is convenient to do so.
1526 In all cases an rtx is returned for the locus of the value;
1527 this may or may not be TARGET. */
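
/* For illustration: expanding a DImode addition on a 32-bit target with

     expand_binop (DImode, add_optab, op0, op1, target, 1, OPTAB_LIB_WIDEN)

   tries a direct adddi3 pattern first, then the word-at-a-time
   carry-propagating sequence below, and finally a library call if the
   target provides one, returning whichever rtx ends up holding the sum.  */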
1530 expand_binop (enum machine_mode mode, optab binoptab, rtx op0, rtx op1,
1531 rtx target, int unsignedp, enum optab_methods methods)
1533 enum optab_methods next_methods
1534 = (methods == OPTAB_LIB || methods == OPTAB_LIB_WIDEN
1535 ? OPTAB_WIDEN : methods);
1536 enum mode_class class;
1537 enum machine_mode wider_mode;
1540 rtx entry_last = get_last_insn ();
1543 class = GET_MODE_CLASS (mode);
1545 /* If subtracting an integer constant, convert this into an addition of
1546 the negated constant. */
1548 if (binoptab == sub_optab && GET_CODE (op1) == CONST_INT)
1550 op1 = negate_rtx (mode, op1);
1551 binoptab = add_optab;
1554 /* Record where to delete back to if we backtrack. */
1555 last = get_last_insn ();
1557 /* If we can do it with a three-operand insn, do so. */
1559 if (methods != OPTAB_MUST_WIDEN
1560 && optab_handler (binoptab, mode)->insn_code != CODE_FOR_nothing)
1562 temp = expand_binop_directly (mode, binoptab, op0, op1, target,
1563 unsignedp, methods, last);
1568 /* If we were trying to rotate, and that didn't work, try rotating
1569 the other direction before falling back to shifts and bitwise-or. */
1570 if (((binoptab == rotl_optab
1571 && optab_handler (rotr_optab, mode)->insn_code != CODE_FOR_nothing)
1572 || (binoptab == rotr_optab
1573 && optab_handler (rotl_optab, mode)->insn_code != CODE_FOR_nothing))
1574 && class == MODE_INT)
1576 optab otheroptab = (binoptab == rotl_optab ? rotr_optab : rotl_optab);
1578 unsigned int bits = GET_MODE_BITSIZE (mode);
1580 if (GET_CODE (op1) == CONST_INT)
1581 newop1 = GEN_INT (bits - INTVAL (op1));
1582 else if (targetm.shift_truncation_mask (mode) == bits - 1)
1583 newop1 = negate_rtx (mode, op1);
1585 newop1 = expand_binop (mode, sub_optab,
1586 GEN_INT (bits), op1,
1587 NULL_RTX, unsignedp, OPTAB_DIRECT);
1589 temp = expand_binop_directly (mode, otheroptab, op0, newop1,
1590 target, unsignedp, methods, last);
1595 /* If this is a multiply, see if we can do a widening operation that
1596 takes operands of this mode and makes a wider mode. */
1598 if (binoptab == smul_optab
1599 && GET_MODE_WIDER_MODE (mode) != VOIDmode
1600 && ((optab_handler ((unsignedp ? umul_widen_optab : smul_widen_optab),
1601 GET_MODE_WIDER_MODE (mode))->insn_code)
1602 != CODE_FOR_nothing))
1604 temp = expand_binop (GET_MODE_WIDER_MODE (mode),
1605 unsignedp ? umul_widen_optab : smul_widen_optab,
1606 op0, op1, NULL_RTX, unsignedp, OPTAB_DIRECT);
1610 if (GET_MODE_CLASS (mode) == MODE_INT
1611 && TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode),
1612 GET_MODE_BITSIZE (GET_MODE (temp))))
1613 return gen_lowpart (mode, temp);
1615 return convert_to_mode (mode, temp, unsignedp);
1619 /* Look for a wider mode of the same class for which we think we
1620 can open-code the operation. Check for a widening multiply at the
1621 wider mode as well. */
1623 if (CLASS_HAS_WIDER_MODES_P (class)
1624 && methods != OPTAB_DIRECT && methods != OPTAB_LIB)
1625 for (wider_mode = GET_MODE_WIDER_MODE (mode);
1626 wider_mode != VOIDmode;
1627 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
1629 if (optab_handler (binoptab, wider_mode)->insn_code != CODE_FOR_nothing
1630 || (binoptab == smul_optab
1631 && GET_MODE_WIDER_MODE (wider_mode) != VOIDmode
1632 && ((optab_handler ((unsignedp ? umul_widen_optab
1633 : smul_widen_optab),
1634 GET_MODE_WIDER_MODE (wider_mode))->insn_code)
1635 != CODE_FOR_nothing)))
1637 rtx xop0 = op0, xop1 = op1;
1640 /* For certain integer operations, we need not actually extend
1641 the narrow operands, as long as we will truncate
1642 the results to the same narrowness. */
1644 if ((binoptab == ior_optab || binoptab == and_optab
1645 || binoptab == xor_optab
1646 || binoptab == add_optab || binoptab == sub_optab
1647 || binoptab == smul_optab || binoptab == ashl_optab)
1648 && class == MODE_INT)
1651 xop0 = avoid_expensive_constant (mode, binoptab,
1653 if (binoptab != ashl_optab)
1654 xop1 = avoid_expensive_constant (mode, binoptab,
1658 xop0 = widen_operand (xop0, wider_mode, mode, unsignedp, no_extend);
1660 /* The second operand of a shift must always be extended. */
1661 xop1 = widen_operand (xop1, wider_mode, mode, unsignedp,
1662 no_extend && binoptab != ashl_optab);
1664 temp = expand_binop (wider_mode, binoptab, xop0, xop1, NULL_RTX,
1665 unsignedp, OPTAB_DIRECT);
1668 if (class != MODE_INT
1669 || !TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode),
1670 GET_MODE_BITSIZE (wider_mode)))
1673 target = gen_reg_rtx (mode);
1674 convert_move (target, temp, 0);
1678 return gen_lowpart (mode, temp);
1681 delete_insns_since (last);
1685 /* If operation is commutative,
1686 try to make the first operand a register.
1687 Even better, try to make it the same as the target.
1688 Also try to make the last operand a constant. */
1689 if (commutative_optab_p (binoptab)
1690 && swap_commutative_operands_with_target (target, op0, op1))
1697 /* These can be done a word at a time. */
1698 if ((binoptab == and_optab || binoptab == ior_optab || binoptab == xor_optab)
1699 && class == MODE_INT
1700 && GET_MODE_SIZE (mode) > UNITS_PER_WORD
1701 && optab_handler (binoptab, word_mode)->insn_code != CODE_FOR_nothing)
1707 /* If TARGET is the same as one of the operands, the REG_EQUAL note
1708 won't be accurate, so use a new target. */
1709 if (target == 0 || target == op0 || target == op1)
1710 target = gen_reg_rtx (mode);
1714 /* Do the actual arithmetic. */
1715 for (i = 0; i < GET_MODE_BITSIZE (mode) / BITS_PER_WORD; i++)
1717 rtx target_piece = operand_subword (target, i, 1, mode);
1718 rtx x = expand_binop (word_mode, binoptab,
1719 operand_subword_force (op0, i, mode),
1720 operand_subword_force (op1, i, mode),
1721 target_piece, unsignedp, next_methods);
1726 if (target_piece != x)
1727 emit_move_insn (target_piece, x);
1730 insns = get_insns ();
1733 if (i == GET_MODE_BITSIZE (mode) / BITS_PER_WORD)
1735 if (binoptab->code != UNKNOWN)
1737 = gen_rtx_fmt_ee (binoptab->code, mode,
1738 copy_rtx (op0), copy_rtx (op1));
1742 emit_no_conflict_block (insns, target, op0, op1, equiv_value);
1747 /* Synthesize double word shifts from single word shifts. */
1748 if ((binoptab == lshr_optab || binoptab == ashl_optab
1749 || binoptab == ashr_optab)
1750 && class == MODE_INT
1751 && (GET_CODE (op1) == CONST_INT || !optimize_size)
1752 && GET_MODE_SIZE (mode) == 2 * UNITS_PER_WORD
1753 && optab_handler (binoptab, word_mode)->insn_code != CODE_FOR_nothing
1754 && optab_handler (ashl_optab, word_mode)->insn_code != CODE_FOR_nothing
1755 && optab_handler (lshr_optab, word_mode)->insn_code != CODE_FOR_nothing)
1757 unsigned HOST_WIDE_INT shift_mask, double_shift_mask;
1758 enum machine_mode op1_mode;
1760 double_shift_mask = targetm.shift_truncation_mask (mode);
1761 shift_mask = targetm.shift_truncation_mask (word_mode);
1762 op1_mode = GET_MODE (op1) != VOIDmode ? GET_MODE (op1) : word_mode;
1764 /* Apply the truncation to constant shifts. */
1765 if (double_shift_mask > 0 && GET_CODE (op1) == CONST_INT)
1766 op1 = GEN_INT (INTVAL (op1) & double_shift_mask);
1768 if (op1 == CONST0_RTX (op1_mode))
1771 /* Make sure that this is a combination that expand_doubleword_shift
1772 can handle. See the comments there for details. */
1773 if (double_shift_mask == 0
1774 || (shift_mask == BITS_PER_WORD - 1
1775 && double_shift_mask == BITS_PER_WORD * 2 - 1))
1777 rtx insns, equiv_value;
1778 rtx into_target, outof_target;
1779 rtx into_input, outof_input;
1780 int left_shift, outof_word;
1782 /* If TARGET is the same as one of the operands, the REG_EQUAL note
1783 won't be accurate, so use a new target. */
1784 if (target == 0 || target == op0 || target == op1)
1785 target = gen_reg_rtx (mode);
1789 /* OUTOF_* is the word we are shifting bits away from, and
1790 INTO_* is the word that we are shifting bits towards, thus
1791 they differ depending on the direction of the shift and
1792 WORDS_BIG_ENDIAN. */
1794 left_shift = binoptab == ashl_optab;
1795 outof_word = left_shift ^ ! WORDS_BIG_ENDIAN;
1797 outof_target = operand_subword (target, outof_word, 1, mode);
1798 into_target = operand_subword (target, 1 - outof_word, 1, mode);
1800 outof_input = operand_subword_force (op0, outof_word, mode);
1801 into_input = operand_subword_force (op0, 1 - outof_word, mode);
1803 if (expand_doubleword_shift (op1_mode, binoptab,
1804 outof_input, into_input, op1,
1805 outof_target, into_target,
1806 unsignedp, next_methods, shift_mask))
1808 insns = get_insns ();
1811 equiv_value = gen_rtx_fmt_ee (binoptab->code, mode, op0, op1);
1812 emit_no_conflict_block (insns, target, op0, op1, equiv_value);
1819 /* Synthesize double word rotates from single word shifts. */
1820 if ((binoptab == rotl_optab || binoptab == rotr_optab)
1821 && class == MODE_INT
1822 && GET_CODE (op1) == CONST_INT
1823 && GET_MODE_SIZE (mode) == 2 * UNITS_PER_WORD
1824 && optab_handler (ashl_optab, word_mode)->insn_code != CODE_FOR_nothing
1825 && optab_handler (lshr_optab, word_mode)->insn_code != CODE_FOR_nothing)
1828 rtx into_target, outof_target;
1829 rtx into_input, outof_input;
1831 int shift_count, left_shift, outof_word;
1833 /* If TARGET is the same as one of the operands, the REG_EQUAL note
1834 won't be accurate, so use a new target. Do this also if target is not
1835 a REG, first because having a register instead may open optimization
1836 opportunities, and second because if target and op0 happen to be MEMs
1837 designating the same location, we would risk clobbering it too early
1838 in the code sequence we generate below. */
1839 if (target == 0 || target == op0 || target == op1 || ! REG_P (target))
1840 target = gen_reg_rtx (mode);
1844 shift_count = INTVAL (op1);
1846 /* OUTOF_* is the word we are shifting bits away from, and
1847 INTO_* is the word that we are shifting bits towards, thus
1848 they differ depending on the direction of the shift and
1849 WORDS_BIG_ENDIAN. */
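      /* For illustration: rotating a DImode value left by 8 on a 32-bit
         little-endian target yields
           high' = (high << 8) | (low >> 24)
           low'  = (low << 8)  | (high >> 24)
         while a rotate by exactly BITS_PER_WORD is just the word swap
         handled below.  */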
1851 left_shift = (binoptab == rotl_optab);
1852 outof_word = left_shift ^ ! WORDS_BIG_ENDIAN;
1854 outof_target = operand_subword (target, outof_word, 1, mode);
1855 into_target = operand_subword (target, 1 - outof_word, 1, mode);
1857 outof_input = operand_subword_force (op0, outof_word, mode);
1858 into_input = operand_subword_force (op0, 1 - outof_word, mode);
1860 if (shift_count == BITS_PER_WORD)
1862 /* This is just a word swap. */
1863 emit_move_insn (outof_target, into_input);
1864 emit_move_insn (into_target, outof_input);
1869 rtx into_temp1, into_temp2, outof_temp1, outof_temp2;
1870 rtx first_shift_count, second_shift_count;
1871 optab reverse_unsigned_shift, unsigned_shift;
1873 reverse_unsigned_shift = (left_shift ^ (shift_count < BITS_PER_WORD)
1874 ? lshr_optab : ashl_optab);
1876 unsigned_shift = (left_shift ^ (shift_count < BITS_PER_WORD)
1877 ? ashl_optab : lshr_optab);
1879 if (shift_count > BITS_PER_WORD)
1881 first_shift_count = GEN_INT (shift_count - BITS_PER_WORD);
1882 second_shift_count = GEN_INT (2 * BITS_PER_WORD - shift_count);
1886 first_shift_count = GEN_INT (BITS_PER_WORD - shift_count);
1887 second_shift_count = GEN_INT (shift_count);
1890 into_temp1 = expand_binop (word_mode, unsigned_shift,
1891 outof_input, first_shift_count,
1892 NULL_RTX, unsignedp, next_methods);
1893 into_temp2 = expand_binop (word_mode, reverse_unsigned_shift,
1894 into_input, second_shift_count,
1895 NULL_RTX, unsignedp, next_methods);
1897 if (into_temp1 != 0 && into_temp2 != 0)
1898 inter = expand_binop (word_mode, ior_optab, into_temp1, into_temp2,
1899 into_target, unsignedp, next_methods);
1903 if (inter != 0 && inter != into_target)
1904 emit_move_insn (into_target, inter);
1906 outof_temp1 = expand_binop (word_mode, unsigned_shift,
1907 into_input, first_shift_count,
1908 NULL_RTX, unsignedp, next_methods);
1909 outof_temp2 = expand_binop (word_mode, reverse_unsigned_shift,
1910 outof_input, second_shift_count,
1911 NULL_RTX, unsignedp, next_methods);
1913 if (inter != 0 && outof_temp1 != 0 && outof_temp2 != 0)
1914 inter = expand_binop (word_mode, ior_optab,
1915 outof_temp1, outof_temp2,
1916 outof_target, unsignedp, next_methods);
1918 if (inter != 0 && inter != outof_target)
1919 emit_move_insn (outof_target, inter);
1922 insns = get_insns ();
1927 /* One may be tempted to wrap the insns in a REG_NO_CONFLICT
1928 block to help the register allocator a bit. But a multi-word
1929 rotate will need all the input bits when setting the output
1930 bits, so there clearly is a conflict between the input and
1931 output registers. So we can't use a no-conflict block here. */
1937 /* These can be done a word at a time by propagating carries. */
1938 if ((binoptab == add_optab || binoptab == sub_optab)
1939 && class == MODE_INT
1940 && GET_MODE_SIZE (mode) >= 2 * UNITS_PER_WORD
1941 && optab_handler (binoptab, word_mode)->insn_code != CODE_FOR_nothing)
1944 optab otheroptab = binoptab == add_optab ? sub_optab : add_optab;
1945 const unsigned int nwords = GET_MODE_BITSIZE (mode) / BITS_PER_WORD;
1946 rtx carry_in = NULL_RTX, carry_out = NULL_RTX;
1947 rtx xop0, xop1, xtarget;
      /* We can handle either a 1 or -1 value for the carry.  If STORE_FLAG_VALUE
         is one of those, use it.  Otherwise, use 1 since it is the
         one easiest to get.  */
#if STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1
      int normalizep = STORE_FLAG_VALUE;
#else
      int normalizep = 1;
#endif
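
      /* For illustration: for an addition, the carry out of each word is
         computed below as an unsigned "less than" store-flag, roughly
         (sum <u op0_piece), and for a subtraction the borrow is
         (difference >u op0_piece); each carry is then folded into the
         next, more significant word of the result.  */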
1958 /* Prepare the operands. */
1959 xop0 = force_reg (mode, op0);
1960 xop1 = force_reg (mode, op1);
1962 xtarget = gen_reg_rtx (mode);
1964 if (target == 0 || !REG_P (target))
1967 /* Indicate for flow that the entire target reg is being set. */
1969 emit_insn (gen_rtx_CLOBBER (VOIDmode, xtarget));
1971 /* Do the actual arithmetic. */
1972 for (i = 0; i < nwords; i++)
1974 int index = (WORDS_BIG_ENDIAN ? nwords - i - 1 : i);
1975 rtx target_piece = operand_subword (xtarget, index, 1, mode);
1976 rtx op0_piece = operand_subword_force (xop0, index, mode);
1977 rtx op1_piece = operand_subword_force (xop1, index, mode);
1980 /* Main add/subtract of the input operands. */
1981 x = expand_binop (word_mode, binoptab,
1982 op0_piece, op1_piece,
1983 target_piece, unsignedp, next_methods);
1989 /* Store carry from main add/subtract. */
1990 carry_out = gen_reg_rtx (word_mode);
1991 carry_out = emit_store_flag_force (carry_out,
1992 (binoptab == add_optab
1995 word_mode, 1, normalizep);
2002 /* Add/subtract previous carry to main result. */
2003 newx = expand_binop (word_mode,
2004 normalizep == 1 ? binoptab : otheroptab,
2006 NULL_RTX, 1, next_methods);
2010 /* Get out carry from adding/subtracting carry in. */
2011 rtx carry_tmp = gen_reg_rtx (word_mode);
2012 carry_tmp = emit_store_flag_force (carry_tmp,
2013 (binoptab == add_optab
2016 word_mode, 1, normalizep);
2018 /* Logical-ior the two possible carries together. */
2019 carry_out = expand_binop (word_mode, ior_optab,
2020 carry_out, carry_tmp,
2021 carry_out, 0, next_methods);
2025 emit_move_insn (target_piece, newx);
2029 if (x != target_piece)
2030 emit_move_insn (target_piece, x);
2033 carry_in = carry_out;
2036 if (i == GET_MODE_BITSIZE (mode) / (unsigned) BITS_PER_WORD)
2038 if (optab_handler (mov_optab, mode)->insn_code != CODE_FOR_nothing
2039 || ! rtx_equal_p (target, xtarget))
2041 rtx temp = emit_move_insn (target, xtarget);
2043 set_unique_reg_note (temp,
2045 gen_rtx_fmt_ee (binoptab->code, mode,
2056 delete_insns_since (last);
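/* A minimal sketch of the word-at-a-time scheme expanded above, assuming
   two 32-bit words and an unsigned addition (a_lo/a_hi/b_lo/b_hi are
   illustrative names, not the real operands):

     unsigned int lo = a_lo + b_lo;
     unsigned int c0 = lo < a_lo;         (store-flag of the carry out)
     unsigned int hi = a_hi + b_hi;
     unsigned int c1 = hi < a_hi;         (carry out of the main add)
     hi += c0;                            (add the incoming carry)
     unsigned int c2 = hi < c0;           (carry from adding the carry in)

   and c1 | c2 would be the carry into the next word, if there were one;
   subtraction is analogous, with the borrow tests reversed.  */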
2059 /* Attempt to synthesize double word multiplies using a sequence of word
2060 mode multiplications. We first attempt to generate a sequence using a
2061 more efficient unsigned widening multiply, and if that fails we then
2062 try using a signed widening multiply. */
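/* As a sketch of the usual decomposition behind such a synthesis
   (assuming 32-bit words; uint32_t/uint64_t stand in for word_mode and
   the widening multiply):

     uint64_t w  = (uint64_t) a_lo * b_lo;          (widening multiply)
     uint32_t lo = (uint32_t) w;                    (low word of product)
     uint32_t hi = (uint32_t) (w >> 32)
                   + a_lo * b_hi + a_hi * b_lo;     (cross products)

   giving the double-word product hi:lo, truncated to 2*BITS_PER_WORD bits.  */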
2064 if (binoptab == smul_optab
2065 && class == MODE_INT
2066 && GET_MODE_SIZE (mode) == 2 * UNITS_PER_WORD
2067 && optab_handler (smul_optab, word_mode)->insn_code != CODE_FOR_nothing
2068 && optab_handler (add_optab, word_mode)->insn_code != CODE_FOR_nothing)
2070 rtx product = NULL_RTX;
2072 if (optab_handler (umul_widen_optab, mode)->insn_code
2073 != CODE_FOR_nothing)
2075 product = expand_doubleword_mult (mode, op0, op1, target,
2078 delete_insns_since (last);
2081 if (product == NULL_RTX
2082 && optab_handler (smul_widen_optab, mode)->insn_code
2083 != CODE_FOR_nothing)
2085 product = expand_doubleword_mult (mode, op0, op1, target,
2088 delete_insns_since (last);
2091 if (product != NULL_RTX)
2093 if (optab_handler (mov_optab, mode)->insn_code != CODE_FOR_nothing)
2095 temp = emit_move_insn (target ? target : product, product);
2096 set_unique_reg_note (temp,
2098 gen_rtx_fmt_ee (MULT, mode,
2106 /* It can't be open-coded in this mode.
2107 Use a library call if one is available and caller says that's ok. */
2109 libfunc = optab_libfunc (binoptab, mode);
2111 && (methods == OPTAB_LIB || methods == OPTAB_LIB_WIDEN))
2115 enum machine_mode op1_mode = mode;
2120 if (shift_optab_p (binoptab))
2122 op1_mode = targetm.libgcc_shift_count_mode ();
2123 /* Specify unsigned here,
2124 since negative shift counts are meaningless. */
2125 op1x = convert_to_mode (op1_mode, op1, 1);
2128 if (GET_MODE (op0) != VOIDmode
2129 && GET_MODE (op0) != mode)
2130 op0 = convert_to_mode (mode, op0, unsignedp);
2132 /* Pass 1 for NO_QUEUE so we don't lose any increments
2133 if the libcall is cse'd or moved. */
2134 value = emit_library_call_value (libfunc,
2135 NULL_RTX, LCT_CONST, mode, 2,
2136 op0, mode, op1x, op1_mode);
2138 insns = get_insns ();
2141 target = gen_reg_rtx (mode);
2142 emit_libcall_block (insns, target, value,
2143 gen_rtx_fmt_ee (binoptab->code, mode, op0, op1));
2148 delete_insns_since (last);
2150 /* It can't be done in this mode. Can we do it in a wider mode? */
2152 if (! (methods == OPTAB_WIDEN || methods == OPTAB_LIB_WIDEN
2153 || methods == OPTAB_MUST_WIDEN))
2155 /* Caller says, don't even try. */
2156 delete_insns_since (entry_last);
2160 /* Compute the value of METHODS to pass to recursive calls.
2161 Don't allow widening to be tried recursively. */
2163 methods = (methods == OPTAB_LIB_WIDEN ? OPTAB_LIB : OPTAB_DIRECT);
2165 /* Look for a wider mode of the same class for which it appears we can do the operation. */
2168 if (CLASS_HAS_WIDER_MODES_P (class))
2170 for (wider_mode = GET_MODE_WIDER_MODE (mode);
2171 wider_mode != VOIDmode;
2172 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
2174 if ((optab_handler (binoptab, wider_mode)->insn_code
2175 != CODE_FOR_nothing)
2176 || (methods == OPTAB_LIB
2177 && optab_libfunc (binoptab, wider_mode)))
2179 rtx xop0 = op0, xop1 = op1;
2182 /* For certain integer operations, we need not actually extend
2183 the narrow operands, as long as we will truncate
2184 the results to the same narrowness. */
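/* For instance, with 8-bit operands widened to 32 bits, the low 8 bits
   of the result are unaffected by whatever sits in the upper bits:

     (unsigned char) (a + b)
       == (unsigned char) ((a + 256u * j1) + (b + 256u * j2))

   for arbitrary j1 and j2, and likewise for IOR, AND, XOR, SUB, MUL and
   the shifted operand of a left shift (the shift count itself must
   still be extended exactly).  */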
2186 if ((binoptab == ior_optab || binoptab == and_optab
2187 || binoptab == xor_optab
2188 || binoptab == add_optab || binoptab == sub_optab
2189 || binoptab == smul_optab || binoptab == ashl_optab)
2190 && class == MODE_INT)
2193 xop0 = widen_operand (xop0, wider_mode, mode,
2194 unsignedp, no_extend);
2196 /* The second operand of a shift must always be extended. */
2197 xop1 = widen_operand (xop1, wider_mode, mode, unsignedp,
2198 no_extend && binoptab != ashl_optab);
2200 temp = expand_binop (wider_mode, binoptab, xop0, xop1, NULL_RTX,
2201 unsignedp, methods);
2204 if (class != MODE_INT
2205 || !TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode),
2206 GET_MODE_BITSIZE (wider_mode)))
2209 target = gen_reg_rtx (mode);
2210 convert_move (target, temp, 0);
2214 return gen_lowpart (mode, temp);
2217 delete_insns_since (last);
2222 delete_insns_since (entry_last);
2226 /* Expand a binary operator which has both signed and unsigned forms.
2227 UOPTAB is the optab for unsigned operations, and SOPTAB is for signed operations.
2230 If we widen unsigned operands, we may use a signed wider operation instead
2231 of an unsigned wider operation, since the result would be the same. */
2234 sign_expand_binop (enum machine_mode mode, optab uoptab, optab soptab,
2235 rtx op0, rtx op1, rtx target, int unsignedp,
2236 enum optab_methods methods)
2239 optab direct_optab = unsignedp ? uoptab : soptab;
2240 struct optab wide_soptab;
2242 /* Do it without widening, if possible. */
2243 temp = expand_binop (mode, direct_optab, op0, op1, target,
2244 unsignedp, OPTAB_DIRECT);
2245 if (temp || methods == OPTAB_DIRECT)
2248 /* Try widening to a signed int. Make a fake signed optab that
2249 hides any signed insn for direct use. */
2250 wide_soptab = *soptab;
2251 optab_handler (&wide_soptab, mode)->insn_code = CODE_FOR_nothing;
2253 temp = expand_binop (mode, &wide_soptab, op0, op1, target,
2254 unsignedp, OPTAB_WIDEN);
2256 /* For unsigned operands, try widening to an unsigned int. */
2257 if (temp == 0 && unsignedp)
2258 temp = expand_binop (mode, uoptab, op0, op1, target,
2259 unsignedp, OPTAB_WIDEN);
2260 if (temp || methods == OPTAB_WIDEN)
2263 /* Use the right-width libcall if one exists. */
2264 temp = expand_binop (mode, direct_optab, op0, op1, target, unsignedp, OPTAB_LIB);
2265 if (temp || methods == OPTAB_LIB)
2268 /* Must widen and use a libcall, either signed or unsigned. */
2269 temp = expand_binop (mode, &wide_soptab, op0, op1, target,
2270 unsignedp, methods);
2274 return expand_binop (mode, uoptab, op0, op1, target,
2275 unsignedp, methods);
2279 /* Generate code to perform an operation specified by UNOPTAB
2280 on operand OP0, with two results to TARG0 and TARG1.
2281 We assume that the order of the operands for the instruction
2282 is TARG0, TARG1, OP0.
2284 Either TARG0 or TARG1 may be zero, but what that means is that
2285 the result is not actually wanted. We will generate it into
2286 a dummy pseudo-reg and discard it. They may not both be zero.
2288 Returns 1 if this operation can be performed; 0 if not. */
2291 expand_twoval_unop (optab unoptab, rtx op0, rtx targ0, rtx targ1,
2294 enum machine_mode mode = GET_MODE (targ0 ? targ0 : targ1);
2295 enum mode_class class;
2296 enum machine_mode wider_mode;
2297 rtx entry_last = get_last_insn ();
2300 class = GET_MODE_CLASS (mode);
2303 targ0 = gen_reg_rtx (mode);
2305 targ1 = gen_reg_rtx (mode);
2307 /* Record where to go back to if we fail. */
2308 last = get_last_insn ();
2310 if (optab_handler (unoptab, mode)->insn_code != CODE_FOR_nothing)
2312 int icode = (int) optab_handler (unoptab, mode)->insn_code;
2313 enum machine_mode mode0 = insn_data[icode].operand[2].mode;
2317 if (GET_MODE (xop0) != VOIDmode
2318 && GET_MODE (xop0) != mode0)
2319 xop0 = convert_to_mode (mode0, xop0, unsignedp);
2321 /* Now, if insn doesn't accept these operands, put them into pseudos. */
2322 if (!insn_data[icode].operand[2].predicate (xop0, mode0))
2323 xop0 = copy_to_mode_reg (mode0, xop0);
2325 /* We could handle this, but we should always be called with a pseudo
2326 for our targets and all insns should take them as outputs. */
2327 gcc_assert (insn_data[icode].operand[0].predicate (targ0, mode));
2328 gcc_assert (insn_data[icode].operand[1].predicate (targ1, mode));
2330 pat = GEN_FCN (icode) (targ0, targ1, xop0);
2337 delete_insns_since (last);
2340 /* It can't be done in this mode. Can we do it in a wider mode? */
2342 if (CLASS_HAS_WIDER_MODES_P (class))
2344 for (wider_mode = GET_MODE_WIDER_MODE (mode);
2345 wider_mode != VOIDmode;
2346 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
2348 if (optab_handler (unoptab, wider_mode)->insn_code
2349 != CODE_FOR_nothing)
2351 rtx t0 = gen_reg_rtx (wider_mode);
2352 rtx t1 = gen_reg_rtx (wider_mode);
2353 rtx cop0 = convert_modes (wider_mode, mode, op0, unsignedp);
2355 if (expand_twoval_unop (unoptab, cop0, t0, t1, unsignedp))
2357 convert_move (targ0, t0, unsignedp);
2358 convert_move (targ1, t1, unsignedp);
2362 delete_insns_since (last);
2367 delete_insns_since (entry_last);
2371 /* Generate code to perform an operation specified by BINOPTAB
2372 on operands OP0 and OP1, with two results to TARG0 and TARG1.
2373 We assume that the order of the operands for the instruction
2374 is TARG0, OP0, OP1, TARG1, which would fit a pattern like
2375 [(set TARG0 (operate OP0 OP1)) (set TARG1 (operate ...))].
2377 Either TARG0 or TARG1 may be zero, but what that means is that
2378 the result is not actually wanted. We will generate it into
2379 a dummy pseudo-reg and discard it. They may not both be zero.
2381 Returns 1 if this operation can be performed; 0 if not. */
2384 expand_twoval_binop (optab binoptab, rtx op0, rtx op1, rtx targ0, rtx targ1,
2387 enum machine_mode mode = GET_MODE (targ0 ? targ0 : targ1);
2388 enum mode_class class;
2389 enum machine_mode wider_mode;
2390 rtx entry_last = get_last_insn ();
2393 class = GET_MODE_CLASS (mode);
2396 targ0 = gen_reg_rtx (mode);
2398 targ1 = gen_reg_rtx (mode);
2400 /* Record where to go back to if we fail. */
2401 last = get_last_insn ();
2403 if (optab_handler (binoptab, mode)->insn_code != CODE_FOR_nothing)
2405 int icode = (int) optab_handler (binoptab, mode)->insn_code;
2406 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
2407 enum machine_mode mode1 = insn_data[icode].operand[2].mode;
2409 rtx xop0 = op0, xop1 = op1;
2411 /* If we are optimizing, force expensive constants into a register. */
2412 xop0 = avoid_expensive_constant (mode0, binoptab, xop0, unsignedp);
2413 xop1 = avoid_expensive_constant (mode1, binoptab, xop1, unsignedp);
2415 /* In case the insn wants input operands in modes different from
2416 those of the actual operands, convert the operands. It would
2417 seem that we don't need to convert CONST_INTs, but we do, so
2418 that they're properly zero-extended, sign-extended or truncated for their mode. */
2421 if (GET_MODE (op0) != mode0 && mode0 != VOIDmode)
2422 xop0 = convert_modes (mode0,
2423 GET_MODE (op0) != VOIDmode
2428 if (GET_MODE (op1) != mode1 && mode1 != VOIDmode)
2429 xop1 = convert_modes (mode1,
2430 GET_MODE (op1) != VOIDmode
2435 /* Now, if insn doesn't accept these operands, put them into pseudos. */
2436 if (!insn_data[icode].operand[1].predicate (xop0, mode0))
2437 xop0 = copy_to_mode_reg (mode0, xop0);
2439 if (!insn_data[icode].operand[2].predicate (xop1, mode1))
2440 xop1 = copy_to_mode_reg (mode1, xop1);
2442 /* We could handle this, but we should always be called with a pseudo
2443 for our targets and all insns should take them as outputs. */
2444 gcc_assert (insn_data[icode].operand[0].predicate (targ0, mode));
2445 gcc_assert (insn_data[icode].operand[3].predicate (targ1, mode));
2447 pat = GEN_FCN (icode) (targ0, xop0, xop1, targ1);
2454 delete_insns_since (last);
2457 /* It can't be done in this mode. Can we do it in a wider mode? */
2459 if (CLASS_HAS_WIDER_MODES_P (class))
2461 for (wider_mode = GET_MODE_WIDER_MODE (mode);
2462 wider_mode != VOIDmode;
2463 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
2465 if (optab_handler (binoptab, wider_mode)->insn_code
2466 != CODE_FOR_nothing)
2468 rtx t0 = gen_reg_rtx (wider_mode);
2469 rtx t1 = gen_reg_rtx (wider_mode);
2470 rtx cop0 = convert_modes (wider_mode, mode, op0, unsignedp);
2471 rtx cop1 = convert_modes (wider_mode, mode, op1, unsignedp);
2473 if (expand_twoval_binop (binoptab, cop0, cop1,
2476 convert_move (targ0, t0, unsignedp);
2477 convert_move (targ1, t1, unsignedp);
2481 delete_insns_since (last);
2486 delete_insns_since (entry_last);
2490 /* Expand the two-valued library call indicated by BINOPTAB, but
2491 preserve only one of the values. If TARG0 is non-NULL, the first
2492 value is placed into TARG0; otherwise the second value is placed
2493 into TARG1. Exactly one of TARG0 and TARG1 must be non-NULL. The
2494 value stored into TARG0 or TARG1 is equivalent to (CODE OP0 OP1).
2495 This routine assumes that the value returned by the library call is
2496 as if the return value was of an integral mode twice as wide as the
2497 mode of OP0. Returns 1 if the call was successful. */
2500 expand_twoval_binop_libfunc (optab binoptab, rtx op0, rtx op1,
2501 rtx targ0, rtx targ1, enum rtx_code code)
2503 enum machine_mode mode;
2504 enum machine_mode libval_mode;
2509 /* Exactly one of TARG0 or TARG1 should be non-NULL. */
2510 gcc_assert (!targ0 != !targ1);
2512 mode = GET_MODE (op0);
2513 libfunc = optab_libfunc (binoptab, mode);
2517 /* The value returned by the library function will have twice as
2518 many bits as the nominal MODE. */
2519 libval_mode = smallest_mode_for_size (2 * GET_MODE_BITSIZE (mode),
2522 libval = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
2526 /* Get the part of VAL containing the value that we want. */
2527 libval = simplify_gen_subreg (mode, libval, libval_mode,
2528 targ0 ? 0 : GET_MODE_SIZE (mode));
2529 insns = get_insns ();
2531 /* Move the result into the desired location. */
2532 emit_libcall_block (insns, targ0 ? targ0 : targ1, libval,
2533 gen_rtx_fmt_ee (code, mode, op0, op1));
2539 /* Wrapper around expand_unop which takes an rtx code to specify
2540 the operation to perform, not an optab pointer. All other
2541 arguments are the same. */
2543 expand_simple_unop (enum machine_mode mode, enum rtx_code code, rtx op0,
2544 rtx target, int unsignedp)
2546 optab unop = code_to_optab[(int) code];
2549 return expand_unop (mode, unop, op0, target, unsignedp);
2555 /* Try calculating (clz:narrow x) as (clz:wide (zero_extend:wide x)) - ((width wide) - (width narrow)). */
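/* A concrete instance of the identity above, assuming a 16-bit narrow
   mode and a 32-bit wider mode:

     clz16 (x) == __builtin_clz ((uint32_t) x) - (32 - 16)

   for any nonzero 16-bit x; e.g. x == 1 has 31 leading zeros as a
   32-bit value, and 31 - 16 == 15 is indeed clz16 (1).  */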
2557 widen_clz (enum machine_mode mode, rtx op0, rtx target)
2559 enum mode_class class = GET_MODE_CLASS (mode);
2560 if (CLASS_HAS_WIDER_MODES_P (class))
2562 enum machine_mode wider_mode;
2563 for (wider_mode = GET_MODE_WIDER_MODE (mode);
2564 wider_mode != VOIDmode;
2565 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
2567 if (optab_handler (clz_optab, wider_mode)->insn_code
2568 != CODE_FOR_nothing)
2570 rtx xop0, temp, last;
2572 last = get_last_insn ();
2575 target = gen_reg_rtx (mode);
2576 xop0 = widen_operand (op0, wider_mode, mode, true, false);
2577 temp = expand_unop (wider_mode, clz_optab, xop0, NULL_RTX, true);
2579 temp = expand_binop (wider_mode, sub_optab, temp,
2580 GEN_INT (GET_MODE_BITSIZE (wider_mode)
2581 - GET_MODE_BITSIZE (mode)),
2582 target, true, OPTAB_DIRECT);
2584 delete_insns_since (last);
2593 /* Try calculating clz of a double-word quantity as two clz's of word-sized
2594 quantities, choosing which based on whether the high word is nonzero. */
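/* Roughly, for 32-bit words (hi and lo being the two halves of the
   double-word operand):

     clz64 (hi:lo) == (hi != 0 ? clz32 (hi) : clz32 (lo) + 32)

   and the choice is made below with a compare-and-jump on the high word.  */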
2596 expand_doubleword_clz (enum machine_mode mode, rtx op0, rtx target)
2598 rtx xop0 = force_reg (mode, op0);
2599 rtx subhi = gen_highpart (word_mode, xop0);
2600 rtx sublo = gen_lowpart (word_mode, xop0);
2601 rtx hi0_label = gen_label_rtx ();
2602 rtx after_label = gen_label_rtx ();
2603 rtx seq, temp, result;
2605 /* If we were not given a target, use a word_mode register, not a
2606 'mode' register. The result will fit, and nobody is expecting
2607 anything bigger (the return type of __builtin_clz* is int). */
2609 target = gen_reg_rtx (word_mode);
2611 /* In any case, write to a word_mode scratch in both branches of the
2612 conditional, so that there is a single move insn setting
2613 'target' on which to tag a REG_EQUAL note. */
2614 result = gen_reg_rtx (word_mode);
2618 /* If the high word is not equal to zero,
2619 then clz of the full value is clz of the high word. */
2620 emit_cmp_and_jump_insns (subhi, CONST0_RTX (word_mode), EQ, 0,
2621 word_mode, true, hi0_label);
2623 temp = expand_unop_direct (word_mode, clz_optab, subhi, result, true);
2628 convert_move (result, temp, true);
2630 emit_jump_insn (gen_jump (after_label));
2633 /* Else clz of the full value is clz of the low word plus the number
2634 of bits in the high word. */
2635 emit_label (hi0_label);
2637 temp = expand_unop_direct (word_mode, clz_optab, sublo, 0, true);
2640 temp = expand_binop (word_mode, add_optab, temp,
2641 GEN_INT (GET_MODE_BITSIZE (word_mode)),
2642 result, true, OPTAB_DIRECT);
2646 convert_move (result, temp, true);
2648 emit_label (after_label);
2649 convert_move (target, result, true);
2654 add_equal_note (seq, target, CLZ, xop0, 0);
2666 /* Try calculating (bswap:narrow x) as (lshiftrt:wide (bswap:wide x) ((width wide) - (width narrow))). */
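/* For example, with a 16-bit narrow mode and a 32-bit wider mode:

     bswap16 (x) == (uint16_t) (__builtin_bswap32 ((uint32_t) x) >> 16)

   e.g. x == 0xAABB becomes 0xBBAA0000 after the wide bswap and 0xBBAA
   after the shift and truncation.  */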
2668 widen_bswap (enum machine_mode mode, rtx op0, rtx target)
2670 enum mode_class class = GET_MODE_CLASS (mode);
2671 enum machine_mode wider_mode;
2674 if (!CLASS_HAS_WIDER_MODES_P (class))
2677 for (wider_mode = GET_MODE_WIDER_MODE (mode);
2678 wider_mode != VOIDmode;
2679 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
2680 if (optab_handler (bswap_optab, wider_mode)->insn_code != CODE_FOR_nothing)
2685 last = get_last_insn ();
2687 x = widen_operand (op0, wider_mode, mode, true, true);
2688 x = expand_unop (wider_mode, bswap_optab, x, NULL_RTX, true);
2691 x = expand_shift (RSHIFT_EXPR, wider_mode, x,
2692 size_int (GET_MODE_BITSIZE (wider_mode)
2693 - GET_MODE_BITSIZE (mode)),
2699 target = gen_reg_rtx (mode);
2700 emit_move_insn (target, gen_lowpart (mode, x));
2703 delete_insns_since (last);
2708 /* Try calculating bswap as two bswaps of two word-sized operands. */
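/* E.g. for a double-word value made of two 32-bit words, byte-swapping
   the whole value swaps the two words as well as the bytes within them:

     new_hi = bswap32 (old_lo);
     new_lo = bswap32 (old_hi);

   which is why the two sub-results below are stored into opposite
   subwords of the target.  */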
2711 expand_doubleword_bswap (enum machine_mode mode, rtx op, rtx target)
2715 t1 = expand_unop (word_mode, bswap_optab,
2716 operand_subword_force (op, 0, mode), NULL_RTX, true);
2717 t0 = expand_unop (word_mode, bswap_optab,
2718 operand_subword_force (op, 1, mode), NULL_RTX, true);
2721 target = gen_reg_rtx (mode);
2723 emit_insn (gen_rtx_CLOBBER (VOIDmode, target));
2724 emit_move_insn (operand_subword (target, 0, 1, mode), t0);
2725 emit_move_insn (operand_subword (target, 1, 1, mode), t1);
2730 /* Try calculating (parity x) as (and (popcount x) 1), where
2731 popcount can also be done in a wider mode. */
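/* That is, assuming a population-count primitive is available:

     parity (x) == popcount (x) & 1

   since the parity is just the low bit of the number of set bits.  */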
2733 expand_parity (enum machine_mode mode, rtx op0, rtx target)
2735 enum mode_class class = GET_MODE_CLASS (mode);
2736 if (CLASS_HAS_WIDER_MODES_P (class))
2738 enum machine_mode wider_mode;
2739 for (wider_mode = mode; wider_mode != VOIDmode;
2740 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
2742 if (optab_handler (popcount_optab, wider_mode)->insn_code
2743 != CODE_FOR_nothing)
2745 rtx xop0, temp, last;
2747 last = get_last_insn ();
2750 target = gen_reg_rtx (mode);
2751 xop0 = widen_operand (op0, wider_mode, mode, true, false);
2752 temp = expand_unop (wider_mode, popcount_optab, xop0, NULL_RTX,
2755 temp = expand_binop (wider_mode, and_optab, temp, const1_rtx,
2756 target, true, OPTAB_DIRECT);
2758 delete_insns_since (last);
2767 /* Try calculating ctz(x) as K - clz(x & -x),
2768 where K is GET_MODE_BITSIZE(mode) - 1.
2770 Both __builtin_ctz and __builtin_clz are undefined at zero, so we
2771 don't have to worry about what the hardware does in that case. (If
2772 the clz instruction produces the usual value at 0, which is K, the
2773 result of this code sequence will be -1; expand_ffs, below, relies
2774 on this. It might be nice to have it be K instead, for consistency
2775 with the (very few) processors that provide a ctz with a defined
2776 value, but that would take one more instruction, and it would be
2777 less convenient for expand_ffs anyway.) */
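/* A worked example for a 32-bit mode (so K == 31): for x == 0x58,
   x & -x isolates the lowest set bit, 0x8; clz (0x8) == 28, and
   31 - 28 == 3, which is indeed ctz (0x58).  */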
2780 expand_ctz (enum machine_mode mode, rtx op0, rtx target)
2784 if (optab_handler (clz_optab, mode)->insn_code == CODE_FOR_nothing)
2789 temp = expand_unop_direct (mode, neg_optab, op0, NULL_RTX, true);
2791 temp = expand_binop (mode, and_optab, op0, temp, NULL_RTX,
2792 true, OPTAB_DIRECT);
2794 temp = expand_unop_direct (mode, clz_optab, temp, NULL_RTX, true);
2796 temp = expand_binop (mode, sub_optab, GEN_INT (GET_MODE_BITSIZE (mode) - 1),
2798 true, OPTAB_DIRECT);
2808 add_equal_note (seq, temp, CTZ, op0, 0);
2814 /* Try calculating ffs(x) using ctz(x) if we have that instruction, or
2815 else with the clz-based sequence used by expand_ctz.
2817 The ffs builtin promises to return zero for a zero value and ctz/clz
2818 may have an undefined value in that case. If they do not give us a
2819 convenient value, we have to generate a test and branch. */
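/* The identity used is, for nonzero X,

     ffs (x) == ctz (x) + 1

   with ffs (0) == 0 by definition; the code below adds the final 1 and,
   when ctz/clz give no usable value at zero, first forces the
   intermediate result to -1 for a zero input so that the add produces 0.  */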
2821 expand_ffs (enum machine_mode mode, rtx op0, rtx target)
2823 HOST_WIDE_INT val = 0;
2824 bool defined_at_zero = false;
2827 if (optab_handler (ctz_optab, mode)->insn_code != CODE_FOR_nothing)
2831 temp = expand_unop_direct (mode, ctz_optab, op0, 0, true);
2835 defined_at_zero = (CTZ_DEFINED_VALUE_AT_ZERO (mode, val) == 2);
2837 else if (optab_handler (clz_optab, mode)->insn_code != CODE_FOR_nothing)
2840 temp = expand_ctz (mode, op0, 0);
2844 if (CLZ_DEFINED_VALUE_AT_ZERO (mode, val) == 2)
2846 defined_at_zero = true;
2847 val = (GET_MODE_BITSIZE (mode) - 1) - val;
2853 if (defined_at_zero && val == -1)
2854 /* No correction needed at zero. */;
2857 /* We don't try to do anything clever with the situation found
2858 on some processors (e.g. Alpha) where ctz(0:mode) ==
2859 bitsize(mode). If someone can think of a way to send N to -1
2860 and leave alone all values in the range 0..N-1 (where N is a
2861 power of two), cheaper than this test-and-branch, please add it.
2863 The test-and-branch is done after the operation itself, in case
2864 the operation sets condition codes that can be recycled for this.
2865 (This is true on i386, for instance.) */
2867 rtx nonzero_label = gen_label_rtx ();
2868 emit_cmp_and_jump_insns (op0, CONST0_RTX (mode), NE, 0,
2869 mode, true, nonzero_label);
2871 convert_move (temp, GEN_INT (-1), false);
2872 emit_label (nonzero_label);
2875 /* temp now has a value in the range -1..bitsize-1. ffs is supposed
2876 to produce a value in the range 0..bitsize. */
2877 temp = expand_binop (mode, add_optab, temp, GEN_INT (1),
2878 target, false, OPTAB_DIRECT);
2885 add_equal_note (seq, temp, FFS, op0, 0);
2894 /* Extract the OMODE lowpart from VAL, which has IMODE. Under certain
2895 conditions, VAL may already be a SUBREG against which we cannot generate
2896 a further SUBREG. In this case, we expect forcing the value into a
2897 register will work around the situation. */
2900 lowpart_subreg_maybe_copy (enum machine_mode omode, rtx val,
2901 enum machine_mode imode)
2904 ret = lowpart_subreg (omode, val, imode);
2907 val = force_reg (imode, val);
2908 ret = lowpart_subreg (omode, val, imode);
2909 gcc_assert (ret != NULL);
2914 /* Expand a floating point absolute value or negation operation via a
2915 logical operation on the sign bit. */
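/* For IEEE single precision, for instance, the sign is bit 31 of the
   32-bit image, so in terms of that bit image the operations are roughly

     abs:  bits & 0x7fffffff      (AND with the complemented sign mask)
     neg:  bits ^ 0x80000000      (XOR with the sign mask)

   The code below derives the mask position from the mode's real_format
   and applies and_optab or xor_optab, word by word if need be.  */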
2918 expand_absneg_bit (enum rtx_code code, enum machine_mode mode,
2919 rtx op0, rtx target)
2921 const struct real_format *fmt;
2922 int bitpos, word, nwords, i;
2923 enum machine_mode imode;
2924 HOST_WIDE_INT hi, lo;
2927 /* The format has to have a simple sign bit. */
2928 fmt = REAL_MODE_FORMAT (mode);
2932 bitpos = fmt->signbit_rw;
2936 /* Don't create negative zeros if the format doesn't support them. */
2937 if (code == NEG && !fmt->has_signed_zero)
2940 if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
2942 imode = int_mode_for_mode (mode);
2943 if (imode == BLKmode)
2952 if (FLOAT_WORDS_BIG_ENDIAN)
2953 word = (GET_MODE_BITSIZE (mode) - bitpos) / BITS_PER_WORD;
2955 word = bitpos / BITS_PER_WORD;
2956 bitpos = bitpos % BITS_PER_WORD;
2957 nwords = (GET_MODE_BITSIZE (mode) + BITS_PER_WORD - 1) / BITS_PER_WORD;
2960 if (bitpos < HOST_BITS_PER_WIDE_INT)
2963 lo = (HOST_WIDE_INT) 1 << bitpos;
2967 hi = (HOST_WIDE_INT) 1 << (bitpos - HOST_BITS_PER_WIDE_INT);
2973 if (target == 0 || target == op0)
2974 target = gen_reg_rtx (mode);
2980 for (i = 0; i < nwords; ++i)
2982 rtx targ_piece = operand_subword (target, i, 1, mode);
2983 rtx op0_piece = operand_subword_force (op0, i, mode);
2987 temp = expand_binop (imode, code == ABS ? and_optab : xor_optab,
2989 immed_double_const (lo, hi, imode),
2990 targ_piece, 1, OPTAB_LIB_WIDEN);
2991 if (temp != targ_piece)
2992 emit_move_insn (targ_piece, temp);
2995 emit_move_insn (targ_piece, op0_piece);
2998 insns = get_insns ();
3001 temp = gen_rtx_fmt_e (code, mode, copy_rtx (op0));
3002 emit_no_conflict_block (insns, target, op0, NULL_RTX, temp);
3006 temp = expand_binop (imode, code == ABS ? and_optab : xor_optab,
3007 gen_lowpart (imode, op0),
3008 immed_double_const (lo, hi, imode),
3009 gen_lowpart (imode, target), 1, OPTAB_LIB_WIDEN);
3010 target = lowpart_subreg_maybe_copy (mode, temp, imode);
3012 set_unique_reg_note (get_last_insn (), REG_EQUAL,
3013 gen_rtx_fmt_e (code, mode, copy_rtx (op0)));
3019 /* As expand_unop, but will fail rather than attempt the operation in a
3020 different mode or with a libcall. */
3022 expand_unop_direct (enum machine_mode mode, optab unoptab, rtx op0, rtx target,
3025 if (optab_handler (unoptab, mode)->insn_code != CODE_FOR_nothing)
3027 int icode = (int) optab_handler (unoptab, mode)->insn_code;
3028 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
3030 rtx last = get_last_insn ();
3036 temp = gen_reg_rtx (mode);
3038 if (GET_MODE (xop0) != VOIDmode
3039 && GET_MODE (xop0) != mode0)
3040 xop0 = convert_to_mode (mode0, xop0, unsignedp);
3042 /* Now, if insn doesn't accept our operand, put it into a pseudo. */
3044 if (!insn_data[icode].operand[1].predicate (xop0, mode0))
3045 xop0 = copy_to_mode_reg (mode0, xop0);
3047 if (!insn_data[icode].operand[0].predicate (temp, mode))
3048 temp = gen_reg_rtx (mode);
3050 pat = GEN_FCN (icode) (temp, xop0);
3053 if (INSN_P (pat) && NEXT_INSN (pat) != NULL_RTX
3054 && ! add_equal_note (pat, temp, unoptab->code, xop0, NULL_RTX))
3056 delete_insns_since (last);
3057 return expand_unop (mode, unoptab, op0, NULL_RTX, unsignedp);
3065 delete_insns_since (last);
3070 /* Generate code to perform an operation specified by UNOPTAB
3071 on operand OP0, with result having machine-mode MODE.
3073 UNSIGNEDP is for the case where we have to widen the operands
3074 to perform the operation. It says to use zero-extension.
3076 If TARGET is nonzero, the value
3077 is generated there, if it is convenient to do so.
3078 In all cases an rtx is returned for the locus of the value;
3079 this may or may not be TARGET. */
3082 expand_unop (enum machine_mode mode, optab unoptab, rtx op0, rtx target,
3085 enum mode_class class = GET_MODE_CLASS (mode);
3086 enum machine_mode wider_mode;
3090 temp = expand_unop_direct (mode, unoptab, op0, target, unsignedp);
3094 /* It can't be done in this mode. Can we open-code it in a wider mode? */
3096 /* Widening (or narrowing) clz needs special treatment. */
3097 if (unoptab == clz_optab)
3099 temp = widen_clz (mode, op0, target);
3103 if (GET_MODE_SIZE (mode) == 2 * UNITS_PER_WORD
3104 && optab_handler (unoptab, word_mode)->insn_code != CODE_FOR_nothing)
3106 temp = expand_doubleword_clz (mode, op0, target);
3114 /* Widening (or narrowing) bswap needs special treatment. */
3115 if (unoptab == bswap_optab)
3117 temp = widen_bswap (mode, op0, target);
3121 if (GET_MODE_SIZE (mode) == 2 * UNITS_PER_WORD
3122 && optab_handler (unoptab, word_mode)->insn_code != CODE_FOR_nothing)
3124 temp = expand_doubleword_bswap (mode, op0, target);
3132 if (CLASS_HAS_WIDER_MODES_P (class))
3133 for (wider_mode = GET_MODE_WIDER_MODE (mode);
3134 wider_mode != VOIDmode;
3135 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
3137 if (optab_handler (unoptab, wider_mode)->insn_code != CODE_FOR_nothing)
3140 rtx last = get_last_insn ();
3142 /* For certain operations, we need not actually extend
3143 the narrow operand, as long as we will truncate the
3144 results to the same narrowness. */
3146 xop0 = widen_operand (xop0, wider_mode, mode, unsignedp,
3147 (unoptab == neg_optab
3148 || unoptab == one_cmpl_optab)
3149 && class == MODE_INT);
3151 temp = expand_unop (wider_mode, unoptab, xop0, NULL_RTX,
3156 if (class != MODE_INT
3157 || !TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode),
3158 GET_MODE_BITSIZE (wider_mode)))
3161 target = gen_reg_rtx (mode);
3162 convert_move (target, temp, 0);
3166 return gen_lowpart (mode, temp);
3169 delete_insns_since (last);
3173 /* These can be done a word at a time. */
3174 if (unoptab == one_cmpl_optab
3175 && class == MODE_INT
3176 && GET_MODE_SIZE (mode) > UNITS_PER_WORD
3177 && optab_handler (unoptab, word_mode)->insn_code != CODE_FOR_nothing)
3182 if (target == 0 || target == op0)
3183 target = gen_reg_rtx (mode);
3187 /* Do the actual arithmetic. */
3188 for (i = 0; i < GET_MODE_BITSIZE (mode) / BITS_PER_WORD; i++)
3190 rtx target_piece = operand_subword (target, i, 1, mode);
3191 rtx x = expand_unop (word_mode, unoptab,
3192 operand_subword_force (op0, i, mode),
3193 target_piece, unsignedp);
3195 if (target_piece != x)
3196 emit_move_insn (target_piece, x);
3199 insns = get_insns ();
3202 emit_no_conflict_block (insns, target, op0, NULL_RTX,
3203 gen_rtx_fmt_e (unoptab->code, mode,
3208 if (unoptab->code == NEG)
3210 /* Try negating floating point values by flipping the sign bit. */
3211 if (SCALAR_FLOAT_MODE_P (mode))
3213 temp = expand_absneg_bit (NEG, mode, op0, target);
3218 /* If there is no negation pattern, and we have no negative zero,
3219 try subtracting from zero. */
3220 if (!HONOR_SIGNED_ZEROS (mode))
3222 temp = expand_binop (mode, (unoptab == negv_optab
3223 ? subv_optab : sub_optab),
3224 CONST0_RTX (mode), op0, target,
3225 unsignedp, OPTAB_DIRECT);
3231 /* Try calculating parity (x) as popcount (x) % 2. */
3232 if (unoptab == parity_optab)
3234 temp = expand_parity (mode, op0, target);
3239 /* Try implementing ffs (x) in terms of ctz (x) or clz (x). */
3240 if (unoptab == ffs_optab)
3242 temp = expand_ffs (mode, op0, target);
3247 /* Try implementing ctz (x) in terms of clz (x). */
3248 if (unoptab == ctz_optab)
3250 temp = expand_ctz (mode, op0, target);
3256 /* Now try a library call in this mode. */
3257 libfunc = optab_libfunc (unoptab, mode);
3262 enum machine_mode outmode = mode;
3264 /* All of these functions return small values. Thus we choose to
3265 have them return something that isn't a double-word. */
3266 if (unoptab == ffs_optab || unoptab == clz_optab || unoptab == ctz_optab
3267 || unoptab == popcount_optab || unoptab == parity_optab)
3269 = GET_MODE (hard_libcall_value (TYPE_MODE (integer_type_node)));
3273 /* Pass 1 for NO_QUEUE so we don't lose any increments
3274 if the libcall is cse'd or moved. */
3275 value = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST, outmode,
3277 insns = get_insns ();
3280 target = gen_reg_rtx (outmode);
3281 emit_libcall_block (insns, target, value,
3282 gen_rtx_fmt_e (unoptab->code, outmode, op0));
3287 /* It can't be done in this mode. Can we do it in a wider mode? */
3289 if (CLASS_HAS_WIDER_MODES_P (class))
3291 for (wider_mode = GET_MODE_WIDER_MODE (mode);
3292 wider_mode != VOIDmode;
3293 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
3295 if ((optab_handler (unoptab, wider_mode)->insn_code
3296 != CODE_FOR_nothing)
3297 || optab_libfunc (unoptab, wider_mode))
3300 rtx last = get_last_insn ();
3302 /* For certain operations, we need not actually extend
3303 the narrow operand, as long as we will truncate the
3304 results to the same narrowness. */
3306 xop0 = widen_operand (xop0, wider_mode, mode, unsignedp,
3307 (unoptab == neg_optab
3308 || unoptab == one_cmpl_optab)
3309 && class == MODE_INT);
3311 temp = expand_unop (wider_mode, unoptab, xop0, NULL_RTX,
3314 /* If we are generating clz using a wider mode, adjust the result. */
3316 if (unoptab == clz_optab && temp != 0)
3317 temp = expand_binop (wider_mode, sub_optab, temp,
3318 GEN_INT (GET_MODE_BITSIZE (wider_mode)
3319 - GET_MODE_BITSIZE (mode)),
3320 target, true, OPTAB_DIRECT);
3324 if (class != MODE_INT)
3327 target = gen_reg_rtx (mode);
3328 convert_move (target, temp, 0);
3332 return gen_lowpart (mode, temp);
3335 delete_insns_since (last);
3340 /* One final attempt at implementing negation via subtraction,
3341 this time allowing widening of the operand. */
3342 if (unoptab->code == NEG && !HONOR_SIGNED_ZEROS (mode))
3345 temp = expand_binop (mode,
3346 unoptab == negv_optab ? subv_optab : sub_optab,
3347 CONST0_RTX (mode), op0,
3348 target, unsignedp, OPTAB_LIB_WIDEN);
3356 /* Emit code to compute the absolute value of OP0, with result to
3357 TARGET if convenient. (TARGET may be 0.) The return value says
3358 where the result actually is to be found.
3360 MODE is the mode of the operand; the mode of the result is
3361 different but can be deduced from MODE.
3366 expand_abs_nojump (enum machine_mode mode, rtx op0, rtx target,
3367 int result_unsignedp)
3372 result_unsignedp = 1;
3374 /* First try to do it with a special abs instruction. */
3375 temp = expand_unop (mode, result_unsignedp ? abs_optab : absv_optab,
3380 /* For floating point modes, try clearing the sign bit. */
3381 if (SCALAR_FLOAT_MODE_P (mode))
3383 temp = expand_absneg_bit (ABS, mode, op0, target);
3388 /* If we have a MAX insn, we can do this as MAX (x, -x). */
3389 if (optab_handler (smax_optab, mode)->insn_code != CODE_FOR_nothing
3390 && !HONOR_SIGNED_ZEROS (mode))
3392 rtx last = get_last_insn ();
3394 temp = expand_unop (mode, neg_optab, op0, NULL_RTX, 0);
3396 temp = expand_binop (mode, smax_optab, op0, temp, target, 0,
3402 delete_insns_since (last);
3405 /* If this machine has expensive jumps, we can do integer absolute
3406 value of X as (((signed) x >> (W-1)) ^ x) - ((signed) x >> (W-1)),
3407 where W is the width of MODE. */
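/* A worked example for 32-bit x: let m = x >> 31 (arithmetic shift,
   so m is 0 for non-negative x and -1 otherwise); then (x ^ m) - m is
   x when m == 0 and ~x + 1 == -x when m == -1.  E.g. x == -5 gives
   m == -1, x ^ m == 4, and 4 - (-1) == 5.  */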
3409 if (GET_MODE_CLASS (mode) == MODE_INT && BRANCH_COST >= 2)
3411 rtx extended = expand_shift (RSHIFT_EXPR, mode, op0,
3412 size_int (GET_MODE_BITSIZE (mode) - 1),
3415 temp = expand_binop (mode, xor_optab, extended, op0, target, 0,
3418 temp = expand_binop (mode, result_unsignedp ? sub_optab : subv_optab,
3419 temp, extended, target, 0, OPTAB_LIB_WIDEN);
3429 expand_abs (enum machine_mode mode, rtx op0, rtx target,
3430 int result_unsignedp, int safe)
3435 result_unsignedp = 1;
3437 temp = expand_abs_nojump (mode, op0, target, result_unsignedp);
3441 /* If that does not win, use conditional jump and negate. */
3443 /* It is safe to use the target if it is the same as the source,
3444 provided it is a pseudo register. */
3445 if (op0 == target && REG_P (op0)
3446 && REGNO (op0) >= FIRST_PSEUDO_REGISTER)
3449 op1 = gen_label_rtx ();
3450 if (target == 0 || ! safe
3451 || GET_MODE (target) != mode
3452 || (MEM_P (target) && MEM_VOLATILE_P (target))
3454 && REGNO (target) < FIRST_PSEUDO_REGISTER))
3455 target = gen_reg_rtx (mode);
3457 emit_move_insn (target, op0);
3460 do_compare_rtx_and_jump (target, CONST0_RTX (mode), GE, 0, mode,
3461 NULL_RTX, NULL_RTX, op1);
3463 op0 = expand_unop (mode, result_unsignedp ? neg_optab : negv_optab,
3466 emit_move_insn (target, op0);