/* Expand the basic unary and binary arithmetic operations, for GNU compiler.
   Copyright (C) 1987, 1988, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
   1999, 2000, 2001, 2002, 2003, 2004, 2005 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 2, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING.  If not, write to the Free
Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
02110-1301, USA.  */
25 #include "coretypes.h"
29 /* Include insn-config.h before expr.h so that HAVE_conditional_move
30 is properly defined. */
31 #include "insn-config.h"
45 #include "basic-block.h"
48 /* Each optab contains info on how this target machine
49 can perform a particular operation
50 for all sizes and kinds of operands.
52 The operation to be performed is often specified
53 by passing one of these optabs as an argument.
55 See expr.h for documentation of these optabs. */
57 optab optab_table[OTI_MAX];
59 rtx libfunc_table[LTI_MAX];
61 /* Tables of patterns for converting one mode to another. */
62 convert_optab convert_optab_table[COI_MAX];
64 /* Contains the optab used for each rtx code. */
65 optab code_to_optab[NUM_RTX_CODE + 1];
67 /* Indexed by the rtx-code for a conditional (eg. EQ, LT,...)
68 gives the gen_function to make a branch to test that condition. */
70 rtxfun bcc_gen_fctn[NUM_RTX_CODE];
72 /* Indexed by the rtx-code for a conditional (eg. EQ, LT,...)
73 gives the insn code to make a store-condition insn
74 to test that condition. */
76 enum insn_code setcc_gen_code[NUM_RTX_CODE];
78 #ifdef HAVE_conditional_move
79 /* Indexed by the machine mode, gives the insn code to make a conditional
80 move insn. This is not indexed by the rtx-code like bcc_gen_fctn and
81 setcc_gen_code to cut down on the number of named patterns. Consider a day
82 when a lot more rtx codes are conditional (eg: for the ARM). */
84 enum insn_code movcc_gen_code[NUM_MACHINE_MODES];
87 /* Indexed by the machine mode, gives the insn code for vector conditional
90 enum insn_code vcond_gen_code[NUM_MACHINE_MODES];
91 enum insn_code vcondu_gen_code[NUM_MACHINE_MODES];
93 /* The insn generating function can not take an rtx_code argument.
94 TRAP_RTX is used as an rtx argument. Its code is replaced with
95 the code to be used in the trap insn and all other fields are ignored. */
96 static GTY(()) rtx trap_rtx;
98 static int add_equal_note (rtx, rtx, enum rtx_code, rtx, rtx);
99 static rtx widen_operand (rtx, enum machine_mode, enum machine_mode, int,
101 static void prepare_cmp_insn (rtx *, rtx *, enum rtx_code *, rtx,
102 enum machine_mode *, int *,
103 enum can_compare_purpose);
104 static enum insn_code can_fix_p (enum machine_mode, enum machine_mode, int,
106 static enum insn_code can_float_p (enum machine_mode, enum machine_mode, int);
107 static optab new_optab (void);
108 static convert_optab new_convert_optab (void);
109 static inline optab init_optab (enum rtx_code);
110 static inline optab init_optabv (enum rtx_code);
111 static inline convert_optab init_convert_optab (enum rtx_code);
112 static void init_libfuncs (optab, int, int, const char *, int);
113 static void init_integral_libfuncs (optab, const char *, int);
114 static void init_floating_libfuncs (optab, const char *, int);
115 static void init_interclass_conv_libfuncs (convert_optab, const char *,
116 enum mode_class, enum mode_class);
117 static void init_intraclass_conv_libfuncs (convert_optab, const char *,
118 enum mode_class, bool);
119 static void emit_cmp_and_jump_insn_1 (rtx, rtx, enum machine_mode,
120 enum rtx_code, int, rtx);
121 static void prepare_float_lib_cmp (rtx *, rtx *, enum rtx_code *,
122 enum machine_mode *, int *);
123 static rtx widen_clz (enum machine_mode, rtx, rtx);
124 static rtx expand_parity (enum machine_mode, rtx, rtx);
125 static enum rtx_code get_rtx_code (enum tree_code, bool);
126 static rtx vector_compare_rtx (tree, bool, enum insn_code);
/* If the target provides no conditional-trap pattern, supply stubs so the
   uses below still compile; the gen function must never actually be
   reached in that case.  */
#ifndef HAVE_conditional_trap
#define HAVE_conditional_trap 0
#define gen_conditional_trap(a,b) (gcc_unreachable (), NULL_RTX)
#endif
133 /* Add a REG_EQUAL note to the last insn in INSNS. TARGET is being set to
134 the result of operation CODE applied to OP0 (and OP1 if it is a binary
137 If the last insn does not set TARGET, don't do anything, but return 1.
139 If a previous insn sets TARGET and TARGET is one of OP0 or OP1,
140 don't add the REG_EQUAL note but return 0. Our caller can then try
141 again, ensuring that TARGET is not one of the operands. */
144 add_equal_note (rtx insns, rtx target, enum rtx_code code, rtx op0, rtx op1)
146 rtx last_insn, insn, set;
149 gcc_assert (insns && INSN_P (insns) && NEXT_INSN (insns));
151 if (GET_RTX_CLASS (code) != RTX_COMM_ARITH
152 && GET_RTX_CLASS (code) != RTX_BIN_ARITH
153 && GET_RTX_CLASS (code) != RTX_COMM_COMPARE
154 && GET_RTX_CLASS (code) != RTX_COMPARE
155 && GET_RTX_CLASS (code) != RTX_UNARY)
158 if (GET_CODE (target) == ZERO_EXTRACT)
161 for (last_insn = insns;
162 NEXT_INSN (last_insn) != NULL_RTX;
163 last_insn = NEXT_INSN (last_insn))
166 set = single_set (last_insn);
170 if (! rtx_equal_p (SET_DEST (set), target)
171 /* For a STRICT_LOW_PART, the REG_NOTE applies to what is inside it. */
172 && (GET_CODE (SET_DEST (set)) != STRICT_LOW_PART
173 || ! rtx_equal_p (XEXP (SET_DEST (set), 0), target)))
176 /* If TARGET is in OP0 or OP1, check if anything in SEQ sets TARGET
177 besides the last insn. */
178 if (reg_overlap_mentioned_p (target, op0)
179 || (op1 && reg_overlap_mentioned_p (target, op1)))
181 insn = PREV_INSN (last_insn);
182 while (insn != NULL_RTX)
184 if (reg_set_p (target, insn))
187 insn = PREV_INSN (insn);
191 if (GET_RTX_CLASS (code) == RTX_UNARY)
192 note = gen_rtx_fmt_e (code, GET_MODE (target), copy_rtx (op0));
194 note = gen_rtx_fmt_ee (code, GET_MODE (target), copy_rtx (op0), copy_rtx (op1));
196 set_unique_reg_note (last_insn, REG_EQUAL, note);
201 /* Widen OP to MODE and return the rtx for the widened operand. UNSIGNEDP
202 says whether OP is signed or unsigned. NO_EXTEND is nonzero if we need
203 not actually do a sign-extend or zero-extend, but can leave the
204 higher-order bits of the result rtx undefined, for example, in the case
205 of logical operations, but not right shifts. */
208 widen_operand (rtx op, enum machine_mode mode, enum machine_mode oldmode,
209 int unsignedp, int no_extend)
213 /* If we don't have to extend and this is a constant, return it. */
214 if (no_extend && GET_MODE (op) == VOIDmode)
217 /* If we must extend do so. If OP is a SUBREG for a promoted object, also
218 extend since it will be more efficient to do so unless the signedness of
219 a promoted object differs from our extension. */
221 || (GET_CODE (op) == SUBREG && SUBREG_PROMOTED_VAR_P (op)
222 && SUBREG_PROMOTED_UNSIGNED_P (op) == unsignedp))
223 return convert_modes (mode, oldmode, op, unsignedp);
225 /* If MODE is no wider than a single word, we return a paradoxical
227 if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
228 return gen_rtx_SUBREG (mode, force_reg (GET_MODE (op), op), 0);
230 /* Otherwise, get an object of MODE, clobber it, and set the low-order
233 result = gen_reg_rtx (mode);
234 emit_insn (gen_rtx_CLOBBER (VOIDmode, result));
235 emit_move_insn (gen_lowpart (GET_MODE (op), result), op);
239 /* Return the optab used for computing the operation given by
240 the tree code, CODE. This function is not always usable (for
241 example, it cannot give complete results for multiplication
242 or division) but probably ought to be relied on more widely
243 throughout the expander. */
245 optab_for_tree_code (enum tree_code code, tree type)
257 return one_cmpl_optab;
266 return TYPE_UNSIGNED (type) ? umod_optab : smod_optab;
274 return TYPE_UNSIGNED (type) ? udiv_optab : sdiv_optab;
280 return TYPE_UNSIGNED (type) ? lshr_optab : ashr_optab;
289 return TYPE_UNSIGNED (type) ? umax_optab : smax_optab;
292 return TYPE_UNSIGNED (type) ? umin_optab : smin_optab;
294 case REALIGN_LOAD_EXPR:
295 return vec_realign_load_optab;
298 return TYPE_UNSIGNED (type) ? reduc_umax_optab : reduc_smax_optab;
301 return TYPE_UNSIGNED (type) ? reduc_umin_optab : reduc_smin_optab;
303 case REDUC_PLUS_EXPR:
304 return TYPE_UNSIGNED (type) ? reduc_uplus_optab : reduc_splus_optab;
306 case VEC_LSHIFT_EXPR:
307 return vec_shl_optab;
309 case VEC_RSHIFT_EXPR:
310 return vec_shr_optab;
316 trapv = flag_trapv && INTEGRAL_TYPE_P (type) && !TYPE_UNSIGNED (type);
320 return trapv ? addv_optab : add_optab;
323 return trapv ? subv_optab : sub_optab;
326 return trapv ? smulv_optab : smul_optab;
329 return trapv ? negv_optab : neg_optab;
332 return trapv ? absv_optab : abs_optab;
340 /* Generate code to perform an operation specified by TERNARY_OPTAB
341 on operands OP0, OP1 and OP2, with result having machine-mode MODE.
343 UNSIGNEDP is for the case where we have to widen the operands
344 to perform the operation. It says to use zero-extension.
346 If TARGET is nonzero, the value
347 is generated there, if it is convenient to do so.
348 In all cases an rtx is returned for the locus of the value;
349 this may or may not be TARGET. */
352 expand_ternary_op (enum machine_mode mode, optab ternary_optab, rtx op0,
353 rtx op1, rtx op2, rtx target, int unsignedp)
355 int icode = (int) ternary_optab->handlers[(int) mode].insn_code;
356 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
357 enum machine_mode mode1 = insn_data[icode].operand[2].mode;
358 enum machine_mode mode2 = insn_data[icode].operand[3].mode;
361 rtx xop0 = op0, xop1 = op1, xop2 = op2;
363 gcc_assert (ternary_optab->handlers[(int) mode].insn_code
364 != CODE_FOR_nothing);
366 if (!target || !insn_data[icode].operand[0].predicate (target, mode))
367 temp = gen_reg_rtx (mode);
371 /* In case the insn wants input operands in modes different from
372 those of the actual operands, convert the operands. It would
373 seem that we don't need to convert CONST_INTs, but we do, so
374 that they're properly zero-extended, sign-extended or truncated
377 if (GET_MODE (op0) != mode0 && mode0 != VOIDmode)
378 xop0 = convert_modes (mode0,
379 GET_MODE (op0) != VOIDmode
384 if (GET_MODE (op1) != mode1 && mode1 != VOIDmode)
385 xop1 = convert_modes (mode1,
386 GET_MODE (op1) != VOIDmode
391 if (GET_MODE (op2) != mode2 && mode2 != VOIDmode)
392 xop2 = convert_modes (mode2,
393 GET_MODE (op2) != VOIDmode
398 /* Now, if insn's predicates don't allow our operands, put them into
401 if (!insn_data[icode].operand[1].predicate (xop0, mode0)
402 && mode0 != VOIDmode)
403 xop0 = copy_to_mode_reg (mode0, xop0);
405 if (!insn_data[icode].operand[2].predicate (xop1, mode1)
406 && mode1 != VOIDmode)
407 xop1 = copy_to_mode_reg (mode1, xop1);
409 if (!insn_data[icode].operand[3].predicate (xop2, mode2)
410 && mode2 != VOIDmode)
411 xop2 = copy_to_mode_reg (mode2, xop2);
413 pat = GEN_FCN (icode) (temp, xop0, xop1, xop2);
420 /* Like expand_binop, but return a constant rtx if the result can be
421 calculated at compile time. The arguments and return value are
422 otherwise the same as for expand_binop. */
425 simplify_expand_binop (enum machine_mode mode, optab binoptab,
426 rtx op0, rtx op1, rtx target, int unsignedp,
427 enum optab_methods methods)
429 if (CONSTANT_P (op0) && CONSTANT_P (op1))
430 return simplify_gen_binary (binoptab->code, mode, op0, op1);
432 return expand_binop (mode, binoptab, op0, op1, target, unsignedp, methods);
435 /* Like simplify_expand_binop, but always put the result in TARGET.
436 Return true if the expansion succeeded. */
439 force_expand_binop (enum machine_mode mode, optab binoptab,
440 rtx op0, rtx op1, rtx target, int unsignedp,
441 enum optab_methods methods)
443 rtx x = simplify_expand_binop (mode, binoptab, op0, op1,
444 target, unsignedp, methods);
448 emit_move_insn (target, x);
452 /* Generate insns for VEC_LSHIFT_EXPR, VEC_RSHIFT_EXPR. */
455 expand_vec_shift_expr (tree vec_shift_expr, rtx target)
457 enum insn_code icode;
458 rtx rtx_op1, rtx_op2;
459 enum machine_mode mode1;
460 enum machine_mode mode2;
461 enum machine_mode mode = TYPE_MODE (TREE_TYPE (vec_shift_expr));
462 tree vec_oprnd = TREE_OPERAND (vec_shift_expr, 0);
463 tree shift_oprnd = TREE_OPERAND (vec_shift_expr, 1);
467 switch (TREE_CODE (vec_shift_expr))
469 case VEC_RSHIFT_EXPR:
470 shift_optab = vec_shr_optab;
472 case VEC_LSHIFT_EXPR:
473 shift_optab = vec_shl_optab;
479 icode = (int) shift_optab->handlers[(int) mode].insn_code;
480 gcc_assert (icode != CODE_FOR_nothing);
482 mode1 = insn_data[icode].operand[1].mode;
483 mode2 = insn_data[icode].operand[2].mode;
485 rtx_op1 = expand_expr (vec_oprnd, NULL_RTX, VOIDmode, EXPAND_NORMAL);
486 if (!(*insn_data[icode].operand[1].predicate) (rtx_op1, mode1)
487 && mode1 != VOIDmode)
488 rtx_op1 = force_reg (mode1, rtx_op1);
490 rtx_op2 = expand_expr (shift_oprnd, NULL_RTX, VOIDmode, EXPAND_NORMAL);
491 if (!(*insn_data[icode].operand[2].predicate) (rtx_op2, mode2)
492 && mode2 != VOIDmode)
493 rtx_op2 = force_reg (mode2, rtx_op2);
496 || ! (*insn_data[icode].operand[0].predicate) (target, mode))
497 target = gen_reg_rtx (mode);
499 /* Emit instruction */
500 pat = GEN_FCN (icode) (target, rtx_op1, rtx_op2);
507 /* This subroutine of expand_doubleword_shift handles the cases in which
508 the effective shift value is >= BITS_PER_WORD. The arguments and return
509 value are the same as for the parent routine, except that SUPERWORD_OP1
510 is the shift count to use when shifting OUTOF_INPUT into INTO_TARGET.
511 INTO_TARGET may be null if the caller has decided to calculate it. */
514 expand_superword_shift (optab binoptab, rtx outof_input, rtx superword_op1,
515 rtx outof_target, rtx into_target,
516 int unsignedp, enum optab_methods methods)
518 if (into_target != 0)
519 if (!force_expand_binop (word_mode, binoptab, outof_input, superword_op1,
520 into_target, unsignedp, methods))
523 if (outof_target != 0)
525 /* For a signed right shift, we must fill OUTOF_TARGET with copies
526 of the sign bit, otherwise we must fill it with zeros. */
527 if (binoptab != ashr_optab)
528 emit_move_insn (outof_target, CONST0_RTX (word_mode));
530 if (!force_expand_binop (word_mode, binoptab,
531 outof_input, GEN_INT (BITS_PER_WORD - 1),
532 outof_target, unsignedp, methods))
538 /* This subroutine of expand_doubleword_shift handles the cases in which
539 the effective shift value is < BITS_PER_WORD. The arguments and return
540 value are the same as for the parent routine. */
543 expand_subword_shift (enum machine_mode op1_mode, optab binoptab,
544 rtx outof_input, rtx into_input, rtx op1,
545 rtx outof_target, rtx into_target,
546 int unsignedp, enum optab_methods methods,
547 unsigned HOST_WIDE_INT shift_mask)
549 optab reverse_unsigned_shift, unsigned_shift;
552 reverse_unsigned_shift = (binoptab == ashl_optab ? lshr_optab : ashl_optab);
553 unsigned_shift = (binoptab == ashl_optab ? ashl_optab : lshr_optab);
555 /* The low OP1 bits of INTO_TARGET come from the high bits of OUTOF_INPUT.
556 We therefore need to shift OUTOF_INPUT by (BITS_PER_WORD - OP1) bits in
557 the opposite direction to BINOPTAB. */
558 if (CONSTANT_P (op1) || shift_mask >= BITS_PER_WORD)
560 carries = outof_input;
561 tmp = immed_double_const (BITS_PER_WORD, 0, op1_mode);
562 tmp = simplify_expand_binop (op1_mode, sub_optab, tmp, op1,
567 /* We must avoid shifting by BITS_PER_WORD bits since that is either
568 the same as a zero shift (if shift_mask == BITS_PER_WORD - 1) or
569 has unknown behavior. Do a single shift first, then shift by the
570 remainder. It's OK to use ~OP1 as the remainder if shift counts
571 are truncated to the mode size. */
572 carries = expand_binop (word_mode, reverse_unsigned_shift,
573 outof_input, const1_rtx, 0, unsignedp, methods);
574 if (shift_mask == BITS_PER_WORD - 1)
576 tmp = immed_double_const (-1, -1, op1_mode);
577 tmp = simplify_expand_binop (op1_mode, xor_optab, op1, tmp,
582 tmp = immed_double_const (BITS_PER_WORD - 1, 0, op1_mode);
583 tmp = simplify_expand_binop (op1_mode, sub_optab, tmp, op1,
587 if (tmp == 0 || carries == 0)
589 carries = expand_binop (word_mode, reverse_unsigned_shift,
590 carries, tmp, 0, unsignedp, methods);
594 /* Shift INTO_INPUT logically by OP1. This is the last use of INTO_INPUT
595 so the result can go directly into INTO_TARGET if convenient. */
596 tmp = expand_binop (word_mode, unsigned_shift, into_input, op1,
597 into_target, unsignedp, methods);
601 /* Now OR in the bits carried over from OUTOF_INPUT. */
602 if (!force_expand_binop (word_mode, ior_optab, tmp, carries,
603 into_target, unsignedp, methods))
606 /* Use a standard word_mode shift for the out-of half. */
607 if (outof_target != 0)
608 if (!force_expand_binop (word_mode, binoptab, outof_input, op1,
609 outof_target, unsignedp, methods))
#ifdef HAVE_conditional_move
/* Try implementing expand_doubleword_shift using conditional moves.
   The shift is by < BITS_PER_WORD if (CMP_CODE CMP1 CMP2) is true,
   otherwise it is by >= BITS_PER_WORD.  SUBWORD_OP1 and SUPERWORD_OP1
   are the shift counts to use in the former and latter case.  All other
   arguments are the same as the parent routine.  */

static bool
expand_doubleword_shift_condmove (enum machine_mode op1_mode, optab binoptab,
				  enum rtx_code cmp_code, rtx cmp1, rtx cmp2,
				  rtx outof_input, rtx into_input,
				  rtx subword_op1, rtx superword_op1,
				  rtx outof_target, rtx into_target,
				  int unsignedp, enum optab_methods methods,
				  unsigned HOST_WIDE_INT shift_mask)
{
  rtx outof_superword, into_superword;

  /* Put the superword version of the output into OUTOF_SUPERWORD and
     INTO_SUPERWORD.  */
  outof_superword = outof_target != 0 ? gen_reg_rtx (word_mode) : 0;
  if (outof_target != 0 && subword_op1 == superword_op1)
    {
      /* The value INTO_TARGET >> SUBWORD_OP1, which we later store in
	 OUTOF_TARGET, is the same as the value of INTO_SUPERWORD.  */
      into_superword = outof_target;
      if (!expand_superword_shift (binoptab, outof_input, superword_op1,
				   outof_superword, 0, unsignedp, methods))
	return false;
    }
  else
    {
      into_superword = gen_reg_rtx (word_mode);
      if (!expand_superword_shift (binoptab, outof_input, superword_op1,
				   outof_superword, into_superword,
				   unsignedp, methods))
	return false;
    }

  /* Put the subword version directly in OUTOF_TARGET and INTO_TARGET.  */
  if (!expand_subword_shift (op1_mode, binoptab,
			     outof_input, into_input, subword_op1,
			     outof_target, into_target,
			     unsignedp, methods, shift_mask))
    return false;

  /* Select between them.  Do the INTO half first because INTO_SUPERWORD
     might be the current value of OUTOF_TARGET.  */
  if (!emit_conditional_move (into_target, cmp_code, cmp1, cmp2, op1_mode,
			      into_target, into_superword, word_mode, false))
    return false;

  if (outof_target != 0)
    if (!emit_conditional_move (outof_target, cmp_code, cmp1, cmp2, op1_mode,
				outof_target, outof_superword,
				word_mode, false))
      return false;

  return true;
}
#endif
678 /* Expand a doubleword shift (ashl, ashr or lshr) using word-mode shifts.
679 OUTOF_INPUT and INTO_INPUT are the two word-sized halves of the first
680 input operand; the shift moves bits in the direction OUTOF_INPUT->
681 INTO_TARGET. OUTOF_TARGET and INTO_TARGET are the equivalent words
682 of the target. OP1 is the shift count and OP1_MODE is its mode.
683 If OP1 is constant, it will have been truncated as appropriate
684 and is known to be nonzero.
686 If SHIFT_MASK is zero, the result of word shifts is undefined when the
687 shift count is outside the range [0, BITS_PER_WORD). This routine must
688 avoid generating such shifts for OP1s in the range [0, BITS_PER_WORD * 2).
690 If SHIFT_MASK is nonzero, all word-mode shift counts are effectively
691 masked by it and shifts in the range [BITS_PER_WORD, SHIFT_MASK) will
692 fill with zeros or sign bits as appropriate.
694 If SHIFT_MASK is BITS_PER_WORD - 1, this routine will synthesize
695 a doubleword shift whose equivalent mask is BITS_PER_WORD * 2 - 1.
696 Doing this preserves semantics required by SHIFT_COUNT_TRUNCATED.
697 In all other cases, shifts by values outside [0, BITS_PER_UNIT * 2)
700 BINOPTAB, UNSIGNEDP and METHODS are as for expand_binop. This function
701 may not use INTO_INPUT after modifying INTO_TARGET, and similarly for
702 OUTOF_INPUT and OUTOF_TARGET. OUTOF_TARGET can be null if the parent
703 function wants to calculate it itself.
705 Return true if the shift could be successfully synthesized. */
708 expand_doubleword_shift (enum machine_mode op1_mode, optab binoptab,
709 rtx outof_input, rtx into_input, rtx op1,
710 rtx outof_target, rtx into_target,
711 int unsignedp, enum optab_methods methods,
712 unsigned HOST_WIDE_INT shift_mask)
714 rtx superword_op1, tmp, cmp1, cmp2;
715 rtx subword_label, done_label;
716 enum rtx_code cmp_code;
718 /* See if word-mode shifts by BITS_PER_WORD...BITS_PER_WORD * 2 - 1 will
719 fill the result with sign or zero bits as appropriate. If so, the value
720 of OUTOF_TARGET will always be (SHIFT OUTOF_INPUT OP1). Recursively call
721 this routine to calculate INTO_TARGET (which depends on both OUTOF_INPUT
722 and INTO_INPUT), then emit code to set up OUTOF_TARGET.
724 This isn't worthwhile for constant shifts since the optimizers will
725 cope better with in-range shift counts. */
726 if (shift_mask >= BITS_PER_WORD
728 && !CONSTANT_P (op1))
730 if (!expand_doubleword_shift (op1_mode, binoptab,
731 outof_input, into_input, op1,
733 unsignedp, methods, shift_mask))
735 if (!force_expand_binop (word_mode, binoptab, outof_input, op1,
736 outof_target, unsignedp, methods))
741 /* Set CMP_CODE, CMP1 and CMP2 so that the rtx (CMP_CODE CMP1 CMP2)
742 is true when the effective shift value is less than BITS_PER_WORD.
743 Set SUPERWORD_OP1 to the shift count that should be used to shift
744 OUTOF_INPUT into INTO_TARGET when the condition is false. */
745 tmp = immed_double_const (BITS_PER_WORD, 0, op1_mode);
746 if (!CONSTANT_P (op1) && shift_mask == BITS_PER_WORD - 1)
748 /* Set CMP1 to OP1 & BITS_PER_WORD. The result is zero iff OP1
749 is a subword shift count. */
750 cmp1 = simplify_expand_binop (op1_mode, and_optab, op1, tmp,
752 cmp2 = CONST0_RTX (op1_mode);
758 /* Set CMP1 to OP1 - BITS_PER_WORD. */
759 cmp1 = simplify_expand_binop (op1_mode, sub_optab, op1, tmp,
761 cmp2 = CONST0_RTX (op1_mode);
763 superword_op1 = cmp1;
768 /* If we can compute the condition at compile time, pick the
769 appropriate subroutine. */
770 tmp = simplify_relational_operation (cmp_code, SImode, op1_mode, cmp1, cmp2);
771 if (tmp != 0 && GET_CODE (tmp) == CONST_INT)
773 if (tmp == const0_rtx)
774 return expand_superword_shift (binoptab, outof_input, superword_op1,
775 outof_target, into_target,
778 return expand_subword_shift (op1_mode, binoptab,
779 outof_input, into_input, op1,
780 outof_target, into_target,
781 unsignedp, methods, shift_mask);
784 #ifdef HAVE_conditional_move
785 /* Try using conditional moves to generate straight-line code. */
787 rtx start = get_last_insn ();
788 if (expand_doubleword_shift_condmove (op1_mode, binoptab,
789 cmp_code, cmp1, cmp2,
790 outof_input, into_input,
792 outof_target, into_target,
793 unsignedp, methods, shift_mask))
795 delete_insns_since (start);
799 /* As a last resort, use branches to select the correct alternative. */
800 subword_label = gen_label_rtx ();
801 done_label = gen_label_rtx ();
803 do_compare_rtx_and_jump (cmp1, cmp2, cmp_code, false, op1_mode,
804 0, 0, subword_label);
806 if (!expand_superword_shift (binoptab, outof_input, superword_op1,
807 outof_target, into_target,
811 emit_jump_insn (gen_jump (done_label));
813 emit_label (subword_label);
815 if (!expand_subword_shift (op1_mode, binoptab,
816 outof_input, into_input, op1,
817 outof_target, into_target,
818 unsignedp, methods, shift_mask))
821 emit_label (done_label);
825 /* Subroutine of expand_binop. Perform a double word multiplication of
826 operands OP0 and OP1 both of mode MODE, which is exactly twice as wide
827 as the target's word_mode. This function return NULL_RTX if anything
828 goes wrong, in which case it may have already emitted instructions
829 which need to be deleted.
831 If we want to multiply two two-word values and have normal and widening
832 multiplies of single-word values, we can do this with three smaller
833 multiplications. Note that we do not make a REG_NO_CONFLICT block here
834 because we are not operating on one word at a time.
836 The multiplication proceeds as follows:
837 _______________________
838 [__op0_high_|__op0_low__]
839 _______________________
840 * [__op1_high_|__op1_low__]
841 _______________________________________________
842 _______________________
843 (1) [__op0_low__*__op1_low__]
844 _______________________
845 (2a) [__op0_low__*__op1_high_]
846 _______________________
847 (2b) [__op0_high_*__op1_low__]
848 _______________________
849 (3) [__op0_high_*__op1_high_]
852 This gives a 4-word result. Since we are only interested in the
853 lower 2 words, partial result (3) and the upper words of (2a) and
854 (2b) don't need to be calculated. Hence (2a) and (2b) can be
855 calculated using non-widening multiplication.
857 (1), however, needs to be calculated with an unsigned widening
858 multiplication. If this operation is not directly supported we
859 try using a signed widening multiplication and adjust the result.
860 This adjustment works as follows:
862 If both operands are positive then no adjustment is needed.
864 If the operands have different signs, for example op0_low < 0 and
865 op1_low >= 0, the instruction treats the most significant bit of
866 op0_low as a sign bit instead of a bit with significance
867 2**(BITS_PER_WORD-1), i.e. the instruction multiplies op1_low
868 with 2**BITS_PER_WORD - op0_low, and two's complements the
869 result. Conclusion: We need to add op1_low * 2**BITS_PER_WORD to
872 Similarly, if both operands are negative, we need to add
873 (op0_low + op1_low) * 2**BITS_PER_WORD.
875 We use a trick to adjust quickly. We logically shift op0_low right
876 (op1_low) BITS_PER_WORD-1 steps to get 0 or 1, and add this to
877 op0_high (op1_high) before it is used to calculate 2b (2a). If no
878 logical shift exists, we do an arithmetic right shift and subtract
882 expand_doubleword_mult (enum machine_mode mode, rtx op0, rtx op1, rtx target,
883 bool umulp, enum optab_methods methods)
885 int low = (WORDS_BIG_ENDIAN ? 1 : 0);
886 int high = (WORDS_BIG_ENDIAN ? 0 : 1);
887 rtx wordm1 = umulp ? NULL_RTX : GEN_INT (BITS_PER_WORD - 1);
888 rtx product, adjust, product_high, temp;
890 rtx op0_high = operand_subword_force (op0, high, mode);
891 rtx op0_low = operand_subword_force (op0, low, mode);
892 rtx op1_high = operand_subword_force (op1, high, mode);
893 rtx op1_low = operand_subword_force (op1, low, mode);
895 /* If we're using an unsigned multiply to directly compute the product
896 of the low-order words of the operands and perform any required
897 adjustments of the operands, we begin by trying two more multiplications
898 and then computing the appropriate sum.
900 We have checked above that the required addition is provided.
901 Full-word addition will normally always succeed, especially if
902 it is provided at all, so we don't worry about its failure. The
903 multiplication may well fail, however, so we do handle that. */
907 /* ??? This could be done with emit_store_flag where available. */
908 temp = expand_binop (word_mode, lshr_optab, op0_low, wordm1,
909 NULL_RTX, 1, methods);
911 op0_high = expand_binop (word_mode, add_optab, op0_high, temp,
912 NULL_RTX, 0, OPTAB_DIRECT);
915 temp = expand_binop (word_mode, ashr_optab, op0_low, wordm1,
916 NULL_RTX, 0, methods);
919 op0_high = expand_binop (word_mode, sub_optab, op0_high, temp,
920 NULL_RTX, 0, OPTAB_DIRECT);
927 adjust = expand_binop (word_mode, smul_optab, op0_high, op1_low,
928 NULL_RTX, 0, OPTAB_DIRECT);
932 /* OP0_HIGH should now be dead. */
936 /* ??? This could be done with emit_store_flag where available. */
937 temp = expand_binop (word_mode, lshr_optab, op1_low, wordm1,
938 NULL_RTX, 1, methods);
940 op1_high = expand_binop (word_mode, add_optab, op1_high, temp,
941 NULL_RTX, 0, OPTAB_DIRECT);
944 temp = expand_binop (word_mode, ashr_optab, op1_low, wordm1,
945 NULL_RTX, 0, methods);
948 op1_high = expand_binop (word_mode, sub_optab, op1_high, temp,
949 NULL_RTX, 0, OPTAB_DIRECT);
956 temp = expand_binop (word_mode, smul_optab, op1_high, op0_low,
957 NULL_RTX, 0, OPTAB_DIRECT);
961 /* OP1_HIGH should now be dead. */
963 adjust = expand_binop (word_mode, add_optab, adjust, temp,
964 adjust, 0, OPTAB_DIRECT);
966 if (target && !REG_P (target))
970 product = expand_binop (mode, umul_widen_optab, op0_low, op1_low,
971 target, 1, OPTAB_DIRECT);
973 product = expand_binop (mode, smul_widen_optab, op0_low, op1_low,
974 target, 1, OPTAB_DIRECT);
979 product_high = operand_subword (product, high, 1, mode);
980 adjust = expand_binop (word_mode, add_optab, product_high, adjust,
981 REG_P (product_high) ? product_high : adjust,
983 emit_move_insn (product_high, adjust);
987 /* Wrapper around expand_binop which takes an rtx code to specify
988 the operation to perform, not an optab pointer. All other
989 arguments are the same. */
991 expand_simple_binop (enum machine_mode mode, enum rtx_code code, rtx op0,
992 rtx op1, rtx target, int unsignedp,
993 enum optab_methods methods)
995 optab binop = code_to_optab[(int) code];
998 return expand_binop (mode, binop, op0, op1, target, unsignedp, methods);
1002 /* Return whether OP0 and OP1 should be swapped when expanding a commutative
1003 binop. Order them according to commutative_operand_precedence and, if
1004 possible, try to put TARGET first. */
1006 swap_commutative_operands_with_target (rtx target, rtx op0, rtx op1)
1008 int op0_prec = commutative_operand_precedence (op0);
1009 int op1_prec = commutative_operand_precedence (op1);
1011 if (op0_prec < op1_prec)
1014 if (op0_prec > op1_prec)
1017 /* With equal precedence, both orders are ok, but try to put the
1019 return target && rtx_equal_p (op1, target);
1023 /* Generate code to perform an operation specified by BINOPTAB
1024 on operands OP0 and OP1, with result having machine-mode MODE.
1026 UNSIGNEDP is for the case where we have to widen the operands
1027 to perform the operation. It says to use zero-extension.
1029 If TARGET is nonzero, the value
1030 is generated there, if it is convenient to do so.
1031 In all cases an rtx is returned for the locus of the value;
1032 this may or may not be TARGET. */
/* NOTE(review): this listing has gaps (the embedded original line numbers
   are non-contiguous), so some statements, braces and `if' headers are not
   visible here.  The comments below describe only the visible code.

   Strategy, in order of preference as visible below:
     1. a direct insn pattern for MODE;
     2. for constant rotates, retrying with the opposite rotate direction;
     3. a widening multiply whose result is truncated back to MODE;
     4. open-coding the operation in a wider mode;
     5. word-at-a-time expansion for AND/IOR/XOR on multiword modes;
     6. synthesized doubleword shifts, rotates, add/sub with carry
        propagation, and multiplies built from word_mode pieces;
     7. a library call, if METHODS permits;
     8. recursion into a wider mode, with widening disabled recursively.
   After each failed attempt the emitted insns are deleted back to a
   checkpoint (LAST / ENTRY_LAST), so failures leave no stray RTL.  */
1035 expand_binop (enum machine_mode mode, optab binoptab, rtx op0, rtx op1,
1036 rtx target, int unsignedp, enum optab_methods methods)
1038 enum optab_methods next_methods
1039 = (methods == OPTAB_LIB || methods == OPTAB_LIB_WIDEN
1040 ? OPTAB_WIDEN : methods)
1041 enum mode_class class;
1042 enum machine_mode wider_mode;
1044 int commutative_op = 0;
1045 int shift_op = (binoptab->code == ASHIFT
1046 || binoptab->code == ASHIFTRT
1047 || binoptab->code == LSHIFTRT
1048 || binoptab->code == ROTATE
1049 || binoptab->code == ROTATERT);
/* Checkpoint before anything is emitted: a total failure deletes back to
   here (see the final delete_insns_since (entry_last) below).  */
1050 rtx entry_last = get_last_insn ();
1054 class = GET_MODE_CLASS (mode);
1056 /* If subtracting an integer constant, convert this into an addition of
1057 the negated constant. */
1059 if (binoptab == sub_optab && GET_CODE (op1) == CONST_INT)
1061 op1 = negate_rtx (mode, op1);
1062 binoptab = add_optab;
1065 /* If we are inside an appropriately-short loop and we are optimizing,
1066 force expensive constants into a register. */
1067 if (CONSTANT_P (op0) && optimize
1068 && rtx_cost (op0, binoptab->code) > COSTS_N_INSNS (1))
1070 if (GET_MODE (op0) != VOIDmode)
1071 op0 = convert_modes (mode, VOIDmode, op0, unsignedp);
1072 op0 = force_reg (mode, op0);
1075 if (CONSTANT_P (op1) && optimize
1076 && ! shift_op && rtx_cost (op1, binoptab->code) > COSTS_N_INSNS (1))
1078 if (GET_MODE (op1) != VOIDmode)
1079 op1 = convert_modes (mode, VOIDmode, op1, unsignedp);
1080 op1 = force_reg (mode, op1);
1083 /* Record where to delete back to if we backtrack. */
1084 last = get_last_insn ();
1086 /* If operation is commutative, canonicalize the order of the operands. */
1087 if (GET_RTX_CLASS (binoptab->code) == RTX_COMM_ARITH
1088 || binoptab == smul_widen_optab
1089 || binoptab == umul_widen_optab
1090 || binoptab == smul_highpart_optab
1091 || binoptab == umul_highpart_optab)
1094 if (swap_commutative_operands_with_target (target, op0, op1))
1104 /* If we can do it with a three-operand insn, do so. */
1106 if (methods != OPTAB_MUST_WIDEN
1107 && binoptab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
1109 int icode = (int) binoptab->handlers[(int) mode].insn_code;
/* Operand modes the chosen insn pattern expects; a VOIDmode entry is
   treated as "no conversion required" by the tests below.  */
1110 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
1111 enum machine_mode mode1 = insn_data[icode].operand[2].mode;
1113 rtx xop0 = op0, xop1 = op1;
1118 temp = gen_reg_rtx (mode);
1120 /* If it is a commutative operator and the modes would match
1121 if we would swap the operands, we can save the conversions. */
1124 if (GET_MODE (op0) != mode0 && GET_MODE (op1) != mode1
1125 && GET_MODE (op0) == mode1 && GET_MODE (op1) == mode0)
1129 tmp = op0; op0 = op1; op1 = tmp;
1130 tmp = xop0; xop0 = xop1; xop1 = tmp;
1134 /* In case the insn wants input operands in modes different from
1135 those of the actual operands, convert the operands. It would
1136 seem that we don't need to convert CONST_INTs, but we do, so
1137 that they're properly zero-extended, sign-extended or truncated
1140 if (GET_MODE (op0) != mode0 && mode0 != VOIDmode)
1141 xop0 = convert_modes (mode0,
1142 GET_MODE (op0) != VOIDmode
1147 if (GET_MODE (op1) != mode1 && mode1 != VOIDmode)
1148 xop1 = convert_modes (mode1,
1149 GET_MODE (op1) != VOIDmode
1154 /* Now, if insn's predicates don't allow our operands, put them into
1157 if (!insn_data[icode].operand[1].predicate (xop0, mode0)
1158 && mode0 != VOIDmode)
1159 xop0 = copy_to_mode_reg (mode0, xop0)
1161 if (!insn_data[icode].operand[2].predicate (xop1, mode1)
1162 && mode1 != VOIDmode)
1163 xop1 = copy_to_mode_reg (mode1, xop1);
1165 if (!insn_data[icode].operand[0].predicate (temp, mode))
1166 temp = gen_reg_rtx (mode);
/* Generate the candidate insn (or insn sequence) for this pattern.  */
1168 pat = GEN_FCN (icode) (temp, xop0, xop1);
1171 /* If PAT is composed of more than one insn, try to add an appropriate
1172 REG_EQUAL note to it. If we can't because TEMP conflicts with an
1173 operand, call ourselves again, this time without a target. */
1174 if (INSN_P (pat) && NEXT_INSN (pat) != NULL_RTX
1175 && ! add_equal_note (pat, temp, binoptab->code, xop0, xop1))
1177 delete_insns_since (last);
1178 return expand_binop (mode, binoptab, op0, op1, NULL_RTX,
1179 unsignedp, methods);
1186 delete_insns_since (last);
1189 /* If we were trying to rotate by a constant value, and that didn't
1190 work, try rotating the other direction before falling back to
1191 shifts and bitwise-or. */
1193 && (binoptab == rotl_optab || binoptab == rotr_optab)
1194 && class == MODE_INT
1195 && GET_CODE (op1) == CONST_INT
1197 && (unsigned int) INTVAL (op1) < GET_MODE_BITSIZE (mode))
1199 first_pass_p = false;
/* Rotating left by N == rotating right by (bitsize - N), and vice
   versa, so swap both the count and the optab and retry.  */
1200 op1 = GEN_INT (GET_MODE_BITSIZE (mode) - INTVAL (op1));
1201 binoptab = binoptab == rotl_optab ? rotr_optab : rotl_optab;
1205 /* If this is a multiply, see if we can do a widening operation that
1206 takes operands of this mode and makes a wider mode. */
1208 if (binoptab == smul_optab && GET_MODE_WIDER_MODE (mode) != VOIDmode
1209 && (((unsignedp ? umul_widen_optab : smul_widen_optab)
1210 ->handlers[(int) GET_MODE_WIDER_MODE (mode)].insn_code)
1211 != CODE_FOR_nothing))
1213 temp = expand_binop (GET_MODE_WIDER_MODE (mode),
1214 unsignedp ? umul_widen_optab : smul_widen_optab,
1215 op0, op1, NULL_RTX, unsignedp, OPTAB_DIRECT);
1219 if (GET_MODE_CLASS (mode) == MODE_INT
1220 && TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode),
1221 GET_MODE_BITSIZE (GET_MODE (temp))))
1222 return gen_lowpart (mode, temp);
1224 return convert_to_mode (mode, temp, unsignedp);
1228 /* Look for a wider mode of the same class for which we think we
1229 can open-code the operation. Check for a widening multiply at the
1230 wider mode as well. */
1232 if ((class == MODE_INT || class == MODE_FLOAT || class == MODE_COMPLEX_FLOAT)
1233 && methods != OPTAB_DIRECT && methods != OPTAB_LIB)
1234 for (wider_mode = GET_MODE_WIDER_MODE (mode); wider_mode != VOIDmode;
1235 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
1237 if (binoptab->handlers[(int) wider_mode].insn_code != CODE_FOR_nothing
1238 || (binoptab == smul_optab
1239 && GET_MODE_WIDER_MODE (wider_mode) != VOIDmode
1240 && (((unsignedp ? umul_widen_optab : smul_widen_optab)
1241 ->handlers[(int) GET_MODE_WIDER_MODE (wider_mode)].insn_code)
1242 != CODE_FOR_nothing)))
1244 rtx xop0 = op0, xop1 = op1;
1247 /* For certain integer operations, we need not actually extend
1248 the narrow operands, as long as we will truncate
1249 the results to the same narrowness. */
1251 if ((binoptab == ior_optab || binoptab == and_optab
1252 || binoptab == xor_optab
1253 || binoptab == add_optab || binoptab == sub_optab
1254 || binoptab == smul_optab || binoptab == ashl_optab)
1255 && class == MODE_INT)
1258 xop0 = widen_operand (xop0, wider_mode, mode, unsignedp, no_extend);
1260 /* The second operand of a shift must always be extended. */
1261 xop1 = widen_operand (xop1, wider_mode, mode, unsignedp,
1262 no_extend && binoptab != ashl_optab);
1264 temp = expand_binop (wider_mode, binoptab, xop0, xop1, NULL_RTX,
1265 unsignedp, OPTAB_DIRECT);
1268 if (class != MODE_INT
1269 || !TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode),
1270 GET_MODE_BITSIZE (wider_mode)))
1273 target = gen_reg_rtx (mode);
1274 convert_move (target, temp, 0);
1278 return gen_lowpart (mode, temp);
1281 delete_insns_since (last);
1285 /* These can be done a word at a time. */
1286 if ((binoptab == and_optab || binoptab == ior_optab || binoptab == xor_optab)
1287 && class == MODE_INT
1288 && GET_MODE_SIZE (mode) > UNITS_PER_WORD
1289 && binoptab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing)
1295 /* If TARGET is the same as one of the operands, the REG_EQUAL note
1296 won't be accurate, so use a new target. */
1297 if (target == 0 || target == op0 || target == op1)
1298 target = gen_reg_rtx (mode);
1302 /* Do the actual arithmetic. */
1303 for (i = 0; i < GET_MODE_BITSIZE (mode) / BITS_PER_WORD; i++)
1305 rtx target_piece = operand_subword (target, i, 1, mode);
1306 rtx x = expand_binop (word_mode, binoptab,
1307 operand_subword_force (op0, i, mode),
1308 operand_subword_force (op1, i, mode),
1309 target_piece, unsignedp, next_methods);
1314 if (target_piece != x)
1315 emit_move_insn (target_piece, x);
1318 insns = get_insns ();
/* Only succeed if every word was expanded (the loop did not bail
   out early).  */
1321 if (i == GET_MODE_BITSIZE (mode) / BITS_PER_WORD)
1323 if (binoptab->code != UNKNOWN)
1325 = gen_rtx_fmt_ee (binoptab->code, mode,
1326 copy_rtx (op0), copy_rtx (op1));
1330 emit_no_conflict_block (insns, target, op0, op1, equiv_value);
1335 /* Synthesize double word shifts from single word shifts. */
1336 if ((binoptab == lshr_optab || binoptab == ashl_optab
1337 || binoptab == ashr_optab)
1338 && class == MODE_INT
1339 && (GET_CODE (op1) == CONST_INT || !optimize_size)
1340 && GET_MODE_SIZE (mode) == 2 * UNITS_PER_WORD
1341 && binoptab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing
1342 && ashl_optab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing
1343 && lshr_optab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing)
1345 unsigned HOST_WIDE_INT shift_mask, double_shift_mask;
1346 enum machine_mode op1_mode;
1348 double_shift_mask = targetm.shift_truncation_mask (mode);
1349 shift_mask = targetm.shift_truncation_mask (word_mode);
1350 op1_mode = GET_MODE (op1) != VOIDmode ? GET_MODE (op1) : word_mode;
1352 /* Apply the truncation to constant shifts. */
1353 if (double_shift_mask > 0 && GET_CODE (op1) == CONST_INT)
1354 op1 = GEN_INT (INTVAL (op1) & double_shift_mask);
1356 if (op1 == CONST0_RTX (op1_mode))
1359 /* Make sure that this is a combination that expand_doubleword_shift
1360 can handle. See the comments there for details. */
1361 if (double_shift_mask == 0
1362 || (shift_mask == BITS_PER_WORD - 1
1363 && double_shift_mask == BITS_PER_WORD * 2 - 1))
1365 rtx insns, equiv_value;
1366 rtx into_target, outof_target;
1367 rtx into_input, outof_input;
1368 int left_shift, outof_word;
1370 /* If TARGET is the same as one of the operands, the REG_EQUAL note
1371 won't be accurate, so use a new target. */
1372 if (target == 0 || target == op0 || target == op1)
1373 target = gen_reg_rtx (mode);
1377 /* OUTOF_* is the word we are shifting bits away from, and
1378 INTO_* is the word that we are shifting bits towards, thus
1379 they differ depending on the direction of the shift and
1380 WORDS_BIG_ENDIAN. */
1382 left_shift = binoptab == ashl_optab;
1383 outof_word = left_shift ^ ! WORDS_BIG_ENDIAN;
1385 outof_target = operand_subword (target, outof_word, 1, mode);
1386 into_target = operand_subword (target, 1 - outof_word, 1, mode);
1388 outof_input = operand_subword_force (op0, outof_word, mode);
1389 into_input = operand_subword_force (op0, 1 - outof_word, mode);
1391 if (expand_doubleword_shift (op1_mode, binoptab,
1392 outof_input, into_input, op1,
1393 outof_target, into_target,
1394 unsignedp, methods, shift_mask))
1396 insns = get_insns ();
1399 equiv_value = gen_rtx_fmt_ee (binoptab->code, mode, op0, op1);
1400 emit_no_conflict_block (insns, target, op0, op1, equiv_value);
1407 /* Synthesize double word rotates from single word shifts. */
1408 if ((binoptab == rotl_optab || binoptab == rotr_optab)
1409 && class == MODE_INT
1410 && GET_CODE (op1) == CONST_INT
1411 && GET_MODE_SIZE (mode) == 2 * UNITS_PER_WORD
1412 && ashl_optab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing
1413 && lshr_optab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing)
1415 rtx insns, equiv_value;
1416 rtx into_target, outof_target;
1417 rtx into_input, outof_input;
1419 int shift_count, left_shift, outof_word;
1421 /* If TARGET is the same as one of the operands, the REG_EQUAL note
1422 won't be accurate, so use a new target. Do this also if target is not
1423 a REG, first because having a register instead may open optimization
1424 opportunities, and second because if target and op0 happen to be MEMs
1425 designating the same location, we would risk clobbering it too early
1426 in the code sequence we generate below. */
1427 if (target == 0 || target == op0 || target == op1 || ! REG_P (target))
1428 target = gen_reg_rtx (mode);
1432 shift_count = INTVAL (op1);
1434 /* OUTOF_* is the word we are shifting bits away from, and
1435 INTO_* is the word that we are shifting bits towards, thus
1436 they differ depending on the direction of the shift and
1437 WORDS_BIG_ENDIAN. */
1439 left_shift = (binoptab == rotl_optab);
1440 outof_word = left_shift ^ ! WORDS_BIG_ENDIAN;
1442 outof_target = operand_subword (target, outof_word, 1, mode);
1443 into_target = operand_subword (target, 1 - outof_word, 1, mode);
1445 outof_input = operand_subword_force (op0, outof_word, mode);
1446 into_input = operand_subword_force (op0, 1 - outof_word, mode);
1448 if (shift_count == BITS_PER_WORD)
1450 /* This is just a word swap. */
1451 emit_move_insn (outof_target, into_input);
1452 emit_move_insn (into_target, outof_input);
1457 rtx into_temp1, into_temp2, outof_temp1, outof_temp2;
1458 rtx first_shift_count, second_shift_count;
1459 optab reverse_unsigned_shift, unsigned_shift;
/* Each result word is the IOR of two opposing word_mode shifts;
   which optab shifts which way depends on the rotate direction
   and on whether the count exceeds one word.  */
1461 reverse_unsigned_shift = (left_shift ^ (shift_count < BITS_PER_WORD)
1462 ? lshr_optab : ashl_optab);
1464 unsigned_shift = (left_shift ^ (shift_count < BITS_PER_WORD)
1465 ? ashl_optab : lshr_optab);
1467 if (shift_count > BITS_PER_WORD)
1469 first_shift_count = GEN_INT (shift_count - BITS_PER_WORD);
1470 second_shift_count = GEN_INT (2 * BITS_PER_WORD - shift_count);
1474 first_shift_count = GEN_INT (BITS_PER_WORD - shift_count);
1475 second_shift_count = GEN_INT (shift_count);
1478 into_temp1 = expand_binop (word_mode, unsigned_shift,
1479 outof_input, first_shift_count,
1480 NULL_RTX, unsignedp, next_methods);
1481 into_temp2 = expand_binop (word_mode, reverse_unsigned_shift,
1482 into_input, second_shift_count,
1483 NULL_RTX, unsignedp, next_methods);
1485 if (into_temp1 != 0 && into_temp2 != 0)
1486 inter = expand_binop (word_mode, ior_optab, into_temp1, into_temp2,
1487 into_target, unsignedp, next_methods);
1491 if (inter != 0 && inter != into_target)
1492 emit_move_insn (into_target, inter);
1494 outof_temp1 = expand_binop (word_mode, unsigned_shift,
1495 into_input, first_shift_count,
1496 NULL_RTX, unsignedp, next_methods);
1497 outof_temp2 = expand_binop (word_mode, reverse_unsigned_shift,
1498 outof_input, second_shift_count,
1499 NULL_RTX, unsignedp, next_methods);
1501 if (inter != 0 && outof_temp1 != 0 && outof_temp2 != 0)
1502 inter = expand_binop (word_mode, ior_optab,
1503 outof_temp1, outof_temp2,
1504 outof_target, unsignedp, next_methods);
1506 if (inter != 0 && inter != outof_target)
1507 emit_move_insn (outof_target, inter);
1510 insns = get_insns ();
1515 if (binoptab->code != UNKNOWN)
1516 equiv_value = gen_rtx_fmt_ee (binoptab->code, mode, op0, op1);
1520 /* We can't make this a no conflict block if this is a word swap,
1521 because the word swap case fails if the input and output values
1522 are in the same register. */
1523 if (shift_count != BITS_PER_WORD)
1524 emit_no_conflict_block (insns, target, op0, op1, equiv_value);
1533 /* These can be done a word at a time by propagating carries. */
1534 if ((binoptab == add_optab || binoptab == sub_optab)
1535 && class == MODE_INT
1536 && GET_MODE_SIZE (mode) >= 2 * UNITS_PER_WORD
1537 && binoptab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing)
1540 optab otheroptab = binoptab == add_optab ? sub_optab : add_optab;
1541 const unsigned int nwords = GET_MODE_BITSIZE (mode) / BITS_PER_WORD;
1542 rtx carry_in = NULL_RTX, carry_out = NULL_RTX;
1543 rtx xop0, xop1, xtarget;
1545 /* We can handle either a 1 or -1 value for the carry. If STORE_FLAG
1546 value is one of those, use it. Otherwise, use 1 since it is the
1547 one easiest to get. */
1548 #if STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1
1549 int normalizep = STORE_FLAG_VALUE;
1554 /* Prepare the operands. */
1555 xop0 = force_reg (mode, op0);
1556 xop1 = force_reg (mode, op1);
1558 xtarget = gen_reg_rtx (mode);
1560 if (target == 0 || !REG_P (target))
1563 /* Indicate for flow that the entire target reg is being set. */
1565 emit_insn (gen_rtx_CLOBBER (VOIDmode, xtarget));
1567 /* Do the actual arithmetic. */
1568 for (i = 0; i < nwords; i++)
/* Iterate from the least significant word upward so carries
   propagate in the right order regardless of endianness.  */
1570 int index = (WORDS_BIG_ENDIAN ? nwords - i - 1 : i);
1571 rtx target_piece = operand_subword (xtarget, index, 1, mode);
1572 rtx op0_piece = operand_subword_force (xop0, index, mode);
1573 rtx op1_piece = operand_subword_force (xop1, index, mode);
1576 /* Main add/subtract of the input operands. */
1577 x = expand_binop (word_mode, binoptab,
1578 op0_piece, op1_piece,
1579 target_piece, unsignedp, next_methods);
1585 /* Store carry from main add/subtract. */
1586 carry_out = gen_reg_rtx (word_mode);
1587 carry_out = emit_store_flag_force (carry_out,
1588 (binoptab == add_optab
1591 word_mode, 1, normalizep);
1598 /* Add/subtract previous carry to main result. */
1599 newx = expand_binop (word_mode,
1600 normalizep == 1 ? binoptab : otheroptab,
1602 NULL_RTX, 1, next_methods);
1606 /* Get out carry from adding/subtracting carry in. */
1607 rtx carry_tmp = gen_reg_rtx (word_mode);
1608 carry_tmp = emit_store_flag_force (carry_tmp,
1609 (binoptab == add_optab
1612 word_mode, 1, normalizep);
1614 /* Logical-ior the two poss. carry together. */
1615 carry_out = expand_binop (word_mode, ior_optab,
1616 carry_out, carry_tmp,
1617 carry_out, 0, next_methods);
1621 emit_move_insn (target_piece, newx);
1625 if (x != target_piece)
1626 emit_move_insn (target_piece, x);
1629 carry_in = carry_out;
/* Only succeed if the loop completed for every word.  */
1632 if (i == GET_MODE_BITSIZE (mode) / (unsigned) BITS_PER_WORD)
1634 if (mov_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing
1635 || ! rtx_equal_p (target, xtarget))
1637 rtx temp = emit_move_insn (target, xtarget);
1639 set_unique_reg_note (temp,
1641 gen_rtx_fmt_ee (binoptab->code, mode,
1652 delete_insns_since (last);
1655 /* Attempt to synthesize double word multiplies using a sequence of word
1656 mode multiplications. We first attempt to generate a sequence using a
1657 more efficient unsigned widening multiply, and if that fails we then
1658 try using a signed widening multiply. */
1660 if (binoptab == smul_optab
1661 && class == MODE_INT
1662 && GET_MODE_SIZE (mode) == 2 * UNITS_PER_WORD
1663 && smul_optab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing
1664 && add_optab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing)
1666 rtx product = NULL_RTX;
1668 if (umul_widen_optab->handlers[(int) mode].insn_code
1669 != CODE_FOR_nothing)
1671 product = expand_doubleword_mult (mode, op0, op1, target,
1674 delete_insns_since (last);
1677 if (product == NULL_RTX
1678 && smul_widen_optab->handlers[(int) mode].insn_code
1679 != CODE_FOR_nothing)
1681 product = expand_doubleword_mult (mode, op0, op1, target,
1684 delete_insns_since (last);
1687 if (product != NULL_RTX)
1689 if (mov_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
1691 temp = emit_move_insn (target ? target : product, product);
1692 set_unique_reg_note (temp,
1694 gen_rtx_fmt_ee (MULT, mode,
1702 /* It can't be open-coded in this mode.
1703 Use a library call if one is available and caller says that's ok. */
1705 if (binoptab->handlers[(int) mode].libfunc
1706 && (methods == OPTAB_LIB || methods == OPTAB_LIB_WIDEN))
1710 enum machine_mode op1_mode = mode;
1717 op1_mode = word_mode;
1718 /* Specify unsigned here,
1719 since negative shift counts are meaningless. */
1720 op1x = convert_to_mode (word_mode, op1, 1);
1723 if (GET_MODE (op0) != VOIDmode
1724 && GET_MODE (op0) != mode)
1725 op0 = convert_to_mode (mode, op0, unsignedp);
1727 /* Pass 1 for NO_QUEUE so we don't lose any increments
1728 if the libcall is cse'd or moved. */
1729 value = emit_library_call_value (binoptab->handlers[(int) mode].libfunc,
1730 NULL_RTX, LCT_CONST, mode, 2,
1731 op0, mode, op1x, op1_mode);
1733 insns = get_insns ();
1736 target = gen_reg_rtx (mode);
1737 emit_libcall_block (insns, target, value,
1738 gen_rtx_fmt_ee (binoptab->code, mode, op0, op1));
1743 delete_insns_since (last);
1745 /* It can't be done in this mode. Can we do it in a wider mode? */
1747 if (! (methods == OPTAB_WIDEN || methods == OPTAB_LIB_WIDEN
1748 || methods == OPTAB_MUST_WIDEN))
1750 /* Caller says, don't even try. */
1751 delete_insns_since (entry_last);
1755 /* Compute the value of METHODS to pass to recursive calls.
1756 Don't allow widening to be tried recursively. */
1758 methods = (methods == OPTAB_LIB_WIDEN ? OPTAB_LIB : OPTAB_DIRECT);
1760 /* Look for a wider mode of the same class for which it appears we can do
1763 if (class == MODE_INT || class == MODE_FLOAT || class == MODE_COMPLEX_FLOAT)
1765 for (wider_mode = GET_MODE_WIDER_MODE (mode); wider_mode != VOIDmode;
1766 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
1768 if ((binoptab->handlers[(int) wider_mode].insn_code
1769 != CODE_FOR_nothing)
1770 || (methods == OPTAB_LIB
1771 && binoptab->handlers[(int) wider_mode].libfunc))
1773 rtx xop0 = op0, xop1 = op1;
1776 /* For certain integer operations, we need not actually extend
1777 the narrow operands, as long as we will truncate
1778 the results to the same narrowness. */
1780 if ((binoptab == ior_optab || binoptab == and_optab
1781 || binoptab == xor_optab
1782 || binoptab == add_optab || binoptab == sub_optab
1783 || binoptab == smul_optab || binoptab == ashl_optab)
1784 && class == MODE_INT)
1787 xop0 = widen_operand (xop0, wider_mode, mode,
1788 unsignedp, no_extend);
1790 /* The second operand of a shift must always be extended. */
1791 xop1 = widen_operand (xop1, wider_mode, mode, unsignedp,
1792 no_extend && binoptab != ashl_optab);
1794 temp = expand_binop (wider_mode, binoptab, xop0, xop1, NULL_RTX,
1795 unsignedp, methods);
1798 if (class != MODE_INT
1799 || !TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode),
1800 GET_MODE_BITSIZE (wider_mode)))
1803 target = gen_reg_rtx (mode);
1804 convert_move (target, temp, 0);
1808 return gen_lowpart (mode, temp);
1811 delete_insns_since (last);
/* All strategies failed: discard everything emitted by this call.  */
1816 delete_insns_since (entry_last);
1820 /* Expand a binary operator which has both signed and unsigned forms.
1821 UOPTAB is the optab for unsigned operations, and SOPTAB is for
1824 If we widen unsigned operands, we may use a signed wider operation instead
1825 of an unsigned wider operation, since the result would be the same. */
/* Escalation order visible below: direct insn, widening (signed copy
   first, then unsigned), same-width libcall, and finally widening with a
   libcall.  WIDE_SOPTAB is a stack copy of *SOPTAB with the direct-MODE
   entries knocked out, so expand_binop can only use it via widening.
   NOTE(review): this listing has gaps (non-contiguous original line
   numbers); intermediate statements are not visible here.  */
1828 sign_expand_binop (enum machine_mode mode, optab uoptab, optab soptab,
1829 rtx op0, rtx op1, rtx target, int unsignedp,
1830 enum optab_methods methods)
1833 optab direct_optab = unsignedp ? uoptab : soptab;
1834 struct optab wide_soptab;
1836 /* Do it without widening, if possible. */
1837 temp = expand_binop (mode, direct_optab, op0, op1, target,
1838 unsignedp, OPTAB_DIRECT);
1839 if (temp || methods == OPTAB_DIRECT)
1842 /* Try widening to a signed int. Make a fake signed optab that
1843 hides any signed insn for direct use. */
1844 wide_soptab = *soptab;
1845 wide_soptab.handlers[(int) mode].insn_code = CODE_FOR_nothing;
1846 wide_soptab.handlers[(int) mode].libfunc = 0;
1848 temp = expand_binop (mode, &wide_soptab, op0, op1, target,
1849 unsignedp, OPTAB_WIDEN);
1851 /* For unsigned operands, try widening to an unsigned int. */
1852 if (temp == 0 && unsignedp)
1853 temp = expand_binop (mode, uoptab, op0, op1, target,
1854 unsignedp, OPTAB_WIDEN);
1855 if (temp || methods == OPTAB_WIDEN)
1858 /* Use the right width lib call if that exists. */
1859 temp = expand_binop (mode, direct_optab, op0, op1, target, unsignedp, OPTAB_LIB);
1860 if (temp || methods == OPTAB_LIB)
1863 /* Must widen and use a lib call, use either signed or unsigned. */
1864 temp = expand_binop (mode, &wide_soptab, op0, op1, target,
1865 unsignedp, methods);
/* Last resort: let the unsigned optab try with the caller's METHODS.  */
1869 return expand_binop (mode, uoptab, op0, op1, target,
1870 unsignedp, methods);
1874 /* Generate code to perform an operation specified by UNOPPTAB
1875 on operand OP0, with two results to TARG0 and TARG1.
1876 We assume that the order of the operands for the instruction
1877 is TARG0, TARG1, OP0.
1879 Either TARG0 or TARG1 may be zero, but what that means is that
1880 the result is not actually wanted. We will generate it into
1881 a dummy pseudo-reg and discard it. They may not both be zero.
1883 Returns 1 if this operation can be performed; 0 if not. */
/* NOTE(review): gapped listing — some statements (e.g. the success
   return after GEN_FCN) are not visible here.  */
1886 expand_twoval_unop (optab unoptab, rtx op0, rtx targ0, rtx targ1,
1889 enum machine_mode mode = GET_MODE (targ0 ? targ0 : targ1);
1890 enum mode_class class;
1891 enum machine_mode wider_mode;
1892 rtx entry_last = get_last_insn ();
1895 class = GET_MODE_CLASS (mode);
1898 targ0 = gen_reg_rtx (mode);
1900 targ1 = gen_reg_rtx (mode);
1902 /* Record where to go back to if we fail. */
1903 last = get_last_insn ();
1905 if (unoptab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
1907 int icode = (int) unoptab->handlers[(int) mode].insn_code;
/* Operand 2 of the pattern is the input; operands 0 and 1 are the two
   outputs (see the order documented above).  */
1908 enum machine_mode mode0 = insn_data[icode].operand[2].mode;
1912 if (GET_MODE (xop0) != VOIDmode
1913 && GET_MODE (xop0) != mode0)
1914 xop0 = convert_to_mode (mode0, xop0, unsignedp);
1916 /* Now, if insn doesn't accept these operands, put them into pseudos. */
1917 if (!insn_data[icode].operand[2].predicate (xop0, mode0))
1918 xop0 = copy_to_mode_reg (mode0, xop0);
1920 /* We could handle this, but we should always be called with a pseudo
1921 for our targets and all insns should take them as outputs. */
1922 gcc_assert (insn_data[icode].operand[0].predicate (targ0, mode));
1923 gcc_assert (insn_data[icode].operand[1].predicate (targ1, mode));
1925 pat = GEN_FCN (icode) (targ0, targ1, xop0);
1932 delete_insns_since (last);
1935 /* It can't be done in this mode. Can we do it in a wider mode? */
1937 if (class == MODE_INT || class == MODE_FLOAT || class == MODE_COMPLEX_FLOAT)
1939 for (wider_mode = GET_MODE_WIDER_MODE (mode); wider_mode != VOIDmode;
1940 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
1942 if (unoptab->handlers[(int) wider_mode].insn_code
1943 != CODE_FOR_nothing)
/* Recurse at the wider mode, then narrow both results back.  */
1945 rtx t0 = gen_reg_rtx (wider_mode);
1946 rtx t1 = gen_reg_rtx (wider_mode);
1947 rtx cop0 = convert_modes (wider_mode, mode, op0, unsignedp);
1949 if (expand_twoval_unop (unoptab, cop0, t0, t1, unsignedp))
1951 convert_move (targ0, t0, unsignedp);
1952 convert_move (targ1, t1, unsignedp);
1956 delete_insns_since (last);
/* Complete failure: remove everything this call emitted.  */
1961 delete_insns_since (entry_last);
1965 /* Generate code to perform an operation specified by BINOPTAB
1966 on operands OP0 and OP1, with two results to TARG1 and TARG2.
1967 We assume that the order of the operands for the instruction
1968 is TARG0, OP0, OP1, TARG1, which would fit a pattern like
1969 [(set TARG0 (operate OP0 OP1)) (set TARG1 (operate ...))].
1971 Either TARG0 or TARG1 may be zero, but what that means is that
1972 the result is not actually wanted. We will generate it into
1973 a dummy pseudo-reg and discard it. They may not both be zero.
1975 Returns 1 if this operation can be performed; 0 if not. */
/* NOTE(review): gapped listing — some statements (e.g. the success
   return after GEN_FCN) are not visible here.  */
1978 expand_twoval_binop (optab binoptab, rtx op0, rtx op1, rtx targ0, rtx targ1,
1981 enum machine_mode mode = GET_MODE (targ0 ? targ0 : targ1);
1982 enum mode_class class;
1983 enum machine_mode wider_mode;
1984 rtx entry_last = get_last_insn ();
1987 class = GET_MODE_CLASS (mode);
1989 /* If we are inside an appropriately-short loop and we are optimizing,
1990 force expensive constants into a register. */
1991 if (CONSTANT_P (op0) && optimize
1992 && rtx_cost (op0, binoptab->code) > COSTS_N_INSNS (1))
1993 op0 = force_reg (mode, op0);
1995 if (CONSTANT_P (op1) && optimize
1996 && rtx_cost (op1, binoptab->code) > COSTS_N_INSNS (1))
1997 op1 = force_reg (mode, op1);
2000 targ0 = gen_reg_rtx (mode);
2002 targ1 = gen_reg_rtx (mode);
2004 /* Record where to go back to if we fail. */
2005 last = get_last_insn ();
2007 if (binoptab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
2009 int icode = (int) binoptab->handlers[(int) mode].insn_code;
/* Operands 1 and 2 of the pattern are the inputs; 0 and 3 are the two
   outputs (matching the pattern order documented above).  */
2010 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
2011 enum machine_mode mode1 = insn_data[icode].operand[2].mode;
2013 rtx xop0 = op0, xop1 = op1;
2015 /* In case the insn wants input operands in modes different from
2016 those of the actual operands, convert the operands. It would
2017 seem that we don't need to convert CONST_INTs, but we do, so
2018 that they're properly zero-extended, sign-extended or truncated
2021 if (GET_MODE (op0) != mode0 && mode0 != VOIDmode)
2022 xop0 = convert_modes (mode0,
2023 GET_MODE (op0) != VOIDmode
2028 if (GET_MODE (op1) != mode1 && mode1 != VOIDmode)
2029 xop1 = convert_modes (mode1,
2030 GET_MODE (op1) != VOIDmode
2035 /* Now, if insn doesn't accept these operands, put them into pseudos. */
2036 if (!insn_data[icode].operand[1].predicate (xop0, mode0))
2037 xop0 = copy_to_mode_reg (mode0, xop0);
2039 if (!insn_data[icode].operand[2].predicate (xop1, mode1))
2040 xop1 = copy_to_mode_reg (mode1, xop1);
2042 /* We could handle this, but we should always be called with a pseudo
2043 for our targets and all insns should take them as outputs. */
2044 gcc_assert (insn_data[icode].operand[0].predicate (targ0, mode));
2045 gcc_assert (insn_data[icode].operand[3].predicate (targ1, mode));
2047 pat = GEN_FCN (icode) (targ0, xop0, xop1, targ1);
2054 delete_insns_since (last);
2057 /* It can't be done in this mode. Can we do it in a wider mode? */
2059 if (class == MODE_INT || class == MODE_FLOAT || class == MODE_COMPLEX_FLOAT)
2061 for (wider_mode = GET_MODE_WIDER_MODE (mode); wider_mode != VOIDmode;
2062 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
2064 if (binoptab->handlers[(int) wider_mode].insn_code
2065 != CODE_FOR_nothing)
/* Recurse at the wider mode, then narrow both results back.  */
2067 rtx t0 = gen_reg_rtx (wider_mode);
2068 rtx t1 = gen_reg_rtx (wider_mode);
2069 rtx cop0 = convert_modes (wider_mode, mode, op0, unsignedp);
2070 rtx cop1 = convert_modes (wider_mode, mode, op1, unsignedp);
2072 if (expand_twoval_binop (binoptab, cop0, cop1,
2075 convert_move (targ0, t0, unsignedp);
2076 convert_move (targ1, t1, unsignedp);
2080 delete_insns_since (last);
/* Complete failure: remove everything this call emitted.  */
2085 delete_insns_since (entry_last);
2089 /* Expand the two-valued library call indicated by BINOPTAB, but
2090 preserve only one of the values. If TARG0 is non-NULL, the first
2091 value is placed into TARG0; otherwise the second value is placed
2092 into TARG1. Exactly one of TARG0 and TARG1 must be non-NULL. The
2093 value stored into TARG0 or TARG1 is equivalent to (CODE OP0 OP1).
2094 This routine assumes that the value returned by the library call is
2095 as if the return value was of an integral mode twice as wide as the
2096 mode of OP0. Returns 1 if the call was successful. */
2099 expand_twoval_binop_libfunc (optab binoptab, rtx op0, rtx op1,
2100 rtx targ0, rtx targ1, enum rtx_code code)
2102 enum machine_mode mode;
2103 enum machine_mode libval_mode;
2107 /* Exactly one of TARG0 or TARG1 should be non-NULL. */
2108 gcc_assert (!targ0 != !targ1);
/* The operating mode is taken from OP0; fail if no library function is
   registered for it.  */
2110 mode = GET_MODE (op0);
2111 if (!binoptab->handlers[(int) mode].libfunc)
2114 /* The value returned by the library function will have twice as
2115 many bits as the nominal MODE. */
2116 libval_mode = smallest_mode_for_size (2 * GET_MODE_BITSIZE (mode),
2119 libval = emit_library_call_value (binoptab->handlers[(int) mode].libfunc,
2120 NULL_RTX, LCT_CONST,
2124 /* Get the part of VAL containing the value that we want. */
/* Subreg byte 0 selects the first value, byte GET_MODE_SIZE (mode) the
   second.  */
2125 libval = simplify_gen_subreg (mode, libval, libval_mode,
2126 targ0 ? 0 : GET_MODE_SIZE (mode));
2127 insns = get_insns ();
2129 /* Move the into the desired location. */
2130 emit_libcall_block (insns, targ0 ? targ0 : targ1, libval,
2131 gen_rtx_fmt_ee (code, mode, op0, op1));
2137 /* Wrapper around expand_unop which takes an rtx code to specify
2138 the operation to perform, not an optab pointer. All other
2139 arguments are the same. */
2141 expand_simple_unop (enum machine_mode mode, enum rtx_code code, rtx op0,
2142 rtx target, int unsignedp)
/* Map the rtx code to its optab via the global code_to_optab table and
   delegate.  */
2144 optab unop = code_to_optab[(int) code];
2147 return expand_unop (mode, unop, op0, target, unsignedp);
2153 (clz:wide (zero_extend:wide x)) - ((width wide) - (width narrow)). */
/* Try computing clz of a narrow value via a wider-mode clz insn: zero-extend
   the operand, take clz in the wider mode, then subtract the extra leading
   zero bits contributed by the extension (the width difference).  Walks
   successively wider modes until one has a clz pattern.  Returns 0 on
   failure (visible attempts are undone via delete_insns_since).  */
2155 widen_clz (enum machine_mode mode, rtx op0, rtx target)
2157 enum mode_class class = GET_MODE_CLASS (mode);
2158 if (class == MODE_INT || class == MODE_FLOAT || class == MODE_COMPLEX_FLOAT)
2160 enum machine_mode wider_mode;
2161 for (wider_mode = GET_MODE_WIDER_MODE (mode); wider_mode != VOIDmode;
2162 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
2164 if (clz_optab->handlers[(int) wider_mode].insn_code
2165 != CODE_FOR_nothing)
2167 rtx xop0, temp, last;
2169 last = get_last_insn ();
2172 target = gen_reg_rtx (mode);
/* Zero-extend (unsignedp = true) so the high bits are known zeros.  */
2173 xop0 = widen_operand (op0, wider_mode, mode, true, false);
2174 temp = expand_unop (wider_mode, clz_optab, xop0, NULL_RTX, true);
/* Correct for the extra leading zeros added by the extension.  */
2176 temp = expand_binop (wider_mode, sub_optab, temp,
2177 GEN_INT (GET_MODE_BITSIZE (wider_mode)
2178 - GET_MODE_BITSIZE (mode)),
2179 target, true, OPTAB_DIRECT);
2181 delete_insns_since (last);
2190 /* Try calculating (parity x) as (and (popcount x) 1), where
2191 popcount can also be done in a wider mode. */
/* Unlike widen_clz, the search starts at MODE itself, since popcount of a
   zero-extended value equals popcount of the original and the low bit of
   the count is the parity.  Returns 0 if no mode has a popcount insn.  */
2193 expand_parity (enum machine_mode mode, rtx op0, rtx target)
2195 enum mode_class class = GET_MODE_CLASS (mode);
2196 if (class == MODE_INT || class == MODE_FLOAT || class == MODE_COMPLEX_FLOAT)
2198 enum machine_mode wider_mode;
2199 for (wider_mode = mode; wider_mode != VOIDmode;
2200 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
2202 if (popcount_optab->handlers[(int) wider_mode].insn_code
2203 != CODE_FOR_nothing)
2205 rtx xop0, temp, last;
2207 last = get_last_insn ();
2210 target = gen_reg_rtx (mode);
2211 xop0 = widen_operand (op0, wider_mode, mode, true, false);
2212 temp = expand_unop (wider_mode, popcount_optab, xop0, NULL_RTX,
/* Parity is the low bit of the population count.  */
2215 temp = expand_binop (wider_mode, and_optab, temp, const1_rtx,
2216 target, true, OPTAB_DIRECT);
2218 delete_insns_since (last);
2227 /* Extract the OMODE lowpart from VAL, which has IMODE. Under certain
2228 conditions, VAL may already be a SUBREG against which we cannot generate
2229 a further SUBREG. In this case, we expect forcing the value into a
2230 register will work around the situation. */
/* Extract the OMODE lowpart of VAL (which has IMODE).  If VAL is
   already a SUBREG against which a further SUBREG cannot be made,
   force it into a register and retry; the second attempt must
   succeed (asserted).  */
2233 lowpart_subreg_maybe_copy (enum machine_mode omode, rtx val,
2234 enum machine_mode imode)
2237 ret = lowpart_subreg (omode, val, imode);
/* First attempt failed (elided test) — copy to a fresh reg and retry.  */
2240 val = force_reg (imode, val);
2241 ret = lowpart_subreg (omode, val, imode);
2242 gcc_assert (ret != NULL);
2247 /* Expand a floating point absolute value or negation operation via a
2248 logical operation on the sign bit. */
/* Expand float ABS or NEG (CODE) of OP0 by a logical operation on the
   sign bit in an equivalent integer mode: and_optab for ABS, xor_optab
   for NEG.  NOTE(review): lines are elided here (numbering skips);
   in particular the presumable complementing of the mask for ABS is
   not visible — confirm against the full source.  */
2251 expand_absneg_bit (enum rtx_code code, enum machine_mode mode,
2252 rtx op0, rtx target)
2254 const struct real_format *fmt;
2255 int bitpos, word, nwords, i;
2256 enum machine_mode imode;
2257 HOST_WIDE_INT hi, lo;
2260 /* The format has to have a simple sign bit. */
2261 fmt = REAL_MODE_FORMAT (mode);
2265 bitpos = fmt->signbit_rw;
2269 /* Don't create negative zeros if the format doesn't support them. */
2270 if (code == NEG && !fmt->has_signed_zero)
/* Single-word values: operate in the integer mode of the same size.  */
2273 if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
2275 imode = int_mode_for_mode (mode);
2276 if (imode == BLKmode)
/* Multi-word values: locate the word that holds the sign bit.  */
2285 if (FLOAT_WORDS_BIG_ENDIAN)
2286 word = (GET_MODE_BITSIZE (mode) - bitpos) / BITS_PER_WORD;
2288 word = bitpos / BITS_PER_WORD;
2289 bitpos = bitpos % BITS_PER_WORD;
2290 nwords = (GET_MODE_BITSIZE (mode) + BITS_PER_WORD - 1) / BITS_PER_WORD;
/* Build the sign-bit mask as a (LO, HI) double-word constant.  */
2293 if (bitpos < HOST_BITS_PER_WIDE_INT)
2296 lo = (HOST_WIDE_INT) 1 << bitpos;
2300 hi = (HOST_WIDE_INT) 1 << (bitpos - HOST_BITS_PER_WIDE_INT);
2306 if (target == 0 || target == op0)
2307 target = gen_reg_rtx (mode);
/* Multi-word path: process the value one word at a time; only the
   sign word gets the logic op, the rest are plain copies.  */
2313 for (i = 0; i < nwords; ++i)
2315 rtx targ_piece = operand_subword (target, i, 1, mode);
2316 rtx op0_piece = operand_subword_force (op0, i, mode);
2320 temp = expand_binop (imode, code == ABS ? and_optab : xor_optab,
2322 immed_double_const (lo, hi, imode),
2323 targ_piece, 1, OPTAB_LIB_WIDEN);
2324 if (temp != targ_piece)
2325 emit_move_insn (targ_piece, temp);
2328 emit_move_insn (targ_piece, op0_piece);
2331 insns = get_insns ();
/* Record a (CODE op0) equivalent so later passes can re-recognize
   the whole block as a single abs/neg.  */
2334 temp = gen_rtx_fmt_e (code, mode, copy_rtx (op0));
2335 emit_no_conflict_block (insns, target, op0, NULL_RTX, temp);
/* Single-word path: do the logic op on the lowpart views.  */
2339 temp = expand_binop (imode, code == ABS ? and_optab : xor_optab,
2340 gen_lowpart (imode, op0),
2341 immed_double_const (lo, hi, imode),
2342 gen_lowpart (imode, target), 1, OPTAB_LIB_WIDEN);
2343 target = lowpart_subreg_maybe_copy (mode, temp, imode);
2345 set_unique_reg_note (get_last_insn (), REG_EQUAL,
2346 gen_rtx_fmt_e (code, mode, copy_rtx (op0)));
2352 /* Generate code to perform an operation specified by UNOPTAB
2353 on operand OP0, with result having machine-mode MODE.
2355 UNSIGNEDP is for the case where we have to widen the operands
2356 to perform the operation. It says to use zero-extension.
2358 If TARGET is nonzero, the value
2359 is generated there, if it is convenient to do so.
2360 In all cases an rtx is returned for the locus of the value;
2361 this may or may not be TARGET. */
/* Expand unary operation UNOPTAB on OP0 in MODE, trying strategies in
   order: (1) a direct insn pattern for MODE; (2) open-coding in a
   wider mode; (3) word-at-a-time for one_cmpl; (4) operation-specific
   fallbacks (negate via subtract from zero, parity via popcount);
   (5) a library call in MODE; (6) a wider mode with insn or libfunc;
   (7) a final widening negate-via-subtract.  Returns an rtx for the
   result, which may or may not be TARGET.  NOTE(review): this chunk
   is heavily elided (numbering skips); comments describe only the
   visible statements.  */
2364 expand_unop (enum machine_mode mode, optab unoptab, rtx op0, rtx target,
2367 enum mode_class class;
2368 enum machine_mode wider_mode;
/* Checkpoint for undoing failed expansion attempts.  */
2370 rtx last = get_last_insn ();
2373 class = GET_MODE_CLASS (mode);
/* Strategy 1: a direct insn pattern exists for this mode.  */
2375 if (unoptab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
2377 int icode = (int) unoptab->handlers[(int) mode].insn_code;
2378 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
2384 temp = gen_reg_rtx (mode);
2386 if (GET_MODE (xop0) != VOIDmode
2387 && GET_MODE (xop0) != mode0)
2388 xop0 = convert_to_mode (mode0, xop0, unsignedp);
2390 /* Now, if insn doesn't accept our operand, put it into a pseudo. */
2392 if (!insn_data[icode].operand[1].predicate (xop0, mode0))
2393 xop0 = copy_to_mode_reg (mode0, xop0);
2395 if (!insn_data[icode].operand[0].predicate (temp, mode))
2396 temp = gen_reg_rtx (mode);
2398 pat = GEN_FCN (icode) (temp, xop0);
/* If we cannot attach a REG_EQUAL note to a multi-insn expansion,
   redo it with a fresh (pseudo) target.  */
2401 if (INSN_P (pat) && NEXT_INSN (pat) != NULL_RTX
2402 && ! add_equal_note (pat, temp, unoptab->code, xop0, NULL_RTX))
2404 delete_insns_since (last);
2405 return expand_unop (mode, unoptab, op0, NULL_RTX, unsignedp);
2413 delete_insns_since (last);
2416 /* It can't be done in this mode. Can we open-code it in a wider mode? */
2418 /* Widening clz needs special treatment. */
2419 if (unoptab == clz_optab)
2421 temp = widen_clz (mode, op0, target);
/* Strategy 2: open-code in a wider mode that has an insn pattern.  */
2428 if (class == MODE_INT || class == MODE_FLOAT || class == MODE_COMPLEX_FLOAT)
2429 for (wider_mode = GET_MODE_WIDER_MODE (mode); wider_mode != VOIDmode;
2430 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
2432 if (unoptab->handlers[(int) wider_mode].insn_code != CODE_FOR_nothing)
2436 /* For certain operations, we need not actually extend
2437 the narrow operand, as long as we will truncate the
2438 results to the same narrowness. */
2440 xop0 = widen_operand (xop0, wider_mode, mode, unsignedp,
2441 (unoptab == neg_optab
2442 || unoptab == one_cmpl_optab)
2443 && class == MODE_INT);
2445 temp = expand_unop (wider_mode, unoptab, xop0, NULL_RTX,
/* Narrow the wide result back: convert_move for non-integer
   classes, gen_lowpart (truncation) for integers.  */
2450 if (class != MODE_INT)
2453 target = gen_reg_rtx (mode);
2454 convert_move (target, temp, 0);
2458 return gen_lowpart (mode, temp);
2461 delete_insns_since (last);
2465 /* These can be done a word at a time. */
2466 if (unoptab == one_cmpl_optab
2467 && class == MODE_INT
2468 && GET_MODE_SIZE (mode) > UNITS_PER_WORD
2469 && unoptab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing)
2474 if (target == 0 || target == op0)
2475 target = gen_reg_rtx (mode);
2479 /* Do the actual arithmetic. */
2480 for (i = 0; i < GET_MODE_BITSIZE (mode) / BITS_PER_WORD; i++)
2482 rtx target_piece = operand_subword (target, i, 1, mode);
2483 rtx x = expand_unop (word_mode, unoptab,
2484 operand_subword_force (op0, i, mode),
2485 target_piece, unsignedp);
2487 if (target_piece != x)
2488 emit_move_insn (target_piece, x);
2491 insns = get_insns ();
/* Wrap the per-word insns in a no-conflict block with a REG_EQUAL
   equivalent describing the whole operation.  */
2494 emit_no_conflict_block (insns, target, op0, NULL_RTX,
2495 gen_rtx_fmt_e (unoptab->code, mode,
2500 if (unoptab->code == NEG)
2502 /* Try negating floating point values by flipping the sign bit. */
2503 if (class == MODE_FLOAT)
2505 temp = expand_absneg_bit (NEG, mode, op0, target);
2510 /* If there is no negation pattern, and we have no negative zero,
2511 try subtracting from zero. */
2512 if (!HONOR_SIGNED_ZEROS (mode))
2514 temp = expand_binop (mode, (unoptab == negv_optab
2515 ? subv_optab : sub_optab),
2516 CONST0_RTX (mode), op0, target,
2517 unsignedp, OPTAB_DIRECT);
2523 /* Try calculating parity (x) as popcount (x) % 2. */
2524 if (unoptab == parity_optab)
2526 temp = expand_parity (mode, op0, target);
2532 /* Now try a library call in this mode. */
2533 if (unoptab->handlers[(int) mode].libfunc)
2537 enum machine_mode outmode = mode;
2539 /* All of these functions return small values. Thus we choose to
2540 have them return something that isn't a double-word. */
2541 if (unoptab == ffs_optab || unoptab == clz_optab || unoptab == ctz_optab
2542 || unoptab == popcount_optab || unoptab == parity_optab)
2544 = GET_MODE (hard_libcall_value (TYPE_MODE (integer_type_node)));
2548 /* Pass 1 for NO_QUEUE so we don't lose any increments
2549 if the libcall is cse'd or moved. */
2550 value = emit_library_call_value (unoptab->handlers[(int) mode].libfunc,
2551 NULL_RTX, LCT_CONST, outmode,
2553 insns = get_insns ();
2556 target = gen_reg_rtx (outmode);
/* Wrap the call in a libcall block, equivalent to (CODE op0).  */
2557 emit_libcall_block (insns, target, value,
2558 gen_rtx_fmt_e (unoptab->code, mode, op0));
2563 /* It can't be done in this mode. Can we do it in a wider mode? */
2565 if (class == MODE_INT || class == MODE_FLOAT || class == MODE_COMPLEX_FLOAT)
2567 for (wider_mode = GET_MODE_WIDER_MODE (mode); wider_mode != VOIDmode;
2568 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
/* Unlike strategy 2, a wider-mode libfunc is acceptable here.  */
2570 if ((unoptab->handlers[(int) wider_mode].insn_code
2571 != CODE_FOR_nothing)
2572 || unoptab->handlers[(int) wider_mode].libfunc)
2576 /* For certain operations, we need not actually extend
2577 the narrow operand, as long as we will truncate the
2578 results to the same narrowness. */
2580 xop0 = widen_operand (xop0, wider_mode, mode, unsignedp,
2581 (unoptab == neg_optab
2582 || unoptab == one_cmpl_optab)
2583 && class == MODE_INT);
2585 temp = expand_unop (wider_mode, unoptab, xop0, NULL_RTX,
2588 /* If we are generating clz using wider mode, adjust the
2590 if (unoptab == clz_optab && temp != 0)
2591 temp = expand_binop (wider_mode, sub_optab, temp,
2592 GEN_INT (GET_MODE_BITSIZE (wider_mode)
2593 - GET_MODE_BITSIZE (mode)),
2594 target, true, OPTAB_DIRECT);
2598 if (class != MODE_INT)
2601 target = gen_reg_rtx (mode);
2602 convert_move (target, temp, 0);
2606 return gen_lowpart (mode, temp);
2609 delete_insns_since (last);
2614 /* One final attempt at implementing negation via subtraction,
2615 this time allowing widening of the operand. */
2616 if (unoptab->code == NEG && !HONOR_SIGNED_ZEROS (mode))
2619 temp = expand_binop (mode,
2620 unoptab == negv_optab ? subv_optab : sub_optab,
2621 CONST0_RTX (mode), op0,
2622 target, unsignedp, OPTAB_LIB_WIDEN);
2630 /* Emit code to compute the absolute value of OP0, with result to
2631 TARGET if convenient. (TARGET may be 0.) The return value says
2632 where the result actually is to be found.
2634 MODE is the mode of the operand; the mode of the result is
2635 different but can be deduced from MODE.
/* Emit branch-free code computing the absolute value of OP0 in MODE,
   with the result in TARGET if convenient (TARGET may be 0).  Tries:
   an abs insn, clearing the float sign bit, MAX (x, -x), and the
   shift/xor/subtract identity.  Returns the result rtx, or (in elided
   code) presumably 0 on failure — confirm against the full source.  */
2640 expand_abs_nojump (enum machine_mode mode, rtx op0, rtx target,
2641 int result_unsignedp)
2646 result_unsignedp = 1;
2648 /* First try to do it with a special abs instruction. */
2649 temp = expand_unop (mode, result_unsignedp ? abs_optab : absv_optab,
2654 /* For floating point modes, try clearing the sign bit. */
2655 if (GET_MODE_CLASS (mode) == MODE_FLOAT)
2657 temp = expand_absneg_bit (ABS, mode, op0, target);
2662 /* If we have a MAX insn, we can do this as MAX (x, -x). */
2663 if (smax_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing
2664 && !HONOR_SIGNED_ZEROS (mode))
2666 rtx last = get_last_insn ();
2668 temp = expand_unop (mode, neg_optab, op0, NULL_RTX, 0)
2670 temp = expand_binop (mode, smax_optab, op0, temp, target, 0,
/* Undo the MAX attempt if it failed.  */
2676 delete_insns_since (last);
2679 /* If this machine has expensive jumps, we can do integer absolute
2680 value of X as (((signed) x >> (W-1)) ^ x) - ((signed) x >> (W-1)),
2681 where W is the width of MODE. */
2683 if (GET_MODE_CLASS (mode) == MODE_INT && BRANCH_COST >= 2)
2685 rtx extended = expand_shift (RSHIFT_EXPR, mode, op0,
2686 size_int (GET_MODE_BITSIZE (mode) - 1),
2689 temp = expand_binop (mode, xor_optab, extended, op0, target, 0,
2692 temp = expand_binop (mode, result_unsignedp ? sub_optab : subv_optab,
2693 temp, extended, target, 0, OPTAB_LIB_WIDEN);
/* Emit code for the absolute value of OP0, falling back to an explicit
   compare-against-zero, conditional jump, and negate when the
   branch-free expansion (expand_abs_nojump) fails.  SAFE nonzero means
   TARGET may be reused as scratch even if it equals OP0.  */
2703 expand_abs (enum machine_mode mode, rtx op0, rtx target,
2704 int result_unsignedp, int safe)
2709 result_unsignedp = 1;
/* Prefer the branch-free expansion when available.  */
2711 temp = expand_abs_nojump (mode, op0, target, result_unsignedp);
2715 /* If that does not win, use conditional jump and negate. */
2717 /* It is safe to use the target if it is the same
2718 as the source if this is also a pseudo register */
2719 if (op0 == target && REG_P (op0)
2720 && REGNO (op0) >= FIRST_PSEUDO_REGISTER)
/* OP1 here is the label jumped to when OP0 >= 0 (no negate needed).  */
2723 op1 = gen_label_rtx ();
2724 if (target == 0 || ! safe
2725 || GET_MODE (target) != mode
2726 || (MEM_P (target) && MEM_VOLATILE_P (target))
2728 && REGNO (target) < FIRST_PSEUDO_REGISTER))
2729 target = gen_reg_rtx (mode);
2731 emit_move_insn (target, op0);
2734 /* If this mode is an integer too wide to compare properly,
2735 compare word by word. Rely on CSE to optimize constant cases. */
2736 if (GET_MODE_CLASS (mode) == MODE_INT
2737 && ! can_compare_p (GE, mode, ccp_jump))
2738 do_jump_by_parts_greater_rtx (mode, 0, target, const0_rtx,
2741 do_compare_rtx_and_jump (target, CONST0_RTX (mode), GE, 0, mode,
2742 NULL_RTX, NULL_RTX, op1);
/* Negative path: negate into TARGET, then fall through to the label.  */
2744 op0 = expand_unop (mode, result_unsignedp ? neg_optab : negv_optab,
2747 emit_move_insn (target, op0);
2753 /* A subroutine of expand_copysign, perform the copysign operation using the
2754 abs and neg primitives advertised to exist on the target. The assumption
2755 is that we have a split register file, and leaving op0 in fp registers,
2756 and not playing with subregs so much, will help the register allocator. */
/* A subroutine of expand_copysign: implement copysign using the
   target's abs and neg insns — take abs(op0), test op1's sign bit in
   integer arithmetic, and conditionally negate.  Keeping op0 in FP
   registers (no subreg games on it) is friendlier to the register
   allocator on split-register-file targets.  BITPOS is the sign bit
   position; OP0_IS_ABS means op0's sign bit is known clear.
   NOTE(review): lines are elided in this chunk.  */
2759 expand_copysign_absneg (enum machine_mode mode, rtx op0, rtx op1, rtx target,
2760 int bitpos, bool op0_is_abs)
2762 enum machine_mode imode;
2763 HOST_WIDE_INT hi, lo;
/* Clear op0's sign bit unless it is already known clear.  */
2772 op0 = expand_unop (mode, abs_optab, op0, target, 0);
2779 if (target == NULL_RTX)
2780 target = copy_to_reg (op0);
2782 emit_move_insn (target, op0);
/* Get an integer view of op1 so its sign bit can be tested.  */
2785 if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
2787 imode = int_mode_for_mode (mode);
2788 if (imode == BLKmode)
2790 op1 = gen_lowpart (imode, op1);
/* Multi-word op1: isolate the word containing the sign bit.  */
2795 if (FLOAT_WORDS_BIG_ENDIAN)
2796 word = (GET_MODE_BITSIZE (mode) - bitpos) / BITS_PER_WORD;
2798 word = bitpos / BITS_PER_WORD;
2799 bitpos = bitpos % BITS_PER_WORD;
2800 op1 = operand_subword_force (op1, word, mode);
/* Build the single-bit mask for the sign position.  */
2803 if (bitpos < HOST_BITS_PER_WIDE_INT)
2806 lo = (HOST_WIDE_INT) 1 << bitpos;
2810 hi = (HOST_WIDE_INT) 1 << (bitpos - HOST_BITS_PER_WIDE_INT);
2814 op1 = expand_binop (imode, and_optab, op1,
2815 immed_double_const (lo, hi, imode),
2816 NULL_RTX, 1, OPTAB_LIB_WIDEN);
/* Skip the negate when op1's sign bit is clear.  */
2818 label = gen_label_rtx ();
2819 emit_cmp_and_jump_insns (op1, const0_rtx, EQ, NULL_RTX, imode, 1, label);
/* Fold the negation at compile time for constant op0.  */
2821 if (GET_CODE (op0) == CONST_DOUBLE)
2822 op0 = simplify_unary_operation (NEG, mode, op0, mode);
2824 op0 = expand_unop (mode, neg_optab, op0, target, 0);
2826 emit_move_insn (target, op0);
2834 /* A subroutine of expand_copysign, perform the entire copysign operation
2835 with integer bitmasks. BITPOS is the position of the sign bit; OP0_IS_ABS
2836 is true if op0 is known to have its sign bit clear. */
/* A subroutine of expand_copysign: perform the whole copysign with
   integer bitmasks — (op0 & ~signbit) | (op1 & signbit).  BITPOS is
   the sign bit position; OP0_IS_ABS means op0's sign bit is known
   clear (so the masking of op0 can be skipped — the test itself is in
   elided lines).  NOTE(review): chunk is elided; numbering skips.  */
2839 expand_copysign_bit (enum machine_mode mode, rtx op0, rtx op1, rtx target,
2840 int bitpos, bool op0_is_abs)
2842 enum machine_mode imode;
2843 HOST_WIDE_INT hi, lo;
2844 int word, nwords, i;
/* Single-word: operate in the same-size integer mode.  */
2847 if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
2849 imode = int_mode_for_mode (mode);
2850 if (imode == BLKmode)
/* Multi-word: find the word holding the sign bit.  */
2859 if (FLOAT_WORDS_BIG_ENDIAN)
2860 word = (GET_MODE_BITSIZE (mode) - bitpos) / BITS_PER_WORD;
2862 word = bitpos / BITS_PER_WORD;
2863 bitpos = bitpos % BITS_PER_WORD;
2864 nwords = (GET_MODE_BITSIZE (mode) + BITS_PER_WORD - 1) / BITS_PER_WORD;
/* Build the sign-bit mask as a (LO, HI) double-word constant.  */
2867 if (bitpos < HOST_BITS_PER_WIDE_INT)
2870 lo = (HOST_WIDE_INT) 1 << bitpos;
2874 hi = (HOST_WIDE_INT) 1 << (bitpos - HOST_BITS_PER_WIDE_INT);
2878 if (target == 0 || target == op0 || target == op1)
2879 target = gen_reg_rtx (mode);
/* Multi-word path: only the sign word needs the mask/merge; other
   words are copied straight from op0.  */
2885 for (i = 0; i < nwords; ++i)
2887 rtx targ_piece = operand_subword (target, i, 1, mode);
2888 rtx op0_piece = operand_subword_force (op0, i, mode);
2893 op0_piece = expand_binop (imode, and_optab, op0_piece,
2894 immed_double_const (~lo, ~hi, imode),
2895 NULL_RTX, 1, OPTAB_LIB_WIDEN);
2897 op1 = expand_binop (imode, and_optab,
2898 operand_subword_force (op1, i, mode),
2899 immed_double_const (lo, hi, imode),
2900 NULL_RTX, 1, OPTAB_LIB_WIDEN);
2902 temp = expand_binop (imode, ior_optab, op0_piece, op1,
2903 targ_piece, 1, OPTAB_LIB_WIDEN);
2904 if (temp != targ_piece)
2905 emit_move_insn (targ_piece, temp);
2908 emit_move_insn (targ_piece, op0_piece);
2911 insns = get_insns ();
2914 emit_no_conflict_block (insns, target, op0, op1, NULL_RTX);
/* Single-word path: same mask/merge on lowpart views.  */
2918 op1 = expand_binop (imode, and_optab, gen_lowpart (imode, op1),
2919 immed_double_const (lo, hi, imode),
2920 NULL_RTX, 1, OPTAB_LIB_WIDEN);
2922 op0 = gen_lowpart (imode, op0);
2924 op0 = expand_binop (imode, and_optab, op0,
2925 immed_double_const (~lo, ~hi, imode),
2926 NULL_RTX, 1, OPTAB_LIB_WIDEN);
2928 temp = expand_binop (imode, ior_optab, op0, op1,
2929 gen_lowpart (imode, target), 1, OPTAB_LIB_WIDEN);
2930 target = lowpart_subreg_maybe_copy (mode, temp, imode);
2936 /* Expand the C99 copysign operation. OP0 and OP1 must be the same
2937 scalar floating point mode. Return NULL if we do not know how to
2938 expand the operation inline. */
/* Expand the C99 copysign operation: OP0 with the sign of OP1.  Both
   must have the same scalar float mode.  Returns NULL when the
   operation cannot be expanded inline.  Strategy: direct copysign
   insn, then abs/neg-based expansion, then integer bit twiddling.  */
2941 expand_copysign (rtx op0, rtx op1, rtx target)
2943 enum machine_mode mode = GET_MODE (op0);
2944 const struct real_format *fmt;
2948 gcc_assert (SCALAR_FLOAT_MODE_P (mode));
2949 gcc_assert (GET_MODE (op1) == mode);
2951 /* First try to do it with a special instruction. */
2952 temp = expand_binop (mode, copysign_optab, op0, op1,
2953 target, 0, OPTAB_DIRECT);
/* Bit-twiddling approaches require a format with a real sign bit.  */
2957 fmt = REAL_MODE_FORMAT (mode);
2958 if (fmt == NULL || !fmt->has_signed_zero)
/* For a constant op0, fold away its sign up front.  */
2962 if (GET_CODE (op0) == CONST_DOUBLE)
2964 if (real_isneg (CONST_DOUBLE_REAL_VALUE (op0)))
2965 op0 = simplify_unary_operation (ABS, mode, op0, mode);
/* Use abs/neg expansion when the sign bit is readable and the target
   has both neg and abs patterns (or op0 is constant).  */
2969 if (fmt->signbit_ro >= 0
2970 && (GET_CODE (op0) == CONST_DOUBLE
2971 || (neg_optab->handlers[mode].insn_code != CODE_FOR_nothing
2972 && abs_optab->handlers[mode].insn_code != CODE_FOR_nothing)))
2974 temp = expand_copysign_absneg (mode, op0, op1, target,
2975 fmt->signbit_ro, op0_is_abs);
/* Otherwise fall back to full integer-mask copysign; requires a
   writable sign bit position.  */
2980 if (fmt->signbit_rw < 0)
2982 return expand_copysign_bit (mode, op0, op1, target,
2983 fmt->signbit_rw, op0_is_abs);
2986 /* Generate an instruction whose insn-code is INSN_CODE,
2987 with two operands: an output TARGET and an input OP0.
2988 TARGET *must* be nonzero, and the output is always stored there.
2989 CODE is an rtx code such that (CODE OP0) is an rtx that describes
2990 the value that is stored into TARGET. */
/* Emit insn pattern ICODE computing (CODE OP0) into TARGET.  TARGET
   must be nonzero and always receives the result; a REG_EQUAL-style
   note is attached via add_equal_note for multi-insn expansions when
   CODE is known.  NOTE(review): lines are elided in this chunk.  */
2993 emit_unop_insn (int icode, rtx target, rtx op0, enum rtx_code code)
2996 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
3001 /* Now, if insn does not accept our operands, put them into pseudos. */
3003 if (!insn_data[icode].operand[1].predicate (op0, mode0))
3004 op0 = copy_to_mode_reg (mode0, op0);
3006 if (!insn_data[icode].operand[0].predicate (temp, GET_MODE (temp)))
3007 temp = gen_reg_rtx (GET_MODE (temp));
3009 pat = GEN_FCN (icode) (temp, op0);
3011 if (INSN_P (pat) && NEXT_INSN (pat) != NULL_RTX && code != UNKNOWN)
3012 add_equal_note (pat, temp, code, op0, NULL_RTX);
/* If the pattern produced its result in TEMP rather than TARGET,
   copy it over.  */
3017 emit_move_insn (target, temp);
/* State threaded through no_conflict_move_test via note_stores:
   TARGET is the output being built, FIRST the first insn of the
   candidate block, INSN the insn under examination.  (A must_stay
   flag is also referenced by users; its declaration is in elided
   lines — confirm against the full source.)  */
3020 struct no_conflict_data
3022 rtx target, first, insn;
3026 /* Called via note_stores by emit_no_conflict_block and emit_libcall_block.
3027 Set P->must_stay if the currently examined clobber / store has to stay
3028 in the list of insns that constitute the actual no_conflict block /
/* note_stores callback used by emit_no_conflict_block and
   emit_libcall_block.  DEST is the stored/clobbered location, SET the
   enclosing SET (or other rtx), P0 a struct no_conflict_data.  Sets
   p->must_stay when the examined store must remain inside the
   no_conflict / libcall block rather than be hoisted before it.  */
3031 no_conflict_move_test (rtx dest, rtx set, void *p0)
3033 struct no_conflict_data *p= p0;
3035 /* If this insn directly contributes to setting the target, it must stay. */
3036 if (reg_overlap_mentioned_p (p->target, dest))
3037 p->must_stay = true;
3038 /* If we haven't committed to keeping any other insns in the list yet,
3039 there is nothing more to check. */
3040 else if (p->insn == p->first)
3042 /* If this insn sets / clobbers a register that feeds one of the insns
3043 already in the list, this insn has to stay too. */
3044 else if (reg_overlap_mentioned_p (dest, PATTERN (p->first))
3045 || (CALL_P (p->first) && (find_reg_fusage (p->first, USE, dest)))
3046 || reg_used_between_p (dest, p->first, p->insn)
3047 /* Likewise if this insn depends on a register set by a previous
3048 insn in the list, or if it sets a result (presumably a hard
3049 register) that is set or clobbered by a previous insn.
3050 N.B. the modified_*_p (SET_DEST...) tests applied to a MEM
3051 SET_DEST perform the former check on the address, and the latter
3052 check on the MEM. */
3053 || (GET_CODE (set) == SET
3054 && (modified_in_p (SET_SRC (set), p->first)
3055 || modified_in_p (SET_DEST (set), p->first)
3056 || modified_between_p (SET_SRC (set), p->first, p->insn)
3057 || modified_between_p (SET_DEST (set), p->first, p->insn))))
3058 p->must_stay = true;
3061 /* Emit code to perform a series of operations on a multi-word quantity, one
3064 Such a block is preceded by a CLOBBER of the output, consists of multiple
3065 insns, each setting one word of the output, and followed by a SET copying
3066 the output to itself.
3068 Each of the insns setting words of the output receives a REG_NO_CONFLICT
3069 note indicating that it doesn't conflict with the (also multi-word)
3070 inputs. The entire block is surrounded by REG_LIBCALL and REG_RETVAL
3073 INSNS is a block of code generated to perform the operation, not including
3074 the CLOBBER and final copy. All insns that compute intermediate values
3075 are first emitted, followed by the block as described above.
3077 TARGET, OP0, and OP1 are the output and inputs of the operations,
3078 respectively. OP1 may be zero for a unary operation.
3080 EQUIV, if nonzero, is an expression to be placed into a REG_EQUAL note
3083 If TARGET is not a register, INSNS is simply emitted with no special
3084 processing. Likewise if anything in INSNS is not an INSN or if
3085 there is a libcall block inside INSNS.
3087 The final insn emitted is returned. */
/* Emit INSNS (a multi-word operation computing TARGET from OP0/OP1,
   EQUIV being the REG_EQUAL expression) as a REG_NO_CONFLICT block:
   hoist independent insns out, emit CLOBBER(target), the per-word
   sets annotated with REG_NO_CONFLICT notes, a final self-copy, and
   bracket everything with REG_LIBCALL / REG_RETVAL.  Falls back to a
   plain emit when TARGET is not a register or INSNS is unsuitable.
   NOTE(review): lines are elided in this chunk.  */
3090 emit_no_conflict_block (rtx insns, rtx target, rtx op0, rtx op1, rtx equiv)
3092 rtx prev, next, first, last, insn;
/* The special block structure is only useful for register targets
   and cannot be built during reload.  */
3094 if (!REG_P (target) || reload_in_progress)
3095 return emit_insn (insns);
/* Bail out to a plain emit if INSNS contains anything but simple
   insns, or already contains a libcall block.  */
3097 for (insn = insns; insn; insn = NEXT_INSN (insn))
3098 if (!NONJUMP_INSN_P (insn)
3099 || find_reg_note (insn, REG_LIBCALL, NULL_RTX))
3100 return emit_insn (insns);
3102 /* First emit all insns that do not store into words of the output and remove
3103 these from the list. */
3104 for (insn = insns; insn; insn = next)
3107 struct no_conflict_data data;
3109 next = NEXT_INSN (insn);
3111 /* Some ports (cris) create libcall regions of their own. We must
3112 avoid any potential nesting of LIBCALLs. */
3113 if ((note = find_reg_note (insn, REG_LIBCALL, NULL)) != NULL)
3114 remove_note (insn, note);
3115 if ((note = find_reg_note (insn, REG_RETVAL, NULL)) != NULL)
3116 remove_note (insn, note);
3118 data.target = target;
3122 note_stores (PATTERN (insn), no_conflict_move_test, &data);
/* Independent insn: unlink it from the list (it is emitted outside
   the block — the emit itself is in elided lines).  */
3123 if (! data.must_stay)
3125 if (PREV_INSN (insn))
3126 NEXT_INSN (PREV_INSN (insn)) = next;
3131 PREV_INSN (next) = PREV_INSN (insn);
3137 prev = get_last_insn ();
3139 /* Now write the CLOBBER of the output, followed by the setting of each
3140 of the words, followed by the final copy. */
3141 if (target != op0 && target != op1)
3142 emit_insn (gen_rtx_CLOBBER (VOIDmode, target));
3144 for (insn = insns; insn; insn = next)
3146 next = NEXT_INSN (insn);
/* Tag each word-set with REG_NO_CONFLICT notes for the inputs.  */
3149 if (op1 && REG_P (op1))
3150 REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_NO_CONFLICT, op1,
3153 if (op0 && REG_P (op0))
3154 REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_NO_CONFLICT, op0,
/* Final copy: target = target, carrying the REG_EQUAL note.  */
3158 if (mov_optab->handlers[(int) GET_MODE (target)].insn_code
3159 != CODE_FOR_nothing)
3161 last = emit_move_insn (target, target);
3163 set_unique_reg_note (last, REG_EQUAL, equiv);
3167 last = get_last_insn ();
3169 /* Remove any existing REG_EQUAL note from "last", or else it will
3170 be mistaken for a note referring to the full contents of the
3171 alleged libcall value when found together with the REG_RETVAL
3172 note added below. An existing note can come from an insn
3173 expansion at "last". */
3174 remove_note (last, find_reg_note (last, REG_EQUAL, NULL_RTX));
3178 first = get_insns ();
3180 first = NEXT_INSN (prev);
3182 /* Encapsulate the block so it gets manipulated as a unit. */
3183 REG_NOTES (first) = gen_rtx_INSN_LIST (REG_LIBCALL, last,
3185 REG_NOTES (last) = gen_rtx_INSN_LIST (REG_RETVAL, first, REG_NOTES (last));
3190 /* Emit code to make a call to a constant function or a library call.
3192 INSNS is a list containing all insns emitted in the call.
3193 These insns leave the result in RESULT. Our block is to copy RESULT
3194 to TARGET, which is logically equivalent to EQUIV.
3196 We first emit any insns that set a pseudo on the assumption that these are
3197 loading constants into registers; doing so allows them to be safely cse'ed
3198 between blocks. Then we emit all the other insns in the block, followed by
3199 an insn to move RESULT to TARGET. This last insn will have a REQ_EQUAL
3200 note with an operand of EQUIV.
3202 Moving assignments to pseudos outside of the block is done to improve
3203 the generated code, but is not required to generate correct code,
3204 hence being unable to move an assignment is not grounds for not making
3205 a libcall block. There are two reasons why it is safe to leave these
3206 insns inside the block: First, we know that these pseudos cannot be
3207 used in generated RTL outside the block since they are created for
3208 temporary purposes within the block. Second, CSE will not record the
3209 values of anything set inside a libcall block, so we know they must
3210 be dead at the end of the block.
3212 Except for the first group of insns (the ones setting pseudos), the
3213 block is delimited by REG_RETVAL and REG_LIBCALL notes. */
/* Emit INSNS (a call leaving its value in RESULT) as a libcall block:
   copy RESULT to TARGET (logically EQUIV), hoist constant-loading
   pseudo sets out of the block, and bracket the rest with
   REG_LIBCALL / REG_RETVAL notes when safe.  NOTE(review): lines are
   elided in this chunk; comments cover only the visible code.  */
3216 emit_libcall_block (rtx insns, rtx target, rtx result, rtx equiv)
3218 rtx final_dest = target;
3219 rtx prev, next, first, last, insn;
3221 /* If this is a reg with REG_USERVAR_P set, then it could possibly turn
3222 into a MEM later. Protect the libcall block from this change. */
3223 if (! REG_P (target) || REG_USERVAR_P (target))
3224 target = gen_reg_rtx (GET_MODE (target));
3226 /* If we're using non-call exceptions, a libcall corresponding to an
3227 operation that may trap may also trap. */
3228 if (flag_non_call_exceptions && may_trap_p (equiv))
/* Strip no-throw REG_EH_REGION notes (value <= 0) so the call is
   allowed to trap.  */
3230 for (insn = insns; insn; insn = NEXT_INSN (insn))
3233 rtx note = find_reg_note (insn, REG_EH_REGION, NULL_RTX);
3235 if (note != 0 && INTVAL (XEXP (note, 0)) <= 0)
3236 remove_note (insn, note);
3240 /* look for any CALL_INSNs in this sequence, and attach a REG_EH_REGION
3241 reg note to indicate that this call cannot throw or execute a nonlocal
3242 goto (unless there is already a REG_EH_REGION note, in which case
3244 for (insn = insns; insn; insn = NEXT_INSN (insn))
3247 rtx note = find_reg_note (insn, REG_EH_REGION, NULL_RTX);
3250 XEXP (note, 0) = constm1_rtx;
3252 REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_EH_REGION, constm1_rtx,
3256 /* First emit all insns that set pseudos. Remove them from the list as
3257 we go. Avoid insns that set pseudos which were referenced in previous
3258 insns. These can be generated by move_by_pieces, for example,
3259 to update an address. Similarly, avoid insns that reference things
3260 set in previous insns. */
3262 for (insn = insns; insn; insn = next)
3264 rtx set = single_set (insn);
3267 /* Some ports (cris) create libcall regions of their own. We must
3268 avoid any potential nesting of LIBCALLs. */
3269 if ((note = find_reg_note (insn, REG_LIBCALL, NULL)) != NULL)
3270 remove_note (insn, note);
3271 if ((note = find_reg_note (insn, REG_RETVAL, NULL)) != NULL)
3272 remove_note (insn, note);
3274 next = NEXT_INSN (insn);
/* Only single sets of pseudo registers are hoisting candidates.  */
3276 if (set != 0 && REG_P (SET_DEST (set))
3277 && REGNO (SET_DEST (set)) >= FIRST_PSEUDO_REGISTER)
3279 struct no_conflict_data data;
/* const0_rtx as "target" so the overlap test never fires; only
   the dependency checks in no_conflict_move_test matter here.  */
3281 data.target = const0_rtx;
3285 note_stores (PATTERN (insn), no_conflict_move_test, &data);
/* Independent constant-load: unlink from the list (emission
   outside the block is in elided lines).  */
3286 if (! data.must_stay)
3288 if (PREV_INSN (insn))
3289 NEXT_INSN (PREV_INSN (insn)) = next;
3294 PREV_INSN (next) = PREV_INSN (insn);
3300 /* Some ports use a loop to copy large arguments onto the stack.
3301 Don't move anything outside such a loop. */
3306 prev = get_last_insn ();
3308 /* Write the remaining insns followed by the final copy. */
3310 for (insn = insns; insn; insn = next)
3312 next = NEXT_INSN (insn);
3317 last = emit_move_insn (target, result);
3318 if (mov_optab->handlers[(int) GET_MODE (target)].insn_code
3319 != CODE_FOR_nothing)
3320 set_unique_reg_note (last, REG_EQUAL, copy_rtx (equiv));
3323 /* Remove any existing REG_EQUAL note from "last", or else it will
3324 be mistaken for a note referring to the full contents of the
3325 libcall value when found together with the REG_RETVAL note added
3326 below. An existing note can come from an insn expansion at
3328 remove_note (last, find_reg_note (last, REG_EQUAL, NULL_RTX));
/* If TARGET was replaced by a scratch pseudo above, copy into the
   caller's destination.  */
3331 if (final_dest != target)
3332 emit_move_insn (final_dest, target);
3335 first = get_insns ();
3337 first = NEXT_INSN (prev);
3339 /* Encapsulate the block so it gets manipulated as a unit. */
3340 if (!flag_non_call_exceptions || !may_trap_p (equiv))
3342 /* We can't attach the REG_LIBCALL and REG_RETVAL notes
3343 when the encapsulated region would not be in one basic block,
3344 i.e. when there is a control_flow_insn_p insn between FIRST and LAST.
3346 bool attach_libcall_retval_notes = true;
3347 next = NEXT_INSN (last);
3348 for (insn = first; insn != next; insn = NEXT_INSN (insn))
3349 if (control_flow_insn_p (insn))
3351 attach_libcall_retval_notes = false;
3355 if (attach_libcall_retval_notes)
3357 REG_NOTES (first) = gen_rtx_INSN_LIST (REG_LIBCALL, last,
3359 REG_NOTES (last) = gen_rtx_INSN_LIST (REG_RETVAL, first,
3365 /* Nonzero if we can perform a comparison of mode MODE straightforwardly.
3366 PURPOSE describes how this comparison will be used. CODE is the rtx
3367 comparison code we will be using.
3369 ??? Actually, CODE is slightly weaker than that. A target is still
3370 required to implement all of the normal bcc operations, but not
3371 required to implement all (or any) of the unordered bcc operations. */
/* Return nonzero if a comparison in MODE can be done straightforwardly
   for PURPOSE (jump, store-flag, or cmov) with rtx code CODE.  Walks
   MODE and wider modes, checking cmp/cbranch/cmov/cstore patterns.
   ??? CODE is slightly weaker than full support: targets implement
   all ordered bcc operations but not necessarily the unordered ones.
   NOTE(review): loop structure is partly elided in this chunk.  */
3374 can_compare_p (enum rtx_code code, enum machine_mode mode,
3375 enum can_compare_purpose purpose)
/* A plain cmp pattern suffices if the matching branch / setcc
   generator exists for CODE.  */
3379 if (cmp_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
3381 if (purpose == ccp_jump)
3382 return bcc_gen_fctn[(int) code] != NULL;
3383 else if (purpose == ccp_store_flag)
3384 return setcc_gen_code[(int) code] != CODE_FOR_nothing;
3386 /* There's only one cmov entry point, and it's allowed to fail. */
/* Otherwise check the purpose-specific combined patterns.  */
3389 if (purpose == ccp_jump
3390 && cbranch_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
3392 if (purpose == ccp_cmov
3393 && cmov_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
3395 if (purpose == ccp_store_flag
3396 && cstore_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
/* Try the next wider mode.  */
3398 mode = GET_MODE_WIDER_MODE (mode);
3400 while (mode != VOIDmode);
3405 /* This function is called when we are going to emit a compare instruction that
3406 compares the values found in *PX and *PY, using the rtl operator COMPARISON.
3408 *PMODE is the mode of the inputs (in case they are const_int).
3409 *PUNSIGNEDP nonzero says that the operands are unsigned;
3410 this matters if they need to be widened.
3412 If they have mode BLKmode, then SIZE specifies the size of both operands.
3414 This function performs all the setup necessary so that the caller only has
3415 to emit a single comparison insn. This setup can involve doing a BLKmode
3416 comparison or emitting a library call to perform the comparison if no insn
3417 is available to handle it.
3418 The values which are passed in through pointers can be modified; the caller
3419 should perform the comparison on the modified values. Constant
3420 comparisons must have already been folded. */
/* NOTE(review): this excerpt elides some original lines (return type,
   braces, break/return statements) -- do not compile as-is.  */
3423 prepare_cmp_insn (rtx *px, rtx *py, enum rtx_code *pcomparison, rtx size,
3424 enum machine_mode *pmode, int *punsignedp,
3425 enum can_compare_purpose purpose)
3427 enum machine_mode mode = *pmode;
3428 rtx x = *px, y = *py;
3429 int unsignedp = *punsignedp;
3430 enum mode_class class;
3432 class = GET_MODE_CLASS (mode);
3434 /* If we are inside an appropriately-short loop and we are optimizing,
3435 force expensive constants into a register.  rtx_cost with COMPARE as
3436 the outer code estimates how costly the constant is as a compare
3437 operand; anything pricier than one insn goes into a pseudo. */
3436 if (CONSTANT_P (x) && optimize
3437 && rtx_cost (x, COMPARE) > COSTS_N_INSNS (1))
3438 x = force_reg (mode, x);
3440 if (CONSTANT_P (y) && optimize
3441 && rtx_cost (y, COMPARE) > COSTS_N_INSNS (1))
3442 y = force_reg (mode, y);
3445 /* Make sure we have a canonical comparison. The RTL
3446 documentation states that canonical comparisons are required only
3447 for targets which have cc0.  A constant is only allowed as the
3448 second operand. */
3448 gcc_assert (!CONSTANT_P (x) || CONSTANT_P (y));
3451 /* Don't let both operands fail to indicate the mode.  If both are
3452 VOIDmode constants, force one into a register so the comparison
3453 RTL carries a machine mode. */
3452 if (GET_MODE (x) == VOIDmode && GET_MODE (y) == VOIDmode)
3453 x = force_reg (mode, x);
3455 /* Handle all BLKmode compares. */
3457 if (mode == BLKmode)
3459 enum machine_mode cmp_mode, result_mode;
3460 enum insn_code cmp_code;
/* Alignment operand for the block-compare pattern: the stricter of the
   two operands' alignments, in bytes.  */
3465 = GEN_INT (MIN (MEM_ALIGN (x), MEM_ALIGN (y)) / BITS_PER_UNIT);
3469 /* Try to use a memory block compare insn - either cmpstr
3470 or cmpmem will do.  Walk the integer modes from narrowest up,
3471 looking for a pattern that can hold the length operand. */
3471 for (cmp_mode = GET_CLASS_NARROWEST_MODE (MODE_INT);
3472 cmp_mode != VOIDmode;
3473 cmp_mode = GET_MODE_WIDER_MODE (cmp_mode))
3475 cmp_code = cmpmem_optab[cmp_mode];
3476 if (cmp_code == CODE_FOR_nothing)
3477 cmp_code = cmpstr_optab[cmp_mode];
3478 if (cmp_code == CODE_FOR_nothing)
3479 cmp_code = cmpstrn_optab[cmp_mode];
3480 if (cmp_code == CODE_FOR_nothing)
3483 /* Must make sure the size fits the insn's mode.  Reject CMP_MODE if
3484 the (constant or variable) length could overflow it. */
3484 if ((GET_CODE (size) == CONST_INT
3485 && INTVAL (size) >= (1 << GET_MODE_BITSIZE (cmp_mode)))
3486 || (GET_MODE_BITSIZE (GET_MODE (size))
3487 > GET_MODE_BITSIZE (cmp_mode)))
3490 result_mode = insn_data[cmp_code].operand[0].mode;
3491 result = gen_reg_rtx (result_mode);
3492 size = convert_to_mode (cmp_mode, size, 1);
3493 emit_insn (GEN_FCN (cmp_code) (result, x, y, size, opalign));
3497 *pmode = result_mode;
3501 /* Otherwise call a library function, memcmp. */
3502 libfunc = memcmp_libfunc;
3503 length_type = sizetype;
3504 result_mode = TYPE_MODE (integer_type_node);
3505 cmp_mode = TYPE_MODE (length_type);
3506 size = convert_to_mode (TYPE_MODE (length_type), size,
3507 TYPE_UNSIGNED (length_type));
3509 result = emit_library_call_value (libfunc, 0, LCT_PURE_MAKE_BLOCK,
3516 *pmode = result_mode;
3520 /* Don't allow operands to the compare to trap, as that can put the
3521 compare and branch in different basic blocks.  Forcing into a
3522 register hoists any trapping memory access before the compare. */
3522 if (flag_non_call_exceptions)
3525 x = force_reg (mode, x);
3527 y = force_reg (mode, y);
3532 if (can_compare_p (*pcomparison, mode, purpose))
3535 /* Handle a lib call just for the mode we are using. */
3537 if (cmp_optab->handlers[(int) mode].libfunc && class != MODE_FLOAT)
3539 rtx libfunc = cmp_optab->handlers[(int) mode].libfunc;
3542 /* If we want unsigned, and this mode has a distinct unsigned
3543 comparison routine, use that. */
3544 if (unsignedp && ucmp_optab->handlers[(int) mode].libfunc)
3545 libfunc = ucmp_optab->handlers[(int) mode].libfunc;
3547 result = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST_MAKE_BLOCK,
3548 word_mode, 2, x, mode, y, mode);
3552 if (TARGET_LIB_INT_CMP_BIASED)
3553 /* Integer comparison returns a result that must be compared
3554 against 1, so that even if we do an unsigned compare
3555 afterward, there is still a value that can represent the
3556 result "less than". */
/* Fall through to the floating-point library path: integer modes were
   handled above, so any remaining mode must be MODE_FLOAT.  */
3566 gcc_assert (class == MODE_FLOAT);
3567 prepare_float_lib_cmp (px, py, pcomparison, pmode, punsignedp);
3570 /* Before emitting an insn with code ICODE, make sure that X, which is going
3571 to be used for operand OPNUM of the insn, is converted from mode MODE to
3572 WIDER_MODE (UNSIGNEDP determines whether it is an unsigned conversion), and
3573 that it is accepted by the operand predicate. Return the new value. */
/* NOTE(review): braces and the final return are elided in this listing.  */
3576 prepare_operand (int icode, rtx x, int opnum, enum machine_mode mode,
3577 enum machine_mode wider_mode, int unsignedp)
3579 if (mode != wider_mode)
3580 x = convert_modes (wider_mode, mode, x, unsignedp);
3582 /* If the (possibly widened) value still fails the operand predicate,
3583 copy it into a register of the mode the insn expects. */
3582 if (!insn_data[icode].operand[opnum].predicate
3583 (x, insn_data[icode].operand[opnum].mode))
3587 x = copy_to_mode_reg (insn_data[icode].operand[opnum].mode, x);
3593 /* Subroutine of emit_cmp_and_jump_insns; this function is called when we know
3594 we can do the comparison.
3595 The arguments are the same as for emit_cmp_and_jump_insns; but LABEL may
3596 be NULL_RTX which indicates that only a comparison is to be generated. */
/* NOTE(review): loop braces, returns and the do/while header are elided
   in this listing; the body iterates over successively wider modes.  */
3599 emit_cmp_and_jump_insn_1 (rtx x, rtx y, enum machine_mode mode,
3600 enum rtx_code comparison, int unsignedp, rtx label)
3602 rtx test = gen_rtx_fmt_ee (comparison, mode, x, y);
3603 enum mode_class class = GET_MODE_CLASS (mode);
3604 enum machine_mode wider_mode = mode;
3606 /* Try combined insns first: a cbranch pattern does compare and jump
3607 in a single insn. */
3609 enum insn_code icode;
3610 PUT_MODE (test, wider_mode);
3614 icode = cbranch_optab->handlers[(int) wider_mode].insn_code;
3616 if (icode != CODE_FOR_nothing
3617 && insn_data[icode].operand[0].predicate (test, wider_mode))
3619 x = prepare_operand (icode, x, 1, mode, wider_mode, unsignedp);
3620 y = prepare_operand (icode, y, 2, mode, wider_mode, unsignedp);
3621 emit_jump_insn (GEN_FCN (icode) (test, x, y, label));
3626 /* Handle some compares against zero via the tst pattern, followed by
3627 a condition-code branch from bcc_gen_fctn. */
3627 icode = (int) tst_optab->handlers[(int) wider_mode].insn_code;
3628 if (y == CONST0_RTX (mode) && icode != CODE_FOR_nothing)
3630 x = prepare_operand (icode, x, 0, mode, wider_mode, unsignedp);
3631 emit_insn (GEN_FCN (icode) (x));
3633 emit_jump_insn (bcc_gen_fctn[(int) comparison] (label));
3637 /* Handle compares for which there is a directly suitable insn:
3638 a cmp pattern that sets the condition codes, then a branch. */
3639 icode = (int) cmp_optab->handlers[(int) wider_mode].insn_code;
3640 if (icode != CODE_FOR_nothing)
3642 x = prepare_operand (icode, x, 0, mode, wider_mode, unsignedp);
3643 y = prepare_operand (icode, y, 1, mode, wider_mode, unsignedp);
3644 emit_insn (GEN_FCN (icode) (x, y));
3646 emit_jump_insn (bcc_gen_fctn[(int) comparison] (label));
3650 /* Only integer, float and complex-float classes can be retried in a
3651 wider mode; anything else stops the widening loop. */
3650 if (class != MODE_INT && class != MODE_FLOAT
3651 && class != MODE_COMPLEX_FLOAT)
3654 wider_mode = GET_MODE_WIDER_MODE (wider_mode);
3656 while (wider_mode != VOIDmode);
3661 /* Generate code to compare X with Y so that the condition codes are
3662 set and to jump to LABEL if the condition is true. If X is a
3663 constant and Y is not a constant, then the comparison is swapped to
3664 ensure that the comparison RTL has the canonical form.
3666 UNSIGNEDP nonzero says that X and Y are unsigned; this matters if they
3667 need to be widened by emit_cmp_insn. UNSIGNEDP is also used to select
3668 the proper branch condition code.
3670 If X and Y have mode BLKmode, then SIZE specifies the size of both X and Y.
3672 MODE is the mode of the inputs (in case they are const_int).
3674 COMPARISON is the rtl operator to compare with (EQ, NE, GT, etc.). It will
3675 be passed unchanged to emit_cmp_insn, then potentially converted into an
3676 unsigned variant based on UNSIGNEDP to select a proper jump instruction. */
/* NOTE(review): some lines (braces, operand swap body) are elided here.  */
3679 emit_cmp_and_jump_insns (rtx x, rtx y, enum rtx_code comparison, rtx size,
3680 enum machine_mode mode, int unsignedp, rtx label)
3682 rtx op0 = x, op1 = y;
3684 /* Swap operands and condition to ensure canonical RTL. */
3685 if (swap_commutative_operands_p (x, y))
3687 /* If we're not emitting a branch, this means some caller
3688 is relying on the old (swapped) operand order -- the swap of
3689 op0/op1 itself is elided from this listing. */
3692 comparison = swap_condition (comparison);
3696 /* If OP0 is still a constant, then both X and Y must be constants.
3697 Force X into a register to create canonical RTL. */
3698 if (CONSTANT_P (op0))
3699 op0 = force_reg (mode, op0);
3703 comparison = unsigned_condition (comparison);
3705 prepare_cmp_insn (&op0, &op1, &comparison, size, &mode, &unsignedp,
3707 emit_cmp_and_jump_insn_1 (op0, op1, mode, comparison, unsignedp, label);
3710 /* Like emit_cmp_and_jump_insns, but generate only the comparison.
3711 A null (0) label tells emit_cmp_and_jump_insns to skip the branch. */
3713 emit_cmp_insn (rtx x, rtx y, enum rtx_code comparison, rtx size,
3714 enum machine_mode mode, int unsignedp)
3716 emit_cmp_and_jump_insns (x, y, comparison, size, mode, unsignedp, 0);
3719 /* Emit a library call comparison between floating point X and Y.
3720 COMPARISON is the rtl operator to compare with (EQ, NE, GT, etc.).
3721 On return *PX, *PY, *PCOMPARISON and *PMODE describe the comparison
3722 the caller should emit against the libcall's word_mode result. */
/* NOTE(review): switch labels, braces and several statements are elided
   in this listing.  */
3723 prepare_float_lib_cmp (rtx *px, rtx *py, enum rtx_code *pcomparison,
3724 enum machine_mode *pmode, int *punsignedp)
3726 enum rtx_code comparison = *pcomparison;
3727 enum rtx_code swapped = swap_condition (comparison);
3728 enum rtx_code reversed = reverse_condition_maybe_unordered (comparison);
3731 enum machine_mode orig_mode = GET_MODE (x);
3732 enum machine_mode mode;
3733 rtx value, target, insns, equiv;
3735 bool reversed_p = false;
3737 /* Look for a libfunc in this mode or any wider one, trying the
3738 comparison as given, then with operands swapped, then reversed. */
3737 for (mode = orig_mode; mode != VOIDmode; mode = GET_MODE_WIDER_MODE (mode))
3739 if ((libfunc = code_to_optab[comparison]->handlers[mode].libfunc))
3742 if ((libfunc = code_to_optab[swapped]->handlers[mode].libfunc))
3745 tmp = x; x = y; y = tmp;
3746 comparison = swapped;
3750 if ((libfunc = code_to_optab[reversed]->handlers[mode].libfunc)
3751 && FLOAT_LIB_COMPARE_RETURNS_BOOL (mode, reversed))
3753 comparison = reversed;
3759 gcc_assert (mode != VOIDmode);
3761 if (mode != orig_mode)
3763 x = convert_to_mode (mode, x, 0);
3764 y = convert_to_mode (mode, y, 0);
3767 /* Attach a REG_EQUAL note describing the semantics of the libcall to
3768 the RTL. The allows the RTL optimizers to delete the libcall if the
3769 condition can be determined at compile-time. */
3770 if (comparison == UNORDERED)
3772 rtx temp = simplify_gen_relational (NE, word_mode, mode, x, x);
3773 equiv = simplify_gen_relational (NE, word_mode, mode, y, y);
3774 equiv = simplify_gen_ternary (IF_THEN_ELSE, word_mode, word_mode,
3775 temp, const_true_rtx, equiv);
3779 equiv = simplify_gen_relational (comparison, word_mode, mode, x, y);
3780 if (! FLOAT_LIB_COMPARE_RETURNS_BOOL (mode, comparison))
3782 rtx true_rtx, false_rtx;
3787 /* The true/false pairs below model the conventional soft-float
3788 return values per comparison code (the switch labels are elided
3789 from this listing). */
3787 true_rtx = const0_rtx;
3788 false_rtx = const_true_rtx;
3792 true_rtx = const_true_rtx;
3793 false_rtx = const0_rtx;
3797 true_rtx = const1_rtx;
3798 false_rtx = const0_rtx;
3802 true_rtx = const0_rtx;
3803 false_rtx = constm1_rtx;
3807 true_rtx = constm1_rtx;
3808 false_rtx = const0_rtx;
3812 true_rtx = const0_rtx;
3813 false_rtx = const1_rtx;
3819 equiv = simplify_gen_ternary (IF_THEN_ELSE, word_mode, word_mode,
3820 equiv, true_rtx, false_rtx);
3825 value = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
3826 word_mode, 2, x, mode, y, mode);
3827 insns = get_insns ();
3830 target = gen_reg_rtx (word_mode);
3831 emit_libcall_block (insns, target, value, equiv);
3833 /* Boolean-returning libcalls are tested against zero; reversed_p tells
3834 us whether the sense of the test was flipped above. */
3833 if (comparison == UNORDERED
3834 || FLOAT_LIB_COMPARE_RETURNS_BOOL (mode, comparison))
3835 comparison = reversed_p ? EQ : NE;
3840 *pcomparison = comparison;
3844 /* Generate code to indirectly jump to a location given in the rtx LOC.
3845 If LOC fails the indirect_jump pattern's operand predicate it is
3846 first copied into a Pmode register. */
3847 emit_indirect_jump (rtx loc)
3849 if (!insn_data[(int) CODE_FOR_indirect_jump].operand[0].predicate
3851 loc = copy_to_mode_reg (Pmode, loc);
3853 emit_jump_insn (gen_indirect_jump (loc));
3857 #ifdef HAVE_conditional_move
3859 /* Emit a conditional move instruction if the machine supports one for that
3860 condition and machine mode.
3862 OP0 and OP1 are the operands that should be compared using CODE. CMODE is
3863 the mode to use should they be constants. If it is VOIDmode, they cannot
3866 OP2 should be stored in TARGET if the comparison is true, otherwise OP3
3867 should be stored there. MODE is the mode to use should they be constants.
3868 If it is VOIDmode, they cannot both be constants.
3870 The result is either TARGET (perhaps modified) or NULL_RTX if the operation
3871 is not supported. */
/* NOTE(review): several lines (braces, operand swap/reversal bodies,
   returns) are elided in this listing.  */
3874 emit_conditional_move (rtx target, enum rtx_code code, rtx op0, rtx op1,
3875 enum machine_mode cmode, rtx op2, rtx op3,
3876 enum machine_mode mode, int unsignedp)
3878 rtx tem, subtarget, comparison, insn;
3879 enum insn_code icode;
3880 enum rtx_code reversed;
3882 /* If one operand is constant, make it the second one. Only do this
3883 if the other operand is not constant as well. */
3885 if (swap_commutative_operands_p (op0, op1))
3890 code = swap_condition (code);
3893 /* get_condition will prefer to generate LT and GT even if the old
3894 comparison was against zero, so undo that canonicalization here since
3895 comparisons against zero are cheaper. */
3896 if (code == LT && op1 == const1_rtx)
3897 code = LE, op1 = const0_rtx;
3898 else if (code == GT && op1 == constm1_rtx)
3899 code = GE, op1 = const0_rtx;
3901 if (cmode == VOIDmode)
3902 cmode = GET_MODE (op0);
3904 /* If the value operands want swapping and the comparison is safely
3905 reversible, swap op2/op3 and reverse the condition (swap body is
3906 elided from this listing). */
3904 if (swap_commutative_operands_p (op2, op3)
3905 && ((reversed = reversed_comparison_code_parts (code, op0, op1, NULL))
3914 if (mode == VOIDmode)
3915 mode = GET_MODE (op2);
3917 icode = movcc_gen_code[mode];
3919 if (icode == CODE_FOR_nothing)
3923 target = gen_reg_rtx (mode);
3927 /* If the insn doesn't accept these operands, put them in pseudos. */
3929 if (!insn_data[icode].operand[0].predicate
3930 (subtarget, insn_data[icode].operand[0].mode))
3931 subtarget = gen_reg_rtx (insn_data[icode].operand[0].mode);
3933 if (!insn_data[icode].operand[2].predicate
3934 (op2, insn_data[icode].operand[2].mode))
3935 op2 = copy_to_mode_reg (insn_data[icode].operand[2].mode, op2);
3937 if (!insn_data[icode].operand[3].predicate
3938 (op3, insn_data[icode].operand[3].mode))
3939 op3 = copy_to_mode_reg (insn_data[icode].operand[3].mode, op3);
3941 /* Everything should now be in the suitable form, so emit the compare insn
3942 and then the conditional move. */
3945 = compare_from_rtx (op0, op1, code, unsignedp, cmode, NULL_RTX);
3947 /* ??? Watch for const0_rtx (nop) and const_true_rtx (unconditional)? */
3948 /* We can get const0_rtx or const_true_rtx in some circumstances. Just
3949 return NULL and let the caller figure out how best to deal with this
3951 if (GET_CODE (comparison) != code)
3954 insn = GEN_FCN (icode) (subtarget, comparison, op2, op3);
3956 /* If that failed, then give up. */
3962 /* If we computed into a temporary because of operand predicates,
3963 copy the result back into the caller's TARGET. */
3962 if (subtarget != target)
3963 convert_move (target, subtarget, 0);
3968 /* Return nonzero if a conditional move of mode MODE is supported.
3970 This function is for combine so it can tell whether an insn that looks
3971 like a conditional move is actually supported by the hardware. If we
3972 guess wrong we lose a bit on optimization, but that's it. */
3973 /* ??? sparc64 supports conditionally moving integers values based on fp
3974 comparisons, and vice versa. How do we handle them? */
/* NOTE(review): the return statements are elided in this listing.  */
3977 can_conditionally_move_p (enum machine_mode mode)
3979 if (movcc_gen_code[mode] != CODE_FOR_nothing)
3985 #endif /* HAVE_conditional_move */
3987 /* Emit a conditional addition instruction if the machine supports one for that
3988 condition and machine mode.
3990 OP0 and OP1 are the operands that should be compared using CODE. CMODE is
3991 the mode to use should they be constants. If it is VOIDmode, they cannot
3994 OP2 should be stored in TARGET if the comparison is true, otherwise OP2+OP3
3995 should be stored there. MODE is the mode to use should they be constants.
3996 If it is VOIDmode, they cannot both be constants.
3998 The result is either TARGET (perhaps modified) or NULL_RTX if the operation
3999 is not supported. */
/* NOTE(review): this mirrors emit_conditional_move but uses addcc_optab;
   several lines (braces, swap bodies, returns) are elided here too.  */
4002 emit_conditional_add (rtx target, enum rtx_code code, rtx op0, rtx op1,
4003 enum machine_mode cmode, rtx op2, rtx op3,
4004 enum machine_mode mode, int unsignedp)
4006 rtx tem, subtarget, comparison, insn;
4007 enum insn_code icode;
4008 enum rtx_code reversed;
4010 /* If one operand is constant, make it the second one. Only do this
4011 if the other operand is not constant as well. */
4013 if (swap_commutative_operands_p (op0, op1))
4018 code = swap_condition (code);
4021 /* get_condition will prefer to generate LT and GT even if the old
4022 comparison was against zero, so undo that canonicalization here since
4023 comparisons against zero are cheaper. */
4024 if (code == LT && op1 == const1_rtx)
4025 code = LE, op1 = const0_rtx;
4026 else if (code == GT && op1 == constm1_rtx)
4027 code = GE, op1 = const0_rtx;
4029 if (cmode == VOIDmode)
4030 cmode = GET_MODE (op0);
4032 if (swap_commutative_operands_p (op2, op3)
4033 && ((reversed = reversed_comparison_code_parts (code, op0, op1, NULL))
4042 if (mode == VOIDmode)
4043 mode = GET_MODE (op2);
4045 icode = addcc_optab->handlers[(int) mode].insn_code;
4047 if (icode == CODE_FOR_nothing)
4051 target = gen_reg_rtx (mode);
4053 /* If the insn doesn't accept these operands, put them in pseudos. */
4055 if (!insn_data[icode].operand[0].predicate
4056 (target, insn_data[icode].operand[0].mode))
4057 subtarget = gen_reg_rtx (insn_data[icode].operand[0].mode);
4061 if (!insn_data[icode].operand[2].predicate
4062 (op2, insn_data[icode].operand[2].mode))
4063 op2 = copy_to_mode_reg (insn_data[icode].operand[2].mode, op2);
4065 if (!insn_data[icode].operand[3].predicate
4066 (op3, insn_data[icode].operand[3].mode))
4067 op3 = copy_to_mode_reg (insn_data[icode].operand[3].mode, op3);
4069 /* Everything should now be in the suitable form, so emit the compare insn
4070 and then the conditional move. */
4073 = compare_from_rtx (op0, op1, code, unsignedp, cmode, NULL_RTX);
4075 /* ??? Watch for const0_rtx (nop) and const_true_rtx (unconditional)? */
4076 /* We can get const0_rtx or const_true_rtx in some circumstances. Just
4077 return NULL and let the caller figure out how best to deal with this
4079 if (GET_CODE (comparison) != code)
4082 insn = GEN_FCN (icode) (subtarget, comparison, op2, op3);
4084 /* If that failed, then give up. */
4090 if (subtarget != target)
4091 convert_move (target, subtarget, 0);
4096 /* These functions attempt to generate an insn body, rather than
4097 emitting the insn, but if the gen function already emits them, we
4098 make no attempt to turn them back into naked patterns. */
4100 /* Generate and return an insn body to add Y to X.
4101 The add is done in place: X is both destination and first source.
4102 The operands are asserted to satisfy the add pattern's predicates. */
4103 gen_add2_insn (rtx x, rtx y)
4105 int icode = (int) add_optab->handlers[(int) GET_MODE (x)].insn_code;
4107 gcc_assert (insn_data[icode].operand[0].predicate
4108 (x, insn_data[icode].operand[0].mode));
4109 gcc_assert (insn_data[icode].operand[1].predicate
4110 (x, insn_data[icode].operand[1].mode));
4111 gcc_assert (insn_data[icode].operand[2].predicate
4112 (y, insn_data[icode].operand[2].mode));
4114 return GEN_FCN (icode) (x, x, y);
4117 /* Generate and return an insn body to add r1 and c,
4118 storing the result in r0.  Returns NULL_RTX (return elided in this
4119 listing) when no add pattern exists or a predicate rejects an operand. */
4120 gen_add3_insn (rtx r0, rtx r1, rtx c)
4122 int icode = (int) add_optab->handlers[(int) GET_MODE (r0)].insn_code;
4124 if (icode == CODE_FOR_nothing
4125 || !(insn_data[icode].operand[0].predicate
4126 (r0, insn_data[icode].operand[0].mode))
4127 || !(insn_data[icode].operand[1].predicate
4128 (r1, insn_data[icode].operand[1].mode))
4129 || !(insn_data[icode].operand[2].predicate
4130 (c, insn_data[icode].operand[2].mode)))
4133 return GEN_FCN (icode) (r0, r1, c);
/* Report whether a two-address add of Y into X is directly supported:
   the add pattern must exist for X's mode and accept both operands.  */
4137 have_add2_insn (rtx x, rtx y)
4141 gcc_assert (GET_MODE (x) != VOIDmode);
4143 icode = (int) add_optab->handlers[(int) GET_MODE (x)].insn_code;
4145 if (icode == CODE_FOR_nothing)
4148 if (!(insn_data[icode].operand[0].predicate
4149 (x, insn_data[icode].operand[0].mode))
4150 || !(insn_data[icode].operand[1].predicate
4151 (x, insn_data[icode].operand[1].mode))
4152 || !(insn_data[icode].operand[2].predicate
4153 (y, insn_data[icode].operand[2].mode)))
4159 /* Generate and return an insn body to subtract Y from X.
4160 The subtraction is done in place: X is both destination and first
4161 source; predicates are asserted (mirrors gen_add2_insn). */
4162 gen_sub2_insn (rtx x, rtx y)
4164 int icode = (int) sub_optab->handlers[(int) GET_MODE (x)].insn_code;
4166 gcc_assert (insn_data[icode].operand[0].predicate
4167 (x, insn_data[icode].operand[0].mode));
4168 gcc_assert (insn_data[icode].operand[1].predicate
4169 (x, insn_data[icode].operand[1].mode));
4170 gcc_assert (insn_data[icode].operand[2].predicate
4171 (y, insn_data[icode].operand[2].mode));
4173 return GEN_FCN (icode) (x, x, y);
4176 /* Generate and return an insn body to subtract r1 and c,
4177 storing the result in r0.  Returns NULL_RTX (return elided in this
4178 listing) when no sub pattern exists or a predicate rejects an operand. */
4179 gen_sub3_insn (rtx r0, rtx r1, rtx c)
4181 int icode = (int) sub_optab->handlers[(int) GET_MODE (r0)].insn_code;
4183 if (icode == CODE_FOR_nothing
4184 || !(insn_data[icode].operand[0].predicate
4185 (r0, insn_data[icode].operand[0].mode))
4186 || !(insn_data[icode].operand[1].predicate
4187 (r1, insn_data[icode].operand[1].mode))
4188 || !(insn_data[icode].operand[2].predicate
4189 (c, insn_data[icode].operand[2].mode)))
4192 return GEN_FCN (icode) (r0, r1, c);
/* Report whether a two-address subtract of Y from X is directly
   supported (mirrors have_add2_insn, using sub_optab).  */
4196 have_sub2_insn (rtx x, rtx y)
4200 gcc_assert (GET_MODE (x) != VOIDmode);
4202 icode = (int) sub_optab->handlers[(int) GET_MODE (x)].insn_code;
4204 if (icode == CODE_FOR_nothing)
4207 if (!(insn_data[icode].operand[0].predicate
4208 (x, insn_data[icode].operand[0].mode))
4209 || !(insn_data[icode].operand[1].predicate
4210 (x, insn_data[icode].operand[1].mode))
4211 || !(insn_data[icode].operand[2].predicate
4212 (y, insn_data[icode].operand[2].mode)))
4218 /* Generate the body of an instruction to copy Y into X.
4219 It may be a list of insns, if one insn isn't enough.
4220 NOTE(review): the sequence bookkeeping (start_sequence/get_insns/
4221 end_sequence) around the emit is elided in this listing -- presumably
4222 the emitted insns are captured and returned; verify against full file. */
4222 gen_move_insn (rtx x, rtx y)
4227 emit_move_insn_1 (x, y);
4233 /* Return the insn code used to extend FROM_MODE to TO_MODE.
4234 UNSIGNEDP specifies zero-extension instead of sign-extension. If
4235 no such operation exists, CODE_FOR_nothing will be returned. */
4238 can_extend_p (enum machine_mode to_mode, enum machine_mode from_mode,
/* Targets with a ptr_extend pattern (elided condition) use it for
   pointer-mode extension.  */
4242 #ifdef HAVE_ptr_extend
4244 return CODE_FOR_ptr_extend;
4247 tab = unsignedp ? zext_optab : sext_optab;
4248 return tab->handlers[to_mode][from_mode].insn_code;
4251 /* Generate the body of an insn to extend Y (with mode MFROM)
4252 into X (with mode MTO). Do zero-extension if UNSIGNEDP is nonzero.
4253 The caller is responsible for ensuring the extension is supported
4254 (no CODE_FOR_nothing check is made here). */
4255 gen_extend_insn (rtx x, rtx y, enum machine_mode mto,
4256 enum machine_mode mfrom, int unsignedp)
4258 enum insn_code icode = can_extend_p (mto, mfrom, unsignedp);
4259 return GEN_FCN (icode) (x, y);
4262 /* can_fix_p and can_float_p say whether the target machine
4263 can directly convert a given fixed point type to
4264 a given floating point type, or vice versa.
4265 The returned value is the CODE_FOR_... value to use,
4266 or CODE_FOR_nothing if these modes cannot be directly converted.
4268 *TRUNCP_PTR is set to 1 if it is necessary to output
4269 an explicit FTRUNC insn before the fix insn; otherwise 0. */
4271 static enum insn_code
4272 can_fix_p (enum machine_mode fixmode, enum machine_mode fltmode,
4273 int unsignedp, int *truncp_ptr)
4276 enum insn_code icode;
4278 /* First preference: a fixtrunc pattern, which truncates and fixes
4279 in one insn, so *TRUNCP_PTR stays 0. */
4278 tab = unsignedp ? ufixtrunc_optab : sfixtrunc_optab;
4279 icode = tab->handlers[fixmode][fltmode].insn_code;
4280 if (icode != CODE_FOR_nothing)
4286 /* FIXME: This requires a port to define both FIX and FTRUNC pattern
4287 for this to work. We need to rework the fix* and ftrunc* patterns
4288 and documentation. */
4289 tab = unsignedp ? ufix_optab : sfix_optab;
4290 icode = tab->handlers[fixmode][fltmode].insn_code;
4291 if (icode != CODE_FOR_nothing
4292 && ftrunc_optab->handlers[fltmode].insn_code != CODE_FOR_nothing)
4299 return CODE_FOR_nothing;
/* Return the insn code to convert fixed-point FIXMODE to floating-point
   FLTMODE (signed or unsigned per UNSIGNEDP), or CODE_FOR_nothing.  */
4302 static enum insn_code
4303 can_float_p (enum machine_mode fltmode, enum machine_mode fixmode,
4308 tab = unsignedp ? ufloat_optab : sfloat_optab;
4309 return tab->handlers[fltmode][fixmode].insn_code;
4312 /* Generate code to convert FROM to floating point
4313 and store in TO. FROM must be fixed point and not VOIDmode.
4314 UNSIGNEDP nonzero means regard FROM as unsigned.
4315 Normally this is done by correcting the final value
4316 if it is negative. */
/* NOTE(review): a number of lines (braces, goto done, labels, some
   declarations) are elided in this listing.  */
4319 expand_float (rtx to, rtx from, int unsignedp)
4321 enum insn_code icode;
4323 enum machine_mode fmode, imode;
4325 /* Crash now, because we won't be able to decide which mode to use. */
4326 gcc_assert (GET_MODE (from) != VOIDmode);
4328 /* Look for an insn to do the conversion. Do it in the specified
4329 modes if possible; otherwise convert either input, output or both to
4330 wider mode. If the integer mode is wider than the mode of FROM,
4331 we can do the conversion signed even if the input is unsigned. */
4333 for (fmode = GET_MODE (to); fmode != VOIDmode;
4334 fmode = GET_MODE_WIDER_MODE (fmode))
4335 for (imode = GET_MODE (from); imode != VOIDmode;
4336 imode = GET_MODE_WIDER_MODE (imode))
4338 int doing_unsigned = unsignedp;
4340 /* Skip FMODE if its significand cannot hold every value of the
4341 source integer mode without rounding. */
4340 if (fmode != GET_MODE (to)
4341 && significand_size (fmode) < GET_MODE_BITSIZE (GET_MODE (from)))
4344 icode = can_float_p (fmode, imode, unsignedp);
4345 if (icode == CODE_FOR_nothing && imode != GET_MODE (from) && unsignedp)
4346 icode = can_float_p (fmode, imode, 0), doing_unsigned = 0;
4348 if (icode != CODE_FOR_nothing)
4350 if (imode != GET_MODE (from))
4351 from = convert_to_mode (imode, from, unsignedp);
4353 if (fmode != GET_MODE (to))
4354 target = gen_reg_rtx (fmode);
4356 emit_unop_insn (icode, target, from,
4357 doing_unsigned ? UNSIGNED_FLOAT : FLOAT);
4360 convert_move (to, target, 0);
4365 /* Unsigned integer, and no way to convert directly.
4366 Convert as signed, then conditionally adjust the result. */
4369 rtx label = gen_label_rtx ();
4371 REAL_VALUE_TYPE offset;
4373 /* Look for a usable floating mode FMODE wider than the source and at
4374 least as wide as the target. Using FMODE will avoid rounding woes
4375 with unsigned values greater than the signed maximum value. */
4377 for (fmode = GET_MODE (to); fmode != VOIDmode;
4378 fmode = GET_MODE_WIDER_MODE (fmode))
4379 if (GET_MODE_BITSIZE (GET_MODE (from)) < GET_MODE_BITSIZE (fmode)
4380 && can_float_p (fmode, GET_MODE (from), 0) != CODE_FOR_nothing)
4383 if (fmode == VOIDmode)
4385 /* There is no such mode. Pretend the target is wide enough. */
4386 fmode = GET_MODE (to);
4388 /* Avoid double-rounding when TO is narrower than FROM. */
4389 if ((significand_size (fmode) + 1)
4390 < GET_MODE_BITSIZE (GET_MODE (from)))
4393 rtx neglabel = gen_label_rtx ();
4395 /* Don't use TARGET if it isn't a register, is a hard register,
4396 or is the wrong mode. */
4398 || REGNO (target) < FIRST_PSEUDO_REGISTER
4399 || GET_MODE (target) != fmode)
4400 target = gen_reg_rtx (fmode);
4402 imode = GET_MODE (from);
4403 do_pending_stack_adjust ();
4405 /* Test whether the sign bit is set. */
4406 emit_cmp_and_jump_insns (from, const0_rtx, LT, NULL_RTX, imode,
4409 /* The sign bit is not set. Convert as signed. */
4410 expand_float (target, from, 0);
4411 emit_jump_insn (gen_jump (label));
4414 /* The sign bit is set.
4415 Convert to a usable (positive signed) value by shifting right
4416 one bit, while remembering if a nonzero bit was shifted
4417 out; i.e., compute (from & 1) | (from >> 1). */
4419 emit_label (neglabel);
4420 temp = expand_binop (imode, and_optab, from, const1_rtx,
4421 NULL_RTX, 1, OPTAB_LIB_WIDEN);
4422 temp1 = expand_shift (RSHIFT_EXPR, imode, from, integer_one_node,
4424 temp = expand_binop (imode, ior_optab, temp, temp1, temp, 1,
4426 expand_float (target, temp, 0);
4428 /* Multiply by 2 to undo the shift above. */
4429 temp = expand_binop (fmode, add_optab, target, target,
4430 target, 0, OPTAB_LIB_WIDEN);
4432 emit_move_insn (target, temp);
4434 do_pending_stack_adjust ();
4440 /* If we are about to do some arithmetic to correct for an
4441 unsigned operand, do it in a pseudo-register. */
4443 if (GET_MODE (to) != fmode
4444 || !REG_P (to) || REGNO (to) < FIRST_PSEUDO_REGISTER)
4445 target = gen_reg_rtx (fmode);
4447 /* Convert as signed integer to floating. */
4448 expand_float (target, from, 0);
4450 /* If FROM is negative (and therefore TO is negative),
4451 correct its value by 2**bitwidth. */
4453 do_pending_stack_adjust ();
4454 emit_cmp_and_jump_insns (from, const0_rtx, GE, NULL_RTX, GET_MODE (from),
4458 real_2expN (&offset, GET_MODE_BITSIZE (GET_MODE (from)));
4459 temp = expand_binop (fmode, add_optab, target,
4460 CONST_DOUBLE_FROM_REAL_VALUE (offset, fmode),
4461 target, 0, OPTAB_LIB_WIDEN);
4463 emit_move_insn (target, temp);
4465 do_pending_stack_adjust ();
4470 /* No hardware instruction available; call a library routine. */
4475 convert_optab tab = unsignedp ? ufloat_optab : sfloat_optab;
4477 /* Library routines only exist for SImode and wider sources.  */
4477 if (GET_MODE_SIZE (GET_MODE (from)) < GET_MODE_SIZE (SImode))
4478 from = convert_to_mode (SImode, from, unsignedp);
4480 libfunc = tab->handlers[GET_MODE (to)][GET_MODE (from)].libfunc;
4481 gcc_assert (libfunc);
4485 value = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
4486 GET_MODE (to), 1, from,
4488 insns = get_insns ();
4491 /* Wrap the call in a libcall block with a FLOAT rtx as the REG_EQUAL
4492 note so optimizers can fold it when FROM is known. */
4491 emit_libcall_block (insns, target, value,
4492 gen_rtx_FLOAT (GET_MODE (to), from));
4497 /* Copy result to requested destination
4498 if we have been computing in a temp location. */
4502 if (GET_MODE (target) == GET_MODE (to))
4503 emit_move_insn (to, target);
4505 convert_move (to, target, 0);
4509 /* Generate code to convert FROM to fixed point and store in TO. FROM
4510 must be floating point.  UNSIGNEDP nonzero means the result is
4511 regarded as unsigned. */
/* NOTE(review): several lines (braces, labels, some declarations and
   returns) are elided in this listing.  */
4513 expand_fix (rtx to, rtx from, int unsignedp)
4515 enum insn_code icode;
4517 enum machine_mode fmode, imode;
4520 /* We first try to find a pair of modes, one real and one integer, at
4521 least as wide as FROM and TO, respectively, in which we can open-code
4522 this conversion. If the integer mode is wider than the mode of TO,
4523 we can do the conversion either signed or unsigned. */
4525 for (fmode = GET_MODE (from); fmode != VOIDmode;
4526 fmode = GET_MODE_WIDER_MODE (fmode))
4527 for (imode = GET_MODE (to); imode != VOIDmode;
4528 imode = GET_MODE_WIDER_MODE (imode))
4530 int doing_unsigned = unsignedp;
4532 icode = can_fix_p (imode, fmode, unsignedp, &must_trunc);
4533 if (icode == CODE_FOR_nothing && imode != GET_MODE (to) && unsignedp)
4534 icode = can_fix_p (imode, fmode, 0, &must_trunc), doing_unsigned = 0;
4536 if (icode != CODE_FOR_nothing)
4538 if (fmode != GET_MODE (from))
4539 from = convert_to_mode (fmode, from, 0);
4543 /* can_fix_p said an explicit ftrunc is needed first.  */
4543 rtx temp = gen_reg_rtx (GET_MODE (from));
4544 from = expand_unop (GET_MODE (from), ftrunc_optab, from,
4548 if (imode != GET_MODE (to))
4549 target = gen_reg_rtx (imode);
4551 emit_unop_insn (icode, target, from,
4552 doing_unsigned ? UNSIGNED_FIX : FIX);
4554 convert_move (to, target, unsignedp);
4559 /* For an unsigned conversion, there is one more way to do it.
4560 If we have a signed conversion, we generate code that compares
4561 the real value to the largest representable positive number. If it
4562 is smaller, the conversion is done normally. Otherwise, subtract
4563 one plus the highest signed number, convert, and add it back.
4565 We only need to check all real modes, since we know we didn't find
4566 anything with a wider integer mode.
4568 This code used to extend FP value into mode wider than the destination.
4569 This is not needed. Consider, for instance conversion from SFmode
4572 The hot path through the code is dealing with inputs smaller than 2^63
4573 and doing just the conversion, so there is no bits to lose.
4575 In the other path we know the value is positive in the range 2^63..2^64-1
4576 inclusive. (as for other input overflow happens and result is undefined)
4577 So we know that the most important bit set in mantissa corresponds to
4578 2^63. The subtraction of 2^63 should not generate any rounding as it
4579 simply clears out that bit. The rest is trivial. */
4581 if (unsignedp && GET_MODE_BITSIZE (GET_MODE (to)) <= HOST_BITS_PER_WIDE_INT)
4582 for (fmode = GET_MODE (from); fmode != VOIDmode;
4583 fmode = GET_MODE_WIDER_MODE (fmode))
4584 if (CODE_FOR_nothing != can_fix_p (GET_MODE (to), fmode, 0,
4588 REAL_VALUE_TYPE offset;
4589 rtx limit, lab1, lab2, insn;
4591 /* LIMIT is 2**(N-1), the smallest value that does not fit the
4592 signed conversion. */
4591 bitsize = GET_MODE_BITSIZE (GET_MODE (to));
4592 real_2expN (&offset, bitsize - 1);
4593 limit = CONST_DOUBLE_FROM_REAL_VALUE (offset, fmode);
4594 lab1 = gen_label_rtx ();
4595 lab2 = gen_label_rtx ();
4597 if (fmode != GET_MODE (from))
4598 from = convert_to_mode (fmode, from, 0);
4600 /* See if we need to do the subtraction. */
4601 do_pending_stack_adjust ();
4602 emit_cmp_and_jump_insns (from, limit, GE, NULL_RTX, GET_MODE (from),
4605 /* If not, do the signed "fix" and branch around fixup code. */
4606 expand_fix (to, from, 0);
4607 emit_jump_insn (gen_jump (lab2));
4610 /* Otherwise, subtract 2**(N-1), convert to signed number,
4611 then add 2**(N-1). Do the addition using XOR since this
4612 will often generate better code. */
4614 target = expand_binop (GET_MODE (from), sub_optab, from, limit,
4615 NULL_RTX, 0, OPTAB_LIB_WIDEN);
4616 expand_fix (to, target, 0);
4617 target = expand_binop (GET_MODE (to), xor_optab, to,
4619 ((HOST_WIDE_INT) 1 << (bitsize - 1),
4621 to, 1, OPTAB_LIB_WIDEN);
4624 emit_move_insn (to, target);
4628 /* If a plain move exists for TO's mode, emit one so we have an insn
4629 to hang a REG_EQUAL note (UNSIGNED_FIX of FROM) on. */
4628 if (mov_optab->handlers[(int) GET_MODE (to)].insn_code
4629 != CODE_FOR_nothing)
4631 /* Make a place for a REG_NOTE and add it. */
4632 insn = emit_move_insn (to, to);
4633 set_unique_reg_note (insn,
4635 gen_rtx_fmt_e (UNSIGNED_FIX,
4643 /* We can't do it with an insn, so use a library call. But first ensure
4644 that the mode of TO is at least as wide as SImode, since those are the
4645 only library calls we know about. */
4647 if (GET_MODE_SIZE (GET_MODE (to)) < GET_MODE_SIZE (SImode))
4649 target = gen_reg_rtx (SImode);
4651 expand_fix (target, from, unsignedp);
4659 convert_optab tab = unsignedp ? ufix_optab : sfix_optab;
4660 libfunc = tab->handlers[GET_MODE (to)][GET_MODE (from)].libfunc;
4661 gcc_assert (libfunc);
4665 value = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
4666 GET_MODE (to), 1, from,
4668 insns = get_insns ();
4671 emit_libcall_block (insns, target, value,
4672 gen_rtx_fmt_e (unsignedp ? UNSIGNED_FIX : FIX,
4673 GET_MODE (to), from));
4678 /* Copy the result to TO if we computed into a temporary. */
4678 if (GET_MODE (to) == GET_MODE (target))
4679 emit_move_insn (to, target);
4681 convert_move (to, target, 0);
4685 /* Report whether we have an instruction to perform the operation
4686 specified by CODE on operands of mode MODE. */
4688 have_insn_for (enum rtx_code code, enum machine_mode mode)
/* True iff an optab is registered for CODE and the target defines a
   pattern for it in MODE (handler is not CODE_FOR_nothing).  */
4690 return (code_to_optab[(int) code] != 0
4691 && (code_to_optab[(int) code]->handlers[(int) mode].insn_code
4692 != CODE_FOR_nothing));
4695 /* Create a blank optab. */
/* Allocate (in GC space) an optab whose per-mode handlers are all
   initialized to CODE_FOR_nothing with no libfuncs.  */
4700 optab op = ggc_alloc (sizeof (struct optab));
4701 for (i = 0; i < NUM_MACHINE_MODES; i++)
4703 op->handlers[i].insn_code = CODE_FOR_nothing;
4704 op->handlers[i].libfunc = 0;
/* Create a blank conversion optab: a 2-D (to-mode x from-mode) table of
   handlers, all initialized to CODE_FOR_nothing with no libfuncs.  */
4710 static convert_optab
4711 new_convert_optab (void)
4714 convert_optab op = ggc_alloc (sizeof (struct convert_optab));
4715 for (i = 0; i < NUM_MACHINE_MODES; i++)
4716 for (j = 0; j < NUM_MACHINE_MODES; j++)
4718 op->handlers[i][j].insn_code = CODE_FOR_nothing;
4719 op->handlers[i][j].libfunc = 0;
4724 /* Same, but fill in its code as CODE, and write it into the
4725 code_to_optab table. */
4727 init_optab (enum rtx_code code)
4729 optab op = new_optab ();
/* Register the rtx-code -> optab mapping so expanders can find this
   optab from a plain rtx code.  */
4731 code_to_optab[(int) code] = op;
4735 /* Same, but fill in its code as CODE, and do _not_ write it into
4736 the code_to_optab table. */
/* Used for the trapping (-ftrapv) variants, which must not shadow the
   non-trapping optab in code_to_optab.  */
4738 init_optabv (enum rtx_code code)
4740 optab op = new_optab ();
4745 /* Conversion optabs never go in the code_to_optab table. */
4746 static inline convert_optab
4747 init_convert_optab (enum rtx_code code)
4749 convert_optab op = new_convert_optab ();
4754 /* Initialize the libfunc fields of an entire group of entries in some
4755 optab. Each entry is set equal to a string consisting of a leading
4756 pair of underscores followed by a generic operation name followed by
4757 a mode name (downshifted to lowercase) followed by a single character
4758 representing the number of operands for the given operation (which is
4759 usually one of the characters '2', '3', or '4').
4761 OPTABLE is the table in which libfunc fields are to be initialized.
4762 FIRST_MODE is the first machine mode index in the given optab to
4764 LAST_MODE is the last machine mode index in the given optab to
4766 OPNAME is the generic (string) name of the operation.
4767 SUFFIX is the character which specifies the number of operands for
4768 the given generic operation.
4772 init_libfuncs (optab optable, int first_mode, int last_mode,
4773 const char *opname, int suffix)
4776 unsigned opname_len = strlen (opname);
4778 for (mode = first_mode; (int) mode <= (int) last_mode;
4779 mode = (enum machine_mode) ((int) mode + 1))
4781 const char *mname = GET_MODE_NAME (mode);
4782 unsigned mname_len = strlen (mname);
/* Buffer sized for "__" + opname + mode name + suffix char + NUL.  */
4783 char *libfunc_name = alloca (2 + opname_len + mname_len + 1 + 1);
4790 for (q = opname; *q; )
/* Mode names are stored uppercase; libfunc names use lowercase.  */
4792 for (q = mname; *q; q++)
4793 *p++ = TOLOWER (*q);
/* Intern the assembled name (e.g. "__adddf3") as this mode's libfunc.  */
4797 optable->handlers[(int) mode].libfunc
4798 = init_one_libfunc (ggc_alloc_string (libfunc_name, p - libfunc_name));
4802 /* Initialize the libfunc fields of an entire group of entries in some
4803 optab which correspond to all integer mode operations. The parameters
4804 have the same meaning as similarly named ones for the `init_libfuncs'
4805 routine. (See above). */
4808 init_integral_libfuncs (optab optable, const char *opname, int suffix)
/* Cover word_mode up through double-word (or `long long', whichever is
   wider) -- those are the integer modes libgcc provides helpers for.  */
4810 int maxsize = 2*BITS_PER_WORD;
4811 if (maxsize < LONG_LONG_TYPE_SIZE)
4812 maxsize = LONG_LONG_TYPE_SIZE;
4813 init_libfuncs (optable, word_mode,
4814 mode_for_size (maxsize, MODE_INT, 0),
4818 /* Initialize the libfunc fields of an entire group of entries in some
4819 optab which correspond to all real mode operations. The parameters
4820 have the same meaning as similarly named ones for the `init_libfuncs'
4821 routine. (See above). */
4824 init_floating_libfuncs (optab optable, const char *opname, int suffix)
4826 init_libfuncs (optable, MIN_MODE_FLOAT, MAX_MODE_FLOAT, opname, suffix);
4829 /* Initialize the libfunc fields of an entire group of entries of an
4830 inter-mode-class conversion optab. The string formation rules are
4831 similar to the ones for init_libfuncs, above, but instead of having
4832 a mode name and an operand count these functions have two mode names
4833 and no operand count. */
4835 init_interclass_conv_libfuncs (convert_optab tab, const char *opname,
4836 enum mode_class from_class,
4837 enum mode_class to_class)
4839 enum machine_mode first_from_mode = GET_CLASS_NARROWEST_MODE (from_class);
4840 enum machine_mode first_to_mode = GET_CLASS_NARROWEST_MODE (to_class);
4841 size_t opname_len = strlen (opname);
4842 size_t max_mname_len = 0;
4844 enum machine_mode fmode, tmode;
4845 const char *fname, *tname;
4847 char *libfunc_name, *suffix;
/* First pass: find the longest mode name in either class so one alloca
   buffer can hold any "__<op><from><to>" combination.  */
4850 for (fmode = first_from_mode;
4852 fmode = GET_MODE_WIDER_MODE (fmode))
4853 max_mname_len = MAX (max_mname_len, strlen (GET_MODE_NAME (fmode)));
4855 for (tmode = first_to_mode;
4857 tmode = GET_MODE_WIDER_MODE (tmode))
4858 max_mname_len = MAX (max_mname_len, strlen (GET_MODE_NAME (tmode)));
4860 libfunc_name = alloca (2 + opname_len + 2*max_mname_len + 1 + 1);
4861 libfunc_name[0] = '_';
4862 libfunc_name[1] = '_';
4863 memcpy (&libfunc_name[2], opname, opname_len);
/* SUFFIX points just past "__<opname>"; the two mode names are written
   there afresh for every (from, to) pair.  */
4864 suffix = libfunc_name + opname_len + 2;
4866 for (fmode = first_from_mode; fmode != VOIDmode;
4867 fmode = GET_MODE_WIDER_MODE (fmode))
4868 for (tmode = first_to_mode; tmode != VOIDmode;
4869 tmode = GET_MODE_WIDER_MODE (tmode))
4871 fname = GET_MODE_NAME (fmode);
4872 tname = GET_MODE_NAME (tmode);
4875 for (q = fname; *q; p++, q++)
4877 for (q = tname; *q; p++, q++)
/* Conversion tables are indexed [to][from].  */
4882 tab->handlers[tmode][fmode].libfunc
4883 = init_one_libfunc (ggc_alloc_string (libfunc_name,
4888 /* Initialize the libfunc fields of an entire group of entries of an
4889 intra-mode-class conversion optab. The string formation rules are
4890 similar to the ones for init_libfunc, above. WIDENING says whether
4891 the optab goes from narrow to wide modes or vice versa. These functions
4892 have two mode names _and_ an operand count. */
4894 init_intraclass_conv_libfuncs (convert_optab tab, const char *opname,
4895 enum mode_class class, bool widening)
4897 enum machine_mode first_mode = GET_CLASS_NARROWEST_MODE (class);
4898 size_t opname_len = strlen (opname);
4899 size_t max_mname_len = 0;
4901 enum machine_mode nmode, wmode;
4902 const char *nname, *wname;
4904 char *libfunc_name, *suffix;
/* Longest mode name in the class, to size the alloca buffer below.  */
4907 for (nmode = first_mode; nmode != VOIDmode;
4908 nmode = GET_MODE_WIDER_MODE (nmode))
4909 max_mname_len = MAX (max_mname_len, strlen (GET_MODE_NAME (nmode)));
4911 libfunc_name = alloca (2 + opname_len + 2*max_mname_len + 1 + 1);
4912 libfunc_name[0] = '_';
4913 libfunc_name[1] = '_';
4914 memcpy (&libfunc_name[2], opname, opname_len);
4915 suffix = libfunc_name + opname_len + 2;
/* Enumerate every (narrow, strictly-wider) pair within the class.  */
4917 for (nmode = first_mode; nmode != VOIDmode;
4918 nmode = GET_MODE_WIDER_MODE (nmode))
4919 for (wmode = GET_MODE_WIDER_MODE (nmode); wmode != VOIDmode;
4920 wmode = GET_MODE_WIDER_MODE (wmode))
4922 nname = GET_MODE_NAME (nmode);
4923 wname = GET_MODE_NAME (wmode);
/* WIDENING controls both the source/destination order in the name and
   which [to][from] slot of the table gets the libfunc.  */
4926 for (q = widening ? nname : wname; *q; p++, q++)
4928 for (q = widening ? wname : nname; *q; p++, q++)
4934 tab->handlers[widening ? wmode : nmode]
4935 [widening ? nmode : wmode].libfunc
4936 = init_one_libfunc (ggc_alloc_string (libfunc_name,
/* Build the SYMBOL_REF rtx for library routine NAME, running the
   target's section-info hook so target-specific flags get applied.  */
4943 init_one_libfunc (const char *name)
4947 /* Create a FUNCTION_DECL that can be passed to
4948 targetm.encode_section_info. */
4949 /* ??? We don't have any type information except for this is
4950 a function. Pretend this is "int foo()". */
4951 tree decl = build_decl (FUNCTION_DECL, get_identifier (name),
4952 build_function_type (integer_type_node, NULL_TREE));
4953 DECL_ARTIFICIAL (decl) = 1;
4954 DECL_EXTERNAL (decl) = 1;
4955 TREE_PUBLIC (decl) = 1;
4957 symbol = XEXP (DECL_RTL (decl), 0);
4959 /* Zap the nonsensical SYMBOL_REF_DECL for this. What we're left with
4960 are the flags assigned by targetm.encode_section_info. */
4961 SYMBOL_REF_DECL (symbol) = 0;
4966 /* Call this to reset the function entry for one optab (OPTABLE) in mode
4967 MODE to NAME, which should be either 0 or a string constant. */
4969 set_optab_libfunc (optab optable, enum machine_mode mode, const char *name)
/* A NULL NAME clears the entry (no libcall available for this mode).  */
4972 optable->handlers[mode].libfunc = init_one_libfunc (name);
4974 optable->handlers[mode].libfunc = 0;
4977 /* Call this to reset the function entry for one conversion optab
4978 (OPTABLE) from mode FMODE to mode TMODE to NAME, which should be
4979 either 0 or a string constant. */
4981 set_conv_libfunc (convert_optab optable, enum machine_mode tmode,
4982 enum machine_mode fmode, const char *name)
/* Table is indexed [to][from]; NULL NAME clears the entry.  */
4985 optable->handlers[tmode][fmode].libfunc = init_one_libfunc (name);
4987 optable->handlers[tmode][fmode].libfunc = 0;
4990 /* Call this once to initialize the contents of the optabs
4991 appropriately for the current target machine. */
4998 /* Start by initializing all tables to contain CODE_FOR_nothing. */
5000 for (i = 0; i < NUM_RTX_CODE; i++)
5001 setcc_gen_code[i] = CODE_FOR_nothing;
5003 #ifdef HAVE_conditional_move
5004 for (i = 0; i < NUM_MACHINE_MODES; i++)
5005 movcc_gen_code[i] = CODE_FOR_nothing;
5008 for (i = 0; i < NUM_MACHINE_MODES; i++)
5010 vcond_gen_code[i] = CODE_FOR_nothing;
5011 vcondu_gen_code[i] = CODE_FOR_nothing;
/* Allocate all the arithmetic/logical optabs and record their rtx
   codes.  The *v variants are the trapping (-ftrapv) forms and stay
   out of code_to_optab.  */
5014 add_optab = init_optab (PLUS);
5015 addv_optab = init_optabv (PLUS);
5016 sub_optab = init_optab (MINUS);
5017 subv_optab = init_optabv (MINUS);
5018 smul_optab = init_optab (MULT);
5019 smulv_optab = init_optabv (MULT);
5020 smul_highpart_optab = init_optab (UNKNOWN);
5021 umul_highpart_optab = init_optab (UNKNOWN);
5022 smul_widen_optab = init_optab (UNKNOWN);
5023 umul_widen_optab = init_optab (UNKNOWN);
5024 sdiv_optab = init_optab (DIV);
5025 sdivv_optab = init_optabv (DIV);
5026 sdivmod_optab = init_optab (UNKNOWN);
5027 udiv_optab = init_optab (UDIV);
5028 udivmod_optab = init_optab (UNKNOWN);
5029 smod_optab = init_optab (MOD);
5030 umod_optab = init_optab (UMOD);
5031 fmod_optab = init_optab (UNKNOWN);
5032 drem_optab = init_optab (UNKNOWN);
5033 ftrunc_optab = init_optab (UNKNOWN);
5034 and_optab = init_optab (AND);
5035 ior_optab = init_optab (IOR);
5036 xor_optab = init_optab (XOR);
5037 ashl_optab = init_optab (ASHIFT);
5038 ashr_optab = init_optab (ASHIFTRT);
5039 lshr_optab = init_optab (LSHIFTRT);
5040 rotl_optab = init_optab (ROTATE);
5041 rotr_optab = init_optab (ROTATERT);
5042 smin_optab = init_optab (SMIN);
5043 smax_optab = init_optab (SMAX);
5044 umin_optab = init_optab (UMIN);
5045 umax_optab = init_optab (UMAX);
5046 pow_optab = init_optab (UNKNOWN);
5047 atan2_optab = init_optab (UNKNOWN);
5049 /* These three have codes assigned exclusively for the sake of
5051 mov_optab = init_optab (SET);
5052 movstrict_optab = init_optab (STRICT_LOW_PART);
5053 cmp_optab = init_optab (COMPARE);
5055 ucmp_optab = init_optab (UNKNOWN);
5056 tst_optab = init_optab (UNKNOWN);
5058 eq_optab = init_optab (EQ);
5059 ne_optab = init_optab (NE);
5060 gt_optab = init_optab (GT);
5061 ge_optab = init_optab (GE);
5062 lt_optab = init_optab (LT);
5063 le_optab = init_optab (LE);
5064 unord_optab = init_optab (UNORDERED);
5066 neg_optab = init_optab (NEG);
5067 negv_optab = init_optabv (NEG);
5068 abs_optab = init_optab (ABS);
5069 absv_optab = init_optabv (ABS);
5070 addcc_optab = init_optab (UNKNOWN);
5071 one_cmpl_optab = init_optab (NOT);
5072 ffs_optab = init_optab (FFS);
5073 clz_optab = init_optab (CLZ);
5074 ctz_optab = init_optab (CTZ);
5075 popcount_optab = init_optab (POPCOUNT);
5076 parity_optab = init_optab (PARITY);
5077 sqrt_optab = init_optab (SQRT);
5078 floor_optab = init_optab (UNKNOWN);
5079 lfloor_optab = init_optab (UNKNOWN);
5080 ceil_optab = init_optab (UNKNOWN);
5081 lceil_optab = init_optab (UNKNOWN);
5082 round_optab = init_optab (UNKNOWN);
5083 btrunc_optab = init_optab (UNKNOWN);
5084 nearbyint_optab = init_optab (UNKNOWN);
5085 rint_optab = init_optab (UNKNOWN);
5086 lrint_optab = init_optab (UNKNOWN);
5087 sincos_optab = init_optab (UNKNOWN);
5088 sin_optab = init_optab (UNKNOWN);
5089 asin_optab = init_optab (UNKNOWN);
5090 cos_optab = init_optab (UNKNOWN);
5091 acos_optab = init_optab (UNKNOWN);
5092 exp_optab = init_optab (UNKNOWN);
5093 exp10_optab = init_optab (UNKNOWN);
5094 exp2_optab = init_optab (UNKNOWN);
5095 expm1_optab = init_optab (UNKNOWN);
5096 ldexp_optab = init_optab (UNKNOWN);
5097 logb_optab = init_optab (UNKNOWN);
5098 ilogb_optab = init_optab (UNKNOWN);
5099 log_optab = init_optab (UNKNOWN);
5100 log10_optab = init_optab (UNKNOWN);
5101 log2_optab = init_optab (UNKNOWN);
5102 log1p_optab = init_optab (UNKNOWN);
5103 tan_optab = init_optab (UNKNOWN);
5104 atan_optab = init_optab (UNKNOWN);
5105 copysign_optab = init_optab (UNKNOWN);
5107 strlen_optab = init_optab (UNKNOWN);
5108 cbranch_optab = init_optab (UNKNOWN);
5109 cmov_optab = init_optab (UNKNOWN);
5110 cstore_optab = init_optab (UNKNOWN);
5111 push_optab = init_optab (UNKNOWN);
/* Vector reduction and permutation optabs (no rtx codes of their own).  */
5113 reduc_smax_optab = init_optab (UNKNOWN);
5114 reduc_umax_optab = init_optab (UNKNOWN);
5115 reduc_smin_optab = init_optab (UNKNOWN);
5116 reduc_umin_optab = init_optab (UNKNOWN);
5117 reduc_splus_optab = init_optab (UNKNOWN);
5118 reduc_uplus_optab = init_optab (UNKNOWN);
5120 vec_extract_optab = init_optab (UNKNOWN);
5121 vec_set_optab = init_optab (UNKNOWN);
5122 vec_init_optab = init_optab (UNKNOWN);
5123 vec_shl_optab = init_optab (UNKNOWN);
5124 vec_shr_optab = init_optab (UNKNOWN);
5125 vec_realign_load_optab = init_optab (UNKNOWN);
5126 movmisalign_optab = init_optab (UNKNOWN);
5128 powi_optab = init_optab (UNKNOWN);
/* Conversion optabs: mode-to-mode extensions, truncations and
   float<->fixed conversions.  */
5131 sext_optab = init_convert_optab (SIGN_EXTEND);
5132 zext_optab = init_convert_optab (ZERO_EXTEND);
5133 trunc_optab = init_convert_optab (TRUNCATE);
5134 sfix_optab = init_convert_optab (FIX);
5135 ufix_optab = init_convert_optab (UNSIGNED_FIX);
5136 sfixtrunc_optab = init_convert_optab (UNKNOWN);
5137 ufixtrunc_optab = init_convert_optab (UNKNOWN);
5138 sfloat_optab = init_convert_optab (FLOAT);
5139 ufloat_optab = init_convert_optab (UNSIGNED_FLOAT);
/* Block-move/compare and sync-builtin insn-code tables, per mode.  */
5141 for (i = 0; i < NUM_MACHINE_MODES; i++)
5143 movmem_optab[i] = CODE_FOR_nothing;
5144 cmpstr_optab[i] = CODE_FOR_nothing;
5145 cmpstrn_optab[i] = CODE_FOR_nothing;
5146 cmpmem_optab[i] = CODE_FOR_nothing;
5147 setmem_optab[i] = CODE_FOR_nothing;
5149 sync_add_optab[i] = CODE_FOR_nothing;
5150 sync_sub_optab[i] = CODE_FOR_nothing;
5151 sync_ior_optab[i] = CODE_FOR_nothing;
5152 sync_and_optab[i] = CODE_FOR_nothing;
5153 sync_xor_optab[i] = CODE_FOR_nothing;
5154 sync_nand_optab[i] = CODE_FOR_nothing;
5155 sync_old_add_optab[i] = CODE_FOR_nothing;
5156 sync_old_sub_optab[i] = CODE_FOR_nothing;
5157 sync_old_ior_optab[i] = CODE_FOR_nothing;
5158 sync_old_and_optab[i] = CODE_FOR_nothing;
5159 sync_old_xor_optab[i] = CODE_FOR_nothing;
5160 sync_old_nand_optab[i] = CODE_FOR_nothing;
5161 sync_new_add_optab[i] = CODE_FOR_nothing;
5162 sync_new_sub_optab[i] = CODE_FOR_nothing;
5163 sync_new_ior_optab[i] = CODE_FOR_nothing;
5164 sync_new_and_optab[i] = CODE_FOR_nothing;
5165 sync_new_xor_optab[i] = CODE_FOR_nothing;
5166 sync_new_nand_optab[i] = CODE_FOR_nothing;
5167 sync_compare_and_swap[i] = CODE_FOR_nothing;
5168 sync_compare_and_swap_cc[i] = CODE_FOR_nothing;
5169 sync_lock_test_and_set[i] = CODE_FOR_nothing;
5170 sync_lock_release[i] = CODE_FOR_nothing;
5172 #ifdef HAVE_SECONDARY_RELOADS
5173 reload_in_optab[i] = reload_out_optab[i] = CODE_FOR_nothing;
5177 /* Fill in the optabs with the insns we support. */
5180 /* Initialize the optabs with the names of the library functions. */
5181 init_integral_libfuncs (add_optab, "add", '3');
5182 init_floating_libfuncs (add_optab, "add", '3');
5183 init_integral_libfuncs (addv_optab, "addv", '3');
5184 init_floating_libfuncs (addv_optab, "add", '3');
5185 init_integral_libfuncs (sub_optab, "sub", '3');
5186 init_floating_libfuncs (sub_optab, "sub", '3');
5187 init_integral_libfuncs (subv_optab, "subv", '3');
5188 init_floating_libfuncs (subv_optab, "sub", '3');
5189 init_integral_libfuncs (smul_optab, "mul", '3');
5190 init_floating_libfuncs (smul_optab, "mul", '3');
5191 init_integral_libfuncs (smulv_optab, "mulv", '3');
5192 init_floating_libfuncs (smulv_optab, "mul", '3');
5193 init_integral_libfuncs (sdiv_optab, "div", '3');
5194 init_floating_libfuncs (sdiv_optab, "div", '3');
5195 init_integral_libfuncs (sdivv_optab, "divv", '3');
5196 init_integral_libfuncs (udiv_optab, "udiv", '3');
5197 init_integral_libfuncs (sdivmod_optab, "divmod", '4');
5198 init_integral_libfuncs (udivmod_optab, "udivmod", '4');
5199 init_integral_libfuncs (smod_optab, "mod", '3');
5200 init_integral_libfuncs (umod_optab, "umod", '3');
5201 init_floating_libfuncs (ftrunc_optab, "ftrunc", '2');
5202 init_integral_libfuncs (and_optab, "and", '3');
5203 init_integral_libfuncs (ior_optab, "ior", '3');
5204 init_integral_libfuncs (xor_optab, "xor", '3');
5205 init_integral_libfuncs (ashl_optab, "ashl", '3');
5206 init_integral_libfuncs (ashr_optab, "ashr", '3');
5207 init_integral_libfuncs (lshr_optab, "lshr", '3');
5208 init_integral_libfuncs (smin_optab, "min", '3');
5209 init_floating_libfuncs (smin_optab, "min", '3');
5210 init_integral_libfuncs (smax_optab, "max", '3');
5211 init_floating_libfuncs (smax_optab, "max", '3');
5212 init_integral_libfuncs (umin_optab, "umin", '3');
5213 init_integral_libfuncs (umax_optab, "umax", '3');
5214 init_integral_libfuncs (neg_optab, "neg", '2');
5215 init_floating_libfuncs (neg_optab, "neg", '2');
5216 init_integral_libfuncs (negv_optab, "negv", '2');
5217 init_floating_libfuncs (negv_optab, "neg", '2');
5218 init_integral_libfuncs (one_cmpl_optab, "one_cmpl", '2');
5219 init_integral_libfuncs (ffs_optab, "ffs", '2');
5220 init_integral_libfuncs (clz_optab, "clz", '2');
5221 init_integral_libfuncs (ctz_optab, "ctz", '2');
5222 init_integral_libfuncs (popcount_optab, "popcount", '2');
5223 init_integral_libfuncs (parity_optab, "parity", '2');
5225 /* Comparison libcalls for integers MUST come in pairs,
5227 init_integral_libfuncs (cmp_optab, "cmp", '2');
5228 init_integral_libfuncs (ucmp_optab, "ucmp", '2');
5229 init_floating_libfuncs (cmp_optab, "cmp", '2');
5231 /* EQ etc are floating point only. */
5232 init_floating_libfuncs (eq_optab, "eq", '2');
5233 init_floating_libfuncs (ne_optab, "ne", '2');
5234 init_floating_libfuncs (gt_optab, "gt", '2');
5235 init_floating_libfuncs (ge_optab, "ge", '2');
5236 init_floating_libfuncs (lt_optab, "lt", '2');
5237 init_floating_libfuncs (le_optab, "le", '2');
5238 init_floating_libfuncs (unord_optab, "unord", '2');
5240 init_floating_libfuncs (powi_optab, "powi", '2');
/* Float <-> int conversion libcalls (e.g. __floatsisf, __fixsfsi).  */
5243 init_interclass_conv_libfuncs (sfloat_optab, "float",
5244 MODE_INT, MODE_FLOAT);
5245 init_interclass_conv_libfuncs (sfix_optab, "fix",
5246 MODE_FLOAT, MODE_INT);
5247 init_interclass_conv_libfuncs (ufix_optab, "fixuns",
5248 MODE_FLOAT, MODE_INT);
5250 /* sext_optab is also used for FLOAT_EXTEND. */
5251 init_intraclass_conv_libfuncs (sext_optab, "extend", MODE_FLOAT, true);
5252 init_intraclass_conv_libfuncs (trunc_optab, "trunc", MODE_FLOAT, false);
5254 /* Use cabs for double complex abs, since systems generally have cabs.
5255 Don't define any libcall for float complex, so that cabs will be used. */
5256 if (complex_double_type_node)
5257 abs_optab->handlers[TYPE_MODE (complex_double_type_node)].libfunc
5258 = init_one_libfunc ("cabs");
5260 /* The ffs function operates on `int'. */
5261 ffs_optab->handlers[(int) mode_for_size (INT_TYPE_SIZE, MODE_INT, 0)].libfunc
5262 = init_one_libfunc ("ffs");
/* Miscellaneous well-known library entry points.  */
5264 abort_libfunc = init_one_libfunc ("abort");
5265 memcpy_libfunc = init_one_libfunc ("memcpy");
5266 memmove_libfunc = init_one_libfunc ("memmove");
5267 memcmp_libfunc = init_one_libfunc ("memcmp");
5268 memset_libfunc = init_one_libfunc ("memset");
5269 setbits_libfunc = init_one_libfunc ("__setbits");
5271 #ifndef DONT_USE_BUILTIN_SETJMP
5272 setjmp_libfunc = init_one_libfunc ("__builtin_setjmp");
5273 longjmp_libfunc = init_one_libfunc ("__builtin_longjmp");
5275 setjmp_libfunc = init_one_libfunc ("setjmp");
5276 longjmp_libfunc = init_one_libfunc ("longjmp");
5278 unwind_sjlj_register_libfunc = init_one_libfunc ("_Unwind_SjLj_Register");
5279 unwind_sjlj_unregister_libfunc
5280 = init_one_libfunc ("_Unwind_SjLj_Unregister");
5282 /* For function entry/exit instrumentation. */
5283 profile_function_entry_libfunc
5284 = init_one_libfunc ("__cyg_profile_func_enter");
5285 profile_function_exit_libfunc
5286 = init_one_libfunc ("__cyg_profile_func_exit");
5288 gcov_flush_libfunc = init_one_libfunc ("__gcov_flush");
5290 if (HAVE_conditional_trap)
5291 trap_rtx = gen_rtx_fmt_ee (EQ, VOIDmode, NULL_RTX, NULL_RTX);
5293 /* Allow the target to add more libcalls or rename some, etc. */
5294 targetm.init_libfuncs ();
5299 /* Print information about the current contents of the optabs on
/* Debug helper: dump every registered libfunc (arithmetic and
   conversion optabs) to stderr, one per line.  */
5303 debug_optab_libfuncs (void)
5309 /* Dump the arithmetic optabs. */
5310 for (i = 0; i != (int) OTI_MAX; i++)
5311 for (j = 0; j < NUM_MACHINE_MODES; ++j)
5314 struct optab_handlers *h;
5317 h = &o->handlers[j];
/* Fixed: the assert used assignment "=", which overwrote h->libfunc
   with SYMBOL_REF (and always passed); a registered libfunc must
   merely BE a SYMBOL_REF, so compare with "==".  */
5320 gcc_assert (GET_CODE (h->libfunc) == SYMBOL_REF);
5321 fprintf (stderr, "%s\t%s:\t%s\n",
5322 GET_RTX_NAME (o->code),
5324 XSTR (h->libfunc, 0));
5328 /* Dump the conversion optabs. */
5329 for (i = 0; i < (int) COI_MAX; ++i)
5330 for (j = 0; j < NUM_MACHINE_MODES; ++j)
5331 for (k = 0; k < NUM_MACHINE_MODES; ++k)
5334 struct optab_handlers *h;
5336 o = &convert_optab_table[i];
5337 h = &o->handlers[j][k];
/* Same fix as above: comparison, not assignment.  */
5340 gcc_assert (GET_CODE (h->libfunc) == SYMBOL_REF);
5341 fprintf (stderr, "%s\t%s\t%s:\t%s\n",
5342 GET_RTX_NAME (o->code),
5345 XSTR (h->libfunc, 0));
5353 /* Generate insns to trap with code TCODE if OP1 and OP2 satisfy condition
5354 CODE. Return 0 on failure. */
5357 gen_cond_trap (enum rtx_code code ATTRIBUTE_UNUSED, rtx op1,
5358 rtx op2 ATTRIBUTE_UNUSED, rtx tcode ATTRIBUTE_UNUSED)
5360 enum machine_mode mode = GET_MODE (op1);
5361 enum insn_code icode;
/* Bail out early if the target has no conditional_trap pattern.  */
5364 if (!HAVE_conditional_trap)
/* VOIDmode (e.g. two constants) cannot be compared meaningfully.  */
5367 if (mode == VOIDmode)
5370 icode = cmp_optab->handlers[(int) mode].insn_code;
5371 if (icode == CODE_FOR_nothing)
/* Legitimize both operands against the compare pattern's predicates.  */
5375 op1 = prepare_operand (icode, op1, 0, mode, mode, 0);
5376 op2 = prepare_operand (icode, op2, 1, mode, mode, 0);
5382 emit_insn (GEN_FCN (icode) (op1, op2));
/* trap_rtx is a shared template; stamp the requested condition on it.  */
5384 PUT_CODE (trap_rtx, code);
5385 gcc_assert (HAVE_conditional_trap);
5386 insn = gen_conditional_trap (trap_rtx, tcode);
5390 insn = get_insns ();
5397 /* Return rtx code for TCODE. Use UNSIGNEDP to select signed
5398 or unsigned operation code. */
5400 static enum rtx_code
5401 get_rtx_code (enum tree_code tcode, bool unsignedp)
/* Map tree comparison codes to rtx codes; UNSIGNEDP picks the
   unsigned flavor where one exists.  */
5413 code = unsignedp ? LTU : LT;
5416 code = unsignedp ? LEU : LE;
5419 code = unsignedp ? GTU : GT;
5422 code = unsignedp ? GEU : GE;
5425 case UNORDERED_EXPR:
5456 /* Return comparison rtx for COND. Use UNSIGNEDP to select signed or
5457 unsigned operators. Do not generate compare instruction. */
5460 vector_compare_rtx (tree cond, bool unsignedp, enum insn_code icode)
5462 enum rtx_code rcode;
5464 rtx rtx_op0, rtx_op1;
5466 /* This is unlikely. While generating VEC_COND_EXPR, auto vectorizer
5467 ensures that condition is a relational operation. */
5468 gcc_assert (COMPARISON_CLASS_P (cond));
5470 rcode = get_rtx_code (TREE_CODE (cond), unsignedp);
5471 t_op0 = TREE_OPERAND (cond, 0);
5472 t_op1 = TREE_OPERAND (cond, 1);
5474 /* Expand operands. */
5475 rtx_op0 = expand_expr (t_op0, NULL_RTX, TYPE_MODE (TREE_TYPE (t_op0)), 1);
5476 rtx_op1 = expand_expr (t_op1, NULL_RTX, TYPE_MODE (TREE_TYPE (t_op1)), 1);
/* Operands 4 and 5 of the vcond pattern are the two compare operands;
   force into registers anything the predicates reject.  */
5478 if (!insn_data[icode].operand[4].predicate (rtx_op0, GET_MODE (rtx_op0))
5479 && GET_MODE (rtx_op0) != VOIDmode)
5480 rtx_op0 = force_reg (GET_MODE (rtx_op0), rtx_op0);
5482 if (!insn_data[icode].operand[5].predicate (rtx_op1, GET_MODE (rtx_op1))
5483 && GET_MODE (rtx_op1) != VOIDmode)
5484 rtx_op1 = force_reg (GET_MODE (rtx_op1), rtx_op1);
/* Build the bare comparison rtx; the caller emits the actual insn.  */
5486 return gen_rtx_fmt_ee (rcode, VOIDmode, rtx_op0, rtx_op1);
5489 /* Return insn code for VEC_COND_EXPR EXPR. */
5491 static inline enum insn_code
5492 get_vcond_icode (tree expr, enum machine_mode mode)
5494 enum insn_code icode = CODE_FOR_nothing;
/* Signedness of the expression type selects vcondu vs. vcond.  */
5496 if (TYPE_UNSIGNED (TREE_TYPE (expr)))
5497 icode = vcondu_gen_code[mode];
5499 icode = vcond_gen_code[mode];
5503 /* Return TRUE iff, appropriate vector insns are available
5504 for vector cond expr expr in VMODE mode. */
5507 expand_vec_cond_expr_p (tree expr, enum machine_mode vmode)
5509 if (get_vcond_icode (expr, vmode) == CODE_FOR_nothing)
5514 /* Generate insns for VEC_COND_EXPR. */
5517 expand_vec_cond_expr (tree vec_cond_expr, rtx target)
5519 enum insn_code icode;
5520 rtx comparison, rtx_op1, rtx_op2, cc_op0, cc_op1;
5521 enum machine_mode mode = TYPE_MODE (TREE_TYPE (vec_cond_expr));
5522 bool unsignedp = TYPE_UNSIGNED (TREE_TYPE (vec_cond_expr));
5524 icode = get_vcond_icode (vec_cond_expr, mode);
5525 if (icode == CODE_FOR_nothing)
/* Need a target register acceptable to the vcond pattern.  */
5528 if (!target || !insn_data[icode].operand[0].predicate (target, mode))
5529 target = gen_reg_rtx (mode);
5531 /* Get comparison rtx. First expand both cond expr operands. */
5532 comparison = vector_compare_rtx (TREE_OPERAND (vec_cond_expr, 0),
5534 cc_op0 = XEXP (comparison, 0);
5535 cc_op1 = XEXP (comparison, 1);
5536 /* Expand both operands and force them in reg, if required. */
5537 rtx_op1 = expand_expr (TREE_OPERAND (vec_cond_expr, 1),
5538 NULL_RTX, VOIDmode, 1);
5539 if (!insn_data[icode].operand[1].predicate (rtx_op1, mode)
5540 && mode != VOIDmode)
5541 rtx_op1 = force_reg (mode, rtx_op1);
5543 rtx_op2 = expand_expr (TREE_OPERAND (vec_cond_expr, 2),
5544 NULL_RTX, VOIDmode, 1);
5545 if (!insn_data[icode].operand[2].predicate (rtx_op2, mode)
5546 && mode != VOIDmode)
5547 rtx_op2 = force_reg (mode, rtx_op2);
5549 /* Emit instruction! */
5550 emit_insn (GEN_FCN (icode) (target, rtx_op1, rtx_op2,
5551 comparison, cc_op0, cc_op1));
5557 /* This is an internal subroutine of the other compare_and_swap expanders.
5558 MEM, OLD_VAL and NEW_VAL are as you'd expect for a compare-and-swap
5559 operation. TARGET is an optional place to store the value result of
5560 the operation. ICODE is the particular instruction to expand. Return
5561 the result of the operation. */
5564 expand_val_compare_and_swap_1 (rtx mem, rtx old_val, rtx new_val,
5565 rtx target, enum insn_code icode)
5567 enum machine_mode mode = GET_MODE (mem);
5570 if (!target || !insn_data[icode].operand[0].predicate (target, mode))
5571 target = gen_reg_rtx (mode);
/* Widen/convert both value operands to MEM's mode and satisfy the
   pattern's predicates (operands 2 and 3).  */
5573 if (GET_MODE (old_val) != VOIDmode && GET_MODE (old_val) != mode)
5574 old_val = convert_modes (mode, GET_MODE (old_val), old_val, 1);
5575 if (!insn_data[icode].operand[2].predicate (old_val, mode))
5576 old_val = force_reg (mode, old_val);
5578 if (GET_MODE (new_val) != VOIDmode && GET_MODE (new_val) != mode)
5579 new_val = convert_modes (mode, GET_MODE (new_val), new_val, 1);
5580 if (!insn_data[icode].operand[3].predicate (new_val, mode))
5581 new_val = force_reg (mode, new_val);
/* The generator can still refuse (return NULL); propagate failure.  */
5583 insn = GEN_FCN (icode) (target, mem, old_val, new_val);
5584 if (insn == NULL_RTX)
5591 /* Expand a compare-and-swap operation and return its value. */
5594 expand_val_compare_and_swap (rtx mem, rtx old_val, rtx new_val, rtx target)
5596 enum machine_mode mode = GET_MODE (mem);
5597 enum insn_code icode = sync_compare_and_swap[mode];
5599 if (icode == CODE_FOR_nothing)
5602 return expand_val_compare_and_swap_1 (mem, old_val, new_val, target, icode);
5605 /* Expand a compare-and-swap operation and store true into the result if
5606 the operation was successful and false otherwise. Return the result.
5607 Unlike other routines, TARGET is not optional. */
5610 expand_bool_compare_and_swap (rtx mem, rtx old_val, rtx new_val, rtx target)
5612 enum machine_mode mode = GET_MODE (mem);
5613 enum insn_code icode;
5614 rtx subtarget, label0, label1;
5616 /* If the target supports a compare-and-swap pattern that simultaneously
5617 sets some flag for success, then use it. Otherwise use the regular
5618 compare-and-swap and follow that immediately with a compare insn. */
5619 icode = sync_compare_and_swap_cc[mode];
5623 subtarget = expand_val_compare_and_swap_1 (mem, old_val, new_val,
5625 if (subtarget != NULL_RTX)
5629 case CODE_FOR_nothing:
5630 icode = sync_compare_and_swap[mode];
5631 if (icode == CODE_FOR_nothing)
5634 /* Ensure that if old_val == mem, that we're not comparing
5635 against an old value. */
5636 if (MEM_P (old_val))
5637 old_val = force_reg (mode, old_val);
5639 subtarget = expand_val_compare_and_swap_1 (mem, old_val, new_val,
5641 if (subtarget == NULL_RTX)
5644 emit_cmp_insn (subtarget, old_val, EQ, const0_rtx, mode, true);
5647 /* If the target has a sane STORE_FLAG_VALUE, then go ahead and use a
5648 setcc instruction from the beginning. We don't work too hard here,
5649 but it's nice to not be stupid about initial code gen either. */
5650 if (STORE_FLAG_VALUE == 1)
5652 icode = setcc_gen_code[EQ];
5653 if (icode != CODE_FOR_nothing)
5655 enum machine_mode cmode = insn_data[icode].operand[0].mode;
5659 if (!insn_data[icode].operand[0].predicate (target, cmode))
5660 subtarget = gen_reg_rtx (cmode);
5662 insn = GEN_FCN (icode) (subtarget);
/* setcc may produce a different mode; convert (unsigned) into TARGET.  */
5666 if (GET_MODE (target) != GET_MODE (subtarget))
5668 convert_move (target, subtarget, 1);
5676 /* Without an appropriate setcc instruction, use a set of branches to
5677 get 1 and 0 stored into target. Presumably if the target has a
5678 STORE_FLAG_VALUE that isn't 1, then this will get cleaned up by ifcvt. */
5680 label0 = gen_label_rtx ();
5681 label1 = gen_label_rtx ();
5683 emit_jump_insn (bcc_gen_fctn[EQ] (label0));
5684 emit_move_insn (target, const0_rtx);
5685 emit_jump_insn (gen_jump (label1));
5687 emit_label (label0);
5688 emit_move_insn (target, const1_rtx);
5689 emit_label (label1);
5694 /* This is a helper function for the other atomic operations. This function
5695 emits a loop that contains SEQ that iterates until a compare-and-swap
5696 operation at the end succeeds. MEM is the memory to be modified. SEQ is
5697 a set of instructions that takes a value from OLD_REG as an input and
5698 produces a value in NEW_REG as an output. Before SEQ, OLD_REG will be
5699 set to the current contents of MEM. After SEQ, a compare-and-swap will
5700 attempt to update MEM with NEW_REG. The function returns true when the
5701 loop was generated successfully. */
5704 expand_compare_and_swap_loop (rtx mem, rtx old_reg, rtx new_reg, rtx seq)
5706 enum machine_mode mode = GET_MODE (mem);
5707 enum insn_code icode;
5708 rtx label, cmp_reg, subtarget;
5710 /* The loop we want to generate looks like
5716 cmp_reg = compare-and-swap(mem, old_reg, new_reg)
5717 if (cmp_reg != old_reg)
5720 Note that we only do the plain load from memory once. Subsequent
5721 iterations use the value loaded by the compare-and-swap pattern. */
5723 label = gen_label_rtx ();
5724 cmp_reg = gen_reg_rtx (mode);
5726 emit_move_insn (cmp_reg, mem);
5728 emit_move_insn (old_reg, cmp_reg);
5732 /* If the target supports a compare-and-swap pattern that simultaneously
5733 sets some flag for success, then use it. Otherwise use the regular
5734 compare-and-swap and follow that immediately with a compare insn. */
5735 icode = sync_compare_and_swap_cc[mode];
5739 subtarget = expand_val_compare_and_swap_1 (mem, old_reg, new_reg,
5741 if (subtarget != NULL_RTX)
/* The _cc pattern was asked to write into cmp_reg directly.  */
5743 gcc_assert (subtarget == cmp_reg);
5748 case CODE_FOR_nothing:
5749 icode = sync_compare_and_swap[mode];
5750 if (icode == CODE_FOR_nothing)
5753 subtarget = expand_val_compare_and_swap_1 (mem, old_reg, new_reg,
5755 if (subtarget == NULL_RTX)
5757 if (subtarget != cmp_reg)
5758 emit_move_insn (cmp_reg, subtarget);
5760 emit_cmp_insn (cmp_reg, old_reg, EQ, const0_rtx, mode, true);
5763 /* ??? Mark this jump predicted not taken? */
5764 emit_jump_insn (bcc_gen_fctn[NE] (label));
5769 /* This function generates the atomic operation MEM CODE= VAL.  In this
5770 case, we do not care about any resulting value.  Returns NULL if we
5771 cannot generate the operation.  */
/* NOTE(review): elided excerpt -- the switch-on-CODE skeleton, braces and
   return statements are not visible; comments cover visible lines only.  */
5774 expand_sync_operation (rtx mem, rtx val, enum rtx_code code)
5776 enum machine_mode mode = GET_MODE (mem);
5777 enum insn_code icode;
5780 /* Look to see if the target supports the operation directly.  */
/* Presumably one switch case per rtx CODE, mapping it to the matching
   sync_* optab entry for MODE.  */
5784 icode = sync_add_optab[mode];
5787 icode = sync_ior_optab[mode];
5790 icode = sync_xor_optab[mode];
5793 icode = sync_and_optab[mode];
5796 icode = sync_nand_optab[mode];
5800 icode = sync_sub_optab[mode];
5801 if (icode == CODE_FOR_nothing)
/* No direct subtraction pattern: rewrite MEM -= VAL as MEM += -VAL when
   the target has an atomic add.  */
5803 icode = sync_add_optab[mode];
5804 if (icode != CODE_FOR_nothing)
5806 val = expand_simple_unop (mode, NEG, val, NULL_RTX, 1);
5816 /* Generate the direct operation, if present.  */
5817 if (icode != CODE_FOR_nothing)
/* Widen/convert VAL to the memory mode, then satisfy the insn pattern's
   operand predicate, forcing VAL into a register if necessary.  */
5819 if (GET_MODE (val) != VOIDmode && GET_MODE (val) != mode)
5820 val = convert_modes (mode, GET_MODE (val), val, 1);
5821 if (!insn_data[icode].operand[1].predicate (val, mode))
5822 val = force_reg (mode, val);
5824 insn = GEN_FCN (icode) (mem, val);
5832 /* Failing that, generate a compare-and-swap loop in which we perform the
5833 operation with normal arithmetic instructions.  */
5834 if (sync_compare_and_swap[mode] != CODE_FOR_nothing)
5836 rtx t0 = gen_reg_rtx (mode), t1;
/* NAND is expanded as NOT then AND (elided here); this NOT is presumably
   part of that special case -- TODO confirm against the full source.  */
5843 t1 = expand_simple_unop (mode, NOT, t1, NULL_RTX, true);
5846 t1 = expand_simple_binop (mode, code, t1, val, NULL_RTX,
5847 true, OPTAB_LIB_WIDEN);
/* Capture the arithmetic just emitted as the SEQ for the CAS loop.  */
5849 insn = get_insns ();
5852 if (t1 != NULL && expand_compare_and_swap_loop (mem, t0, t1, insn))
5859 /* This function generates the atomic operation MEM CODE= VAL.  In this
5860 case, we do care about the resulting value: if AFTER is true then
5861 return the value MEM holds after the operation, if AFTER is false
5862 then return the value MEM holds before the operation.  TARGET is an
5863 optional place for the result value to be stored.  */
/* NOTE(review): elided excerpt -- the switch-on-CODE skeleton, the
   old/new icode selection logic (around original lines 5923-5945), braces
   and returns are not visible; comments cover visible lines only.  */
5866 expand_sync_fetch_operation (rtx mem, rtx val, enum rtx_code code,
5867 bool after, rtx target)
5869 enum machine_mode mode = GET_MODE (mem);
5870 enum insn_code old_code, new_code, icode;
5874 /* Look to see if the target supports the operation directly.  */
/* For each CODE, record both the fetch-old and fetch-new pattern, since
   either can serve (possibly with compensation) for AFTER/!AFTER.  */
5878 old_code = sync_old_add_optab[mode];
5879 new_code = sync_new_add_optab[mode];
5882 old_code = sync_old_ior_optab[mode];
5883 new_code = sync_new_ior_optab[mode];
5886 old_code = sync_old_xor_optab[mode];
5887 new_code = sync_new_xor_optab[mode];
5890 old_code = sync_old_and_optab[mode];
5891 new_code = sync_new_and_optab[mode];
5894 old_code = sync_old_nand_optab[mode];
5895 new_code = sync_new_nand_optab[mode];
5899 old_code = sync_old_sub_optab[mode];
5900 new_code = sync_new_sub_optab[mode];
5901 if (old_code == CODE_FOR_nothing && new_code == CODE_FOR_nothing)
/* No direct subtraction patterns: rewrite as fetch-add of -VAL.  */
5903 old_code = sync_old_add_optab[mode];
5904 new_code = sync_new_add_optab[mode];
5905 if (old_code != CODE_FOR_nothing || new_code != CODE_FOR_nothing)
5907 val = expand_simple_unop (mode, NEG, val, NULL_RTX, 1);
5917 /* If the target does support the proper new/old operation, great.  But
5918 if we only support the opposite old/new operation, check to see if we
5919 can compensate.  In the case in which the old value is supported, then
5920 we can always perform the operation again with normal arithmetic.  In
5921 the case in which the new value is supported, then we can only handle
5922 this in the case the operation is reversible.  */
5927 if (icode == CODE_FOR_nothing)
5930 if (icode != CODE_FOR_nothing)
/* Reversibility check: PLUS, MINUS and XOR can be undone to recover the
   old value from the new one.  */
5937 if (icode == CODE_FOR_nothing
5938 && (code == PLUS || code == MINUS || code == XOR))
5941 if (icode != CODE_FOR_nothing)
5946 /* If we found something supported, great.  */
5947 if (icode != CODE_FOR_nothing)
5949 if (!target || !insn_data[icode].operand[0].predicate (target, mode))
5950 target = gen_reg_rtx (mode);
/* Convert VAL to the memory mode and satisfy the pattern's operand
   predicate, forcing a register if needed.  */
5952 if (GET_MODE (val) != VOIDmode && GET_MODE (val) != mode)
5953 val = convert_modes (mode, GET_MODE (val), val, 1);
5954 if (!insn_data[icode].operand[2].predicate (val, mode))
5955 val = force_reg (mode, val);
5957 insn = GEN_FCN (icode) (target, mem, val);
5962 /* If we need to compensate for using an operation with the
5963 wrong return value, do so now.  */
5970 else if (code == MINUS)
/* NAND compensation presumably needs an extra NOT before redoing the
   operation -- TODO confirm; the surrounding branch is elided.  */
5975 target = expand_simple_unop (mode, NOT, target, NULL_RTX, true);
5976 target = expand_simple_binop (mode, code, target, val, NULL_RTX,
5977 true, OPTAB_LIB_WIDEN);
5984 /* Failing that, generate a compare-and-swap loop in which we perform the
5985 operation with normal arithmetic instructions.  */
5986 if (sync_compare_and_swap[mode] != CODE_FOR_nothing)
5988 rtx t0 = gen_reg_rtx (mode), t1;
5990 if (!target || !register_operand (target, mode))
5991 target = gen_reg_rtx (mode);
/* !AFTER: the pre-operation value (T0) is the result.  */
5996 emit_move_insn (target, t0);
6000 t1 = expand_simple_unop (mode, NOT, t1, NULL_RTX, true);
6003 t1 = expand_simple_binop (mode, code, t1, val, NULL_RTX,
6004 true, OPTAB_LIB_WIDEN);
/* AFTER: the post-operation value (T1) is the result.  */
6006 emit_move_insn (target, t1);
/* Capture the emitted arithmetic as SEQ for the CAS loop helper.  */
6008 insn = get_insns ();
6011 if (t1 != NULL && expand_compare_and_swap_loop (mem, t0, t1, insn))
6018 /* This function expands a test-and-set operation.  Ideally we atomically
6019 store VAL in MEM and return the previous value in MEM.  Some targets
6020 may not support this operation and only support VAL with the constant 1;
6021 in this case the return value will be 0/1, but the exact value
6022 stored in MEM is target defined.  TARGET is an optional place to stick
6023 the return value.  */
/* NOTE(review): elided excerpt -- braces and return statements are not
   visible; comments cover visible lines only.  */
6026 expand_sync_lock_test_and_set (rtx mem, rtx val, rtx target)
6028 enum machine_mode mode = GET_MODE (mem);
6029 enum insn_code icode;
6032 /* If the target supports the test-and-set directly, great.  */
6033 icode = sync_lock_test_and_set[mode];
6034 if (icode != CODE_FOR_nothing)
6036 if (!target || !insn_data[icode].operand[0].predicate (target, mode))
6037 target = gen_reg_rtx (mode);
/* Convert VAL to the memory mode and satisfy the pattern's operand
   predicate, forcing a register if needed.  */
6039 if (GET_MODE (val) != VOIDmode && GET_MODE (val) != mode)
6040 val = convert_modes (mode, GET_MODE (val), val, 1);
6041 if (!insn_data[icode].operand[2].predicate (val, mode))
6042 val = force_reg (mode, val);
6044 insn = GEN_FCN (icode) (target, mem, val);
6052 /* Otherwise, use a compare-and-swap loop for the exchange.  */
6053 if (sync_compare_and_swap[mode] != CODE_FOR_nothing)
6055 if (!target || !register_operand (target, mode))
6056 target = gen_reg_rtx (mode);
6057 if (GET_MODE (val) != VOIDmode && GET_MODE (val) != mode)
6058 val = convert_modes (mode, GET_MODE (val), val, 1)
6059 if (expand_compare_and_swap_loop (mem, target, val, NULL_RTX))
6066 #include "gt-optabs.h"