1 /* Expand the basic unary and binary arithmetic operations, for GNU compiler.
2 Copyright (C) 1987, 1988, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
3 1999, 2000, 2001, 2002, 2003, 2004, 2005 Free Software Foundation, Inc.
5 This file is part of GCC.
7 GCC is free software; you can redistribute it and/or modify it under
8 the terms of the GNU General Public License as published by the Free
9 Software Foundation; either version 2, or (at your option) any later
12 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
13 WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING. If not, write to the Free
Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
02110-1301, USA.  */
25 #include "coretypes.h"
29 /* Include insn-config.h before expr.h so that HAVE_conditional_move
30 is properly defined. */
31 #include "insn-config.h"
45 #include "basic-block.h"
48 /* Each optab contains info on how this target machine
49 can perform a particular operation
50 for all sizes and kinds of operands.
52 The operation to be performed is often specified
53 by passing one of these optabs as an argument.
55 See expr.h for documentation of these optabs. */
57 optab optab_table[OTI_MAX];
59 rtx libfunc_table[LTI_MAX];
61 /* Tables of patterns for converting one mode to another. */
62 convert_optab convert_optab_table[CTI_MAX];
64 /* Contains the optab used for each rtx code. */
65 optab code_to_optab[NUM_RTX_CODE + 1];
67 /* Indexed by the rtx-code for a conditional (eg. EQ, LT,...)
68 gives the gen_function to make a branch to test that condition. */
70 rtxfun bcc_gen_fctn[NUM_RTX_CODE];
72 /* Indexed by the rtx-code for a conditional (eg. EQ, LT,...)
73 gives the insn code to make a store-condition insn
74 to test that condition. */
76 enum insn_code setcc_gen_code[NUM_RTX_CODE];
78 #ifdef HAVE_conditional_move
79 /* Indexed by the machine mode, gives the insn code to make a conditional
80 move insn. This is not indexed by the rtx-code like bcc_gen_fctn and
81 setcc_gen_code to cut down on the number of named patterns. Consider a day
82 when a lot more rtx codes are conditional (eg: for the ARM). */
84 enum insn_code movcc_gen_code[NUM_MACHINE_MODES];
87 /* Indexed by the machine mode, gives the insn code for vector conditional
90 enum insn_code vcond_gen_code[NUM_MACHINE_MODES];
91 enum insn_code vcondu_gen_code[NUM_MACHINE_MODES];
93 /* The insn generating function can not take an rtx_code argument.
94 TRAP_RTX is used as an rtx argument. Its code is replaced with
95 the code to be used in the trap insn and all other fields are ignored. */
96 static GTY(()) rtx trap_rtx;
98 static int add_equal_note (rtx, rtx, enum rtx_code, rtx, rtx);
99 static rtx widen_operand (rtx, enum machine_mode, enum machine_mode, int,
101 static void prepare_cmp_insn (rtx *, rtx *, enum rtx_code *, rtx,
102 enum machine_mode *, int *,
103 enum can_compare_purpose);
104 static enum insn_code can_fix_p (enum machine_mode, enum machine_mode, int,
106 static enum insn_code can_float_p (enum machine_mode, enum machine_mode, int);
107 static optab new_optab (void);
108 static convert_optab new_convert_optab (void);
109 static inline optab init_optab (enum rtx_code);
110 static inline optab init_optabv (enum rtx_code);
111 static inline convert_optab init_convert_optab (enum rtx_code);
112 static void init_libfuncs (optab, int, int, const char *, int);
113 static void init_integral_libfuncs (optab, const char *, int);
114 static void init_floating_libfuncs (optab, const char *, int);
115 static void init_interclass_conv_libfuncs (convert_optab, const char *,
116 enum mode_class, enum mode_class);
117 static void init_intraclass_conv_libfuncs (convert_optab, const char *,
118 enum mode_class, bool);
119 static void emit_cmp_and_jump_insn_1 (rtx, rtx, enum machine_mode,
120 enum rtx_code, int, rtx);
121 static void prepare_float_lib_cmp (rtx *, rtx *, enum rtx_code *,
122 enum machine_mode *, int *);
123 static rtx widen_clz (enum machine_mode, rtx, rtx);
124 static rtx expand_parity (enum machine_mode, rtx, rtx);
125 static enum rtx_code get_rtx_code (enum tree_code, bool);
126 static rtx vector_compare_rtx (tree, bool, enum insn_code);
/* Fallbacks for targets that do not provide a conditional-trap pattern:
   treat it as unavailable, and make any accidental call trap at compile
   time via gcc_unreachable.  */
#ifndef HAVE_conditional_trap
#define HAVE_conditional_trap 0
#define gen_conditional_trap(a,b) (gcc_unreachable (), NULL_RTX)
#endif
133 /* Add a REG_EQUAL note to the last insn in INSNS. TARGET is being set to
134 the result of operation CODE applied to OP0 (and OP1 if it is a binary
137 If the last insn does not set TARGET, don't do anything, but return 1.
139 If a previous insn sets TARGET and TARGET is one of OP0 or OP1,
140 don't add the REG_EQUAL note but return 0. Our caller can then try
141 again, ensuring that TARGET is not one of the operands. */
144 add_equal_note (rtx insns, rtx target, enum rtx_code code, rtx op0, rtx op1)
146 rtx last_insn, insn, set;
149 gcc_assert (insns && INSN_P (insns) && NEXT_INSN (insns));
151 if (GET_RTX_CLASS (code) != RTX_COMM_ARITH
152 && GET_RTX_CLASS (code) != RTX_BIN_ARITH
153 && GET_RTX_CLASS (code) != RTX_COMM_COMPARE
154 && GET_RTX_CLASS (code) != RTX_COMPARE
155 && GET_RTX_CLASS (code) != RTX_UNARY)
158 if (GET_CODE (target) == ZERO_EXTRACT)
161 for (last_insn = insns;
162 NEXT_INSN (last_insn) != NULL_RTX;
163 last_insn = NEXT_INSN (last_insn))
166 set = single_set (last_insn);
170 if (! rtx_equal_p (SET_DEST (set), target)
171 /* For a STRICT_LOW_PART, the REG_NOTE applies to what is inside it. */
172 && (GET_CODE (SET_DEST (set)) != STRICT_LOW_PART
173 || ! rtx_equal_p (XEXP (SET_DEST (set), 0), target)))
176 /* If TARGET is in OP0 or OP1, check if anything in SEQ sets TARGET
177 besides the last insn. */
178 if (reg_overlap_mentioned_p (target, op0)
179 || (op1 && reg_overlap_mentioned_p (target, op1)))
181 insn = PREV_INSN (last_insn);
182 while (insn != NULL_RTX)
184 if (reg_set_p (target, insn))
187 insn = PREV_INSN (insn);
191 if (GET_RTX_CLASS (code) == RTX_UNARY)
192 note = gen_rtx_fmt_e (code, GET_MODE (target), copy_rtx (op0));
194 note = gen_rtx_fmt_ee (code, GET_MODE (target), copy_rtx (op0), copy_rtx (op1));
196 set_unique_reg_note (last_insn, REG_EQUAL, note);
201 /* Widen OP to MODE and return the rtx for the widened operand. UNSIGNEDP
202 says whether OP is signed or unsigned. NO_EXTEND is nonzero if we need
203 not actually do a sign-extend or zero-extend, but can leave the
204 higher-order bits of the result rtx undefined, for example, in the case
205 of logical operations, but not right shifts. */
208 widen_operand (rtx op, enum machine_mode mode, enum machine_mode oldmode,
209 int unsignedp, int no_extend)
213 /* If we don't have to extend and this is a constant, return it. */
214 if (no_extend && GET_MODE (op) == VOIDmode)
217 /* If we must extend do so. If OP is a SUBREG for a promoted object, also
218 extend since it will be more efficient to do so unless the signedness of
219 a promoted object differs from our extension. */
221 || (GET_CODE (op) == SUBREG && SUBREG_PROMOTED_VAR_P (op)
222 && SUBREG_PROMOTED_UNSIGNED_P (op) == unsignedp))
223 return convert_modes (mode, oldmode, op, unsignedp);
225 /* If MODE is no wider than a single word, we return a paradoxical
227 if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
228 return gen_rtx_SUBREG (mode, force_reg (GET_MODE (op), op), 0);
230 /* Otherwise, get an object of MODE, clobber it, and set the low-order
233 result = gen_reg_rtx (mode);
234 emit_insn (gen_rtx_CLOBBER (VOIDmode, result));
235 emit_move_insn (gen_lowpart (GET_MODE (op), result), op);
239 /* Return the optab used for computing the operation given by
240 the tree code, CODE. This function is not always usable (for
241 example, it cannot give complete results for multiplication
242 or division) but probably ought to be relied on more widely
243 throughout the expander. */
245 optab_for_tree_code (enum tree_code code, tree type)
257 return one_cmpl_optab;
266 return TYPE_UNSIGNED (type) ? umod_optab : smod_optab;
274 return TYPE_UNSIGNED (type) ? udiv_optab : sdiv_optab;
280 return TYPE_UNSIGNED (type) ? lshr_optab : ashr_optab;
289 return TYPE_UNSIGNED (type) ? umax_optab : smax_optab;
292 return TYPE_UNSIGNED (type) ? umin_optab : smin_optab;
294 case REALIGN_LOAD_EXPR:
295 return vec_realign_load_optab;
298 return TYPE_UNSIGNED (type) ? reduc_umax_optab : reduc_smax_optab;
301 return TYPE_UNSIGNED (type) ? reduc_umin_optab : reduc_smin_optab;
303 case REDUC_PLUS_EXPR:
304 return TYPE_UNSIGNED (type) ? reduc_uplus_optab : reduc_splus_optab;
306 case VEC_LSHIFT_EXPR:
307 return vec_shl_optab;
309 case VEC_RSHIFT_EXPR:
310 return vec_shr_optab;
316 trapv = flag_trapv && INTEGRAL_TYPE_P (type) && !TYPE_UNSIGNED (type);
320 return trapv ? addv_optab : add_optab;
323 return trapv ? subv_optab : sub_optab;
326 return trapv ? smulv_optab : smul_optab;
329 return trapv ? negv_optab : neg_optab;
332 return trapv ? absv_optab : abs_optab;
340 /* Generate code to perform an operation specified by TERNARY_OPTAB
341 on operands OP0, OP1 and OP2, with result having machine-mode MODE.
343 UNSIGNEDP is for the case where we have to widen the operands
344 to perform the operation. It says to use zero-extension.
346 If TARGET is nonzero, the value
347 is generated there, if it is convenient to do so.
348 In all cases an rtx is returned for the locus of the value;
349 this may or may not be TARGET. */
352 expand_ternary_op (enum machine_mode mode, optab ternary_optab, rtx op0,
353 rtx op1, rtx op2, rtx target, int unsignedp)
355 int icode = (int) ternary_optab->handlers[(int) mode].insn_code;
356 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
357 enum machine_mode mode1 = insn_data[icode].operand[2].mode;
358 enum machine_mode mode2 = insn_data[icode].operand[3].mode;
361 rtx xop0 = op0, xop1 = op1, xop2 = op2;
363 gcc_assert (ternary_optab->handlers[(int) mode].insn_code
364 != CODE_FOR_nothing);
366 if (!target || !insn_data[icode].operand[0].predicate (target, mode))
367 temp = gen_reg_rtx (mode);
371 /* In case the insn wants input operands in modes different from
372 those of the actual operands, convert the operands. It would
373 seem that we don't need to convert CONST_INTs, but we do, so
374 that they're properly zero-extended, sign-extended or truncated
377 if (GET_MODE (op0) != mode0 && mode0 != VOIDmode)
378 xop0 = convert_modes (mode0,
379 GET_MODE (op0) != VOIDmode
384 if (GET_MODE (op1) != mode1 && mode1 != VOIDmode)
385 xop1 = convert_modes (mode1,
386 GET_MODE (op1) != VOIDmode
391 if (GET_MODE (op2) != mode2 && mode2 != VOIDmode)
392 xop2 = convert_modes (mode2,
393 GET_MODE (op2) != VOIDmode
398 /* Now, if insn's predicates don't allow our operands, put them into
401 if (!insn_data[icode].operand[1].predicate (xop0, mode0)
402 && mode0 != VOIDmode)
403 xop0 = copy_to_mode_reg (mode0, xop0);
405 if (!insn_data[icode].operand[2].predicate (xop1, mode1)
406 && mode1 != VOIDmode)
407 xop1 = copy_to_mode_reg (mode1, xop1);
409 if (!insn_data[icode].operand[3].predicate (xop2, mode2)
410 && mode2 != VOIDmode)
411 xop2 = copy_to_mode_reg (mode2, xop2);
413 pat = GEN_FCN (icode) (temp, xop0, xop1, xop2);
420 /* Like expand_binop, but return a constant rtx if the result can be
421 calculated at compile time. The arguments and return value are
422 otherwise the same as for expand_binop. */
425 simplify_expand_binop (enum machine_mode mode, optab binoptab,
426 rtx op0, rtx op1, rtx target, int unsignedp,
427 enum optab_methods methods)
429 if (CONSTANT_P (op0) && CONSTANT_P (op1))
430 return simplify_gen_binary (binoptab->code, mode, op0, op1);
432 return expand_binop (mode, binoptab, op0, op1, target, unsignedp, methods);
435 /* Like simplify_expand_binop, but always put the result in TARGET.
436 Return true if the expansion succeeded. */
439 force_expand_binop (enum machine_mode mode, optab binoptab,
440 rtx op0, rtx op1, rtx target, int unsignedp,
441 enum optab_methods methods)
443 rtx x = simplify_expand_binop (mode, binoptab, op0, op1,
444 target, unsignedp, methods);
448 emit_move_insn (target, x);
452 /* Generate insns for VEC_LSHIFT_EXPR, VEC_RSHIFT_EXPR. */
455 expand_vec_shift_expr (tree vec_shift_expr, rtx target)
457 enum insn_code icode;
458 rtx rtx_op1, rtx_op2;
459 enum machine_mode mode1;
460 enum machine_mode mode2;
461 enum machine_mode mode = TYPE_MODE (TREE_TYPE (vec_shift_expr));
462 tree vec_oprnd = TREE_OPERAND (vec_shift_expr, 0);
463 tree shift_oprnd = TREE_OPERAND (vec_shift_expr, 1);
467 switch (TREE_CODE (vec_shift_expr))
469 case VEC_RSHIFT_EXPR:
470 shift_optab = vec_shr_optab;
472 case VEC_LSHIFT_EXPR:
473 shift_optab = vec_shl_optab;
479 icode = (int) shift_optab->handlers[(int) mode].insn_code;
480 gcc_assert (icode != CODE_FOR_nothing);
482 mode1 = insn_data[icode].operand[1].mode;
483 mode2 = insn_data[icode].operand[2].mode;
485 rtx_op1 = expand_expr (vec_oprnd, NULL_RTX, VOIDmode, EXPAND_NORMAL);
486 if (!(*insn_data[icode].operand[1].predicate) (rtx_op1, mode1)
487 && mode1 != VOIDmode)
488 rtx_op1 = force_reg (mode1, rtx_op1);
490 rtx_op2 = expand_expr (shift_oprnd, NULL_RTX, VOIDmode, EXPAND_NORMAL);
491 if (!(*insn_data[icode].operand[2].predicate) (rtx_op2, mode2)
492 && mode2 != VOIDmode)
493 rtx_op2 = force_reg (mode2, rtx_op2);
496 || ! (*insn_data[icode].operand[0].predicate) (target, mode))
497 target = gen_reg_rtx (mode);
499 /* Emit instruction */
500 pat = GEN_FCN (icode) (target, rtx_op1, rtx_op2);
507 /* This subroutine of expand_doubleword_shift handles the cases in which
508 the effective shift value is >= BITS_PER_WORD. The arguments and return
509 value are the same as for the parent routine, except that SUPERWORD_OP1
510 is the shift count to use when shifting OUTOF_INPUT into INTO_TARGET.
511 INTO_TARGET may be null if the caller has decided to calculate it. */
514 expand_superword_shift (optab binoptab, rtx outof_input, rtx superword_op1,
515 rtx outof_target, rtx into_target,
516 int unsignedp, enum optab_methods methods)
518 if (into_target != 0)
519 if (!force_expand_binop (word_mode, binoptab, outof_input, superword_op1,
520 into_target, unsignedp, methods))
523 if (outof_target != 0)
525 /* For a signed right shift, we must fill OUTOF_TARGET with copies
526 of the sign bit, otherwise we must fill it with zeros. */
527 if (binoptab != ashr_optab)
528 emit_move_insn (outof_target, CONST0_RTX (word_mode));
530 if (!force_expand_binop (word_mode, binoptab,
531 outof_input, GEN_INT (BITS_PER_WORD - 1),
532 outof_target, unsignedp, methods))
538 /* This subroutine of expand_doubleword_shift handles the cases in which
539 the effective shift value is < BITS_PER_WORD. The arguments and return
540 value are the same as for the parent routine. */
543 expand_subword_shift (enum machine_mode op1_mode, optab binoptab,
544 rtx outof_input, rtx into_input, rtx op1,
545 rtx outof_target, rtx into_target,
546 int unsignedp, enum optab_methods methods,
547 unsigned HOST_WIDE_INT shift_mask)
549 optab reverse_unsigned_shift, unsigned_shift;
552 reverse_unsigned_shift = (binoptab == ashl_optab ? lshr_optab : ashl_optab);
553 unsigned_shift = (binoptab == ashl_optab ? ashl_optab : lshr_optab);
555 /* The low OP1 bits of INTO_TARGET come from the high bits of OUTOF_INPUT.
556 We therefore need to shift OUTOF_INPUT by (BITS_PER_WORD - OP1) bits in
557 the opposite direction to BINOPTAB. */
558 if (CONSTANT_P (op1) || shift_mask >= BITS_PER_WORD)
560 carries = outof_input;
561 tmp = immed_double_const (BITS_PER_WORD, 0, op1_mode);
562 tmp = simplify_expand_binop (op1_mode, sub_optab, tmp, op1,
567 /* We must avoid shifting by BITS_PER_WORD bits since that is either
568 the same as a zero shift (if shift_mask == BITS_PER_WORD - 1) or
569 has unknown behavior. Do a single shift first, then shift by the
570 remainder. It's OK to use ~OP1 as the remainder if shift counts
571 are truncated to the mode size. */
572 carries = expand_binop (word_mode, reverse_unsigned_shift,
573 outof_input, const1_rtx, 0, unsignedp, methods);
574 if (shift_mask == BITS_PER_WORD - 1)
576 tmp = immed_double_const (-1, -1, op1_mode);
577 tmp = simplify_expand_binop (op1_mode, xor_optab, op1, tmp,
582 tmp = immed_double_const (BITS_PER_WORD - 1, 0, op1_mode);
583 tmp = simplify_expand_binop (op1_mode, sub_optab, tmp, op1,
587 if (tmp == 0 || carries == 0)
589 carries = expand_binop (word_mode, reverse_unsigned_shift,
590 carries, tmp, 0, unsignedp, methods);
594 /* Shift INTO_INPUT logically by OP1. This is the last use of INTO_INPUT
595 so the result can go directly into INTO_TARGET if convenient. */
596 tmp = expand_binop (word_mode, unsigned_shift, into_input, op1,
597 into_target, unsignedp, methods);
601 /* Now OR in the bits carried over from OUTOF_INPUT. */
602 if (!force_expand_binop (word_mode, ior_optab, tmp, carries,
603 into_target, unsignedp, methods))
606 /* Use a standard word_mode shift for the out-of half. */
607 if (outof_target != 0)
608 if (!force_expand_binop (word_mode, binoptab, outof_input, op1,
609 outof_target, unsignedp, methods))
#ifdef HAVE_conditional_move
/* Try implementing expand_doubleword_shift using conditional moves.
   The shift is by < BITS_PER_WORD if (CMP_CODE CMP1 CMP2) is true,
   otherwise it is by >= BITS_PER_WORD.  SUBWORD_OP1 and SUPERWORD_OP1
   are the shift counts to use in the former and latter case.  All other
   arguments are the same as the parent routine.  */

static bool
expand_doubleword_shift_condmove (enum machine_mode op1_mode, optab binoptab,
				  enum rtx_code cmp_code, rtx cmp1, rtx cmp2,
				  rtx outof_input, rtx into_input,
				  rtx subword_op1, rtx superword_op1,
				  rtx outof_target, rtx into_target,
				  int unsignedp, enum optab_methods methods,
				  unsigned HOST_WIDE_INT shift_mask)
{
  rtx outof_superword, into_superword;

  /* Put the superword version of the output into OUTOF_SUPERWORD and
     INTO_SUPERWORD.  */
  outof_superword = outof_target != 0 ? gen_reg_rtx (word_mode) : 0;
  if (outof_target != 0 && subword_op1 == superword_op1)
    {
      /* The value INTO_TARGET >> SUBWORD_OP1, which we later store in
	 OUTOF_TARGET, is the same as the value of INTO_SUPERWORD.  */
      into_superword = outof_target;
      if (!expand_superword_shift (binoptab, outof_input, superword_op1,
				   outof_superword, 0, unsignedp, methods))
	return false;
    }
  else
    {
      into_superword = gen_reg_rtx (word_mode);
      if (!expand_superword_shift (binoptab, outof_input, superword_op1,
				   outof_superword, into_superword,
				   unsignedp, methods))
	return false;
    }

  /* Put the subword version directly in OUTOF_TARGET and INTO_TARGET.  */
  if (!expand_subword_shift (op1_mode, binoptab,
			     outof_input, into_input, subword_op1,
			     outof_target, into_target,
			     unsignedp, methods, shift_mask))
    return false;

  /* Select between them.  Do the INTO half first because INTO_SUPERWORD
     might be the current value of OUTOF_TARGET.  */
  if (!emit_conditional_move (into_target, cmp_code, cmp1, cmp2, op1_mode,
			      into_target, into_superword, word_mode, false))
    return false;

  if (outof_target != 0)
    if (!emit_conditional_move (outof_target, cmp_code, cmp1, cmp2, op1_mode,
				outof_target, outof_superword,
				word_mode, false))
      return false;

  return true;
}
#endif
678 /* Expand a doubleword shift (ashl, ashr or lshr) using word-mode shifts.
679 OUTOF_INPUT and INTO_INPUT are the two word-sized halves of the first
680 input operand; the shift moves bits in the direction OUTOF_INPUT->
681 INTO_TARGET. OUTOF_TARGET and INTO_TARGET are the equivalent words
682 of the target. OP1 is the shift count and OP1_MODE is its mode.
683 If OP1 is constant, it will have been truncated as appropriate
684 and is known to be nonzero.
686 If SHIFT_MASK is zero, the result of word shifts is undefined when the
687 shift count is outside the range [0, BITS_PER_WORD). This routine must
688 avoid generating such shifts for OP1s in the range [0, BITS_PER_WORD * 2).
690 If SHIFT_MASK is nonzero, all word-mode shift counts are effectively
691 masked by it and shifts in the range [BITS_PER_WORD, SHIFT_MASK) will
692 fill with zeros or sign bits as appropriate.
694 If SHIFT_MASK is BITS_PER_WORD - 1, this routine will synthesize
695 a doubleword shift whose equivalent mask is BITS_PER_WORD * 2 - 1.
696 Doing this preserves semantics required by SHIFT_COUNT_TRUNCATED.
697 In all other cases, shifts by values outside [0, BITS_PER_UNIT * 2)
700 BINOPTAB, UNSIGNEDP and METHODS are as for expand_binop. This function
701 may not use INTO_INPUT after modifying INTO_TARGET, and similarly for
702 OUTOF_INPUT and OUTOF_TARGET. OUTOF_TARGET can be null if the parent
703 function wants to calculate it itself.
705 Return true if the shift could be successfully synthesized. */
708 expand_doubleword_shift (enum machine_mode op1_mode, optab binoptab,
709 rtx outof_input, rtx into_input, rtx op1,
710 rtx outof_target, rtx into_target,
711 int unsignedp, enum optab_methods methods,
712 unsigned HOST_WIDE_INT shift_mask)
714 rtx superword_op1, tmp, cmp1, cmp2;
715 rtx subword_label, done_label;
716 enum rtx_code cmp_code;
718 /* See if word-mode shifts by BITS_PER_WORD...BITS_PER_WORD * 2 - 1 will
719 fill the result with sign or zero bits as appropriate. If so, the value
720 of OUTOF_TARGET will always be (SHIFT OUTOF_INPUT OP1). Recursively call
721 this routine to calculate INTO_TARGET (which depends on both OUTOF_INPUT
722 and INTO_INPUT), then emit code to set up OUTOF_TARGET.
724 This isn't worthwhile for constant shifts since the optimizers will
725 cope better with in-range shift counts. */
726 if (shift_mask >= BITS_PER_WORD
728 && !CONSTANT_P (op1))
730 if (!expand_doubleword_shift (op1_mode, binoptab,
731 outof_input, into_input, op1,
733 unsignedp, methods, shift_mask))
735 if (!force_expand_binop (word_mode, binoptab, outof_input, op1,
736 outof_target, unsignedp, methods))
741 /* Set CMP_CODE, CMP1 and CMP2 so that the rtx (CMP_CODE CMP1 CMP2)
742 is true when the effective shift value is less than BITS_PER_WORD.
743 Set SUPERWORD_OP1 to the shift count that should be used to shift
744 OUTOF_INPUT into INTO_TARGET when the condition is false. */
745 tmp = immed_double_const (BITS_PER_WORD, 0, op1_mode);
746 if (!CONSTANT_P (op1) && shift_mask == BITS_PER_WORD - 1)
748 /* Set CMP1 to OP1 & BITS_PER_WORD. The result is zero iff OP1
749 is a subword shift count. */
750 cmp1 = simplify_expand_binop (op1_mode, and_optab, op1, tmp,
752 cmp2 = CONST0_RTX (op1_mode);
758 /* Set CMP1 to OP1 - BITS_PER_WORD. */
759 cmp1 = simplify_expand_binop (op1_mode, sub_optab, op1, tmp,
761 cmp2 = CONST0_RTX (op1_mode);
763 superword_op1 = cmp1;
768 /* If we can compute the condition at compile time, pick the
769 appropriate subroutine. */
770 tmp = simplify_relational_operation (cmp_code, SImode, op1_mode, cmp1, cmp2);
771 if (tmp != 0 && GET_CODE (tmp) == CONST_INT)
773 if (tmp == const0_rtx)
774 return expand_superword_shift (binoptab, outof_input, superword_op1,
775 outof_target, into_target,
778 return expand_subword_shift (op1_mode, binoptab,
779 outof_input, into_input, op1,
780 outof_target, into_target,
781 unsignedp, methods, shift_mask);
784 #ifdef HAVE_conditional_move
785 /* Try using conditional moves to generate straight-line code. */
787 rtx start = get_last_insn ();
788 if (expand_doubleword_shift_condmove (op1_mode, binoptab,
789 cmp_code, cmp1, cmp2,
790 outof_input, into_input,
792 outof_target, into_target,
793 unsignedp, methods, shift_mask))
795 delete_insns_since (start);
799 /* As a last resort, use branches to select the correct alternative. */
800 subword_label = gen_label_rtx ();
801 done_label = gen_label_rtx ();
803 do_compare_rtx_and_jump (cmp1, cmp2, cmp_code, false, op1_mode,
804 0, 0, subword_label);
806 if (!expand_superword_shift (binoptab, outof_input, superword_op1,
807 outof_target, into_target,
811 emit_jump_insn (gen_jump (done_label));
813 emit_label (subword_label);
815 if (!expand_subword_shift (op1_mode, binoptab,
816 outof_input, into_input, op1,
817 outof_target, into_target,
818 unsignedp, methods, shift_mask))
821 emit_label (done_label);
825 /* Subroutine of expand_binop. Perform a double word multiplication of
826 operands OP0 and OP1 both of mode MODE, which is exactly twice as wide
827 as the target's word_mode. This function return NULL_RTX if anything
828 goes wrong, in which case it may have already emitted instructions
829 which need to be deleted.
831 If we want to multiply two two-word values and have normal and widening
832 multiplies of single-word values, we can do this with three smaller
833 multiplications. Note that we do not make a REG_NO_CONFLICT block here
834 because we are not operating on one word at a time.
836 The multiplication proceeds as follows:
837 _______________________
838 [__op0_high_|__op0_low__]
839 _______________________
840 * [__op1_high_|__op1_low__]
841 _______________________________________________
842 _______________________
843 (1) [__op0_low__*__op1_low__]
844 _______________________
845 (2a) [__op0_low__*__op1_high_]
846 _______________________
847 (2b) [__op0_high_*__op1_low__]
848 _______________________
849 (3) [__op0_high_*__op1_high_]
852 This gives a 4-word result. Since we are only interested in the
853 lower 2 words, partial result (3) and the upper words of (2a) and
854 (2b) don't need to be calculated. Hence (2a) and (2b) can be
855 calculated using non-widening multiplication.
857 (1), however, needs to be calculated with an unsigned widening
858 multiplication. If this operation is not directly supported we
859 try using a signed widening multiplication and adjust the result.
860 This adjustment works as follows:
862 If both operands are positive then no adjustment is needed.
864 If the operands have different signs, for example op0_low < 0 and
865 op1_low >= 0, the instruction treats the most significant bit of
866 op0_low as a sign bit instead of a bit with significance
867 2**(BITS_PER_WORD-1), i.e. the instruction multiplies op1_low
868 with 2**BITS_PER_WORD - op0_low, and two's complements the
869 result. Conclusion: We need to add op1_low * 2**BITS_PER_WORD to
872 Similarly, if both operands are negative, we need to add
873 (op0_low + op1_low) * 2**BITS_PER_WORD.
875 We use a trick to adjust quickly. We logically shift op0_low right
876 (op1_low) BITS_PER_WORD-1 steps to get 0 or 1, and add this to
877 op0_high (op1_high) before it is used to calculate 2b (2a). If no
878 logical shift exists, we do an arithmetic right shift and subtract
882 expand_doubleword_mult (enum machine_mode mode, rtx op0, rtx op1, rtx target,
883 bool umulp, enum optab_methods methods)
885 int low = (WORDS_BIG_ENDIAN ? 1 : 0);
886 int high = (WORDS_BIG_ENDIAN ? 0 : 1);
887 rtx wordm1 = umulp ? NULL_RTX : GEN_INT (BITS_PER_WORD - 1);
888 rtx product, adjust, product_high, temp;
890 rtx op0_high = operand_subword_force (op0, high, mode);
891 rtx op0_low = operand_subword_force (op0, low, mode);
892 rtx op1_high = operand_subword_force (op1, high, mode);
893 rtx op1_low = operand_subword_force (op1, low, mode);
895 /* If we're using an unsigned multiply to directly compute the product
896 of the low-order words of the operands and perform any required
897 adjustments of the operands, we begin by trying two more multiplications
898 and then computing the appropriate sum.
900 We have checked above that the required addition is provided.
901 Full-word addition will normally always succeed, especially if
902 it is provided at all, so we don't worry about its failure. The
903 multiplication may well fail, however, so we do handle that. */
907 /* ??? This could be done with emit_store_flag where available. */
908 temp = expand_binop (word_mode, lshr_optab, op0_low, wordm1,
909 NULL_RTX, 1, methods);
911 op0_high = expand_binop (word_mode, add_optab, op0_high, temp,
912 NULL_RTX, 0, OPTAB_DIRECT);
915 temp = expand_binop (word_mode, ashr_optab, op0_low, wordm1,
916 NULL_RTX, 0, methods);
919 op0_high = expand_binop (word_mode, sub_optab, op0_high, temp,
920 NULL_RTX, 0, OPTAB_DIRECT);
927 adjust = expand_binop (word_mode, smul_optab, op0_high, op1_low,
928 NULL_RTX, 0, OPTAB_DIRECT);
932 /* OP0_HIGH should now be dead. */
936 /* ??? This could be done with emit_store_flag where available. */
937 temp = expand_binop (word_mode, lshr_optab, op1_low, wordm1,
938 NULL_RTX, 1, methods);
940 op1_high = expand_binop (word_mode, add_optab, op1_high, temp,
941 NULL_RTX, 0, OPTAB_DIRECT);
944 temp = expand_binop (word_mode, ashr_optab, op1_low, wordm1,
945 NULL_RTX, 0, methods);
948 op1_high = expand_binop (word_mode, sub_optab, op1_high, temp,
949 NULL_RTX, 0, OPTAB_DIRECT);
956 temp = expand_binop (word_mode, smul_optab, op1_high, op0_low,
957 NULL_RTX, 0, OPTAB_DIRECT);
961 /* OP1_HIGH should now be dead. */
963 adjust = expand_binop (word_mode, add_optab, adjust, temp,
964 adjust, 0, OPTAB_DIRECT);
966 if (target && !REG_P (target))
970 product = expand_binop (mode, umul_widen_optab, op0_low, op1_low,
971 target, 1, OPTAB_DIRECT);
973 product = expand_binop (mode, smul_widen_optab, op0_low, op1_low,
974 target, 1, OPTAB_DIRECT);
979 product_high = operand_subword (product, high, 1, mode);
980 adjust = expand_binop (word_mode, add_optab, product_high, adjust,
981 REG_P (product_high) ? product_high : adjust,
983 emit_move_insn (product_high, adjust);
987 /* Wrapper around expand_binop which takes an rtx code to specify
988 the operation to perform, not an optab pointer. All other
989 arguments are the same. */
991 expand_simple_binop (enum machine_mode mode, enum rtx_code code, rtx op0,
992 rtx op1, rtx target, int unsignedp,
993 enum optab_methods methods)
995 optab binop = code_to_optab[(int) code];
998 return expand_binop (mode, binop, op0, op1, target, unsignedp, methods);
1001 /* Generate code to perform an operation specified by BINOPTAB
1002 on operands OP0 and OP1, with result having machine-mode MODE.
1004 UNSIGNEDP is for the case where we have to widen the operands
1005 to perform the operation. It says to use zero-extension.
1007 If TARGET is nonzero, the value
1008 is generated there, if it is convenient to do so.
1009 In all cases an rtx is returned for the locus of the value;
1010 this may or may not be TARGET. */
/* NOTE(review): this extract is missing interleaved source lines (braces,
   some declarations), so the comments below annotate only the visible code.
   Overall strategy: try increasingly expensive expansions in order --
   direct insn, widening multiply, open-coding in a wider mode, word-at-a-time
   synthesis, library call, and finally recursive widening.  */
1013 expand_binop (enum machine_mode mode, optab binoptab, rtx op0, rtx op1,
1014 rtx target, int unsignedp, enum optab_methods methods)
/* Recursive word-by-word calls must not themselves fall back to a library
   call; downgrade LIB/LIB_WIDEN to plain WIDEN for them.  */
1016 enum optab_methods next_methods
1017 = (methods == OPTAB_LIB || methods == OPTAB_LIB_WIDEN
1018 ? OPTAB_WIDEN : methods);
1019 enum mode_class class;
1020 enum machine_mode wider_mode;
1022 int commutative_op = 0;
/* Shift-style operations: their count operand (OP1) is exempt from the
   "force expensive constants into a register" rule below.  */
1023 int shift_op = (binoptab->code == ASHIFT
1024 || binoptab->code == ASHIFTRT
1025 || binoptab->code == LSHIFTRT
1026 || binoptab->code == ROTATE
1027 || binoptab->code == ROTATERT);
/* Remember the insn stream position on entry so total failure can
   discard everything emitted here.  */
1028 rtx entry_last = get_last_insn ();
1031 class = GET_MODE_CLASS (mode);
1035 /* Load duplicate non-volatile operands once. */
1036 if (rtx_equal_p (op0, op1) && ! volatile_refs_p (op0))
1038 op0 = force_not_mem (op0);
1043 op0 = force_not_mem (op0);
1044 op1 = force_not_mem (op1);
1048 /* If subtracting an integer constant, convert this into an addition of
1049 the negated constant. */
1051 if (binoptab == sub_optab && GET_CODE (op1) == CONST_INT)
1053 op1 = negate_rtx (mode, op1);
1054 binoptab = add_optab;
1057 /* If we are inside an appropriately-short loop and we are optimizing,
1058 force expensive constants into a register. */
1059 if (CONSTANT_P (op0) && optimize
1060 && rtx_cost (op0, binoptab->code) > COSTS_N_INSNS (1))
1062 if (GET_MODE (op0) != VOIDmode)
1063 op0 = convert_modes (mode, VOIDmode, op0, unsignedp);
1064 op0 = force_reg (mode, op0);
1067 if (CONSTANT_P (op1) && optimize
1068 && ! shift_op && rtx_cost (op1, binoptab->code) > COSTS_N_INSNS (1))
1070 if (GET_MODE (op1) != VOIDmode)
1071 op1 = convert_modes (mode, VOIDmode, op1, unsignedp);
1072 op1 = force_reg (mode, op1);
1075 /* Record where to delete back to if we backtrack. */
1076 last = get_last_insn ();
1078 /* If operation is commutative,
1079 try to make the first operand a register.
1080 Even better, try to make it the same as the target.
1081 Also try to make the last operand a constant. */
1082 if (GET_RTX_CLASS (binoptab->code) == RTX_COMM_ARITH
1083 || binoptab == smul_widen_optab
1084 || binoptab == umul_widen_optab
1085 || binoptab == smul_highpart_optab
1086 || binoptab == umul_highpart_optab)
1090 if (((target == 0 || REG_P (target))
1094 : rtx_equal_p (op1, target))
1095 || GET_CODE (op0) == CONST_INT)
1103 /* If we can do it with a three-operand insn, do so. */
1105 if (methods != OPTAB_MUST_WIDEN
1106 && binoptab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
1108 int icode = (int) binoptab->handlers[(int) mode].insn_code;
/* Operand modes required by the named insn pattern.  */
1109 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
1110 enum machine_mode mode1 = insn_data[icode].operand[2].mode;
1112 rtx xop0 = op0, xop1 = op1;
1117 temp = gen_reg_rtx (mode);
1119 /* If it is a commutative operator and the modes would match
1120 if we would swap the operands, we can save the conversions. */
1123 if (GET_MODE (op0) != mode0 && GET_MODE (op1) != mode1
1124 && GET_MODE (op0) == mode1 && GET_MODE (op1) == mode0)
1128 tmp = op0; op0 = op1; op1 = tmp;
1129 tmp = xop0; xop0 = xop1; xop1 = tmp;
1133 /* In case the insn wants input operands in modes different from
1134 those of the actual operands, convert the operands. It would
1135 seem that we don't need to convert CONST_INTs, but we do, so
1136 that they're properly zero-extended, sign-extended or truncated
1139 if (GET_MODE (op0) != mode0 && mode0 != VOIDmode)
1140 xop0 = convert_modes (mode0,
1141 GET_MODE (op0) != VOIDmode
1146 if (GET_MODE (op1) != mode1 && mode1 != VOIDmode)
1147 xop1 = convert_modes (mode1,
1148 GET_MODE (op1) != VOIDmode
1153 /* Now, if insn's predicates don't allow our operands, put them into
1156 if (!insn_data[icode].operand[1].predicate (xop0, mode0)
1157 && mode0 != VOIDmode)
1158 xop0 = copy_to_mode_reg (mode0, xop0);
1160 if (!insn_data[icode].operand[2].predicate (xop1, mode1)
1161 && mode1 != VOIDmode)
1162 xop1 = copy_to_mode_reg (mode1, xop1);
1164 if (!insn_data[icode].operand[0].predicate (temp, mode))
1165 temp = gen_reg_rtx (mode);
/* Emit the actual instruction (or sequence) for this optab entry.  */
1167 pat = GEN_FCN (icode) (temp, xop0, xop1);
1170 /* If PAT is composed of more than one insn, try to add an appropriate
1171 REG_EQUAL note to it. If we can't because TEMP conflicts with an
1172 operand, call ourselves again, this time without a target. */
1173 if (INSN_P (pat) && NEXT_INSN (pat) != NULL_RTX
1174 && ! add_equal_note (pat, temp, binoptab->code, xop0, xop1))
1176 delete_insns_since (last);
1177 return expand_binop (mode, binoptab, op0, op1, NULL_RTX,
1178 unsignedp, methods);
1185 delete_insns_since (last);
1188 /* If this is a multiply, see if we can do a widening operation that
1189 takes operands of this mode and makes a wider mode. */
1191 if (binoptab == smul_optab && GET_MODE_WIDER_MODE (mode) != VOIDmode
1192 && (((unsignedp ? umul_widen_optab : smul_widen_optab)
1193 ->handlers[(int) GET_MODE_WIDER_MODE (mode)].insn_code)
1194 != CODE_FOR_nothing))
1196 temp = expand_binop (GET_MODE_WIDER_MODE (mode),
1197 unsignedp ? umul_widen_optab : smul_widen_optab,
1198 op0, op1, NULL_RTX, unsignedp, OPTAB_DIRECT);
/* Truncate the widened product back to MODE.  */
1202 if (GET_MODE_CLASS (mode) == MODE_INT)
1203 return gen_lowpart (mode, temp);
1205 return convert_to_mode (mode, temp, unsignedp);
1209 /* Look for a wider mode of the same class for which we think we
1210 can open-code the operation. Check for a widening multiply at the
1211 wider mode as well. */
1213 if ((class == MODE_INT || class == MODE_FLOAT || class == MODE_COMPLEX_FLOAT)
1214 && methods != OPTAB_DIRECT && methods != OPTAB_LIB)
1215 for (wider_mode = GET_MODE_WIDER_MODE (mode); wider_mode != VOIDmode;
1216 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
1218 if (binoptab->handlers[(int) wider_mode].insn_code != CODE_FOR_nothing
1219 || (binoptab == smul_optab
1220 && GET_MODE_WIDER_MODE (wider_mode) != VOIDmode
1221 && (((unsignedp ? umul_widen_optab : smul_widen_optab)
1222 ->handlers[(int) GET_MODE_WIDER_MODE (wider_mode)].insn_code)
1223 != CODE_FOR_nothing)))
1225 rtx xop0 = op0, xop1 = op1;
1228 /* For certain integer operations, we need not actually extend
1229 the narrow operands, as long as we will truncate
1230 the results to the same narrowness. */
1232 if ((binoptab == ior_optab || binoptab == and_optab
1233 || binoptab == xor_optab
1234 || binoptab == add_optab || binoptab == sub_optab
1235 || binoptab == smul_optab || binoptab == ashl_optab)
1236 && class == MODE_INT)
1239 xop0 = widen_operand (xop0, wider_mode, mode, unsignedp, no_extend);
1241 /* The second operand of a shift must always be extended. */
1242 xop1 = widen_operand (xop1, wider_mode, mode, unsignedp,
1243 no_extend && binoptab != ashl_optab);
1245 temp = expand_binop (wider_mode, binoptab, xop0, xop1, NULL_RTX,
1246 unsignedp, OPTAB_DIRECT);
/* Non-integer classes need a real conversion back; integers can
   simply take the low part of the wider result.  */
1249 if (class != MODE_INT)
1252 target = gen_reg_rtx (mode);
1253 convert_move (target, temp, 0);
1257 return gen_lowpart (mode, temp);
1260 delete_insns_since (last);
1264 /* These can be done a word at a time. */
1265 if ((binoptab == and_optab || binoptab == ior_optab || binoptab == xor_optab)
1266 && class == MODE_INT
1267 && GET_MODE_SIZE (mode) > UNITS_PER_WORD
1268 && binoptab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing)
1274 /* If TARGET is the same as one of the operands, the REG_EQUAL note
1275 won't be accurate, so use a new target. */
1276 if (target == 0 || target == op0 || target == op1)
1277 target = gen_reg_rtx (mode);
1281 /* Do the actual arithmetic. */
1282 for (i = 0; i < GET_MODE_BITSIZE (mode) / BITS_PER_WORD; i++)
1284 rtx target_piece = operand_subword (target, i, 1, mode);
/* Bitwise ops have no carries, so each word is independent.  */
1285 rtx x = expand_binop (word_mode, binoptab,
1286 operand_subword_force (op0, i, mode),
1287 operand_subword_force (op1, i, mode),
1288 target_piece, unsignedp, next_methods);
1293 if (target_piece != x)
1294 emit_move_insn (target_piece, x);
1297 insns = get_insns ();
/* Only succeed if every word was expanded (loop ran to completion).  */
1300 if (i == GET_MODE_BITSIZE (mode) / BITS_PER_WORD)
1302 if (binoptab->code != UNKNOWN)
1304 = gen_rtx_fmt_ee (binoptab->code, mode,
1305 copy_rtx (op0), copy_rtx (op1));
1309 emit_no_conflict_block (insns, target, op0, op1, equiv_value);
1314 /* Synthesize double word shifts from single word shifts. */
1315 if ((binoptab == lshr_optab || binoptab == ashl_optab
1316 || binoptab == ashr_optab)
1317 && class == MODE_INT
1318 && (GET_CODE (op1) == CONST_INT || !optimize_size)
1319 && GET_MODE_SIZE (mode) == 2 * UNITS_PER_WORD
1320 && binoptab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing
1321 && ashl_optab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing
1322 && lshr_optab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing)
1324 unsigned HOST_WIDE_INT shift_mask, double_shift_mask;
1325 enum machine_mode op1_mode;
1327 double_shift_mask = targetm.shift_truncation_mask (mode);
1328 shift_mask = targetm.shift_truncation_mask (word_mode);
1329 op1_mode = GET_MODE (op1) != VOIDmode ? GET_MODE (op1) : word_mode;
1331 /* Apply the truncation to constant shifts. */
1332 if (double_shift_mask > 0 && GET_CODE (op1) == CONST_INT)
1333 op1 = GEN_INT (INTVAL (op1) & double_shift_mask);
1335 if (op1 == CONST0_RTX (op1_mode))
1338 /* Make sure that this is a combination that expand_doubleword_shift
1339 can handle. See the comments there for details. */
1340 if (double_shift_mask == 0
1341 || (shift_mask == BITS_PER_WORD - 1
1342 && double_shift_mask == BITS_PER_WORD * 2 - 1))
1344 rtx insns, equiv_value;
1345 rtx into_target, outof_target;
1346 rtx into_input, outof_input;
1347 int left_shift, outof_word;
1349 /* If TARGET is the same as one of the operands, the REG_EQUAL note
1350 won't be accurate, so use a new target. */
1351 if (target == 0 || target == op0 || target == op1)
1352 target = gen_reg_rtx (mode);
1356 /* OUTOF_* is the word we are shifting bits away from, and
1357 INTO_* is the word that we are shifting bits towards, thus
1358 they differ depending on the direction of the shift and
1359 WORDS_BIG_ENDIAN. */
1361 left_shift = binoptab == ashl_optab;
1362 outof_word = left_shift ^ ! WORDS_BIG_ENDIAN;
1364 outof_target = operand_subword (target, outof_word, 1, mode);
1365 into_target = operand_subword (target, 1 - outof_word, 1, mode);
1367 outof_input = operand_subword_force (op0, outof_word, mode);
1368 into_input = operand_subword_force (op0, 1 - outof_word, mode);
1370 if (expand_doubleword_shift (op1_mode, binoptab,
1371 outof_input, into_input, op1,
1372 outof_target, into_target,
1373 unsignedp, methods, shift_mask))
1375 insns = get_insns ();
1378 equiv_value = gen_rtx_fmt_ee (binoptab->code, mode, op0, op1);
1379 emit_no_conflict_block (insns, target, op0, op1, equiv_value);
1386 /* Synthesize double word rotates from single word shifts. */
1387 if ((binoptab == rotl_optab || binoptab == rotr_optab)
1388 && class == MODE_INT
1389 && GET_CODE (op1) == CONST_INT
1390 && GET_MODE_SIZE (mode) == 2 * UNITS_PER_WORD
1391 && ashl_optab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing
1392 && lshr_optab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing)
1394 rtx insns, equiv_value;
1395 rtx into_target, outof_target;
1396 rtx into_input, outof_input;
1398 int shift_count, left_shift, outof_word;
1400 /* If TARGET is the same as one of the operands, the REG_EQUAL note
1401 won't be accurate, so use a new target. Do this also if target is not
1402 a REG, first because having a register instead may open optimization
1403 opportunities, and second because if target and op0 happen to be MEMs
1404 designating the same location, we would risk clobbering it too early
1405 in the code sequence we generate below. */
1406 if (target == 0 || target == op0 || target == op1 || ! REG_P (target))
1407 target = gen_reg_rtx (mode);
1411 shift_count = INTVAL (op1);
1413 /* OUTOF_* is the word we are shifting bits away from, and
1414 INTO_* is the word that we are shifting bits towards, thus
1415 they differ depending on the direction of the shift and
1416 WORDS_BIG_ENDIAN. */
1418 left_shift = (binoptab == rotl_optab);
1419 outof_word = left_shift ^ ! WORDS_BIG_ENDIAN;
1421 outof_target = operand_subword (target, outof_word, 1, mode);
1422 into_target = operand_subword (target, 1 - outof_word, 1, mode);
1424 outof_input = operand_subword_force (op0, outof_word, mode);
1425 into_input = operand_subword_force (op0, 1 - outof_word, mode);
1427 if (shift_count == BITS_PER_WORD)
1429 /* This is just a word swap. */
1430 emit_move_insn (outof_target, into_input);
1431 emit_move_insn (into_target, outof_input);
1436 rtx into_temp1, into_temp2, outof_temp1, outof_temp2;
1437 rtx first_shift_count, second_shift_count;
1438 optab reverse_unsigned_shift, unsigned_shift;
/* Each result word is the IOR of two shifted pieces; pick the shift
   directions based on rotate direction and which half dominates.  */
1440 reverse_unsigned_shift = (left_shift ^ (shift_count < BITS_PER_WORD)
1441 ? lshr_optab : ashl_optab);
1443 unsigned_shift = (left_shift ^ (shift_count < BITS_PER_WORD)
1444 ? ashl_optab : lshr_optab);
1446 if (shift_count > BITS_PER_WORD)
1448 first_shift_count = GEN_INT (shift_count - BITS_PER_WORD);
1449 second_shift_count = GEN_INT (2 * BITS_PER_WORD - shift_count);
1453 first_shift_count = GEN_INT (BITS_PER_WORD - shift_count);
1454 second_shift_count = GEN_INT (shift_count);
1457 into_temp1 = expand_binop (word_mode, unsigned_shift,
1458 outof_input, first_shift_count,
1459 NULL_RTX, unsignedp, next_methods);
1460 into_temp2 = expand_binop (word_mode, reverse_unsigned_shift,
1461 into_input, second_shift_count,
1462 NULL_RTX, unsignedp, next_methods);
1464 if (into_temp1 != 0 && into_temp2 != 0)
1465 inter = expand_binop (word_mode, ior_optab, into_temp1, into_temp2,
1466 into_target, unsignedp, next_methods);
1470 if (inter != 0 && inter != into_target)
1471 emit_move_insn (into_target, inter);
1473 outof_temp1 = expand_binop (word_mode, unsigned_shift,
1474 into_input, first_shift_count,
1475 NULL_RTX, unsignedp, next_methods);
1476 outof_temp2 = expand_binop (word_mode, reverse_unsigned_shift,
1477 outof_input, second_shift_count,
1478 NULL_RTX, unsignedp, next_methods);
1480 if (inter != 0 && outof_temp1 != 0 && outof_temp2 != 0)
1481 inter = expand_binop (word_mode, ior_optab,
1482 outof_temp1, outof_temp2,
1483 outof_target, unsignedp, next_methods);
1485 if (inter != 0 && inter != outof_target)
1486 emit_move_insn (outof_target, inter);
1489 insns = get_insns ();
1494 if (binoptab->code != UNKNOWN)
1495 equiv_value = gen_rtx_fmt_ee (binoptab->code, mode, op0, op1);
1499 /* We can't make this a no conflict block if this is a word swap,
1500 because the word swap case fails if the input and output values
1501 are in the same register. */
1502 if (shift_count != BITS_PER_WORD)
1503 emit_no_conflict_block (insns, target, op0, op1, equiv_value);
1512 /* These can be done a word at a time by propagating carries. */
1513 if ((binoptab == add_optab || binoptab == sub_optab)
1514 && class == MODE_INT
1515 && GET_MODE_SIZE (mode) >= 2 * UNITS_PER_WORD
1516 && binoptab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing)
1519 optab otheroptab = binoptab == add_optab ? sub_optab : add_optab;
1520 const unsigned int nwords = GET_MODE_BITSIZE (mode) / BITS_PER_WORD;
1521 rtx carry_in = NULL_RTX, carry_out = NULL_RTX;
1522 rtx xop0, xop1, xtarget;
1524 /* We can handle either a 1 or -1 value for the carry. If STORE_FLAG
1525 value is one of those, use it. Otherwise, use 1 since it is the
1526 one easiest to get. */
1527 #if STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1
1528 int normalizep = STORE_FLAG_VALUE;
1533 /* Prepare the operands. */
1534 xop0 = force_reg (mode, op0);
1535 xop1 = force_reg (mode, op1);
1537 xtarget = gen_reg_rtx (mode);
1539 if (target == 0 || !REG_P (target))
1542 /* Indicate for flow that the entire target reg is being set. */
1544 emit_insn (gen_rtx_CLOBBER (VOIDmode, xtarget));
1546 /* Do the actual arithmetic. */
/* Process words from least to most significant so the carry
   propagates in the right direction.  */
1547 for (i = 0; i < nwords; i++)
1549 int index = (WORDS_BIG_ENDIAN ? nwords - i - 1 : i);
1550 rtx target_piece = operand_subword (xtarget, index, 1, mode);
1551 rtx op0_piece = operand_subword_force (xop0, index, mode);
1552 rtx op1_piece = operand_subword_force (xop1, index, mode);
1555 /* Main add/subtract of the input operands. */
1556 x = expand_binop (word_mode, binoptab,
1557 op0_piece, op1_piece,
1558 target_piece, unsignedp, next_methods);
1564 /* Store carry from main add/subtract. */
1565 carry_out = gen_reg_rtx (word_mode);
1566 carry_out = emit_store_flag_force (carry_out,
1567 (binoptab == add_optab
1570 word_mode, 1, normalizep);
1577 /* Add/subtract previous carry to main result. */
1578 newx = expand_binop (word_mode,
1579 normalizep == 1 ? binoptab : otheroptab,
1581 NULL_RTX, 1, next_methods);
1585 /* Get out carry from adding/subtracting carry in. */
1586 rtx carry_tmp = gen_reg_rtx (word_mode);
1587 carry_tmp = emit_store_flag_force (carry_tmp,
1588 (binoptab == add_optab
1591 word_mode, 1, normalizep);
1593 /* Logical-ior the two possible carries together.  */
1594 carry_out = expand_binop (word_mode, ior_optab,
1595 carry_out, carry_tmp,
1596 carry_out, 0, next_methods);
1600 emit_move_insn (target_piece, newx);
1604 if (x != target_piece)
1605 emit_move_insn (target_piece, x);
1608 carry_in = carry_out;
/* Success only if all words were processed.  */
1611 if (i == GET_MODE_BITSIZE (mode) / (unsigned) BITS_PER_WORD)
1613 if (mov_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing
1614 || ! rtx_equal_p (target, xtarget))
1616 rtx temp = emit_move_insn (target, xtarget);
1618 set_unique_reg_note (temp,
1620 gen_rtx_fmt_ee (binoptab->code, mode,
1631 delete_insns_since (last);
1634 /* Attempt to synthesize double word multiplies using a sequence of word
1635 mode multiplications. We first attempt to generate a sequence using a
1636 more efficient unsigned widening multiply, and if that fails we then
1637 try using a signed widening multiply. */
1639 if (binoptab == smul_optab
1640 && class == MODE_INT
1641 && GET_MODE_SIZE (mode) == 2 * UNITS_PER_WORD
1642 && smul_optab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing
1643 && add_optab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing)
1645 rtx product = NULL_RTX;
1647 if (umul_widen_optab->handlers[(int) mode].insn_code
1648 != CODE_FOR_nothing)
1650 product = expand_doubleword_mult (mode, op0, op1, target,
1653 delete_insns_since (last);
1656 if (product == NULL_RTX
1657 && smul_widen_optab->handlers[(int) mode].insn_code
1658 != CODE_FOR_nothing)
1660 product = expand_doubleword_mult (mode, op0, op1, target,
1663 delete_insns_since (last);
1666 if (product != NULL_RTX)
1668 if (mov_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
1670 temp = emit_move_insn (target ? target : product, product);
1671 set_unique_reg_note (temp,
1673 gen_rtx_fmt_ee (MULT, mode,
1681 /* It can't be open-coded in this mode.
1682 Use a library call if one is available and caller says that's ok. */
1684 if (binoptab->handlers[(int) mode].libfunc
1685 && (methods == OPTAB_LIB || methods == OPTAB_LIB_WIDEN))
1689 enum machine_mode op1_mode = mode;
1696 op1_mode = word_mode;
1697 /* Specify unsigned here,
1698 since negative shift counts are meaningless. */
1699 op1x = convert_to_mode (word_mode, op1, 1);
1702 if (GET_MODE (op0) != VOIDmode
1703 && GET_MODE (op0) != mode)
1704 op0 = convert_to_mode (mode, op0, unsignedp);
1706 /* Pass 1 for NO_QUEUE so we don't lose any increments
1707 if the libcall is cse'd or moved. */
1708 value = emit_library_call_value (binoptab->handlers[(int) mode].libfunc,
1709 NULL_RTX, LCT_CONST, mode, 2,
1710 op0, mode, op1x, op1_mode);
1712 insns = get_insns ();
1715 target = gen_reg_rtx (mode);
1716 emit_libcall_block (insns, target, value,
1717 gen_rtx_fmt_ee (binoptab->code, mode, op0, op1));
1722 delete_insns_since (last);
1724 /* It can't be done in this mode. Can we do it in a wider mode? */
1726 if (! (methods == OPTAB_WIDEN || methods == OPTAB_LIB_WIDEN
1727 || methods == OPTAB_MUST_WIDEN))
1729 /* Caller says, don't even try. */
1730 delete_insns_since (entry_last);
1734 /* Compute the value of METHODS to pass to recursive calls.
1735 Don't allow widening to be tried recursively. */
1737 methods = (methods == OPTAB_LIB_WIDEN ? OPTAB_LIB : OPTAB_DIRECT);
1739 /* Look for a wider mode of the same class for which it appears we can do
1742 if (class == MODE_INT || class == MODE_FLOAT || class == MODE_COMPLEX_FLOAT)
1744 for (wider_mode = GET_MODE_WIDER_MODE (mode); wider_mode != VOIDmode;
1745 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
1747 if ((binoptab->handlers[(int) wider_mode].insn_code
1748 != CODE_FOR_nothing)
1749 || (methods == OPTAB_LIB
1750 && binoptab->handlers[(int) wider_mode].libfunc))
1752 rtx xop0 = op0, xop1 = op1;
1755 /* For certain integer operations, we need not actually extend
1756 the narrow operands, as long as we will truncate
1757 the results to the same narrowness. */
1759 if ((binoptab == ior_optab || binoptab == and_optab
1760 || binoptab == xor_optab
1761 || binoptab == add_optab || binoptab == sub_optab
1762 || binoptab == smul_optab || binoptab == ashl_optab)
1763 && class == MODE_INT)
1766 xop0 = widen_operand (xop0, wider_mode, mode,
1767 unsignedp, no_extend);
1769 /* The second operand of a shift must always be extended. */
1770 xop1 = widen_operand (xop1, wider_mode, mode, unsignedp,
1771 no_extend && binoptab != ashl_optab);
1773 temp = expand_binop (wider_mode, binoptab, xop0, xop1, NULL_RTX,
1774 unsignedp, methods);
1777 if (class != MODE_INT)
1780 target = gen_reg_rtx (mode);
1781 convert_move (target, temp, 0);
1785 return gen_lowpart (mode, temp);
1788 delete_insns_since (last);
/* All strategies failed: discard everything emitted here.  */
1793 delete_insns_since (entry_last);
1797 /* Expand a binary operator which has both signed and unsigned forms.
1798 UOPTAB is the optab for unsigned operations, and SOPTAB is for
1801 If we widen unsigned operands, we may use a signed wider operation instead
1802 of an unsigned wider operation, since the result would be the same. */
/* NOTE(review): extract is missing interleaved lines; comments annotate
   only the visible code.  Try the signedness-appropriate optab directly,
   then widening (signed first, as its result equals the unsigned one),
   then library calls, escalating per METHODS.  */
1805 sign_expand_binop (enum machine_mode mode, optab uoptab, optab soptab,
1806 rtx op0, rtx op1, rtx target, int unsignedp,
1807 enum optab_methods methods)
1810 optab direct_optab = unsignedp ? uoptab : soptab;
1811 struct optab wide_soptab;
1813 /* Do it without widening, if possible. */
1814 temp = expand_binop (mode, direct_optab, op0, op1, target,
1815 unsignedp, OPTAB_DIRECT);
1816 if (temp || methods == OPTAB_DIRECT)
1819 /* Try widening to a signed int. Make a fake signed optab that
1820 hides any signed insn for direct use. */
/* Copy SOPTAB and blank out its entry for MODE so expand_binop is
   forced to consider only wider modes.  */
1821 wide_soptab = *soptab;
1822 wide_soptab.handlers[(int) mode].insn_code = CODE_FOR_nothing;
1823 wide_soptab.handlers[(int) mode].libfunc = 0;
1825 temp = expand_binop (mode, &wide_soptab, op0, op1, target,
1826 unsignedp, OPTAB_WIDEN);
1828 /* For unsigned operands, try widening to an unsigned int. */
1829 if (temp == 0 && unsignedp)
1830 temp = expand_binop (mode, uoptab, op0, op1, target,
1831 unsignedp, OPTAB_WIDEN);
1832 if (temp || methods == OPTAB_WIDEN)
1835 /* Use the right width lib call if that exists. */
1836 temp = expand_binop (mode, direct_optab, op0, op1, target, unsignedp, OPTAB_LIB);
1837 if (temp || methods == OPTAB_LIB)
1840 /* Must widen and use a lib call, use either signed or unsigned. */
1841 temp = expand_binop (mode, &wide_soptab, op0, op1, target,
1842 unsignedp, methods);
/* Last resort: the unsigned optab with the caller's METHODS.  */
1846 return expand_binop (mode, uoptab, op0, op1, target,
1847 unsignedp, methods);
1851 /* Generate code to perform an operation specified by UNOPPTAB
1852 on operand OP0, with two results to TARG0 and TARG1.
1853 We assume that the order of the operands for the instruction
1854 is TARG0, TARG1, OP0.
1856 Either TARG0 or TARG1 may be zero, but what that means is that
1857 the result is not actually wanted. We will generate it into
1858 a dummy pseudo-reg and discard it. They may not both be zero.
1860 Returns 1 if this operation can be performed; 0 if not. */
/* NOTE(review): extract is missing interleaved lines; comments annotate
   only the visible code.  Expand a two-result unary operation, trying the
   requested mode first and then wider modes with converted operands.  */
1863 expand_twoval_unop (optab unoptab, rtx op0, rtx targ0, rtx targ1,
/* MODE is taken from whichever target the caller supplied.  */
1866 enum machine_mode mode = GET_MODE (targ0 ? targ0 : targ1);
1867 enum mode_class class;
1868 enum machine_mode wider_mode;
1869 rtx entry_last = get_last_insn ();
1872 class = GET_MODE_CLASS (mode);
1875 op0 = force_not_mem (op0);
/* Supply dummy pseudos for any result the caller does not want.  */
1878 targ0 = gen_reg_rtx (mode);
1880 targ1 = gen_reg_rtx (mode);
1882 /* Record where to go back to if we fail. */
1883 last = get_last_insn ();
1885 if (unoptab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
1887 int icode = (int) unoptab->handlers[(int) mode].insn_code;
/* Operand 2 of the pattern is the input; 0 and 1 are the results.  */
1888 enum machine_mode mode0 = insn_data[icode].operand[2].mode;
1892 if (GET_MODE (xop0) != VOIDmode
1893 && GET_MODE (xop0) != mode0)
1894 xop0 = convert_to_mode (mode0, xop0, unsignedp);
1896 /* Now, if insn doesn't accept these operands, put them into pseudos. */
1897 if (!insn_data[icode].operand[2].predicate (xop0, mode0))
1898 xop0 = copy_to_mode_reg (mode0, xop0);
1900 /* We could handle this, but we should always be called with a pseudo
1901 for our targets and all insns should take them as outputs. */
1902 gcc_assert (insn_data[icode].operand[0].predicate (targ0, mode));
1903 gcc_assert (insn_data[icode].operand[1].predicate (targ1, mode));
1905 pat = GEN_FCN (icode) (targ0, targ1, xop0);
1912 delete_insns_since (last);
1915 /* It can't be done in this mode. Can we do it in a wider mode? */
1917 if (class == MODE_INT || class == MODE_FLOAT || class == MODE_COMPLEX_FLOAT)
1919 for (wider_mode = GET_MODE_WIDER_MODE (mode); wider_mode != VOIDmode;
1920 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
1922 if (unoptab->handlers[(int) wider_mode].insn_code
1923 != CODE_FOR_nothing)
/* Recurse at the wider mode, then narrow both results back.  */
1925 rtx t0 = gen_reg_rtx (wider_mode);
1926 rtx t1 = gen_reg_rtx (wider_mode);
1927 rtx cop0 = convert_modes (wider_mode, mode, op0, unsignedp);
1929 if (expand_twoval_unop (unoptab, cop0, t0, t1, unsignedp))
1931 convert_move (targ0, t0, unsignedp);
1932 convert_move (targ1, t1, unsignedp);
1936 delete_insns_since (last);
/* Total failure: remove every insn emitted since entry.  */
1941 delete_insns_since (entry_last);
1945 /* Generate code to perform an operation specified by BINOPTAB
1946 on operands OP0 and OP1, with two results to TARG1 and TARG2.
1947 We assume that the order of the operands for the instruction
1948 is TARG0, OP0, OP1, TARG1, which would fit a pattern like
1949 [(set TARG0 (operate OP0 OP1)) (set TARG1 (operate ...))].
1951 Either TARG0 or TARG1 may be zero, but what that means is that
1952 the result is not actually wanted. We will generate it into
1953 a dummy pseudo-reg and discard it. They may not both be zero.
1955 Returns 1 if this operation can be performed; 0 if not. */
/* NOTE(review): extract is missing interleaved lines; comments annotate
   only the visible code.  Expand a two-result binary operation (pattern
   order TARG0, OP0, OP1, TARG1), falling back to wider modes.  */
1958 expand_twoval_binop (optab binoptab, rtx op0, rtx op1, rtx targ0, rtx targ1,
1961 enum machine_mode mode = GET_MODE (targ0 ? targ0 : targ1);
1962 enum mode_class class;
1963 enum machine_mode wider_mode;
1964 rtx entry_last = get_last_insn ();
1967 class = GET_MODE_CLASS (mode);
1971 op0 = force_not_mem (op0);
1972 op1 = force_not_mem (op1);
1975 /* If we are inside an appropriately-short loop and we are optimizing,
1976 force expensive constants into a register. */
1977 if (CONSTANT_P (op0) && optimize
1978 && rtx_cost (op0, binoptab->code) > COSTS_N_INSNS (1))
1979 op0 = force_reg (mode, op0);
1981 if (CONSTANT_P (op1) && optimize
1982 && rtx_cost (op1, binoptab->code) > COSTS_N_INSNS (1))
1983 op1 = force_reg (mode, op1);
/* Supply dummy pseudos for any result the caller does not want.  */
1986 targ0 = gen_reg_rtx (mode);
1988 targ1 = gen_reg_rtx (mode);
1990 /* Record where to go back to if we fail. */
1991 last = get_last_insn ();
1993 if (binoptab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
1995 int icode = (int) binoptab->handlers[(int) mode].insn_code;
/* Operands 1 and 2 of the pattern are the inputs.  */
1996 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
1997 enum machine_mode mode1 = insn_data[icode].operand[2].mode;
1999 rtx xop0 = op0, xop1 = op1;
2001 /* In case the insn wants input operands in modes different from
2002 those of the actual operands, convert the operands. It would
2003 seem that we don't need to convert CONST_INTs, but we do, so
2004 that they're properly zero-extended, sign-extended or truncated
2007 if (GET_MODE (op0) != mode0 && mode0 != VOIDmode)
2008 xop0 = convert_modes (mode0,
2009 GET_MODE (op0) != VOIDmode
2014 if (GET_MODE (op1) != mode1 && mode1 != VOIDmode)
2015 xop1 = convert_modes (mode1,
2016 GET_MODE (op1) != VOIDmode
2021 /* Now, if insn doesn't accept these operands, put them into pseudos. */
2022 if (!insn_data[icode].operand[1].predicate (xop0, mode0))
2023 xop0 = copy_to_mode_reg (mode0, xop0);
2025 if (!insn_data[icode].operand[2].predicate (xop1, mode1))
2026 xop1 = copy_to_mode_reg (mode1, xop1);
2028 /* We could handle this, but we should always be called with a pseudo
2029 for our targets and all insns should take them as outputs. */
2030 gcc_assert (insn_data[icode].operand[0].predicate (targ0, mode));
2031 gcc_assert (insn_data[icode].operand[3].predicate (targ1, mode));
2033 pat = GEN_FCN (icode) (targ0, xop0, xop1, targ1);
2040 delete_insns_since (last);
2043 /* It can't be done in this mode. Can we do it in a wider mode? */
2045 if (class == MODE_INT || class == MODE_FLOAT || class == MODE_COMPLEX_FLOAT)
2047 for (wider_mode = GET_MODE_WIDER_MODE (mode); wider_mode != VOIDmode;
2048 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
2050 if (binoptab->handlers[(int) wider_mode].insn_code
2051 != CODE_FOR_nothing)
/* Recurse at the wider mode, then narrow both results back.  */
2053 rtx t0 = gen_reg_rtx (wider_mode);
2054 rtx t1 = gen_reg_rtx (wider_mode);
2055 rtx cop0 = convert_modes (wider_mode, mode, op0, unsignedp);
2056 rtx cop1 = convert_modes (wider_mode, mode, op1, unsignedp);
2058 if (expand_twoval_binop (binoptab, cop0, cop1,
2061 convert_move (targ0, t0, unsignedp);
2062 convert_move (targ1, t1, unsignedp);
2066 delete_insns_since (last);
/* Total failure: remove every insn emitted since entry.  */
2071 delete_insns_since (entry_last);
2075 /* Expand the two-valued library call indicated by BINOPTAB, but
2076 preserve only one of the values. If TARG0 is non-NULL, the first
2077 value is placed into TARG0; otherwise the second value is placed
2078 into TARG1. Exactly one of TARG0 and TARG1 must be non-NULL. The
2079 value stored into TARG0 or TARG1 is equivalent to (CODE OP0 OP1).
2080 This routine assumes that the value returned by the library call is
2081 as if the return value was of an integral mode twice as wide as the
2082 mode of OP0. Returns 1 if the call was successful. */
/* NOTE(review): extract is missing interleaved lines; comments annotate
   only the visible code.  */
2085 expand_twoval_binop_libfunc (optab binoptab, rtx op0, rtx op1,
2086 rtx targ0, rtx targ1, enum rtx_code code)
2088 enum machine_mode mode;
2089 enum machine_mode libval_mode;
2093 /* Exactly one of TARG0 or TARG1 should be non-NULL. */
2094 gcc_assert (!targ0 != !targ1);
2096 mode = GET_MODE (op0);
/* Bail out if there is no library function for this mode.  */
2097 if (!binoptab->handlers[(int) mode].libfunc)
2100 /* The value returned by the library function will have twice as
2101 many bits as the nominal MODE. */
2102 libval_mode = smallest_mode_for_size (2 * GET_MODE_BITSIZE (mode),
2105 libval = emit_library_call_value (binoptab->handlers[(int) mode].libfunc,
2106 NULL_RTX, LCT_CONST,
2110 /* Get the part of VAL containing the value that we want. */
/* TARG0 wants the low half (offset 0); TARG1 the high half.  */
2111 libval = simplify_gen_subreg (mode, libval, libval_mode,
2112 targ0 ? 0 : GET_MODE_SIZE (mode));
2113 insns = get_insns ();
2115 /* Move the result into the desired location.  */
2116 emit_libcall_block (insns, targ0 ? targ0 : targ1, libval,
2117 gen_rtx_fmt_ee (code, mode, op0, op1));
2123 /* Wrapper around expand_unop which takes an rtx code to specify
2124 the operation to perform, not an optab pointer. All other
2125 arguments are the same. */
2127 expand_simple_unop (enum machine_mode mode, enum rtx_code code, rtx op0,
2128 rtx target, int unsignedp)
/* Map the rtx CODE to its optab and delegate to expand_unop.  */
2130 optab unop = code_to_optab[(int) code];
2133 return expand_unop (mode, unop, op0, target, unsignedp);
2139 (clz:wide (zero_extend:wide x)) - ((width wide) - (width narrow)). */
/* NOTE(review): extract is missing interleaved lines; comments annotate
   only the visible code.  */
2141 widen_clz (enum machine_mode mode, rtx op0, rtx target)
2143 enum mode_class class = GET_MODE_CLASS (mode);
2144 if (class == MODE_INT || class == MODE_FLOAT || class == MODE_COMPLEX_FLOAT)
2146 enum machine_mode wider_mode;
/* Scan successively wider modes for one with a clz insn.  */
2147 for (wider_mode = GET_MODE_WIDER_MODE (mode); wider_mode != VOIDmode;
2148 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
2150 if (clz_optab->handlers[(int) wider_mode].insn_code
2151 != CODE_FOR_nothing)
2153 rtx xop0, temp, last;
2155 last = get_last_insn ();
2158 target = gen_reg_rtx (mode);
/* Zero-extend OP0 (unsignedp == true), count leading zeros in the
   wide mode, then subtract the extra zeros the extension added.  */
2159 xop0 = widen_operand (op0, wider_mode, mode, true, false);
2160 temp = expand_unop (wider_mode, clz_optab, xop0, NULL_RTX, true);
2162 temp = expand_binop (wider_mode, sub_optab, temp,
2163 GEN_INT (GET_MODE_BITSIZE (wider_mode)
2164 - GET_MODE_BITSIZE (mode)),
2165 target, true, OPTAB_DIRECT);
/* On failure, discard any insns emitted since LAST.  */
2167 delete_insns_since (last);
2176 /* Try calculating (parity x) as (and (popcount x) 1), where
2177 popcount can also be done in a wider mode. */
/* NOTE(review): extract is missing interleaved lines; comments annotate
   only the visible code.  */
2179 expand_parity (enum machine_mode mode, rtx op0, rtx target)
2181 enum mode_class class = GET_MODE_CLASS (mode);
2182 if (class == MODE_INT || class == MODE_FLOAT || class == MODE_COMPLEX_FLOAT)
2184 enum machine_mode wider_mode;
/* Unlike widen_clz, MODE itself is included in the scan, since
   parity can be derived from popcount at the same width.  */
2185 for (wider_mode = mode; wider_mode != VOIDmode;
2186 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
2188 if (popcount_optab->handlers[(int) wider_mode].insn_code
2189 != CODE_FOR_nothing)
2191 rtx xop0, temp, last;
2193 last = get_last_insn ();
2196 target = gen_reg_rtx (mode);
/* parity(x) == popcount(x) & 1; zero-extension adds no set bits,
   so the wider popcount is equivalent.  */
2197 xop0 = widen_operand (op0, wider_mode, mode, true, false);
2198 temp = expand_unop (wider_mode, popcount_optab, xop0, NULL_RTX,
2201 temp = expand_binop (wider_mode, and_optab, temp, const1_rtx,
2202 target, true, OPTAB_DIRECT);
/* On failure, discard any insns emitted since LAST.  */
2204 delete_insns_since (last);
2213 /* Extract the OMODE lowpart from VAL, which has IMODE. Under certain
2214 conditions, VAL may already be a SUBREG against which we cannot generate
2215 a further SUBREG. In this case, we expect forcing the value into a
2216 register will work around the situation. */
2219 lowpart_subreg_maybe_copy (enum machine_mode omode, rtx val,
2220 enum machine_mode imode)
/* First try a direct lowpart SUBREG of VAL.  */
2223 ret = lowpart_subreg (omode, val, imode);
/* If that failed (VAL may already be a SUBREG that cannot be nested),
   force VAL into a fresh register and retry; this must succeed.  */
2226 val = force_reg (imode, val);
2227 ret = lowpart_subreg (omode, val, imode);
2228 gcc_assert (ret != NULL);
2233 /* Expand a floating point absolute value or negation operation via a
2234 logical operation on the sign bit. */
/* NOTE(review): extract is missing interleaved lines; comments annotate
   only the visible code.  ABS clears the sign bit with AND; NEG flips it
   with XOR.  */
2237 expand_absneg_bit (enum rtx_code code, enum machine_mode mode,
2238 rtx op0, rtx target)
2240 const struct real_format *fmt;
2241 int bitpos, word, nwords, i;
2242 enum machine_mode imode;
/* HI/LO together form the double-word sign-bit mask constant.  */
2243 HOST_WIDE_INT hi, lo;
2246 /* The format has to have a simple sign bit. */
2247 fmt = REAL_MODE_FORMAT (mode);
2251 bitpos = fmt->signbit_rw;
2255 /* Don't create negative zeros if the format doesn't support them. */
2256 if (code == NEG && !fmt->has_signed_zero)
/* Single-word case: operate on the whole value in integer mode IMODE.  */
2259 if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
2261 imode = int_mode_for_mode (mode);
2262 if (imode == BLKmode)
/* Multi-word case: locate the word and bit holding the sign.  */
2271 if (FLOAT_WORDS_BIG_ENDIAN)
2272 word = (GET_MODE_BITSIZE (mode) - bitpos) / BITS_PER_WORD;
2274 word = bitpos / BITS_PER_WORD;
2275 bitpos = bitpos % BITS_PER_WORD;
2276 nwords = (GET_MODE_BITSIZE (mode) + BITS_PER_WORD - 1) / BITS_PER_WORD;
/* Build the sign-bit mask in the appropriate half of (HI, LO).  */
2279 if (bitpos < HOST_BITS_PER_WIDE_INT)
2282 lo = (HOST_WIDE_INT) 1 << bitpos;
2286 hi = (HOST_WIDE_INT) 1 << (bitpos - HOST_BITS_PER_WIDE_INT);
2292 if (target == 0 || target == op0)
2293 target = gen_reg_rtx (mode);
/* Word-by-word: only the sign-carrying word is operated on; the
   other words are copied through unchanged.  */
2299 for (i = 0; i < nwords; ++i)
2301 rtx targ_piece = operand_subword (target, i, 1, mode);
2302 rtx op0_piece = operand_subword_force (op0, i, mode);
2306 temp = expand_binop (imode, code == ABS ? and_optab : xor_optab,
2308 immed_double_const (lo, hi, imode),
2309 targ_piece, 1, OPTAB_LIB_WIDEN);
2310 if (temp != targ_piece)
2311 emit_move_insn (targ_piece, temp);
2314 emit_move_insn (targ_piece, op0_piece);
2317 insns = get_insns ();
2320 temp = gen_rtx_fmt_e (code, mode, copy_rtx (op0));
2321 emit_no_conflict_block (insns, target, op0, NULL_RTX, temp);
/* Single-word path: one logical op on the integer view of OP0.  */
2325 temp = expand_binop (imode, code == ABS ? and_optab : xor_optab,
2326 gen_lowpart (imode, op0),
2327 immed_double_const (lo, hi, imode),
2328 gen_lowpart (imode, target), 1, OPTAB_LIB_WIDEN);
2329 target = lowpart_subreg_maybe_copy (mode, temp, imode);
/* Record the ABS/NEG equivalence for later passes.  */
2331 set_unique_reg_note (get_last_insn (), REG_EQUAL,
2332 gen_rtx_fmt_e (code, mode, copy_rtx (op0)));
2338 /* Generate code to perform an operation specified by UNOPTAB
2339 on operand OP0, with result having machine-mode MODE.
2341 UNSIGNEDP is for the case where we have to widen the operands
2342 to perform the operation. It says to use zero-extension.
2344 If TARGET is nonzero, the value
2345 is generated there, if it is convenient to do so.
2346 In all cases an rtx is returned for the locus of the value;
2347 this may or may not be TARGET.
   NOTE(review): many interior lines of this function are elided in
   this extract; comments annotate only the visible statements.  */
2350 expand_unop (enum machine_mode mode, optab unoptab, rtx op0, rtx target,
2353 enum mode_class class;
2354 enum machine_mode wider_mode;
/* Checkpoint so failed expansion attempts can be deleted.  */
2356 rtx last = get_last_insn ();
2359 class = GET_MODE_CLASS (mode);
2362 op0 = force_not_mem (op0);
/* Strategy 1: a direct insn pattern for this exact mode.  */
2364 if (unoptab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
2366 int icode = (int) unoptab->handlers[(int) mode].insn_code;
2367 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
2373 temp = gen_reg_rtx (mode);
2375 if (GET_MODE (xop0) != VOIDmode
2376 && GET_MODE (xop0) != mode0)
2377 xop0 = convert_to_mode (mode0, xop0, unsignedp);
2379 /* Now, if insn doesn't accept our operand, put it into a pseudo.  */
2381 if (!insn_data[icode].operand[1].predicate (xop0, mode0))
2382 xop0 = copy_to_mode_reg (mode0, xop0);
2384 if (!insn_data[icode].operand[0].predicate (temp, mode))
2385 temp = gen_reg_rtx (mode);
2387 pat = GEN_FCN (icode) (temp, xop0);
/* If adding the REG_EQUAL note fails (multi-insn expansion), re-expand
   without a target rather than emit an unannotated sequence.  */
2390 if (INSN_P (pat) && NEXT_INSN (pat) != NULL_RTX
2391 && ! add_equal_note (pat, temp, unoptab->code, xop0, NULL_RTX))
2393 delete_insns_since (last);
2394 return expand_unop (mode, unoptab, op0, NULL_RTX, unsignedp);
2402 delete_insns_since (last);
2405 /* It can't be done in this mode. Can we open-code it in a wider mode?  */
2407 /* Widening clz needs special treatment.  */
2408 if (unoptab == clz_optab)
2410 temp = widen_clz (mode, op0, target);
/* Strategy 2: open-code in a wider mode that has an insn pattern,
   then truncate the result back to MODE.  */
2417 if (class == MODE_INT || class == MODE_FLOAT || class == MODE_COMPLEX_FLOAT)
2418 for (wider_mode = GET_MODE_WIDER_MODE (mode); wider_mode != VOIDmode;
2419 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
2421 if (unoptab->handlers[(int) wider_mode].insn_code != CODE_FOR_nothing)
2425 /* For certain operations, we need not actually extend
2426 the narrow operand, as long as we will truncate the
2427 results to the same narrowness.  */
2429 xop0 = widen_operand (xop0, wider_mode, mode, unsignedp,
2430 (unoptab == neg_optab
2431 || unoptab == one_cmpl_optab)
2432 && class == MODE_INT);
2434 temp = expand_unop (wider_mode, unoptab, xop0, NULL_RTX,
/* Non-integer classes need a real conversion; integers can use a
   cheap lowpart subreg.  */
2439 if (class != MODE_INT)
2442 target = gen_reg_rtx (mode);
2443 convert_move (target, temp, 0);
2447 return gen_lowpart (mode, temp);
2450 delete_insns_since (last);
2454 /* These can be done a word at a time.  */
/* Strategy 3: one's complement of a multi-word integer, word by word.  */
2455 if (unoptab == one_cmpl_optab
2456 && class == MODE_INT
2457 && GET_MODE_SIZE (mode) > UNITS_PER_WORD
2458 && unoptab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing)
2463 if (target == 0 || target == op0)
2464 target = gen_reg_rtx (mode);
2468 /* Do the actual arithmetic.  */
2469 for (i = 0; i < GET_MODE_BITSIZE (mode) / BITS_PER_WORD; i++)
2471 rtx target_piece = operand_subword (target, i, 1, mode);
2472 rtx x = expand_unop (word_mode, unoptab,
2473 operand_subword_force (op0, i, mode),
2474 target_piece, unsignedp);
2476 if (target_piece != x)
2477 emit_move_insn (target_piece, x);
2480 insns = get_insns ();
2483 emit_no_conflict_block (insns, target, op0, NULL_RTX,
2484 gen_rtx_fmt_e (unoptab->code, mode,
/* Strategy 4: special fallbacks for negation.  */
2489 if (unoptab->code == NEG)
2491 /* Try negating floating point values by flipping the sign bit.  */
2492 if (class == MODE_FLOAT)
2494 temp = expand_absneg_bit (NEG, mode, op0, target);
2499 /* If there is no negation pattern, and we have no negative zero,
2500 try subtracting from zero.  */
2501 if (!HONOR_SIGNED_ZEROS (mode))
2503 temp = expand_binop (mode, (unoptab == negv_optab
2504 ? subv_optab : sub_optab),
2505 CONST0_RTX (mode), op0, target,
2506 unsignedp, OPTAB_DIRECT);
2512 /* Try calculating parity (x) as popcount (x) % 2.  */
2513 if (unoptab == parity_optab)
2515 temp = expand_parity (mode, op0, target);
2521 /* Now try a library call in this mode.  */
2522 if (unoptab->handlers[(int) mode].libfunc)
2526 enum machine_mode outmode = mode;
2528 /* All of these functions return small values. Thus we choose to
2529 have them return something that isn't a double-word.  */
2530 if (unoptab == ffs_optab || unoptab == clz_optab || unoptab == ctz_optab
2531 || unoptab == popcount_optab || unoptab == parity_optab)
2533 = GET_MODE (hard_libcall_value (TYPE_MODE (integer_type_node)));
2537 /* Pass 1 for NO_QUEUE so we don't lose any increments
2538 if the libcall is cse'd or moved.  */
2539 value = emit_library_call_value (unoptab->handlers[(int) mode].libfunc,
2540 NULL_RTX, LCT_CONST, outmode,
2542 insns = get_insns ();
2545 target = gen_reg_rtx (outmode);
/* Wrap the call in a libcall block with a (CODE op0) equivalence.  */
2546 emit_libcall_block (insns, target, value,
2547 gen_rtx_fmt_e (unoptab->code, mode, op0));
2552 /* It can't be done in this mode. Can we do it in a wider mode?  */
/* Strategy 5: like strategy 2, but also accepting a wider-mode
   library function, not only an insn pattern.  */
2554 if (class == MODE_INT || class == MODE_FLOAT || class == MODE_COMPLEX_FLOAT)
2556 for (wider_mode = GET_MODE_WIDER_MODE (mode); wider_mode != VOIDmode;
2557 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
2559 if ((unoptab->handlers[(int) wider_mode].insn_code
2560 != CODE_FOR_nothing)
2561 || unoptab->handlers[(int) wider_mode].libfunc)
2565 /* For certain operations, we need not actually extend
2566 the narrow operand, as long as we will truncate the
2567 results to the same narrowness.  */
2569 xop0 = widen_operand (xop0, wider_mode, mode, unsignedp,
2570 (unoptab == neg_optab
2571 || unoptab == one_cmpl_optab)
2572 && class == MODE_INT);
2574 temp = expand_unop (wider_mode, unoptab, xop0, NULL_RTX,
2577 /* If we are generating clz using wider mode, adjust the
   result by the width difference of the two modes.  */
2579 if (unoptab == clz_optab && temp != 0)
2580 temp = expand_binop (wider_mode, sub_optab, temp,
2581 GEN_INT (GET_MODE_BITSIZE (wider_mode)
2582 - GET_MODE_BITSIZE (mode)),
2583 target, true, OPTAB_DIRECT);
2587 if (class != MODE_INT)
2590 target = gen_reg_rtx (mode);
2591 convert_move (target, temp, 0);
2595 return gen_lowpart (mode, temp);
2598 delete_insns_since (last);
2603 /* One final attempt at implementing negation via subtraction,
2604 this time allowing widening of the operand.  */
2605 if (unoptab->code == NEG && !HONOR_SIGNED_ZEROS (mode))
2608 temp = expand_binop (mode,
2609 unoptab == negv_optab ? subv_optab : sub_optab,
2610 CONST0_RTX (mode), op0,
2611 target, unsignedp, OPTAB_LIB_WIDEN);
2619 /* Emit code to compute the absolute value of OP0, with result to
2620 TARGET if convenient. (TARGET may be 0.) The return value says
2621 where the result actually is to be found.
2623 MODE is the mode of the operand; the mode of the result is
2624 different but can be deduced from MODE.
   Unlike expand_abs, this never emits a conditional jump.
   NOTE(review): interior lines are elided in this extract.  */
2629 expand_abs_nojump (enum machine_mode mode, rtx op0, rtx target,
2630 int result_unsignedp)
2635 result_unsignedp = 1;
2637 /* First try to do it with a special abs instruction.  */
2638 temp = expand_unop (mode, result_unsignedp ? abs_optab : absv_optab,
2643 /* For floating point modes, try clearing the sign bit.  */
2644 if (GET_MODE_CLASS (mode) == MODE_FLOAT)
2646 temp = expand_absneg_bit (ABS, mode, op0, target);
2651 /* If we have a MAX insn, we can do this as MAX (x, -x).  */
2652 if (smax_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing
2653 && !HONOR_SIGNED_ZEROS (mode))
2655 rtx last = get_last_insn ();
2657 temp = expand_unop (mode, neg_optab, op0, NULL_RTX, 0);
2659 temp = expand_binop (mode, smax_optab, op0, temp, target, 0,
/* MAX attempt failed: delete the partial sequence.  */
2665 delete_insns_since (last);
2668 /* If this machine has expensive jumps, we can do integer absolute
2669 value of X as (((signed) x >> (W-1)) ^ x) - ((signed) x >> (W-1)),
2670 where W is the width of MODE.  */
2672 if (GET_MODE_CLASS (mode) == MODE_INT && BRANCH_COST >= 2)
/* EXTENDED is 0 for non-negative X, all-ones for negative X.  */
2674 rtx extended = expand_shift (RSHIFT_EXPR, mode, op0,
2675 size_int (GET_MODE_BITSIZE (mode) - 1),
2678 temp = expand_binop (mode, xor_optab, extended, op0, target, 0,
2681 temp = expand_binop (mode, result_unsignedp ? sub_optab : subv_optab,
2682 temp, extended, target, 0, OPTAB_LIB_WIDEN);
/* Like expand_abs_nojump, but falls back to a compare-and-branch
   sequence (negate if OP0 < 0) when no jump-free expansion succeeds.
   SAFE is nonzero if TARGET may safely be reused as scratch.
   NOTE(review): interior lines are elided in this extract.  */
2692 expand_abs (enum machine_mode mode, rtx op0, rtx target,
2693 int result_unsignedp, int safe)
2698 result_unsignedp = 1;
2700 temp = expand_abs_nojump (mode, op0, target, result_unsignedp);
2704 /* If that does not win, use conditional jump and negate.  */
2706 /* It is safe to use the target if it is the same
2707 as the source if this is also a pseudo register */
2708 if (op0 == target && REG_P (op0)
2709 && REGNO (op0) >= FIRST_PSEUDO_REGISTER)
/* OP1 is the label jumped to when OP0 is already non-negative.  */
2712 op1 = gen_label_rtx ();
2713 if (target == 0 || ! safe
2714 || GET_MODE (target) != mode
2715 || (MEM_P (target) && MEM_VOLATILE_P (target))
2717 && REGNO (target) < FIRST_PSEUDO_REGISTER))
2718 target = gen_reg_rtx (mode);
2720 emit_move_insn (target, op0);
2723 /* If this mode is an integer too wide to compare properly,
2724 compare word by word. Rely on CSE to optimize constant cases.  */
2725 if (GET_MODE_CLASS (mode) == MODE_INT
2726 && ! can_compare_p (GE, mode, ccp_jump))
2727 do_jump_by_parts_greater_rtx (mode, 0, target, const0_rtx,
2730 do_compare_rtx_and_jump (target, CONST0_RTX (mode), GE, 0, mode,
2731 NULL_RTX, NULL_RTX, op1);
/* Fall-through path: OP0 was negative, so negate it.  */
2733 op0 = expand_unop (mode, result_unsignedp ? neg_optab : negv_optab,
2736 emit_move_insn (target, op0);
2742 /* A subroutine of expand_copysign, perform the copysign operation using the
2743 abs and neg primitives advertised to exist on the target. The assumption
2744 is that we have a split register file, and leaving op0 in fp registers,
2745 and not playing with subregs so much, will help the register allocator.
   BITPOS is the sign-bit position; OP0_IS_ABS is true when OP0's sign
   bit is known clear.  NOTE(review): interior lines are elided.  */
2748 expand_copysign_absneg (enum machine_mode mode, rtx op0, rtx op1, rtx target,
2749 int bitpos, bool op0_is_abs)
2751 enum machine_mode imode;
2752 HOST_WIDE_INT hi, lo;
/* First make OP0 non-negative unless it already is.  */
2761 op0 = expand_unop (mode, abs_optab, op0, target, 0);
2768 if (target == NULL_RTX)
2769 target = copy_to_reg (op0);
2771 emit_move_insn (target, op0);
/* Extract the sign of OP1 as an integer for the test below.  */
2774 if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
2776 imode = int_mode_for_mode (mode);
2777 if (imode == BLKmode)
2779 op1 = gen_lowpart (imode, op1);
/* Multi-word: pick the word containing the sign bit.  */
2784 if (FLOAT_WORDS_BIG_ENDIAN)
2785 word = (GET_MODE_BITSIZE (mode) - bitpos) / BITS_PER_WORD;
2787 word = bitpos / BITS_PER_WORD;
2788 bitpos = bitpos % BITS_PER_WORD;
2789 op1 = operand_subword_force (op1, word, mode);
/* Build the sign-bit mask { hi, lo }.  */
2792 if (bitpos < HOST_BITS_PER_WIDE_INT)
2795 lo = (HOST_WIDE_INT) 1 << bitpos;
2799 hi = (HOST_WIDE_INT) 1 << (bitpos - HOST_BITS_PER_WIDE_INT);
2803 op1 = expand_binop (imode, and_optab, op1,
2804 immed_double_const (lo, hi, imode),
2805 NULL_RTX, 1, OPTAB_LIB_WIDEN);
/* Skip the negation when OP1's sign bit is clear.  */
2807 label = gen_label_rtx ();
2808 emit_cmp_and_jump_insns (op1, const0_rtx, EQ, NULL_RTX, imode, 1, label);
/* Negate at compile time when OP0 is constant, at run time otherwise.  */
2810 if (GET_CODE (op0) == CONST_DOUBLE)
2811 op0 = simplify_unary_operation (NEG, mode, op0, mode);
2813 op0 = expand_unop (mode, neg_optab, op0, target, 0);
2815 emit_move_insn (target, op0);
2823 /* A subroutine of expand_copysign, perform the entire copysign operation
2824 with integer bitmasks. BITPOS is the position of the sign bit; OP0_IS_ABS
2825 is true if op0 is known to have its sign bit clear.
   Computes (op0 & ~signmask) | (op1 & signmask) on the integer view.
   NOTE(review): interior lines are elided in this extract.  */
2828 expand_copysign_bit (enum machine_mode mode, rtx op0, rtx op1, rtx target,
2829 int bitpos, bool op0_is_abs)
2831 enum machine_mode imode;
2832 HOST_WIDE_INT hi, lo;
2833 int word, nwords, i;
/* Choose an integer mode covering MODE, or fall back to word pieces.  */
2836 if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
2838 imode = int_mode_for_mode (mode);
2839 if (imode == BLKmode)
2848 if (FLOAT_WORDS_BIG_ENDIAN)
2849 word = (GET_MODE_BITSIZE (mode) - bitpos) / BITS_PER_WORD;
2851 word = bitpos / BITS_PER_WORD;
2852 bitpos = bitpos % BITS_PER_WORD;
2853 nwords = (GET_MODE_BITSIZE (mode) + BITS_PER_WORD - 1) / BITS_PER_WORD;
/* Sign-bit mask { hi, lo }.  */
2856 if (bitpos < HOST_BITS_PER_WIDE_INT)
2859 lo = (HOST_WIDE_INT) 1 << bitpos;
2863 hi = (HOST_WIDE_INT) 1 << (bitpos - HOST_BITS_PER_WIDE_INT);
2867 if (target == 0 || target == op0 || target == op1)
2868 target = gen_reg_rtx (mode);
/* Multi-word path: only the sign-bit word needs masking/merging,
   the remaining words are copied from OP0 unchanged.  */
2874 for (i = 0; i < nwords; ++i)
2876 rtx targ_piece = operand_subword (target, i, 1, mode);
2877 rtx op0_piece = operand_subword_force (op0, i, mode);
/* Clear OP0's sign bit (skipped when OP0_IS_ABS, per elided test).  */
2882 op0_piece = expand_binop (imode, and_optab, op0_piece,
2883 immed_double_const (~lo, ~hi, imode),
2884 NULL_RTX, 1, OPTAB_LIB_WIDEN);
/* Isolate OP1's sign bit.  */
2886 op1 = expand_binop (imode, and_optab,
2887 operand_subword_force (op1, i, mode),
2888 immed_double_const (lo, hi, imode),
2889 NULL_RTX, 1, OPTAB_LIB_WIDEN);
2891 temp = expand_binop (imode, ior_optab, op0_piece, op1,
2892 targ_piece, 1, OPTAB_LIB_WIDEN);
2893 if (temp != targ_piece)
2894 emit_move_insn (targ_piece, temp);
2897 emit_move_insn (targ_piece, op0_piece);
2900 insns = get_insns ();
2903 emit_no_conflict_block (insns, target, op0, op1, NULL_RTX);
/* Single-word path: same AND/AND/IOR dance on lowpart views.  */
2907 op1 = expand_binop (imode, and_optab, gen_lowpart (imode, op1),
2908 immed_double_const (lo, hi, imode),
2909 NULL_RTX, 1, OPTAB_LIB_WIDEN);
2911 op0 = gen_lowpart (imode, op0);
2913 op0 = expand_binop (imode, and_optab, op0,
2914 immed_double_const (~lo, ~hi, imode),
2915 NULL_RTX, 1, OPTAB_LIB_WIDEN);
2917 temp = expand_binop (imode, ior_optab, op0, op1,
2918 gen_lowpart (imode, target), 1, OPTAB_LIB_WIDEN);
2919 target = lowpart_subreg_maybe_copy (mode, temp, imode);
2925 /* Expand the C99 copysign operation. OP0 and OP1 must be the same
2926 scalar floating point mode. Return NULL if we do not know how to
2927 expand the operation inline.
   NOTE(review): interior lines are elided in this extract.  */
2930 expand_copysign (rtx op0, rtx op1, rtx target)
2932 enum machine_mode mode = GET_MODE (op0);
2933 const struct real_format *fmt;
2937 gcc_assert (SCALAR_FLOAT_MODE_P (mode));
2938 gcc_assert (GET_MODE (op1) == mode);
2940 /* First try to do it with a special instruction.  */
2941 temp = expand_binop (mode, copysign_optab, op0, op1,
2942 target, 0, OPTAB_DIRECT);
/* Sign-bit tricks require a format with a signed zero.  */
2946 fmt = REAL_MODE_FORMAT (mode);
2947 if (fmt == NULL || !fmt->has_signed_zero)
/* Fold |OP0| at compile time for a constant operand.  */
2951 if (GET_CODE (op0) == CONST_DOUBLE)
2953 if (real_isneg (CONST_DOUBLE_REAL_VALUE (op0)))
2954 op0 = simplify_unary_operation (ABS, mode, op0, mode);
/* Prefer the abs/neg strategy when those insns exist (or OP0 is
   constant) and the sign bit is readable (signbit_ro >= 0).  */
2958 if (fmt->signbit_ro >= 0
2959 && (GET_CODE (op0) == CONST_DOUBLE
2960 || (neg_optab->handlers[mode].insn_code != CODE_FOR_nothing
2961 && abs_optab->handlers[mode].insn_code != CODE_FOR_nothing)))
2963 temp = expand_copysign_absneg (mode, op0, op1, target,
2964 fmt->signbit_ro, op0_is_abs);
/* Otherwise fall back to integer bit masking; requires a writable
   sign-bit position.  */
2969 if (fmt->signbit_rw < 0)
2971 return expand_copysign_bit (mode, op0, op1, target,
2972 fmt->signbit_rw, op0_is_abs);
2975 /* Generate an instruction whose insn-code is INSN_CODE,
2976 with two operands: an output TARGET and an input OP0.
2977 TARGET *must* be nonzero, and the output is always stored there.
2978 CODE is an rtx code such that (CODE OP0) is an rtx that describes
2979 the value that is stored into TARGET.
   NOTE(review): interior lines are elided in this extract.  */
2982 emit_unop_insn (int icode, rtx target, rtx op0, enum rtx_code code)
2985 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
2990 /* Sign and zero extension from memory is often done specially on
2991 RISC machines, so forcing into a register here can pessimize
   code.  */
2993 if (flag_force_mem && code != SIGN_EXTEND && code != ZERO_EXTEND)
2994 op0 = force_not_mem (op0);
2996 /* Now, if insn does not accept our operands, put them into pseudos.  */
2998 if (!insn_data[icode].operand[1].predicate (op0, mode0))
2999 op0 = copy_to_mode_reg (mode0, op0);
3001 if (!insn_data[icode].operand[0].predicate (temp, GET_MODE (temp))
3002 || (flag_force_mem && MEM_P (temp)))
3003 temp = gen_reg_rtx (GET_MODE (temp));
3005 pat = GEN_FCN (icode) (temp, op0);
/* Annotate multi-insn expansions with a (CODE op0) REG_EQUAL note.  */
3007 if (INSN_P (pat) && NEXT_INSN (pat) != NULL_RTX && code != UNKNOWN)
3008 add_equal_note (pat, temp, code, op0, NULL_RTX)
/* Copy into TARGET when the pattern used a substitute output reg.  */
3013 emit_move_insn (target, temp);
/* State shared between emit_no_conflict_block and its note_stores
   callback no_conflict_move_test.  TARGET is the multi-word output;
   FIRST/INSN delimit the insns examined so far.  (A must_stay flag
   is declared on a line elided from this extract.)  */
3016 struct no_conflict_data
3018 rtx target, first, insn;
3022 /* Called via note_stores by emit_no_conflict_block. Set P->must_stay
3023 if the currently examined clobber / store has to stay in the list of
3024 insns that constitute the actual no_conflict block.
   DEST is the stored-to/clobbered location, SET the SET rtx (or the
   clobber), P0 a struct no_conflict_data *.  */
3026 no_conflict_move_test (rtx dest, rtx set, void *p0)
3028 struct no_conflict_data *p= p0;
3030 /* If this insn directly contributes to setting the target, it must stay.  */
3031 if (reg_overlap_mentioned_p (p->target, dest))
3032 p->must_stay = true;
3033 /* If we haven't committed to keeping any other insns in the list yet,
3034 there is nothing more to check.  */
3035 else if (p->insn == p->first)
3037 /* If this insn sets / clobbers a register that feeds one of the insns
3038 already in the list, this insn has to stay too.  */
3039 else if (reg_mentioned_p (dest, PATTERN (p->first))
3040 || reg_used_between_p (dest, p->first, p->insn)
3041 /* Likewise if this insn depends on a register set by a previous
3042 insn in the list.  */
3043 || (GET_CODE (set) == SET
3044 && (modified_in_p (SET_SRC (set), p->first)
3045 || modified_between_p (SET_SRC (set), p->first, p->insn))))
3046 p->must_stay = true;
3049 /* Emit code to perform a series of operations on a multi-word quantity, one
   word at a time.
3052 Such a block is preceded by a CLOBBER of the output, consists of multiple
3053 insns, each setting one word of the output, and followed by a SET copying
3054 the output to itself.
3056 Each of the insns setting words of the output receives a REG_NO_CONFLICT
3057 note indicating that it doesn't conflict with the (also multi-word)
3058 inputs. The entire block is surrounded by REG_LIBCALL and REG_RETVAL
   notes.
3061 INSNS is a block of code generated to perform the operation, not including
3062 the CLOBBER and final copy. All insns that compute intermediate values
3063 are first emitted, followed by the block as described above.
3065 TARGET, OP0, and OP1 are the output and inputs of the operations,
3066 respectively. OP1 may be zero for a unary operation.
3068 EQUIV, if nonzero, is an expression to be placed into a REG_EQUAL note
   on the last insn.
3071 If TARGET is not a register, INSNS is simply emitted with no special
3072 processing. Likewise if anything in INSNS is not an INSN or if
3073 there is a libcall block inside INSNS.
3075 The final insn emitted is returned.
   NOTE(review): interior lines are elided in this extract.  */
3078 emit_no_conflict_block (rtx insns, rtx target, rtx op0, rtx op1, rtx equiv)
3080 rtx prev, next, first, last, insn;
/* Bail out to plain emission in the unsupported cases noted above.  */
3082 if (!REG_P (target) || reload_in_progress)
3083 return emit_insn (insns);
3085 for (insn = insns; insn; insn = NEXT_INSN (insn))
3086 if (!NONJUMP_INSN_P (insn)
3087 || find_reg_note (insn, REG_LIBCALL, NULL_RTX))
3088 return emit_insn (insns);
3090 /* First emit all insns that do not store into words of the output and remove
3091 these from the list.  */
3092 for (insn = insns; insn; insn = next)
3095 struct no_conflict_data data;
3097 next = NEXT_INSN (insn);
3099 /* Some ports (cris) create libcall regions of their own. We must
3100 avoid any potential nesting of LIBCALLs.  */
3101 if ((note = find_reg_note (insn, REG_LIBCALL, NULL)) != NULL)
3102 remove_note (insn, note);
3103 if ((note = find_reg_note (insn, REG_RETVAL, NULL)) != NULL)
3104 remove_note (insn, note);
3106 data.target = target;
/* Let no_conflict_move_test decide whether INSN may be hoisted out.  */
3110 note_stores (PATTERN (insn), no_conflict_move_test, &data);
3111 if (! data.must_stay)
/* Unlink INSN from the list (emission of it is elided here).  */
3113 if (PREV_INSN (insn))
3114 NEXT_INSN (PREV_INSN (insn)) = next;
3119 PREV_INSN (next) = PREV_INSN (insn);
3125 prev = get_last_insn ();
3127 /* Now write the CLOBBER of the output, followed by the setting of each
3128 of the words, followed by the final copy.  */
3129 if (target != op0 && target != op1)
3130 emit_insn (gen_rtx_CLOBBER (VOIDmode, target));
3132 for (insn = insns; insn; insn = next)
3134 next = NEXT_INSN (insn);
/* Attach REG_NO_CONFLICT notes naming the (register) inputs.  */
3137 if (op1 && REG_P (op1))
3138 REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_NO_CONFLICT, op1,
3141 if (op0 && REG_P (op0))
3142 REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_NO_CONFLICT, op0,
/* Final self-copy of TARGET, carrying the REG_EQUAL note.  */
3146 if (mov_optab->handlers[(int) GET_MODE (target)].insn_code
3147 != CODE_FOR_nothing)
3149 last = emit_move_insn (target, target);
3151 set_unique_reg_note (last, REG_EQUAL, equiv);
3155 last = get_last_insn ();
3157 /* Remove any existing REG_EQUAL note from "last", or else it will
3158 be mistaken for a note referring to the full contents of the
3159 alleged libcall value when found together with the REG_RETVAL
3160 note added below. An existing note can come from an insn
3161 expansion at "last".  */
3162 remove_note (last, find_reg_note (last, REG_EQUAL, NULL_RTX));
3166 first = get_insns ();
3168 first = NEXT_INSN (prev);
3170 /* Encapsulate the block so it gets manipulated as a unit.  */
3171 REG_NOTES (first) = gen_rtx_INSN_LIST (REG_LIBCALL, last,
3173 REG_NOTES (last) = gen_rtx_INSN_LIST (REG_RETVAL, first, REG_NOTES (last));
3178 /* Emit code to make a call to a constant function or a library call.
3180 INSNS is a list containing all insns emitted in the call.
3181 These insns leave the result in RESULT. Our block is to copy RESULT
3182 to TARGET, which is logically equivalent to EQUIV.
3184 We first emit any insns that set a pseudo on the assumption that these are
3185 loading constants into registers; doing so allows them to be safely cse'ed
3186 between blocks. Then we emit all the other insns in the block, followed by
3187 an insn to move RESULT to TARGET. This last insn will have a REG_EQUAL
3188 note with an operand of EQUIV.
3190 Moving assignments to pseudos outside of the block is done to improve
3191 the generated code, but is not required to generate correct code,
3192 hence being unable to move an assignment is not grounds for not making
3193 a libcall block. There are two reasons why it is safe to leave these
3194 insns inside the block: First, we know that these pseudos cannot be
3195 used in generated RTL outside the block since they are created for
3196 temporary purposes within the block. Second, CSE will not record the
3197 values of anything set inside a libcall block, so we know they must
3198 be dead at the end of the block.
3200 Except for the first group of insns (the ones setting pseudos), the
3201 block is delimited by REG_RETVAL and REG_LIBCALL notes.
   NOTE(review): interior lines are elided in this extract.  */
3204 emit_libcall_block (rtx insns, rtx target, rtx result, rtx equiv)
3206 rtx final_dest = target;
3207 rtx prev, next, first, last, insn;
3209 /* If this is a reg with REG_USERVAR_P set, then it could possibly turn
3210 into a MEM later. Protect the libcall block from this change.  */
3211 if (! REG_P (target) || REG_USERVAR_P (target))
3212 target = gen_reg_rtx (GET_MODE (target));
3214 /* If we're using non-call exceptions, a libcall corresponding to an
3215 operation that may trap may also trap.  */
3216 if (flag_non_call_exceptions && may_trap_p (equiv))
/* Drop "cannot throw" (value <= 0) REG_EH_REGION notes, since this
   call may in fact trap.  */
3218 for (insn = insns; insn; insn = NEXT_INSN (insn))
3221 rtx note = find_reg_note (insn, REG_EH_REGION, NULL_RTX);
3223 if (note != 0 && INTVAL (XEXP (note, 0)) <= 0)
3224 remove_note (insn, note);
3228 /* look for any CALL_INSNs in this sequence, and attach a REG_EH_REGION
3229 reg note to indicate that this call cannot throw or execute a nonlocal
3230 goto (unless there is already a REG_EH_REGION note, in which case
   we update it).  */
3232 for (insn = insns; insn; insn = NEXT_INSN (insn))
3235 rtx note = find_reg_note (insn, REG_EH_REGION, NULL_RTX);
3238 XEXP (note, 0) = constm1_rtx;
3240 REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_EH_REGION, constm1_rtx,
3244 /* First emit all insns that set pseudos. Remove them from the list as
3245 we go. Avoid insns that set pseudos which were referenced in previous
3246 insns. These can be generated by move_by_pieces, for example,
3247 to update an address. Similarly, avoid insns that reference things
3248 set in previous insns.  */
3250 for (insn = insns; insn; insn = next)
3252 rtx set = single_set (insn);
3255 /* Some ports (cris) create libcall regions of their own. We must
3256 avoid any potential nesting of LIBCALLs.  */
3257 if ((note = find_reg_note (insn, REG_LIBCALL, NULL)) != NULL)
3258 remove_note (insn, note);
3259 if ((note = find_reg_note (insn, REG_RETVAL, NULL)) != NULL)
3260 remove_note (insn, note);
3262 next = NEXT_INSN (insn);
/* A simple SET of a pseudo that neither uses nor clobbers anything
   touched earlier in the sequence can be hoisted out of the block.  */
3264 if (set != 0 && REG_P (SET_DEST (set))
3265 && REGNO (SET_DEST (set)) >= FIRST_PSEUDO_REGISTER
3267 || ((! INSN_P(insns)
3268 || ! reg_mentioned_p (SET_DEST (set), PATTERN (insns)))
3269 && ! reg_used_between_p (SET_DEST (set), insns, insn)
3270 && ! modified_in_p (SET_SRC (set), insns)
3271 && ! modified_between_p (SET_SRC (set), insns, insn))))
/* Unlink INSN from the list (emission of it is elided here).  */
3273 if (PREV_INSN (insn))
3274 NEXT_INSN (PREV_INSN (insn)) = next;
3279 PREV_INSN (next) = PREV_INSN (insn);
3284 /* Some ports use a loop to copy large arguments onto the stack.
3285 Don't move anything outside such a loop.  */
3290 prev = get_last_insn ();
3292 /* Write the remaining insns followed by the final copy.  */
3294 for (insn = insns; insn; insn = next)
3296 next = NEXT_INSN (insn);
3301 last = emit_move_insn (target, result);
3302 if (mov_optab->handlers[(int) GET_MODE (target)].insn_code
3303 != CODE_FOR_nothing)
3304 set_unique_reg_note (last, REG_EQUAL, copy_rtx (equiv));
3307 /* Remove any existing REG_EQUAL note from "last", or else it will
3308 be mistaken for a note referring to the full contents of the
3309 libcall value when found together with the REG_RETVAL note added
3310 below. An existing note can come from an insn expansion at
   "last".  */
3312 remove_note (last, find_reg_note (last, REG_EQUAL, NULL_RTX));
3315 if (final_dest != target)
3316 emit_move_insn (final_dest, target);
3319 first = get_insns ();
3321 first = NEXT_INSN (prev);
3323 /* Encapsulate the block so it gets manipulated as a unit.  */
3324 if (!flag_non_call_exceptions || !may_trap_p (equiv))
3326 /* We can't attach the REG_LIBCALL and REG_RETVAL notes
3327 when the encapsulated region would not be in one basic block,
3328 i.e. when there is a control_flow_insn_p insn between FIRST and LAST.
   */
3330 bool attach_libcall_retval_notes = true;
3331 next = NEXT_INSN (last);
3332 for (insn = first; insn != next; insn = NEXT_INSN (insn))
3333 if (control_flow_insn_p (insn))
3335 attach_libcall_retval_notes = false;
3339 if (attach_libcall_retval_notes)
3341 REG_NOTES (first) = gen_rtx_INSN_LIST (REG_LIBCALL, last,
3343 REG_NOTES (last) = gen_rtx_INSN_LIST (REG_RETVAL, first,
3349 /* Nonzero if we can perform a comparison of mode MODE straightforwardly.
3350 PURPOSE describes how this comparison will be used. CODE is the rtx
3351 comparison code we will be using.
3353 ??? Actually, CODE is slightly weaker than that. A target is still
3354 required to implement all of the normal bcc operations, but not
3355 required to implement all (or any) of the unordered bcc operations.
   NOTE(review): interior lines are elided in this extract.  */
3358 can_compare_p (enum rtx_code code, enum machine_mode mode,
3359 enum can_compare_purpose purpose)
/* Loop (header elided) over MODE and successively wider modes.  */
3363 if (cmp_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
/* A separate cmp pattern exists: check the consumer side too.  */
3365 if (purpose == ccp_jump)
3366 return bcc_gen_fctn[(int) code] != NULL;
3367 else if (purpose == ccp_store_flag)
3368 return setcc_gen_code[(int) code] != CODE_FOR_nothing;
3370 /* There's only one cmov entry point, and it's allowed to fail.  */
/* Combined compare-and-use patterns also satisfy the request.  */
3373 if (purpose == ccp_jump
3374 && cbranch_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
3376 if (purpose == ccp_cmov
3377 && cmov_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
3379 if (purpose == ccp_store_flag
3380 && cstore_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
3382 mode = GET_MODE_WIDER_MODE (mode);
3384 while (mode != VOIDmode);
3389 /* This function is called when we are going to emit a compare instruction that
3390 compares the values found in *PX and *PY, using the rtl operator COMPARISON.
3392 *PMODE is the mode of the inputs (in case they are const_int).
3393 *PUNSIGNEDP nonzero says that the operands are unsigned;
3394 this matters if they need to be widened.
3396 If they have mode BLKmode, then SIZE specifies the size of both operands.
3398 This function performs all the setup necessary so that the caller only has
3399 to emit a single comparison insn. This setup can involve doing a BLKmode
3400 comparison or emitting a library call to perform the comparison if no insn
3401 is available to handle it.
3402 The values which are passed in through pointers can be modified; the caller
3403 should perform the comparison on the modified values. Constant
3404 comparisons must have already been folded.
   NOTE(review): interior lines are elided in this extract.  */
3407 prepare_cmp_insn (rtx *px, rtx *py, enum rtx_code *pcomparison, rtx size,
3408 enum machine_mode *pmode, int *punsignedp,
3409 enum can_compare_purpose purpose)
3411 enum machine_mode mode = *pmode;
3412 rtx x = *px, y = *py;
3413 int unsignedp = *punsignedp;
3414 enum mode_class class;
3416 class = GET_MODE_CLASS (mode);
/* With -fforce-mem, pull operands out of memory first.  */
3418 if (mode != BLKmode && flag_force_mem)
3420 /* Load duplicate non-volatile operands once.  */
3421 if (rtx_equal_p (x, y) && ! volatile_refs_p (x))
3423 x = force_not_mem (x);
3428 x = force_not_mem (x);
3429 y = force_not_mem (y);
3433 /* If we are inside an appropriately-short loop and we are optimizing,
3434 force expensive constants into a register.  */
3435 if (CONSTANT_P (x) && optimize
3436 && rtx_cost (x, COMPARE) > COSTS_N_INSNS (1))
3437 x = force_reg (mode, x);
3439 if (CONSTANT_P (y) && optimize
3440 && rtx_cost (y, COMPARE) > COSTS_N_INSNS (1))
3441 y = force_reg (mode, y);
3444 /* Make sure if we have a canonical comparison. The RTL
3445 documentation states that canonical comparisons are required only
3446 for targets which have cc0.  */
3447 gcc_assert (!CONSTANT_P (x) || CONSTANT_P (y));
3450 /* Don't let both operands fail to indicate the mode.  */
3451 if (GET_MODE (x) == VOIDmode && GET_MODE (y) == VOIDmode)
3452 x = force_reg (mode, x);
3454 /* Handle all BLKmode compares.  */
3456 if (mode == BLKmode)
3458 enum machine_mode cmp_mode, result_mode;
3459 enum insn_code cmp_code;
/* Alignment operand for the block-compare pattern.  */
3464 = GEN_INT (MIN (MEM_ALIGN (x), MEM_ALIGN (y)) / BITS_PER_UNIT);
3468 /* Try to use a memory block compare insn - either cmpstr
3469 or cmpmem will do.  */
3470 for (cmp_mode = GET_CLASS_NARROWEST_MODE (MODE_INT);
3471 cmp_mode != VOIDmode;
3472 cmp_mode = GET_MODE_WIDER_MODE (cmp_mode))
3474 cmp_code = cmpmem_optab[cmp_mode];
3475 if (cmp_code == CODE_FOR_nothing)
3476 cmp_code = cmpstr_optab[cmp_mode];
3477 if (cmp_code == CODE_FOR_nothing)
3480 /* Must make sure the size fits the insn's mode.  */
3481 if ((GET_CODE (size) == CONST_INT
3482 && INTVAL (size) >= (1 << GET_MODE_BITSIZE (cmp_mode)))
3483 || (GET_MODE_BITSIZE (GET_MODE (size))
3484 > GET_MODE_BITSIZE (cmp_mode)))
3487 result_mode = insn_data[cmp_code].operand[0].mode;
3488 result = gen_reg_rtx (result_mode);
3489 size = convert_to_mode (cmp_mode, size, 1);
3490 emit_insn (GEN_FCN (cmp_code) (result, x, y, size, opalign));
/* Caller now compares RESULT against zero in RESULT_MODE.  */
3494 *pmode = result_mode;
3498 /* Otherwise call a library function, memcmp.  */
3499 libfunc = memcmp_libfunc;
3500 length_type = sizetype;
3501 result_mode = TYPE_MODE (integer_type_node);
3502 cmp_mode = TYPE_MODE (length_type);
3503 size = convert_to_mode (TYPE_MODE (length_type), size,
3504 TYPE_UNSIGNED (length_type));
3506 result = emit_library_call_value (libfunc, 0, LCT_PURE_MAKE_BLOCK,
3513 *pmode = result_mode;
3517 /* Don't allow operands to the compare to trap, as that can put the
3518 compare and branch in different basic blocks.  */
3519 if (flag_non_call_exceptions)
3522 x = force_reg (mode, x);
3524 y = force_reg (mode, y);
/* If the target can compare in this mode directly, we are done
   (return elided).  */
3529 if (can_compare_p (*pcomparison, mode, purpose))
3532 /* Handle a lib call just for the mode we are using.  */
3534 if (cmp_optab->handlers[(int) mode].libfunc && class != MODE_FLOAT)
3536 rtx libfunc = cmp_optab->handlers[(int) mode].libfunc;
3539 /* If we want unsigned, and this mode has a distinct unsigned
3540 comparison routine, use that.  */
3541 if (unsignedp && ucmp_optab->handlers[(int) mode].libfunc)
3542 libfunc = ucmp_optab->handlers[(int) mode].libfunc;
3544 result = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST_MAKE_BLOCK,
3545 word_mode, 2, x, mode, y, mode);
3549 if (TARGET_LIB_INT_CMP_BIASED)
3550 /* Integer comparison returns a result that must be compared
3551 against 1, so that even if we do an unsigned compare
3552 afterward, there is still a value that can represent the
3553 result "less than".  */
/* Only floats should remain; hand them to the FP compare helper.  */
3563 gcc_assert (class == MODE_FLOAT);
3564 prepare_float_lib_cmp (px, py, pcomparison, pmode, punsignedp);
3567 /* Before emitting an insn with code ICODE, make sure that X, which is going
3568 to be used for operand OPNUM of the insn, is converted from mode MODE to
3569 WIDER_MODE (UNSIGNEDP determines whether it is an unsigned conversion), and
3570 that it is accepted by the operand predicate. Return the new value. */
3573 prepare_operand (int icode, rtx x, int opnum, enum machine_mode mode,
3574 enum machine_mode wider_mode, int unsignedp)
/* Widen first so that the predicate sees the operand in its final mode. */
3576 if (mode != wider_mode)
3577 x = convert_modes (wider_mode, mode, x, unsignedp);
3579 if (!insn_data[icode].operand[opnum].predicate
3580 (x, insn_data[icode].operand[opnum].mode))
/* Predicate rejected X: fall back to a fresh pseudo in the operand's
   required mode, which any register_operand predicate will accept.  */
3584 x = copy_to_mode_reg (insn_data[icode].operand[opnum].mode, x);
3590 /* Subroutine of emit_cmp_and_jump_insns; this function is called when we know
3591 we can do the comparison.
3592 The arguments are the same as for emit_cmp_and_jump_insns; but LABEL may
3593 be NULL_RTX which indicates that only a comparison is to be generated.  */
3596 emit_cmp_and_jump_insn_1 (rtx x, rtx y, enum machine_mode mode,
3597 enum rtx_code comparison, int unsignedp, rtx label)
3599 rtx test = gen_rtx_fmt_ee (comparison, mode, x, y);
3600 enum mode_class class = GET_MODE_CLASS (mode);
3601 enum machine_mode wider_mode = mode;
/* Walk MODE and each successively wider mode of the same class, trying
   three strategies in order at each width: a combined compare-and-branch
   (cbranch), a test-against-zero (tst), and a plain compare (cmp) followed
   by a conditional branch.  */
3603 /* Try combined insns first.  */
3606 enum insn_code icode;
/* Reuse TEST at the current trial width; its operands are widened by
   prepare_operand before the insn is emitted.  */
3607 PUT_MODE (test, wider_mode);
3611 icode = cbranch_optab->handlers[(int) wider_mode].insn_code;
3613 if (icode != CODE_FOR_nothing
3614 && insn_data[icode].operand[0].predicate (test, wider_mode))
3616 x = prepare_operand (icode, x, 1, mode, wider_mode, unsignedp);
3617 y = prepare_operand (icode, y, 2, mode, wider_mode, unsignedp);
3618 emit_jump_insn (GEN_FCN (icode) (test, x, y, label));
3623 /* Handle some compares against zero.  */
3624 icode = (int) tst_optab->handlers[(int) wider_mode].insn_code;
3625 if (y == CONST0_RTX (mode) && icode != CODE_FOR_nothing)
3627 x = prepare_operand (icode, x, 0, mode, wider_mode, unsignedp);
3628 emit_insn (GEN_FCN (icode) (x));
/* LABEL may be NULL_RTX when only the compare is wanted; the branch
   emission below is presumably guarded on LABEL in elided lines.  */
3630 emit_jump_insn (bcc_gen_fctn[(int) comparison] (label));
3634 /* Handle compares for which there is a directly suitable insn.  */
3636 icode = (int) cmp_optab->handlers[(int) wider_mode].insn_code;
3637 if (icode != CODE_FOR_nothing)
3639 x = prepare_operand (icode, x, 0, mode, wider_mode, unsignedp);
3640 y = prepare_operand (icode, y, 1, mode, wider_mode, unsignedp);
3641 emit_insn (GEN_FCN (icode) (x, y));
3643 emit_jump_insn (bcc_gen_fctn[(int) comparison] (label));
/* Widening only makes sense for scalar int and (complex) float classes.  */
3647 if (class != MODE_INT && class != MODE_FLOAT
3648 && class != MODE_COMPLEX_FLOAT)
3651 wider_mode = GET_MODE_WIDER_MODE (wider_mode);
3653 while (wider_mode != VOIDmode);
3658 /* Generate code to compare X with Y so that the condition codes are
3659 set and to jump to LABEL if the condition is true.  If X is a
3660 constant and Y is not a constant, then the comparison is swapped to
3661 ensure that the comparison RTL has the canonical form.
3663 UNSIGNEDP nonzero says that X and Y are unsigned; this matters if they
3664 need to be widened by emit_cmp_insn.  UNSIGNEDP is also used to select
3665 the proper branch condition code.
3667 If X and Y have mode BLKmode, then SIZE specifies the size of both X and Y.
3669 MODE is the mode of the inputs (in case they are const_int).
3671 COMPARISON is the rtl operator to compare with (EQ, NE, GT, etc.).  It will
3672 be passed unchanged to emit_cmp_insn, then potentially converted into an
3673 unsigned variant based on UNSIGNEDP to select a proper jump instruction.  */
3676 emit_cmp_and_jump_insns (rtx x, rtx y, enum rtx_code comparison, rtx size,
3677 enum machine_mode mode, int unsignedp, rtx label)
3679 rtx op0 = x, op1 = y;
3681 /* Swap operands and condition to ensure canonical RTL.  */
3682 if (swap_commutative_operands_p (x, y))
3684 /* If we're not emitting a branch, this means some caller
3689 comparison = swap_condition (comparison);
3693 /* If OP0 is still a constant, then both X and Y must be constants.
3694 Force X into a register to create canonical RTL.  */
3695 if (CONSTANT_P (op0))
3696 op0 = force_reg (mode, op0);
/* For unsigned comparisons, translate the signed rtx code (e.g. LT)
   to its unsigned counterpart (LTU) so the right branch is chosen.  */
3700 comparison = unsigned_condition (comparison);
/* prepare_cmp_insn may rewrite the operands, code, mode and signedness
   (e.g. for BLKmode or float library compares) before emission.  */
3702 prepare_cmp_insn (&op0, &op1, &comparison, size, &mode, &unsignedp,
3704 emit_cmp_and_jump_insn_1 (op0, op1, mode, comparison, unsignedp, label);
3707 /* Like emit_cmp_and_jump_insns, but generate only the comparison
   (no branch): a null LABEL tells the worker to stop after setting
   the condition codes.  */
3710 emit_cmp_insn (rtx x, rtx y, enum rtx_code comparison, rtx size,
3711 enum machine_mode mode, int unsignedp)
3713 emit_cmp_and_jump_insns (x, y, comparison, size, mode, unsignedp, 0);
3716 /* Emit a library call comparison between floating point X and Y.
3717 COMPARISON is the rtl operator to compare with (EQ, NE, GT, etc.).
   On return *PX, *PY, *PCOMPARISON and *PMODE describe an equivalent
   integer comparison of the libcall's result.  */
3720 prepare_float_lib_cmp (rtx *px, rtx *py, enum rtx_code *pcomparison,
3721 enum machine_mode *pmode, int *punsignedp)
3723 enum rtx_code comparison = *pcomparison;
3724 enum rtx_code swapped = swap_condition (comparison);
3725 enum rtx_code reversed = reverse_condition_maybe_unordered (comparison);
3728 enum machine_mode orig_mode = GET_MODE (x);
3729 enum machine_mode mode;
3730 rtx value, target, insns, equiv;
3732 bool reversed_p = false;
/* Search for a usable libfunc, starting at the operands' own mode and
   widening: try the comparison itself, then the operand-swapped form,
   then the reversed form (only if the libcall returns a boolean).  */
3734 for (mode = orig_mode; mode != VOIDmode; mode = GET_MODE_WIDER_MODE (mode))
3736 if ((libfunc = code_to_optab[comparison]->handlers[mode].libfunc))
3739 if ((libfunc = code_to_optab[swapped]->handlers[mode].libfunc))
3742 tmp = x; x = y; y = tmp;
3743 comparison = swapped;
3747 if ((libfunc = code_to_optab[reversed]->handlers[mode].libfunc)
3748 && FLOAT_LIB_COMPARE_RETURNS_BOOL (mode, reversed))
3750 comparison = reversed;
3756 gcc_assert (mode != VOIDmode);
3758 if (mode != orig_mode)
3760 x = convert_to_mode (mode, x, 0);
3761 y = convert_to_mode (mode, y, 0);
3764 /* Attach a REG_EQUAL note describing the semantics of the libcall to
3765 the RTL.  This allows the RTL optimizers to delete the libcall if the
3766 condition can be determined at compile-time.  */
3767 if (comparison == UNORDERED)
/* UNORDERED is true iff either operand is a NaN, i.e. X != X || Y != Y.  */
3769 rtx temp = simplify_gen_relational (NE, word_mode, mode, x, x);
3770 equiv = simplify_gen_relational (NE, word_mode, mode, y, y);
3771 equiv = simplify_gen_ternary (IF_THEN_ELSE, word_mode, word_mode,
3772 temp, const_true_rtx, equiv);
3776 equiv = simplify_gen_relational (comparison, word_mode, mode, x, y);
3777 if (! FLOAT_LIB_COMPARE_RETURNS_BOOL (mode, comparison))
/* Model the conventional soft-float return values (-1/0/1 style) for
   each comparison code so the note matches the libcall's contract.  */
3779 rtx true_rtx, false_rtx;
3784 true_rtx = const0_rtx;
3785 false_rtx = const_true_rtx;
3789 true_rtx = const_true_rtx;
3790 false_rtx = const0_rtx;
3794 true_rtx = const1_rtx;
3795 false_rtx = const0_rtx;
3799 true_rtx = const0_rtx;
3800 false_rtx = constm1_rtx;
3804 true_rtx = constm1_rtx;
3805 false_rtx = const0_rtx;
3809 true_rtx = const0_rtx;
3810 false_rtx = const1_rtx;
3816 equiv = simplify_gen_ternary (IF_THEN_ELSE, word_mode, word_mode,
3817 equiv, true_rtx, false_rtx);
3822 value = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
3823 word_mode, 2, x, mode, y, mode);
3824 insns = get_insns ();
3827 target = gen_reg_rtx (word_mode);
3828 emit_libcall_block (insns, target, value, equiv);
/* When the libcall already yields a boolean, the caller only needs to
   test the result against zero.  */
3830 if (comparison == UNORDERED
3831 || FLOAT_LIB_COMPARE_RETURNS_BOOL (mode, comparison))
3832 comparison = reversed_p ? EQ : NE;
3837 *pcomparison = comparison;
3841 /* Generate code to indirectly jump to a location given in the rtx LOC.  */
3844 emit_indirect_jump (rtx loc)
/* Copy LOC into a Pmode pseudo if the indirect_jump pattern's operand
   predicate will not accept it as-is.  */
3846 if (!insn_data[(int) CODE_FOR_indirect_jump].operand[0].predicate
3848 loc = copy_to_mode_reg (Pmode, loc);
3850 emit_jump_insn (gen_indirect_jump (loc));
3854 #ifdef HAVE_conditional_move
3856 /* Emit a conditional move instruction if the machine supports one for that
3857 condition and machine mode.
3859 OP0 and OP1 are the operands that should be compared using CODE.  CMODE is
3860 the mode to use should they be constants.  If it is VOIDmode, they cannot
3863 OP2 should be stored in TARGET if the comparison is true, otherwise OP3
3864 should be stored there.  MODE is the mode to use should they be constants.
3865 If it is VOIDmode, they cannot both be constants.
3867 The result is either TARGET (perhaps modified) or NULL_RTX if the operation
3868 is not supported.  */
3871 emit_conditional_move (rtx target, enum rtx_code code, rtx op0, rtx op1,
3872 enum machine_mode cmode, rtx op2, rtx op3,
3873 enum machine_mode mode, int unsignedp)
3875 rtx tem, subtarget, comparison, insn;
3876 enum insn_code icode;
3877 enum rtx_code reversed;
3879 /* If one operand is constant, make it the second one.  Only do this
3880 if the other operand is not constant as well.  */
3882 if (swap_commutative_operands_p (op0, op1))
3887 code = swap_condition (code);
3890 /* get_condition will prefer to generate LT and GT even if the old
3891 comparison was against zero, so undo that canonicalization here since
3892 comparisons against zero are cheaper.  */
3893 if (code == LT && op1 == const1_rtx)
3894 code = LE, op1 = const0_rtx;
3895 else if (code == GT && op1 == constm1_rtx)
3896 code = GE, op1 = const0_rtx;
3898 if (cmode == VOIDmode)
3899 cmode = GET_MODE (op0);
/* If swapping OP2/OP3 is profitable and the comparison can be reversed,
   do both so the overall semantics are unchanged.  */
3901 if (swap_commutative_operands_p (op2, op3)
3902 && ((reversed = reversed_comparison_code_parts (code, op0, op1, NULL))
3911 if (mode == VOIDmode)
3912 mode = GET_MODE (op2);
3914 icode = movcc_gen_code[mode];
/* Bail out early: the target has no movMODEcc pattern at all.  */
3916 if (icode == CODE_FOR_nothing)
3921 op2 = force_not_mem (op2);
3922 op3 = force_not_mem (op3);
3926 target = gen_reg_rtx (mode);
3930 /* If the insn doesn't accept these operands, put them in pseudos.  */
3932 if (!insn_data[icode].operand[0].predicate
3933 (subtarget, insn_data[icode].operand[0].mode))
3934 subtarget = gen_reg_rtx (insn_data[icode].operand[0].mode);
3936 if (!insn_data[icode].operand[2].predicate
3937 (op2, insn_data[icode].operand[2].mode))
3938 op2 = copy_to_mode_reg (insn_data[icode].operand[2].mode, op2);
3940 if (!insn_data[icode].operand[3].predicate
3941 (op3, insn_data[icode].operand[3].mode))
3942 op3 = copy_to_mode_reg (insn_data[icode].operand[3].mode, op3);
3944 /* Everything should now be in the suitable form, so emit the compare insn
3945 and then the conditional move.  */
3948 = compare_from_rtx (op0, op1, code, unsignedp, cmode, NULL_RTX);
3950 /* ??? Watch for const0_rtx (nop) and const_true_rtx (unconditional)?  */
3951 /* We can get const0_rtx or const_true_rtx in some circumstances.  Just
3952 return NULL and let the caller figure out how best to deal with this
3954 if (GET_CODE (comparison) != code)
3957 insn = GEN_FCN (icode) (subtarget, comparison, op2, op3);
3959 /* If that failed, then give up.  */
/* Copy the result from the scratch SUBTARGET into the caller's TARGET
   when they differ.  */
3965 if (subtarget != target)
3966 convert_move (target, subtarget, 0);
3971 /* Return nonzero if a conditional move of mode MODE is supported.
3973 This function is for combine so it can tell whether an insn that looks
3974 like a conditional move is actually supported by the hardware.  If we
3975 guess wrong we lose a bit on optimization, but that's it.  */
3976 /* ??? sparc64 supports conditionally moving integers values based on fp
3977 comparisons, and vice versa.  How do we handle them?  */
3980 can_conditionally_move_p (enum machine_mode mode)
/* A mode is conditionally movable iff a movMODEcc pattern exists.  */
3982 if (movcc_gen_code[mode] != CODE_FOR_nothing)
3988 #endif /* HAVE_conditional_move */
3990 /* Emit a conditional addition instruction if the machine supports one for that
3991 condition and machine mode.
3993 OP0 and OP1 are the operands that should be compared using CODE.  CMODE is
3994 the mode to use should they be constants.  If it is VOIDmode, they cannot
3997 OP2 should be stored in TARGET if the comparison is true, otherwise OP2+OP3
3998 should be stored there.  MODE is the mode to use should they be constants.
3999 If it is VOIDmode, they cannot both be constants.
4001 The result is either TARGET (perhaps modified) or NULL_RTX if the operation
4002 is not supported.  */
4005 emit_conditional_add (rtx target, enum rtx_code code, rtx op0, rtx op1,
4006 enum machine_mode cmode, rtx op2, rtx op3,
4007 enum machine_mode mode, int unsignedp)
4009 rtx tem, subtarget, comparison, insn;
4010 enum insn_code icode;
4011 enum rtx_code reversed;
4013 /* If one operand is constant, make it the second one.  Only do this
4014 if the other operand is not constant as well.  */
4016 if (swap_commutative_operands_p (op0, op1))
4021 code = swap_condition (code);
4024 /* get_condition will prefer to generate LT and GT even if the old
4025 comparison was against zero, so undo that canonicalization here since
4026 comparisons against zero are cheaper.  */
4027 if (code == LT && op1 == const1_rtx)
4028 code = LE, op1 = const0_rtx;
4029 else if (code == GT && op1 == constm1_rtx)
4030 code = GE, op1 = const0_rtx;
4032 if (cmode == VOIDmode)
4033 cmode = GET_MODE (op0);
/* Swap OP2/OP3 only if the comparison can be reversed to compensate.  */
4035 if (swap_commutative_operands_p (op2, op3)
4036 && ((reversed = reversed_comparison_code_parts (code, op0, op1, NULL))
4045 if (mode == VOIDmode)
4046 mode = GET_MODE (op2);
4048 icode = addcc_optab->handlers[(int) mode].insn_code;
/* No addMODEcc pattern for this mode: operation unsupported.  */
4050 if (icode == CODE_FOR_nothing)
4055 op2 = force_not_mem (op2);
4056 op3 = force_not_mem (op3);
4060 target = gen_reg_rtx (mode);
4062 /* If the insn doesn't accept these operands, put them in pseudos.  */
4064 if (!insn_data[icode].operand[0].predicate
4065 (target, insn_data[icode].operand[0].mode))
4066 subtarget = gen_reg_rtx (insn_data[icode].operand[0].mode);
4070 if (!insn_data[icode].operand[2].predicate
4071 (op2, insn_data[icode].operand[2].mode))
4072 op2 = copy_to_mode_reg (insn_data[icode].operand[2].mode, op2)
4074 if (!insn_data[icode].operand[3].predicate
4075 (op3, insn_data[icode].operand[3].mode))
4076 op3 = copy_to_mode_reg (insn_data[icode].operand[3].mode, op3);
4078 /* Everything should now be in the suitable form, so emit the compare insn
4079 and then the conditional add.  */
4082 = compare_from_rtx (op0, op1, code, unsignedp, cmode, NULL_RTX);
4084 /* ??? Watch for const0_rtx (nop) and const_true_rtx (unconditional)?  */
4085 /* We can get const0_rtx or const_true_rtx in some circumstances.  Just
4086 return NULL and let the caller figure out how best to deal with this
4088 if (GET_CODE (comparison) != code)
4091 insn = GEN_FCN (icode) (subtarget, comparison, op2, op3);
4093 /* If that failed, then give up.  */
4099 if (subtarget != target)
4100 convert_move (target, subtarget, 0);
4105 /* These functions attempt to generate an insn body, rather than
4106 emitting the insn, but if the gen function already emits them, we
4107 make no attempt to turn them back into naked patterns.  */
4109 /* Generate and return an insn body to add Y to X (i.e. X = X + Y).
   The caller is expected to have verified the operands are valid,
   hence the asserts rather than graceful failure (cf. gen_add3_insn).  */
4112 gen_add2_insn (rtx x, rtx y)
4114 int icode = (int) add_optab->handlers[(int) GET_MODE (x)].insn_code;
4116 gcc_assert (insn_data[icode].operand[0].predicate
4117 (x, insn_data[icode].operand[0].mode));
4118 gcc_assert (insn_data[icode].operand[1].predicate
4119 (x, insn_data[icode].operand[1].mode));
4120 gcc_assert (insn_data[icode].operand[2].predicate
4121 (y, insn_data[icode].operand[2].mode));
4123 return GEN_FCN (icode) (x, x, y);
4126 /* Generate and return an insn body to add r1 and c,
4127 storing the result in r0.  Unlike gen_add2_insn, this returns
   a null result (in elided code) instead of asserting when the
   target cannot handle the operands.  */
4129 gen_add3_insn (rtx r0, rtx r1, rtx c)
4131 int icode = (int) add_optab->handlers[(int) GET_MODE (r0)].insn_code;
4133 if (icode == CODE_FOR_nothing
4134 || !(insn_data[icode].operand[0].predicate
4135 (r0, insn_data[icode].operand[0].mode))
4136 || !(insn_data[icode].operand[1].predicate
4137 (r1, insn_data[icode].operand[1].mode))
4138 || !(insn_data[icode].operand[2].predicate
4139 (c, insn_data[icode].operand[2].mode)))
4142 return GEN_FCN (icode) (r0, r1, c);
/* Return nonzero if the target has an add pattern that accepts
   X = X + Y directly, i.e. gen_add2_insn (x, y) would succeed.  */
4146 have_add2_insn (rtx x, rtx y)
4150 gcc_assert (GET_MODE (x) != VOIDmode);
4152 icode = (int) add_optab->handlers[(int) GET_MODE (x)].insn_code;
4154 if (icode == CODE_FOR_nothing)
4157 if (!(insn_data[icode].operand[0].predicate
4158 (x, insn_data[icode].operand[0].mode))
4159 || !(insn_data[icode].operand[1].predicate
4160 (x, insn_data[icode].operand[1].mode))
4161 || !(insn_data[icode].operand[2].predicate
4162 (y, insn_data[icode].operand[2].mode)))
4168 /* Generate and return an insn body to subtract Y from X (X = X - Y).
   Operands must already be valid for the target's sub pattern;
   invalid operands trip the asserts (cf. gen_sub3_insn).  */
4171 gen_sub2_insn (rtx x, rtx y)
4173 int icode = (int) sub_optab->handlers[(int) GET_MODE (x)].insn_code;
4175 gcc_assert (insn_data[icode].operand[0].predicate
4176 (x, insn_data[icode].operand[0].mode));
4177 gcc_assert (insn_data[icode].operand[1].predicate
4178 (x, insn_data[icode].operand[1].mode));
4179 gcc_assert (insn_data[icode].operand[2].predicate
4180 (y, insn_data[icode].operand[2].mode));
4182 return GEN_FCN (icode) (x, x, y);
4185 /* Generate and return an insn body to subtract r1 and c,
4186 storing the result in r0.  Returns a null result (in elided code)
   rather than asserting when the pattern or predicates reject
   the operands.  */
4188 gen_sub3_insn (rtx r0, rtx r1, rtx c)
4190 int icode = (int) sub_optab->handlers[(int) GET_MODE (r0)].insn_code;
4192 if (icode == CODE_FOR_nothing
4193 || !(insn_data[icode].operand[0].predicate
4194 (r0, insn_data[icode].operand[0].mode))
4195 || !(insn_data[icode].operand[1].predicate
4196 (r1, insn_data[icode].operand[1].mode))
4197 || !(insn_data[icode].operand[2].predicate
4198 (c, insn_data[icode].operand[2].mode)))
4201 return GEN_FCN (icode) (r0, r1, c);
/* Return nonzero if the target has a sub pattern that accepts
   X = X - Y directly, i.e. gen_sub2_insn (x, y) would succeed.  */
4205 have_sub2_insn (rtx x, rtx y)
4209 gcc_assert (GET_MODE (x) != VOIDmode);
4211 icode = (int) sub_optab->handlers[(int) GET_MODE (x)].insn_code;
4213 if (icode == CODE_FOR_nothing)
4216 if (!(insn_data[icode].operand[0].predicate
4217 (x, insn_data[icode].operand[0].mode))
4218 || !(insn_data[icode].operand[1].predicate
4219 (x, insn_data[icode].operand[1].mode))
4220 || !(insn_data[icode].operand[2].predicate
4221 (y, insn_data[icode].operand[2].mode)))
4227 /* Generate the body of an instruction to copy Y into X.
4228 It may be a list of insns, if one insn isn't enough.
   NOTE(review): the start_sequence/get_insns/end_sequence bracketing is
   not visible in this excerpt; presumably emit_move_insn_1 emits into a
   captured sequence which is then returned -- confirm against full file.  */
4231 gen_move_insn (rtx x, rtx y)
4236 emit_move_insn_1 (x, y);
4242 /* Return the insn code used to extend FROM_MODE to TO_MODE.
4243 UNSIGNEDP specifies zero-extension instead of sign-extension.  If
4244 no such operation exists, CODE_FOR_nothing will be returned.  */
4247 can_extend_p (enum machine_mode to_mode, enum machine_mode from_mode,
/* Targets with a ptr_extend pattern (e.g. partial-int pointers) handle
   the pointer-extension case specially.  */
4251 #ifdef HAVE_ptr_extend
4253 return CODE_FOR_ptr_extend;
4256 tab = unsignedp ? zext_optab : sext_optab;
4257 return tab->handlers[to_mode][from_mode].insn_code;
4260 /* Generate the body of an insn to extend Y (with mode MFROM)
4261 into X (with mode MTO).  Do zero-extension if UNSIGNEDP is nonzero.
   The caller must ensure the extension exists (see can_extend_p);
   no CODE_FOR_nothing check is made here.  */
4264 gen_extend_insn (rtx x, rtx y, enum machine_mode mto,
4265 enum machine_mode mfrom, int unsignedp)
4267 enum insn_code icode = can_extend_p (mto, mfrom, unsignedp);
4268 return GEN_FCN (icode) (x, y);
4271 /* can_fix_p and can_float_p say whether the target machine
4272 can directly convert a given fixed point type to
4273 a given floating point type, or vice versa.
4274 The returned value is the CODE_FOR_... value to use,
4275 or CODE_FOR_nothing if these modes cannot be directly converted.
4277 *TRUNCP_PTR is set to 1 if it is necessary to output
4278 an explicit FTRUNC insn before the fix insn; otherwise 0.  */
4280 static enum insn_code
4281 can_fix_p (enum machine_mode fixmode, enum machine_mode fltmode,
4282 int unsignedp, int *truncp_ptr)
4285 enum insn_code icode;
/* Prefer a combined fix-and-truncate pattern; no separate FTRUNC
   is needed in that case.  */
4287 tab = unsignedp ? ufixtrunc_optab : sfixtrunc_optab;
4288 icode = tab->handlers[fixmode][fltmode].insn_code;
4289 if (icode != CODE_FOR_nothing)
4295 /* FIXME: This requires a port to define both FIX and FTRUNC pattern
4296 for this to work.  We need to rework the fix* and ftrunc* patterns
4297 and documentation.  */
4298 tab = unsignedp ? ufix_optab : sfix_optab;
4299 icode = tab->handlers[fixmode][fltmode].insn_code;
4300 if (icode != CODE_FOR_nothing
4301 && ftrunc_optab->handlers[fltmode].insn_code != CODE_FOR_nothing)
4308 return CODE_FOR_nothing;
/* Return the insn code for converting integer mode FIXMODE to floating
   mode FLTMODE (zero-extending the integer if UNSIGNEDP), or
   CODE_FOR_nothing if the target has no such pattern.  */
4311 static enum insn_code
4312 can_float_p (enum machine_mode fltmode, enum machine_mode fixmode,
4317 tab = unsignedp ? ufloat_optab : sfloat_optab;
4318 return tab->handlers[fltmode][fixmode].insn_code;
4321 /* Generate code to convert FROM to floating point
4322 and store in TO.  FROM must be fixed point and not VOIDmode.
4323 UNSIGNEDP nonzero means regard FROM as unsigned.
4324 Normally this is done by correcting the final value
4325 if it is negative.  */
4328 expand_float (rtx to, rtx from, int unsignedp)
4330 enum insn_code icode;
4332 enum machine_mode fmode, imode;
4334 /* Crash now, because we won't be able to decide which mode to use.  */
4335 gcc_assert (GET_MODE (from) != VOIDmode);
4337 /* Look for an insn to do the conversion.  Do it in the specified
4338 modes if possible; otherwise convert either input, output or both to
4339 wider mode.  If the integer mode is wider than the mode of FROM,
4340 we can do the conversion signed even if the input is unsigned.  */
4342 for (fmode = GET_MODE (to); fmode != VOIDmode;
4343 fmode = GET_MODE_WIDER_MODE (fmode))
4344 for (imode = GET_MODE (from); imode != VOIDmode;
4345 imode = GET_MODE_WIDER_MODE (imode))
4347 int doing_unsigned = unsignedp;
/* Reject a wider float mode whose significand cannot represent the
   source exactly -- computing there and narrowing would double-round.  */
4349 if (fmode != GET_MODE (to)
4350 && significand_size (fmode) < GET_MODE_BITSIZE (GET_MODE (from)))
4353 icode = can_float_p (fmode, imode, unsignedp);
/* A signed float insn is usable for an unsigned source once the
   source has been zero-extended into a wider integer mode.  */
4354 if (icode == CODE_FOR_nothing && imode != GET_MODE (from) && unsignedp)
4355 icode = can_float_p (fmode, imode, 0), doing_unsigned = 0;
4357 if (icode != CODE_FOR_nothing)
4359 if (imode != GET_MODE (from))
4360 from = convert_to_mode (imode, from, unsignedp);
4362 if (fmode != GET_MODE (to))
4363 target = gen_reg_rtx (fmode);
4365 emit_unop_insn (icode, target, from,
4366 doing_unsigned ? UNSIGNED_FLOAT : FLOAT);
4369 convert_move (to, target, 0);
4374 /* Unsigned integer, and no way to convert directly.
4375 Convert as signed, then conditionally adjust the result.  */
4378 rtx label = gen_label_rtx ();
4380 REAL_VALUE_TYPE offset;
4383 from = force_not_mem (from);
4385 /* Look for a usable floating mode FMODE wider than the source and at
4386 least as wide as the target.  Using FMODE will avoid rounding woes
4387 with unsigned values greater than the signed maximum value.  */
4389 for (fmode = GET_MODE (to); fmode != VOIDmode;
4390 fmode = GET_MODE_WIDER_MODE (fmode))
4391 if (GET_MODE_BITSIZE (GET_MODE (from)) < GET_MODE_BITSIZE (fmode)
4392 && can_float_p (fmode, GET_MODE (from), 0) != CODE_FOR_nothing)
4395 if (fmode == VOIDmode)
4397 /* There is no such mode.  Pretend the target is wide enough.  */
4398 fmode = GET_MODE (to);
4400 /* Avoid double-rounding when TO is narrower than FROM.  */
4401 if ((significand_size (fmode) + 1)
4402 < GET_MODE_BITSIZE (GET_MODE (from)))
4405 rtx neglabel = gen_label_rtx ();
4407 /* Don't use TARGET if it isn't a register, is a hard register,
4408 or is the wrong mode.  */
4410 || REGNO (target) < FIRST_PSEUDO_REGISTER
4411 || GET_MODE (target) != fmode)
4412 target = gen_reg_rtx (fmode);
4414 imode = GET_MODE (from);
4415 do_pending_stack_adjust ();
4417 /* Test whether the sign bit is set.  */
4418 emit_cmp_and_jump_insns (from, const0_rtx, LT, NULL_RTX, imode,
4421 /* The sign bit is not set.  Convert as signed.  */
4422 expand_float (target, from, 0);
4423 emit_jump_insn (gen_jump (label));
4426 /* The sign bit is set.
4427 Convert to a usable (positive signed) value by shifting right
4428 one bit, while remembering if a nonzero bit was shifted
4429 out; i.e., compute  (from & 1) | (from >> 1).  */
4431 emit_label (neglabel);
4432 temp = expand_binop (imode, and_optab, from, const1_rtx,
4433 NULL_RTX, 1, OPTAB_LIB_WIDEN);
4434 temp1 = expand_shift (RSHIFT_EXPR, imode, from, integer_one_node,
4436 temp = expand_binop (imode, ior_optab, temp, temp1, temp, 1,
4438 expand_float (target, temp, 0);
4440 /* Multiply by 2 to undo the shift above.  */
4441 temp = expand_binop (fmode, add_optab, target, target,
4442 target, 0, OPTAB_LIB_WIDEN);
4444 emit_move_insn (target, temp);
4446 do_pending_stack_adjust ();
4452 /* If we are about to do some arithmetic to correct for an
4453 unsigned operand, do it in a pseudo-register.  */
4455 if (GET_MODE (to) != fmode
4456 || !REG_P (to) || REGNO (to) < FIRST_PSEUDO_REGISTER)
4457 target = gen_reg_rtx (fmode);
4459 /* Convert as signed integer to floating.  */
4460 expand_float (target, from, 0);
4462 /* If FROM is negative (and therefore TO is negative),
4463 correct its value by 2**bitwidth.  */
4465 do_pending_stack_adjust ();
4466 emit_cmp_and_jump_insns (from, const0_rtx, GE, NULL_RTX, GET_MODE (from),
4470 real_2expN (&offset, GET_MODE_BITSIZE (GET_MODE (from)));
4471 temp = expand_binop (fmode, add_optab, target,
4472 CONST_DOUBLE_FROM_REAL_VALUE (offset, fmode),
4473 target, 0, OPTAB_LIB_WIDEN);
4475 emit_move_insn (target, temp);
4477 do_pending_stack_adjust ();
4482 /* No hardware instruction available; call a library routine.  */
4487 convert_optab tab = unsignedp ? ufloat_optab : sfloat_optab;
/* Library float conversions exist only for SImode and wider.  */
4489 if (GET_MODE_SIZE (GET_MODE (from)) < GET_MODE_SIZE (SImode))
4490 from = convert_to_mode (SImode, from, unsignedp);
4493 from = force_not_mem (from);
4495 libfunc = tab->handlers[GET_MODE (to)][GET_MODE (from)].libfunc;
4496 gcc_assert (libfunc);
4500 value = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
4501 GET_MODE (to), 1, from,
4503 insns = get_insns ();
4506 emit_libcall_block (insns, target, value,
4507 gen_rtx_FLOAT (GET_MODE (to), from));
4512 /* Copy result to requested destination
4513 if we have been computing in a temp location.  */
4517 if (GET_MODE (target) == GET_MODE (to))
4518 emit_move_insn (to, target);
4520 convert_move (to, target, 0);
4524 /* Generate code to convert FROM to fixed point and store in TO.  FROM
4525 must be floating point.  */
4528 expand_fix (rtx to, rtx from, int unsignedp)
4530 enum insn_code icode;
4532 enum machine_mode fmode, imode;
4535 /* We first try to find a pair of modes, one real and one integer, at
4536 least as wide as FROM and TO, respectively, in which we can open-code
4537 this conversion.  If the integer mode is wider than the mode of TO,
4538 we can do the conversion either signed or unsigned.  */
4540 for (fmode = GET_MODE (from); fmode != VOIDmode;
4541 fmode = GET_MODE_WIDER_MODE (fmode))
4542 for (imode = GET_MODE (to); imode != VOIDmode;
4543 imode = GET_MODE_WIDER_MODE (imode))
4545 int doing_unsigned = unsignedp;
4547 icode = can_fix_p (imode, fmode, unsignedp, &must_trunc);
/* With a wider destination, a signed fix works for unsigned too.  */
4548 if (icode == CODE_FOR_nothing && imode != GET_MODE (to) && unsignedp)
4549 icode = can_fix_p (imode, fmode, 0, &must_trunc), doing_unsigned = 0;
4551 if (icode != CODE_FOR_nothing)
4553 if (fmode != GET_MODE (from))
4554 from = convert_to_mode (fmode, from, 0);
/* can_fix_p requested an explicit truncation before the fix insn.  */
4558 rtx temp = gen_reg_rtx (GET_MODE (from));
4559 from = expand_unop (GET_MODE (from), ftrunc_optab, from,
4563 if (imode != GET_MODE (to))
4564 target = gen_reg_rtx (imode);
4566 emit_unop_insn (icode, target, from,
4567 doing_unsigned ? UNSIGNED_FIX : FIX);
4569 convert_move (to, target, unsignedp);
4574 /* For an unsigned conversion, there is one more way to do it.
4575 If we have a signed conversion, we generate code that compares
4576 the real value to the largest representable positive number.  If it
4577 is smaller, the conversion is done normally.  Otherwise, subtract
4578 one plus the highest signed number, convert, and add it back.
4580 We only need to check all real modes, since we know we didn't find
4581 anything with a wider integer mode.
4583 This code used to extend FP value into mode wider than the destination.
4584 This is not needed.  Consider, for instance conversion from SFmode
4587 The hot path through the code is dealing with inputs smaller than 2^63
4588 and doing just the conversion, so there are no bits to lose.
4590 In the other path we know the value is positive in the range 2^63..2^64-1
4591 inclusive.  (as for other input overflow happens and result is undefined)
4592 So we know that the most important bit set in mantissa corresponds to
4593 2^63.  The subtraction of 2^63 should not generate any rounding as it
4594 simply clears out that bit.  The rest is trivial.  */
4596 if (unsignedp && GET_MODE_BITSIZE (GET_MODE (to)) <= HOST_BITS_PER_WIDE_INT)
4597 for (fmode = GET_MODE (from); fmode != VOIDmode;
4598 fmode = GET_MODE_WIDER_MODE (fmode))
4599 if (CODE_FOR_nothing != can_fix_p (GET_MODE (to), fmode, 0,
4603 REAL_VALUE_TYPE offset;
4604 rtx limit, lab1, lab2, insn;
/* LIMIT is 2^(N-1), the smallest value that does not fit in the
   signed conversion.  */
4606 bitsize = GET_MODE_BITSIZE (GET_MODE (to));
4607 real_2expN (&offset, bitsize - 1);
4608 limit = CONST_DOUBLE_FROM_REAL_VALUE (offset, fmode);
4609 lab1 = gen_label_rtx ();
4610 lab2 = gen_label_rtx ();
4613 from = force_not_mem (from);
4615 if (fmode != GET_MODE (from))
4616 from = convert_to_mode (fmode, from, 0);
4618 /* See if we need to do the subtraction.  */
4619 do_pending_stack_adjust ();
4620 emit_cmp_and_jump_insns (from, limit, GE, NULL_RTX, GET_MODE (from),
4623 /* If not, do the signed "fix" and branch around fixup code.  */
4624 expand_fix (to, from, 0);
4625 emit_jump_insn (gen_jump (lab2));
4628 /* Otherwise, subtract 2**(N-1), convert to signed number,
4629 then add 2**(N-1).  Do the addition using XOR since this
4630 will often generate better code.  */
4632 target = expand_binop (GET_MODE (from), sub_optab, from, limit,
4633 NULL_RTX, 0, OPTAB_LIB_WIDEN);
4634 expand_fix (to, target, 0);
/* XOR with 2^(N-1) flips the sign bit back on, which equals adding
   2^(N-1) because that bit is known to be clear after the fix.  */
4635 target = expand_binop (GET_MODE (to), xor_optab, to,
4637 ((HOST_WIDE_INT) 1 << (bitsize - 1),
4639 to, 1, OPTAB_LIB_WIDEN);
4642 emit_move_insn (to, target);
4646 if (mov_optab->handlers[(int) GET_MODE (to)].insn_code
4647 != CODE_FOR_nothing)
4649 /* Make a place for a REG_NOTE and add it.  */
4650 insn = emit_move_insn (to, to);
4651 set_unique_reg_note (insn,
4653 gen_rtx_fmt_e (UNSIGNED_FIX,
4661 /* We can't do it with an insn, so use a library call.  But first ensure
4662 that the mode of TO is at least as wide as SImode, since those are the
4663 only library calls we know about.  */
4665 if (GET_MODE_SIZE (GET_MODE (to)) < GET_MODE_SIZE (SImode))
/* Fix into an SImode temporary, then narrow into TO (in elided code).  */
4667 target = gen_reg_rtx (SImode);
4669 expand_fix (target, from, unsignedp);
4677 convert_optab tab = unsignedp ? ufix_optab : sfix_optab;
4678 libfunc = tab->handlers[GET_MODE (to)][GET_MODE (from)].libfunc;
4679 gcc_assert (libfunc);
4682 from = force_not_mem (from);
4686 value = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
4687 GET_MODE (to), 1, from,
4689 insns = get_insns ();
4692 emit_libcall_block (insns, target, value,
4693 gen_rtx_fmt_e (unsignedp ? UNSIGNED_FIX : FIX,
4694 GET_MODE (to), from));
4699 if (GET_MODE (to) == GET_MODE (target))
4700 emit_move_insn (to, target);
4702 convert_move (to, target, 0);
4706 /* Report whether we have an instruction to perform the operation
4707 specified by CODE on operands of mode MODE. */
/* True iff CODE maps to an optab (via code_to_optab) whose handler for
   MODE has a real insn code, i.e. not CODE_FOR_nothing.
   NOTE(review): this listing is missing interleaved lines (return type,
   braces) — do not edit code here without the full file.  */
4709 have_insn_for (enum rtx_code code, enum machine_mode mode)
4711 return (code_to_optab[(int) code] != 0
4712 && (code_to_optab[(int) code]->handlers[(int) mode].insn_code
4713 != CODE_FOR_nothing));
4716 /* Create a blank optab. */
/* Allocate a fresh optab from GC storage and mark every machine mode
   as unsupported: insn_code = CODE_FOR_nothing and no libfunc.  */
4721 optab op = ggc_alloc (sizeof (struct optab));
4722 for (i = 0; i < NUM_MACHINE_MODES; i++)
4724 op->handlers[i].insn_code = CODE_FOR_nothing;
4725 op->handlers[i].libfunc = 0;
/* Allocate a blank conversion optab: a 2-D (to-mode x from-mode) table,
   every entry initialized to "no insn, no libfunc".  */
4731 static convert_optab
4732 new_convert_optab (void)
4735 convert_optab op = ggc_alloc (sizeof (struct convert_optab));
4736 for (i = 0; i < NUM_MACHINE_MODES; i++)
4737 for (j = 0; j < NUM_MACHINE_MODES; j++)
4739 op->handlers[i][j].insn_code = CODE_FOR_nothing;
4740 op->handlers[i][j].libfunc = 0;
4745 /* Same, but fill in its code as CODE, and write it into the
4746 code_to_optab table. */
4748 init_optab (enum rtx_code code)
4750 optab op = new_optab ();
/* Record the rtx-code -> optab mapping consulted by have_insn_for.  */
4752 code_to_optab[(int) code] = op;
4756 /* Same, but fill in its code as CODE, and do _not_ write it into
4757 the code_to_optab table. */
/* Used for the trapping ("v" = overflow-checking) variants, which must
   not shadow the non-trapping optab for the same rtx code.  */
4759 init_optabv (enum rtx_code code)
4761 optab op = new_optab ();
4766 /* Conversion optabs never go in the code_to_optab table. */
4767 static inline convert_optab
4768 init_convert_optab (enum rtx_code code)
4770 convert_optab op = new_convert_optab ();
4775 /* Initialize the libfunc fields of an entire group of entries in some
4776 optab. Each entry is set equal to a string consisting of a leading
4777 pair of underscores followed by a generic operation name followed by
4778 a mode name (downshifted to lowercase) followed by a single character
4779 representing the number of operands for the given operation (which is
4780 usually one of the characters '2', '3', or '4').
4782 OPTABLE is the table in which libfunc fields are to be initialized.
4783 FIRST_MODE is the first machine mode index in the given optab to
4785 LAST_MODE is the last machine mode index in the given optab to
4787 OPNAME is the generic (string) name of the operation.
4788 SUFFIX is the character which specifies the number of operands for
4789 the given generic operation.
/* Synthesizes names of the form "__<opname><mode><suffix>",
   e.g. "__addsi3", and registers each via init_one_libfunc.  */
4793 init_libfuncs (optab optable, int first_mode, int last_mode,
4794 const char *opname, int suffix)
4797 unsigned opname_len = strlen (opname);
4799 for (mode = first_mode; (int) mode <= (int) last_mode;
4800 mode = (enum machine_mode) ((int) mode + 1))
4802 const char *mname = GET_MODE_NAME (mode);
4803 unsigned mname_len = strlen (mname);
/* Buffer: "__" + opname + mode name + suffix char + NUL.  */
4804 char *libfunc_name = alloca (2 + opname_len + mname_len + 1 + 1);
4811 for (q = opname; *q; )
/* Mode names are stored upper-case; libfunc names are lower-case.  */
4813 for (q = mname; *q; q++)
4814 *p++ = TOLOWER (*q);
4818 optable->handlers[(int) mode].libfunc
4819 = init_one_libfunc (ggc_alloc_string (libfunc_name, p - libfunc_name));
4823 /* Initialize the libfunc fields of an entire group of entries in some
4824 optab which correspond to all integer mode operations. The parameters
4825 have the same meaning as similarly named ones for the `init_libfuncs'
4826 routine. (See above). */
4829 init_integral_libfuncs (optab optable, const char *opname, int suffix)
/* Cover modes from word_mode up to the wider of 2*BITS_PER_WORD and
   LONG_LONG_TYPE_SIZE, so `long long' always gets a libfunc.  */
4831 int maxsize = 2*BITS_PER_WORD;
4832 if (maxsize < LONG_LONG_TYPE_SIZE)
4833 maxsize = LONG_LONG_TYPE_SIZE;
4834 init_libfuncs (optable, word_mode,
4835 mode_for_size (maxsize, MODE_INT, 0),
4839 /* Initialize the libfunc fields of an entire group of entries in some
4840 optab which correspond to all real mode operations. The parameters
4841 have the same meaning as similarly named ones for the `init_libfuncs'
4842 routine. (See above). */
4845 init_floating_libfuncs (optab optable, const char *opname, int suffix)
4847 init_libfuncs (optable, MIN_MODE_FLOAT, MAX_MODE_FLOAT, opname, suffix);
4850 /* Initialize the libfunc fields of an entire group of entries of an
4851 inter-mode-class conversion optab. The string formation rules are
4852 similar to the ones for init_libfuncs, above, but instead of having
4853 a mode name and an operand count these functions have two mode names
4854 and no operand count. */
/* Builds names like "__<opname><frommode><tomode>", e.g. "__floatsisf",
   for every (from, to) pair drawn from the two mode classes.  */
4856 init_interclass_conv_libfuncs (convert_optab tab, const char *opname,
4857 enum mode_class from_class,
4858 enum mode_class to_class)
4860 enum machine_mode first_from_mode = GET_CLASS_NARROWEST_MODE (from_class);
4861 enum machine_mode first_to_mode = GET_CLASS_NARROWEST_MODE (to_class);
4862 size_t opname_len = strlen (opname);
4863 size_t max_mname_len = 0;
4865 enum machine_mode fmode, tmode;
4866 const char *fname, *tname;
4868 char *libfunc_name, *suffix;
/* First pass: find the longest mode name so one alloca buffer fits
   every generated libfunc name.  */
4871 for (fmode = first_from_mode;
4873 fmode = GET_MODE_WIDER_MODE (fmode))
4874 max_mname_len = MAX (max_mname_len, strlen (GET_MODE_NAME (fmode)));
4876 for (tmode = first_to_mode;
4878 tmode = GET_MODE_WIDER_MODE (tmode))
4879 max_mname_len = MAX (max_mname_len, strlen (GET_MODE_NAME (tmode)));
4881 libfunc_name = alloca (2 + opname_len + 2*max_mname_len + 1 + 1);
4882 libfunc_name[0] = '_';
4883 libfunc_name[1] = '_';
4884 memcpy (&libfunc_name[2], opname, opname_len);
/* SUFFIX marks where the per-pair mode names get appended; the
   "__<opname>" prefix is written only once.  */
4885 suffix = libfunc_name + opname_len + 2;
4887 for (fmode = first_from_mode; fmode != VOIDmode;
4888 fmode = GET_MODE_WIDER_MODE (fmode))
4889 for (tmode = first_to_mode; tmode != VOIDmode;
4890 tmode = GET_MODE_WIDER_MODE (tmode))
4892 fname = GET_MODE_NAME (fmode);
4893 tname = GET_MODE_NAME (tmode);
4896 for (q = fname; *q; p++, q++)
4898 for (q = tname; *q; p++, q++)
/* Table is indexed [to][from].  */
4903 tab->handlers[tmode][fmode].libfunc
4904 = init_one_libfunc (ggc_alloc_string (libfunc_name,
4909 /* Initialize the libfunc fields of an entire group of entries of an
4910 intra-mode-class conversion optab. The string formation rules are
4911 similar to the ones for init_libfunc, above. WIDENING says whether
4912 the optab goes from narrow to wide modes or vice versa. These functions
4913 have two mode names _and_ an operand count. */
4915 init_intraclass_conv_libfuncs (convert_optab tab, const char *opname,
4916 enum mode_class class, bool widening)
4918 enum machine_mode first_mode = GET_CLASS_NARROWEST_MODE (class);
4919 size_t opname_len = strlen (opname);
4920 size_t max_mname_len = 0;
4922 enum machine_mode nmode, wmode;
4923 const char *nname, *wname;
4925 char *libfunc_name, *suffix;
/* Find the longest mode name so the alloca buffer below is large
   enough for any (narrow, wide) pair.  */
4928 for (nmode = first_mode; nmode != VOIDmode;
4929 nmode = GET_MODE_WIDER_MODE (nmode))
4930 max_mname_len = MAX (max_mname_len, strlen (GET_MODE_NAME (nmode)));
4932 libfunc_name = alloca (2 + opname_len + 2*max_mname_len + 1 + 1);
4933 libfunc_name[0] = '_';
4934 libfunc_name[1] = '_';
4935 memcpy (&libfunc_name[2], opname, opname_len);
4936 suffix = libfunc_name + opname_len + 2;
/* Iterate over all pairs where WMODE is strictly wider than NMODE.  */
4938 for (nmode = first_mode; nmode != VOIDmode;
4939 nmode = GET_MODE_WIDER_MODE (nmode))
4940 for (wmode = GET_MODE_WIDER_MODE (nmode); wmode != VOIDmode;
4941 wmode = GET_MODE_WIDER_MODE (wmode))
4943 nname = GET_MODE_NAME (nmode);
4944 wname = GET_MODE_NAME (wmode);
/* Source mode name comes first in the symbol, destination second;
   WIDENING selects which of the pair is the source.  */
4947 for (q = widening ? nname : wname; *q; p++, q++)
4949 for (q = widening ? wname : nname; *q; p++, q++)
4955 tab->handlers[widening ? wmode : nmode]
4956 [widening ? nmode : wmode].libfunc
4957 = init_one_libfunc (ggc_alloc_string (libfunc_name,
/* Build a SYMBOL_REF rtx for the library function NAME, running it
   through the target's section-info encoding so the symbol carries the
   right target-specific flags.  */
4964 init_one_libfunc (const char *name)
4968 /* Create a FUNCTION_DECL that can be passed to
4969 targetm.encode_section_info. */
4970 /* ??? We don't have any type information except for this is
4971 a function. Pretend this is "int foo()". */
4972 tree decl = build_decl (FUNCTION_DECL, get_identifier (name),
4973 build_function_type (integer_type_node, NULL_TREE));
4974 DECL_ARTIFICIAL (decl) = 1;
4975 DECL_EXTERNAL (decl) = 1;
4976 TREE_PUBLIC (decl) = 1;
4978 symbol = XEXP (DECL_RTL (decl), 0);
4980 /* Zap the nonsensical SYMBOL_REF_DECL for this. What we're left with
4981 are the flags assigned by targetm.encode_section_info. */
4982 SYMBOL_REF_DECL (symbol) = 0;
4987 /* Call this to reset the function entry for one optab (OPTABLE) in mode
4988 MODE to NAME, which should be either 0 or a string constant. */
4990 set_optab_libfunc (optab optable, enum machine_mode mode, const char *name)
4993 optable->handlers[mode].libfunc = init_one_libfunc (name);
/* A null NAME clears the entry, disabling the libcall fallback.  */
4995 optable->handlers[mode].libfunc = 0;
4998 /* Call this to reset the function entry for one conversion optab
4999 (OPTABLE) from mode FMODE to mode TMODE to NAME, which should be
5000 either 0 or a string constant. */
5002 set_conv_libfunc (convert_optab optable, enum machine_mode tmode,
5003 enum machine_mode fmode, const char *name)
5006 optable->handlers[tmode][fmode].libfunc = init_one_libfunc (name);
/* A null NAME clears the entry, disabling the libcall fallback.  */
5008 optable->handlers[tmode][fmode].libfunc = 0;
5011 /* Call this once to initialize the contents of the optabs
5012 appropriately for the current target machine. */
/* NOTE(review): this listing is missing interleaved lines; the overall
   order (clear tables -> create optabs -> register libfunc names ->
   target hook) is significant and must not be rearranged.  */
5019 /* Start by initializing all tables to contain CODE_FOR_nothing. */
5021 for (i = 0; i < NUM_RTX_CODE; i++)
5022 setcc_gen_code[i] = CODE_FOR_nothing;
5024 #ifdef HAVE_conditional_move
5025 for (i = 0; i < NUM_MACHINE_MODES; i++)
5026 movcc_gen_code[i] = CODE_FOR_nothing;
5029 for (i = 0; i < NUM_MACHINE_MODES; i++)
5031 vcond_gen_code[i] = CODE_FOR_nothing;
5032 vcondu_gen_code[i] = CODE_FOR_nothing;
/* Create one optab per operation; the rtx code (or UNKNOWN) keys the
   code_to_optab reverse map for the non-"v" variants.  */
5035 add_optab = init_optab (PLUS);
5036 addv_optab = init_optabv (PLUS);
5037 sub_optab = init_optab (MINUS);
5038 subv_optab = init_optabv (MINUS);
5039 smul_optab = init_optab (MULT);
5040 smulv_optab = init_optabv (MULT);
5041 smul_highpart_optab = init_optab (UNKNOWN);
5042 umul_highpart_optab = init_optab (UNKNOWN);
5043 smul_widen_optab = init_optab (UNKNOWN);
5044 umul_widen_optab = init_optab (UNKNOWN);
5045 sdiv_optab = init_optab (DIV);
5046 sdivv_optab = init_optabv (DIV);
5047 sdivmod_optab = init_optab (UNKNOWN);
5048 udiv_optab = init_optab (UDIV);
5049 udivmod_optab = init_optab (UNKNOWN);
5050 smod_optab = init_optab (MOD);
5051 umod_optab = init_optab (UMOD);
5052 fmod_optab = init_optab (UNKNOWN);
5053 drem_optab = init_optab (UNKNOWN);
5054 ftrunc_optab = init_optab (UNKNOWN);
5055 and_optab = init_optab (AND);
5056 ior_optab = init_optab (IOR);
5057 xor_optab = init_optab (XOR);
5058 ashl_optab = init_optab (ASHIFT);
5059 ashr_optab = init_optab (ASHIFTRT);
5060 lshr_optab = init_optab (LSHIFTRT);
5061 rotl_optab = init_optab (ROTATE);
5062 rotr_optab = init_optab (ROTATERT);
5063 smin_optab = init_optab (SMIN);
5064 smax_optab = init_optab (SMAX);
5065 umin_optab = init_optab (UMIN);
5066 umax_optab = init_optab (UMAX);
5067 pow_optab = init_optab (UNKNOWN);
5068 atan2_optab = init_optab (UNKNOWN);
5070 /* These three have codes assigned exclusively for the sake of
5072 mov_optab = init_optab (SET);
5073 movstrict_optab = init_optab (STRICT_LOW_PART);
5074 cmp_optab = init_optab (COMPARE);
5076 ucmp_optab = init_optab (UNKNOWN);
5077 tst_optab = init_optab (UNKNOWN);
5079 eq_optab = init_optab (EQ);
5080 ne_optab = init_optab (NE);
5081 gt_optab = init_optab (GT);
5082 ge_optab = init_optab (GE);
5083 lt_optab = init_optab (LT);
5084 le_optab = init_optab (LE);
5085 unord_optab = init_optab (UNORDERED);
5087 neg_optab = init_optab (NEG);
5088 negv_optab = init_optabv (NEG);
5089 abs_optab = init_optab (ABS);
5090 absv_optab = init_optabv (ABS);
5091 addcc_optab = init_optab (UNKNOWN);
5092 one_cmpl_optab = init_optab (NOT);
5093 ffs_optab = init_optab (FFS);
5094 clz_optab = init_optab (CLZ);
5095 ctz_optab = init_optab (CTZ);
5096 popcount_optab = init_optab (POPCOUNT);
5097 parity_optab = init_optab (PARITY);
5098 sqrt_optab = init_optab (SQRT);
5099 floor_optab = init_optab (UNKNOWN);
5100 lfloor_optab = init_optab (UNKNOWN);
5101 ceil_optab = init_optab (UNKNOWN);
5102 lceil_optab = init_optab (UNKNOWN);
5103 round_optab = init_optab (UNKNOWN);
5104 btrunc_optab = init_optab (UNKNOWN);
5105 nearbyint_optab = init_optab (UNKNOWN);
5106 rint_optab = init_optab (UNKNOWN);
5107 lrint_optab = init_optab (UNKNOWN);
5108 sincos_optab = init_optab (UNKNOWN);
5109 sin_optab = init_optab (UNKNOWN);
5110 asin_optab = init_optab (UNKNOWN);
5111 cos_optab = init_optab (UNKNOWN);
5112 acos_optab = init_optab (UNKNOWN);
5113 exp_optab = init_optab (UNKNOWN);
5114 exp10_optab = init_optab (UNKNOWN);
5115 exp2_optab = init_optab (UNKNOWN);
5116 expm1_optab = init_optab (UNKNOWN);
5117 ldexp_optab = init_optab (UNKNOWN);
5118 logb_optab = init_optab (UNKNOWN);
5119 ilogb_optab = init_optab (UNKNOWN);
5120 log_optab = init_optab (UNKNOWN);
5121 log10_optab = init_optab (UNKNOWN);
5122 log2_optab = init_optab (UNKNOWN);
5123 log1p_optab = init_optab (UNKNOWN);
5124 tan_optab = init_optab (UNKNOWN);
5125 atan_optab = init_optab (UNKNOWN);
5126 copysign_optab = init_optab (UNKNOWN);
5128 strlen_optab = init_optab (UNKNOWN);
5129 cbranch_optab = init_optab (UNKNOWN);
5130 cmov_optab = init_optab (UNKNOWN);
5131 cstore_optab = init_optab (UNKNOWN);
5132 push_optab = init_optab (UNKNOWN);
/* Vector reduction and permutation optabs.  */
5134 reduc_smax_optab = init_optab (UNKNOWN);
5135 reduc_umax_optab = init_optab (UNKNOWN);
5136 reduc_smin_optab = init_optab (UNKNOWN);
5137 reduc_umin_optab = init_optab (UNKNOWN);
5138 reduc_splus_optab = init_optab (UNKNOWN);
5139 reduc_uplus_optab = init_optab (UNKNOWN);
5141 vec_extract_optab = init_optab (UNKNOWN);
5142 vec_set_optab = init_optab (UNKNOWN);
5143 vec_init_optab = init_optab (UNKNOWN);
5144 vec_shl_optab = init_optab (UNKNOWN);
5145 vec_shr_optab = init_optab (UNKNOWN);
5146 vec_realign_load_optab = init_optab (UNKNOWN);
5147 movmisalign_optab = init_optab (UNKNOWN);
5149 powi_optab = init_optab (UNKNOWN);
/* Conversion (two-mode) optabs.  */
5152 sext_optab = init_convert_optab (SIGN_EXTEND);
5153 zext_optab = init_convert_optab (ZERO_EXTEND);
5154 trunc_optab = init_convert_optab (TRUNCATE);
5155 sfix_optab = init_convert_optab (FIX);
5156 ufix_optab = init_convert_optab (UNSIGNED_FIX);
5157 sfixtrunc_optab = init_convert_optab (UNKNOWN);
5158 ufixtrunc_optab = init_convert_optab (UNKNOWN);
5159 sfloat_optab = init_convert_optab (FLOAT);
5160 ufloat_optab = init_convert_optab (UNSIGNED_FLOAT);
/* Per-mode insn-code tables (block ops and __sync builtins).  */
5162 for (i = 0; i < NUM_MACHINE_MODES; i++)
5164 movmem_optab[i] = CODE_FOR_nothing;
5165 clrmem_optab[i] = CODE_FOR_nothing;
5166 cmpstr_optab[i] = CODE_FOR_nothing;
5167 cmpmem_optab[i] = CODE_FOR_nothing;
5169 sync_add_optab[i] = CODE_FOR_nothing;
5170 sync_sub_optab[i] = CODE_FOR_nothing;
5171 sync_ior_optab[i] = CODE_FOR_nothing;
5172 sync_and_optab[i] = CODE_FOR_nothing;
5173 sync_xor_optab[i] = CODE_FOR_nothing;
5174 sync_nand_optab[i] = CODE_FOR_nothing;
5175 sync_old_add_optab[i] = CODE_FOR_nothing;
5176 sync_old_sub_optab[i] = CODE_FOR_nothing;
5177 sync_old_ior_optab[i] = CODE_FOR_nothing;
5178 sync_old_and_optab[i] = CODE_FOR_nothing;
5179 sync_old_xor_optab[i] = CODE_FOR_nothing;
5180 sync_old_nand_optab[i] = CODE_FOR_nothing;
5181 sync_new_add_optab[i] = CODE_FOR_nothing;
5182 sync_new_sub_optab[i] = CODE_FOR_nothing;
5183 sync_new_ior_optab[i] = CODE_FOR_nothing;
5184 sync_new_and_optab[i] = CODE_FOR_nothing;
5185 sync_new_xor_optab[i] = CODE_FOR_nothing;
5186 sync_new_nand_optab[i] = CODE_FOR_nothing;
5187 sync_compare_and_swap[i] = CODE_FOR_nothing;
5188 sync_compare_and_swap_cc[i] = CODE_FOR_nothing;
5189 sync_lock_test_and_set[i] = CODE_FOR_nothing;
5190 sync_lock_release[i] = CODE_FOR_nothing;
5192 #ifdef HAVE_SECONDARY_RELOADS
5193 reload_in_optab[i] = reload_out_optab[i] = CODE_FOR_nothing;
5197 /* Fill in the optabs with the insns we support. */
5200 /* Initialize the optabs with the names of the library functions. */
5201 init_integral_libfuncs (add_optab, "add", '3');
5202 init_floating_libfuncs (add_optab, "add", '3');
5203 init_integral_libfuncs (addv_optab, "addv", '3');
5204 init_floating_libfuncs (addv_optab, "add", '3');
5205 init_integral_libfuncs (sub_optab, "sub", '3');
5206 init_floating_libfuncs (sub_optab, "sub", '3');
5207 init_integral_libfuncs (subv_optab, "subv", '3');
5208 init_floating_libfuncs (subv_optab, "sub", '3');
5209 init_integral_libfuncs (smul_optab, "mul", '3');
5210 init_floating_libfuncs (smul_optab, "mul", '3');
5211 init_integral_libfuncs (smulv_optab, "mulv", '3');
5212 init_floating_libfuncs (smulv_optab, "mul", '3');
5213 init_integral_libfuncs (sdiv_optab, "div", '3');
5214 init_floating_libfuncs (sdiv_optab, "div", '3');
5215 init_integral_libfuncs (sdivv_optab, "divv", '3');
5216 init_integral_libfuncs (udiv_optab, "udiv", '3');
5217 init_integral_libfuncs (sdivmod_optab, "divmod", '4');
5218 init_integral_libfuncs (udivmod_optab, "udivmod", '4');
5219 init_integral_libfuncs (smod_optab, "mod", '3');
5220 init_integral_libfuncs (umod_optab, "umod", '3');
5221 init_floating_libfuncs (ftrunc_optab, "ftrunc", '2');
5222 init_integral_libfuncs (and_optab, "and", '3');
5223 init_integral_libfuncs (ior_optab, "ior", '3');
5224 init_integral_libfuncs (xor_optab, "xor", '3');
5225 init_integral_libfuncs (ashl_optab, "ashl", '3');
5226 init_integral_libfuncs (ashr_optab, "ashr", '3');
5227 init_integral_libfuncs (lshr_optab, "lshr", '3');
5228 init_integral_libfuncs (smin_optab, "min", '3');
5229 init_floating_libfuncs (smin_optab, "min", '3');
5230 init_integral_libfuncs (smax_optab, "max", '3');
5231 init_floating_libfuncs (smax_optab, "max", '3');
5232 init_integral_libfuncs (umin_optab, "umin", '3');
5233 init_integral_libfuncs (umax_optab, "umax", '3');
5234 init_integral_libfuncs (neg_optab, "neg", '2');
5235 init_floating_libfuncs (neg_optab, "neg", '2');
5236 init_integral_libfuncs (negv_optab, "negv", '2');
5237 init_floating_libfuncs (negv_optab, "neg", '2');
5238 init_integral_libfuncs (one_cmpl_optab, "one_cmpl", '2');
5239 init_integral_libfuncs (ffs_optab, "ffs", '2');
5240 init_integral_libfuncs (clz_optab, "clz", '2');
5241 init_integral_libfuncs (ctz_optab, "ctz", '2');
5242 init_integral_libfuncs (popcount_optab, "popcount", '2');
5243 init_integral_libfuncs (parity_optab, "parity", '2');
5245 /* Comparison libcalls for integers MUST come in pairs,
5247 init_integral_libfuncs (cmp_optab, "cmp", '2');
5248 init_integral_libfuncs (ucmp_optab, "ucmp", '2');
5249 init_floating_libfuncs (cmp_optab, "cmp", '2');
5251 /* EQ etc are floating point only. */
5252 init_floating_libfuncs (eq_optab, "eq", '2');
5253 init_floating_libfuncs (ne_optab, "ne", '2');
5254 init_floating_libfuncs (gt_optab, "gt", '2');
5255 init_floating_libfuncs (ge_optab, "ge", '2');
5256 init_floating_libfuncs (lt_optab, "lt", '2');
5257 init_floating_libfuncs (le_optab, "le", '2');
5258 init_floating_libfuncs (unord_optab, "unord", '2');
5260 init_floating_libfuncs (powi_optab, "powi", '2');
/* Conversion libfuncs, e.g. "__floatsisf" / "__fixsfsi".  */
5263 init_interclass_conv_libfuncs (sfloat_optab, "float",
5264 MODE_INT, MODE_FLOAT);
5265 init_interclass_conv_libfuncs (sfix_optab, "fix",
5266 MODE_FLOAT, MODE_INT);
5267 init_interclass_conv_libfuncs (ufix_optab, "fixuns",
5268 MODE_FLOAT, MODE_INT);
5270 /* sext_optab is also used for FLOAT_EXTEND. */
5271 init_intraclass_conv_libfuncs (sext_optab, "extend", MODE_FLOAT, true);
5272 init_intraclass_conv_libfuncs (trunc_optab, "trunc", MODE_FLOAT, false);
5274 /* Use cabs for double complex abs, since systems generally have cabs.
5275 Don't define any libcall for float complex, so that cabs will be used. */
5276 if (complex_double_type_node)
5277 abs_optab->handlers[TYPE_MODE (complex_double_type_node)].libfunc
5278 = init_one_libfunc ("cabs");
5280 /* The ffs function operates on `int'. */
5281 ffs_optab->handlers[(int) mode_for_size (INT_TYPE_SIZE, MODE_INT, 0)].libfunc
5282 = init_one_libfunc ("ffs");
/* Miscellaneous runtime-support libfuncs.  */
5284 abort_libfunc = init_one_libfunc ("abort");
5285 memcpy_libfunc = init_one_libfunc ("memcpy");
5286 memmove_libfunc = init_one_libfunc ("memmove");
5287 memcmp_libfunc = init_one_libfunc ("memcmp");
5288 memset_libfunc = init_one_libfunc ("memset");
5289 setbits_libfunc = init_one_libfunc ("__setbits");
5291 unwind_resume_libfunc = init_one_libfunc (USING_SJLJ_EXCEPTIONS
5292 ? "_Unwind_SjLj_Resume"
5293 : "_Unwind_Resume");
5294 #ifndef DONT_USE_BUILTIN_SETJMP
5295 setjmp_libfunc = init_one_libfunc ("__builtin_setjmp");
5296 longjmp_libfunc = init_one_libfunc ("__builtin_longjmp");
5298 setjmp_libfunc = init_one_libfunc ("setjmp");
5299 longjmp_libfunc = init_one_libfunc ("longjmp");
5301 unwind_sjlj_register_libfunc = init_one_libfunc ("_Unwind_SjLj_Register");
5302 unwind_sjlj_unregister_libfunc
5303 = init_one_libfunc ("_Unwind_SjLj_Unregister");
5305 /* For function entry/exit instrumentation. */
5306 profile_function_entry_libfunc
5307 = init_one_libfunc ("__cyg_profile_func_enter");
5308 profile_function_exit_libfunc
5309 = init_one_libfunc ("__cyg_profile_func_exit");
5311 gcov_flush_libfunc = init_one_libfunc ("__gcov_flush");
5313 if (HAVE_conditional_trap)
5314 trap_rtx = gen_rtx_fmt_ee (EQ, VOIDmode, NULL_RTX, NULL_RTX);
5316 /* Allow the target to add more libcalls or rename some, etc. */
5317 targetm.init_libfuncs ();
5322 /* Print information about the current contents of the optabs on
/* Debug dump of every registered libfunc, written to stderr.
   Fix: both gcc_assert calls below used `=' (assignment) instead of
   `==' (comparison), which would silently overwrite h->libfunc with
   the integer SYMBOL_REF instead of checking its rtx code.  */
5326 debug_optab_libfuncs (void)
5332 /* Dump the arithmetic optabs. */
5333 for (i = 0; i != (int) OTI_MAX; i++)
5334 for (j = 0; j < NUM_MACHINE_MODES; ++j)
5337 struct optab_handlers *h;
5340 h = &o->handlers[j];
5343 gcc_assert (GET_CODE (h->libfunc) == SYMBOL_REF);
5344 fprintf (stderr, "%s\t%s:\t%s\n",
5345 GET_RTX_NAME (o->code),
5347 XSTR (h->libfunc, 0));
5351 /* Dump the conversion optabs. */
5352 for (i = 0; i < (int) CTI_MAX; ++i)
5353 for (j = 0; j < NUM_MACHINE_MODES; ++j)
5354 for (k = 0; k < NUM_MACHINE_MODES; ++k)
5357 struct optab_handlers *h;
5359 o = &convert_optab_table[i];
5360 h = &o->handlers[j][k];
5363 gcc_assert (GET_CODE (h->libfunc) == SYMBOL_REF);
5364 fprintf (stderr, "%s\t%s\t%s:\t%s\n",
5365 GET_RTX_NAME (o->code),
5368 XSTR (h->libfunc, 0));
5376 /* Generate insns to trap with code TCODE if OP1 and OP2 satisfy condition
5377 CODE. Return 0 on failure. */
5380 gen_cond_trap (enum rtx_code code ATTRIBUTE_UNUSED, rtx op1,
5381 rtx op2 ATTRIBUTE_UNUSED, rtx tcode ATTRIBUTE_UNUSED)
5383 enum machine_mode mode = GET_MODE (op1);
5384 enum insn_code icode;
/* Bail out when the target has no conditional-trap pattern.  */
5387 if (!HAVE_conditional_trap)
5390 if (mode == VOIDmode)
/* Need a compare pattern for MODE to set the condition codes.  */
5393 icode = cmp_optab->handlers[(int) mode].insn_code;
5394 if (icode == CODE_FOR_nothing)
5398 op1 = prepare_operand (icode, op1, 0, mode, mode, 0);
5399 op2 = prepare_operand (icode, op2, 1, mode, mode, 0);
5405 emit_insn (GEN_FCN (icode) (op1, op2));
/* trap_rtx is a shared EQ rtx created in init_optabs; retarget its
   code to the requested condition.  */
5407 PUT_CODE (trap_rtx, code);
5408 gcc_assert (HAVE_conditional_trap);
5409 insn = gen_conditional_trap (trap_rtx, tcode);
5413 insn = get_insns ();
5420 /* Return rtx code for TCODE. Use UNSIGNEDP to select signed
5421 or unsigned operation code. */
/* Maps tree comparison codes to rtx comparison codes; for the ordering
   operators the unsigned variant is chosen when UNSIGNEDP.
   NOTE(review): several switch cases are missing from this listing.  */
5423 static enum rtx_code
5424 get_rtx_code (enum tree_code tcode, bool unsignedp)
5436 code = unsignedp ? LTU : LT;
5439 code = unsignedp ? LEU : LE;
5442 code = unsignedp ? GTU : GT;
5445 code = unsignedp ? GEU : GE;
5448 case UNORDERED_EXPR:
5479 /* Return comparison rtx for COND. Use UNSIGNEDP to select signed or
5480 unsigned operators. Do not generate compare instruction. */
5483 vector_compare_rtx (tree cond, bool unsignedp, enum insn_code icode)
5485 enum rtx_code rcode;
5487 rtx rtx_op0, rtx_op1;
5489 /* This is unlikely. While generating VEC_COND_EXPR, auto vectorizer
5490 ensures that condition is a relational operation. */
5491 gcc_assert (COMPARISON_CLASS_P (cond));
5493 rcode = get_rtx_code (TREE_CODE (cond), unsignedp);
5494 t_op0 = TREE_OPERAND (cond, 0);
5495 t_op1 = TREE_OPERAND (cond, 1);
5497 /* Expand operands. */
5498 rtx_op0 = expand_expr (t_op0, NULL_RTX, TYPE_MODE (TREE_TYPE (t_op0)), 1);
5499 rtx_op1 = expand_expr (t_op1, NULL_RTX, TYPE_MODE (TREE_TYPE (t_op1)), 1);
/* Operands 4 and 5 of the vcond pattern are the comparison inputs;
   force into registers whatever the predicates reject.  */
5501 if (!insn_data[icode].operand[4].predicate (rtx_op0, GET_MODE (rtx_op0))
5502 && GET_MODE (rtx_op0) != VOIDmode)
5503 rtx_op0 = force_reg (GET_MODE (rtx_op0), rtx_op0);
5505 if (!insn_data[icode].operand[5].predicate (rtx_op1, GET_MODE (rtx_op1))
5506 && GET_MODE (rtx_op1) != VOIDmode)
5507 rtx_op1 = force_reg (GET_MODE (rtx_op1), rtx_op1);
/* Build the bare comparison rtx; the caller emits the actual insn.  */
5509 return gen_rtx_fmt_ee (rcode, VOIDmode, rtx_op0, rtx_op1);
5512 /* Return insn code for VEC_COND_EXPR EXPR. */
/* Selects the unsigned (vcondu) or signed (vcond) pattern table based
   on the signedness of EXPR's type.  */
5514 static inline enum insn_code
5515 get_vcond_icode (tree expr, enum machine_mode mode)
5517 enum insn_code icode = CODE_FOR_nothing;
5519 if (TYPE_UNSIGNED (TREE_TYPE (expr)))
5520 icode = vcondu_gen_code[mode];
5522 icode = vcond_gen_code[mode];
5526 /* Return TRUE iff, appropriate vector insns are available
5527 for vector cond expr expr in VMODE mode. */
5530 expand_vec_cond_expr_p (tree expr, enum machine_mode vmode)
5532 if (get_vcond_icode (expr, vmode) == CODE_FOR_nothing)
5537 /* Generate insns for VEC_COND_EXPR. */
/* Expands VEC_COND_EXPR into the target's vcond/vcondu pattern:
   target = cond ? op1 : op2 element-wise.  Returns the result rtx
   (TARGET, or a fresh pseudo if TARGET is unusable).  */
5540 expand_vec_cond_expr (tree vec_cond_expr, rtx target)
5542 enum insn_code icode;
5543 rtx comparison, rtx_op1, rtx_op2, cc_op0, cc_op1;
5544 enum machine_mode mode = TYPE_MODE (TREE_TYPE (vec_cond_expr));
5545 bool unsignedp = TYPE_UNSIGNED (TREE_TYPE (vec_cond_expr));
5547 icode = get_vcond_icode (vec_cond_expr, mode);
5548 if (icode == CODE_FOR_nothing)
5552 target = gen_reg_rtx (mode);
5554 /* Get comparison rtx. First expand both cond expr operands. */
5555 comparison = vector_compare_rtx (TREE_OPERAND (vec_cond_expr, 0),
5557 cc_op0 = XEXP (comparison, 0);
5558 cc_op1 = XEXP (comparison, 1);
5559 /* Expand both operands and force them in reg, if required. */
5560 rtx_op1 = expand_expr (TREE_OPERAND (vec_cond_expr, 1),
5561 NULL_RTX, VOIDmode, 1);
5562 if (!insn_data[icode].operand[1].predicate (rtx_op1, mode)
5563 && mode != VOIDmode)
5564 rtx_op1 = force_reg (mode, rtx_op1);
5566 rtx_op2 = expand_expr (TREE_OPERAND (vec_cond_expr, 2),
5567 NULL_RTX, VOIDmode, 1);
5568 if (!insn_data[icode].operand[2].predicate (rtx_op2, mode)
5569 && mode != VOIDmode)
5570 rtx_op2 = force_reg (mode, rtx_op2);
5572 /* Emit instruction! */
5573 emit_insn (GEN_FCN (icode) (target, rtx_op1, rtx_op2,
5574 comparison, cc_op0, cc_op1));
5580 /* This is an internal subroutine of the other compare_and_swap expanders.
5581 MEM, OLD_VAL and NEW_VAL are as you'd expect for a compare-and-swap
5582 operation. TARGET is an optional place to store the value result of
5583 the operation. ICODE is the particular instruction to expand. Return
5584 the result of the operation. */
5587 expand_val_compare_and_swap_1 (rtx mem, rtx old_val, rtx new_val,
5588 rtx target, enum insn_code icode)
5590 enum machine_mode mode = GET_MODE (mem);
/* Fall back to a fresh pseudo when TARGET is absent or rejected by
   the pattern's output predicate.  */
5593 if (!target || !insn_data[icode].operand[0].predicate (target, mode))
5594 target = gen_reg_rtx (mode);
/* Widen/narrow the comparison and replacement values to MEM's mode
   and satisfy the pattern's input predicates.  */
5596 if (GET_MODE (old_val) != VOIDmode && GET_MODE (old_val) != mode)
5597 old_val = convert_modes (mode, GET_MODE (old_val), old_val, 1);
5598 if (!insn_data[icode].operand[2].predicate (old_val, mode))
5599 old_val = force_reg (mode, old_val);
5601 if (GET_MODE (new_val) != VOIDmode && GET_MODE (new_val) != mode)
5602 new_val = convert_modes (mode, GET_MODE (new_val), new_val, 1);
5603 if (!insn_data[icode].operand[3].predicate (new_val, mode))
5604 new_val = force_reg (mode, new_val);
5606 insn = GEN_FCN (icode) (target, mem, old_val, new_val);
5607 if (insn == NULL_RTX)
5614 /* Expand a compare-and-swap operation and return its value. */
5617 expand_val_compare_and_swap (rtx mem, rtx old_val, rtx new_val, rtx target)
5619 enum machine_mode mode = GET_MODE (mem);
5620 enum insn_code icode = sync_compare_and_swap[mode];
5622 if (icode == CODE_FOR_nothing)
5625 return expand_val_compare_and_swap_1 (mem, old_val, new_val, target, icode);
5628 /* Expand a compare-and-swap operation and store true into the result if
5629 the operation was successful and false otherwise. Return the result.
5630 Unlike other routines, TARGET is not optional. */
5633 expand_bool_compare_and_swap (rtx mem, rtx old_val, rtx new_val, rtx target)
5635 enum machine_mode mode = GET_MODE (mem);
5636 enum insn_code icode;
5637 rtx subtarget, label0, label1;
5639 /* If the target supports a compare-and-swap pattern that simultaneously
5640 sets some flag for success, then use it. Otherwise use the regular
5641 compare-and-swap and follow that immediately with a compare insn. */
5642 icode = sync_compare_and_swap_cc[mode];
5646 subtarget = expand_val_compare_and_swap_1 (mem, old_val, new_val,
5648 if (subtarget != NULL_RTX)
5652 case CODE_FOR_nothing:
5653 icode = sync_compare_and_swap[mode];
5654 if (icode == CODE_FOR_nothing)
5657 /* Ensure that if old_val == mem, that we're not comparing
5658 against an old value. */
5659 if (MEM_P (old_val))
5660 old_val = force_reg (mode, old_val);
5662 subtarget = expand_val_compare_and_swap_1 (mem, old_val, new_val,
5664 if (subtarget == NULL_RTX)
5667 emit_cmp_insn (subtarget, old_val, EQ, const0_rtx, mode, true);
5670 /* If the target has a sane STORE_FLAG_VALUE, then go ahead and use a
5671 setcc instruction from the beginning. We don't work too hard here,
5672 but it's nice to not be stupid about initial code gen either. */
5673 if (STORE_FLAG_VALUE == 1)
5675 icode = setcc_gen_code[EQ];
5676 if (icode != CODE_FOR_nothing)
5678 enum machine_mode cmode = insn_data[icode].operand[0].mode;
5682 if (!insn_data[icode].operand[0].predicate (target, cmode))
5683 subtarget = gen_reg_rtx (cmode);
5685 insn = GEN_FCN (icode) (subtarget)
5689 if (GET_MODE (target) != GET_MODE (subtarget))
/* setcc produced the flag in a different mode; widen unsigned
   into TARGET.  */
5691 convert_move (target, subtarget, 1);
5699 /* Without an appropriate setcc instruction, use a set of branches to
5700 get 1 and 0 stored into target. Presumably if the target has a
5701 STORE_FLAG_VALUE that isn't 1, then this will get cleaned up by ifcvt. */
5703 label0 = gen_label_rtx ();
5704 label1 = gen_label_rtx ();
5706 emit_jump_insn (bcc_gen_fctn[EQ] (label0));
5707 emit_move_insn (target, const0_rtx);
5708 emit_jump_insn (gen_jump (label1));
5710 emit_label (label0);
5711 emit_move_insn (target, const1_rtx);
5712 emit_label (label1);
5717 /* This is a helper function for the other atomic operations. This function
5718 emits a loop that contains SEQ that iterates until a compare-and-swap
5719 operation at the end succeeds. MEM is the memory to be modified. SEQ is
5720 a set of instructions that takes a value from OLD_REG as an input and
5721 produces a value in NEW_REG as an output. Before SEQ, OLD_REG will be
5722 set to the current contents of MEM. After SEQ, a compare-and-swap will
5723 attempt to update MEM with NEW_REG. The function returns true when the
5724 loop was generated successfully. */
5727 expand_compare_and_swap_loop (rtx mem, rtx old_reg, rtx new_reg, rtx seq)
5729 enum machine_mode mode = GET_MODE (mem);
5730 enum insn_code icode;
5731 rtx label, cmp_reg, subtarget;
5733 /* The loop we want to generate looks like
5739 cmp_reg = compare-and-swap(mem, old_reg, new_reg)
5740 if (cmp_reg != old_reg)
5743 Note that we only do the plain load from memory once. Subsequent
5744 iterations use the value loaded by the compare-and-swap pattern. */
5746 label = gen_label_rtx ();
5747 cmp_reg = gen_reg_rtx (mode);
5749 emit_move_insn (cmp_reg, mem);
5751 emit_move_insn (old_reg, cmp_reg);
5755 /* If the target supports a compare-and-swap pattern that simultaneously
5756 sets some flag for success, then use it. Otherwise use the regular
5757 compare-and-swap and follow that immediately with a compare insn. */
5758 icode = sync_compare_and_swap_cc[mode];
5762 subtarget = expand_val_compare_and_swap_1 (mem, old_reg, new_reg,
5764 if (subtarget != NULL_RTX)
/* The _cc pattern was asked to put its result in cmp_reg.  */
5766 gcc_assert (subtarget == cmp_reg);
5771 case CODE_FOR_nothing:
5772 icode = sync_compare_and_swap[mode];
5773 if (icode == CODE_FOR_nothing)
5776 subtarget = expand_val_compare_and_swap_1 (mem, old_reg, new_reg,
5778 if (subtarget == NULL_RTX)
5780 if (subtarget != cmp_reg)
5781 emit_move_insn (cmp_reg, subtarget);
5783 emit_cmp_insn (cmp_reg, old_reg, EQ, const0_rtx, mode, true);
5786 /* ??? Mark this jump predicted not taken? */
/* Loop back while the compare-and-swap observed a different value.  */
5787 emit_jump_insn (bcc_gen_fctn[NE] (label));
5792 /* This function generates the atomic operation MEM CODE= VAL. In this
5793 case, we do not care about any resulting value. Returns NULL if we
5794 cannot generate the operation. */
5797 expand_sync_operation (rtx mem, rtx val, enum rtx_code code)
5799 enum machine_mode mode = GET_MODE (mem);
5800 enum insn_code icode;
5803 /* Look to see if the target supports the operation directly. */
5807 icode = sync_add_optab[mode];
5810 icode = sync_ior_optab[mode];
5813 icode = sync_xor_optab[mode];
5816 icode = sync_and_optab[mode];
5819 icode = sync_nand_optab[mode];
5823 icode = sync_sub_optab[mode];
5824 if (icode == CODE_FOR_nothing)
/* No direct sub: rewrite "mem -= val" as "mem += -val".  */
5826 icode = sync_add_optab[mode];
5827 if (icode != CODE_FOR_nothing)
5829 val = expand_simple_unop (mode, NEG, val, NULL_RTX, 1);
5839 /* Generate the direct operation, if present. */
5840 if (icode != CODE_FOR_nothing)
5842 if (GET_MODE (val) != VOIDmode && GET_MODE (val) != mode)
5843 val = convert_modes (mode, GET_MODE (val), val, 1);
5844 if (!insn_data[icode].operand[1].predicate (val, mode))
5845 val = force_reg (mode, val);
5847 insn = GEN_FCN (icode) (mem, val);
5855 /* Failing that, generate a compare-and-swap loop in which we perform the
5856 operation with normal arithmetic instructions. */
5857 if (sync_compare_and_swap[mode] != CODE_FOR_nothing)
5859 rtx t0 = gen_reg_rtx (mode), t1;
/* NAND is open-coded as NOT followed by the binop.  */
5866 t1 = expand_simple_unop (mode, NOT, t1, NULL_RTX, true);
5869 t1 = expand_simple_binop (mode, code, t1, val, NULL_RTX,
5870 true, OPTAB_LIB_WIDEN);
5872 insn = get_insns ();
5875 if (t1 != NULL && expand_compare_and_swap_loop (mem, t0, t1, insn))
5882 /* This function generates the atomic operation MEM CODE= VAL. In this
5883 case, we do care about the resulting value: if AFTER is true then
5884 return the value MEM holds after the operation, if AFTER is false
5885 then return the value MEM holds before the operation. TARGET is an
5886 optional place for the result value to be stored. */
5889 expand_sync_fetch_operation (rtx mem, rtx val, enum rtx_code code,
5890 bool after, rtx target)
5892 enum machine_mode mode = GET_MODE (mem);
5893 enum insn_code old_code, new_code, icode;
5897 /* Look to see if the target supports the operation directly. */
/* Each rtx code selects both the "return old value" and the "return
   new value" pattern for this mode; which one we use depends on AFTER
   and on what the target provides.  */
5901 old_code = sync_old_add_optab[mode];
5902 new_code = sync_new_add_optab[mode];
5905 old_code = sync_old_ior_optab[mode];
5906 new_code = sync_new_ior_optab[mode];
5909 old_code = sync_old_xor_optab[mode];
5910 new_code = sync_new_xor_optab[mode];
5913 old_code = sync_old_and_optab[mode];
5914 new_code = sync_new_and_optab[mode];
5917 old_code = sync_old_nand_optab[mode];
5918 new_code = sync_new_nand_optab[mode];
5922 old_code = sync_old_sub_optab[mode];
5923 new_code = sync_new_sub_optab[mode];
/* No atomic subtract patterns at all: negate VAL and use the atomic
   add patterns instead.  */
5924 if (old_code == CODE_FOR_nothing && new_code == CODE_FOR_nothing)
5926 old_code = sync_old_add_optab[mode];
5927 new_code = sync_new_add_optab[mode];
5928 if (old_code != CODE_FOR_nothing || new_code != CODE_FOR_nothing)
5930 val = expand_simple_unop (mode, NEG, val, NULL_RTX, 1);
5940 /* If the target does support the proper new/old operation, great. But
5941 if we only support the opposite old/new operation, check to see if we
5942 can compensate. In the case in which the old value is supported, then
5943 we can always perform the operation again with normal arithmetic. In
5944 the case in which the new value is supported, then we can only handle
5945 this in the case the operation is reversible. */
5950 if (icode == CODE_FOR_nothing)
5953 if (icode != CODE_FOR_nothing)
5960 if (icode == CODE_FOR_nothing
5961 && (code == PLUS || code == MINUS || code == XOR))
5964 if (icode != CODE_FOR_nothing)
5969 /* If we found something supported, great. */
5970 if (icode != CODE_FOR_nothing)
5972 if (!target || !insn_data[icode].operand[0].predicate (target, mode))
5973 target = gen_reg_rtx (mode);
/* Legitimize VAL for the chosen insn: match MEM's mode and satisfy
   the operand predicate.  */
5975 if (GET_MODE (val) != VOIDmode && GET_MODE (val) != mode)
5976 val = convert_modes (mode, GET_MODE (val), val, 1);
5977 if (!insn_data[icode].operand[2].predicate (val, mode))
5978 val = force_reg (mode, val);
5980 insn = GEN_FCN (icode) (target, mem, val);
5985 /* If we need to compensate for using an operation with the
5986 wrong return value, do so now. */
5993 else if (code == MINUS)
5998 target = expand_simple_unop (mode, NOT, target, NULL_RTX, true);
5999 target = expand_simple_binop (mode, code, target, val, NULL_RTX,
6000 true, OPTAB_LIB_WIDEN);
6007 /* Failing that, generate a compare-and-swap loop in which we perform the
6008 operation with normal arithmetic instructions. */
6009 if (sync_compare_and_swap[mode] != CODE_FOR_nothing)
6011 rtx t0 = gen_reg_rtx (mode), t1;
6013 if (!target || !register_operand (target, mode))
6014 target = gen_reg_rtx (mode);
/* NOTE(review): presumably T0 (the pre-operation value) is copied to
   TARGET when AFTER is false -- the guard is elided in this listing.  */
6019 emit_move_insn (target, t0);
6023 t1 = expand_simple_unop (mode, NOT, t1, NULL_RTX, true);
6026 t1 = expand_simple_binop (mode, code, t1, val, NULL_RTX,
6027 true, OPTAB_LIB_WIDEN);
/* NOTE(review): presumably T1 (the post-operation value) is copied to
   TARGET when AFTER is true -- confirm against the full source.  */
6029 emit_move_insn (target, t1);
6031 insn = get_insns ();
6034 if (t1 != NULL && expand_compare_and_swap_loop (mem, t0, t1, insn))
6041 /* This function expands a test-and-set operation. Ideally we atomically
6042 store VAL in MEM and return the previous value in MEM. Some targets
6043 may not support this operation and only support VAL with the constant 1;
6044 in this case the return value will be 0/1, but the exact value
6045 stored in MEM is target defined. TARGET is an optional place to stick
6046 the return value. */
6049 expand_sync_lock_test_and_set (rtx mem, rtx val, rtx target)
6051 enum machine_mode mode = GET_MODE (mem);
6052 enum insn_code icode;
6055 /* If the target supports the test-and-set directly, great. */
6056 icode = sync_lock_test_and_set[mode];
6057 if (icode != CODE_FOR_nothing)
6059 if (!target || !insn_data[icode].operand[0].predicate (target, mode))
6060 target = gen_reg_rtx (mode);
/* Legitimize VAL for the insn: match MEM's mode and satisfy the
   operand predicate.  */
6062 if (GET_MODE (val) != VOIDmode && GET_MODE (val) != mode)
6063 val = convert_modes (mode, GET_MODE (val), val, 1);
6064 if (!insn_data[icode].operand[2].predicate (val, mode))
6065 val = force_reg (mode, val);
6067 insn = GEN_FCN (icode) (target, mem, val);
6075 /* Otherwise, use a compare-and-swap loop for the exchange. */
6076 if (sync_compare_and_swap[mode] != CODE_FOR_nothing)
6078 if (!target || !register_operand (target, mode))
6079 target = gen_reg_rtx (mode);
6080 if (GET_MODE (val) != VOIDmode && GET_MODE (val) != mode)
6081 val = convert_modes (mode, GET_MODE (val), val, 1);
/* TARGET plays the role of OLD_REG in the loop: it ends up holding
   the value MEM contained before VAL was stored.  */
6082 if (expand_compare_and_swap_loop (mem, target, val, NULL_RTX))
6089 #include "gt-optabs.h"