1 /* Expand the basic unary and binary arithmetic operations, for GNU compiler.
2 Copyright (C) 1987, 1988, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
3 1999, 2000, 2001, 2002, 2003, 2004, 2005 Free Software Foundation, Inc.
5 This file is part of GCC.
7 GCC is free software; you can redistribute it and/or modify it under
8 the terms of the GNU General Public License as published by the Free
9 Software Foundation; either version 2, or (at your option) any later
12 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
13 WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING. If not, write to the Free
19 Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
25 #include "coretypes.h"
29 /* Include insn-config.h before expr.h so that HAVE_conditional_move
30 is properly defined. */
31 #include "insn-config.h"
45 #include "basic-block.h"
/* NOTE(review): the embedded line numbers in this extraction jump
   (e.g. 57 -> 59, 126 -> 128), so declarations and comment lines have
   been dropped.  Verify everything below against the complete optabs.c
   before relying on it.  */
48 /* Each optab contains info on how this target machine
49 can perform a particular operation
50 for all sizes and kinds of operands.
52 The operation to be performed is often specified
53 by passing one of these optabs as an argument.
55 See expr.h for documentation of these optabs. */
57 optab optab_table[OTI_MAX];
/* Presumably the rtxs naming support-library functions, indexed by
   libfunc code -- the describing comment was lost in extraction.  */
59 rtx libfunc_table[LTI_MAX];
61 /* Tables of patterns for converting one mode to another. */
62 convert_optab convert_optab_table[COI_MAX];
64 /* Contains the optab used for each rtx code. */
65 optab code_to_optab[NUM_RTX_CODE + 1];
67 /* Indexed by the rtx-code for a conditional (eg. EQ, LT,...)
68 gives the gen_function to make a branch to test that condition. */
70 rtxfun bcc_gen_fctn[NUM_RTX_CODE];
72 /* Indexed by the rtx-code for a conditional (eg. EQ, LT,...)
73 gives the insn code to make a store-condition insn
74 to test that condition. */
76 enum insn_code setcc_gen_code[NUM_RTX_CODE];
78 #ifdef HAVE_conditional_move
79 /* Indexed by the machine mode, gives the insn code to make a conditional
80 move insn. This is not indexed by the rtx-code like bcc_gen_fctn and
81 setcc_gen_code to cut down on the number of named patterns. Consider a day
82 when a lot more rtx codes are conditional (eg: for the ARM). */
84 enum insn_code movcc_gen_code[NUM_MACHINE_MODES];
87 /* Indexed by the machine mode, gives the insn code for vector conditional
90 enum insn_code vcond_gen_code[NUM_MACHINE_MODES];
91 enum insn_code vcondu_gen_code[NUM_MACHINE_MODES];
93 /* The insn generating function can not take an rtx_code argument.
94 TRAP_RTX is used as an rtx argument. Its code is replaced with
95 the code to be used in the trap insn and all other fields are ignored. */
96 static GTY(()) rtx trap_rtx;
/* Forward declarations for the static helpers defined in this file.
   NOTE(review): several prototypes appear truncated mid-parameter-list
   (e.g. widen_operand at 99, can_fix_p at 104) -- lines are missing.  */
98 static int add_equal_note (rtx, rtx, enum rtx_code, rtx, rtx);
99 static rtx widen_operand (rtx, enum machine_mode, enum machine_mode, int,
101 static void prepare_cmp_insn (rtx *, rtx *, enum rtx_code *, rtx,
102 enum machine_mode *, int *,
103 enum can_compare_purpose);
104 static enum insn_code can_fix_p (enum machine_mode, enum machine_mode, int,
106 static enum insn_code can_float_p (enum machine_mode, enum machine_mode, int);
107 static optab new_optab (void);
108 static convert_optab new_convert_optab (void);
109 static inline optab init_optab (enum rtx_code);
110 static inline optab init_optabv (enum rtx_code);
111 static inline convert_optab init_convert_optab (enum rtx_code);
112 static void init_libfuncs (optab, int, int, const char *, int);
113 static void init_integral_libfuncs (optab, const char *, int);
114 static void init_floating_libfuncs (optab, const char *, int);
115 static void init_interclass_conv_libfuncs (convert_optab, const char *,
116 enum mode_class, enum mode_class);
117 static void init_intraclass_conv_libfuncs (convert_optab, const char *,
118 enum mode_class, bool);
119 static void emit_cmp_and_jump_insn_1 (rtx, rtx, enum machine_mode,
120 enum rtx_code, int, rtx);
121 static void prepare_float_lib_cmp (rtx *, rtx *, enum rtx_code *,
122 enum machine_mode *, int *);
123 static rtx widen_clz (enum machine_mode, rtx, rtx);
124 static rtx expand_parity (enum machine_mode, rtx, rtx);
125 static enum rtx_code get_rtx_code (enum tree_code, bool);
126 static rtx vector_compare_rtx (tree, bool, enum insn_code);
/* Provide stubs when the target has no conditional trap pattern so the
   rest of the file can reference them unconditionally.  */
128 #ifndef HAVE_conditional_trap
129 #define HAVE_conditional_trap 0
130 #define gen_conditional_trap(a,b) (gcc_unreachable (), NULL_RTX)
133 /* Add a REG_EQUAL note to the last insn in INSNS.  TARGET is being set to
134 the result of operation CODE applied to OP0 (and OP1 if it is a binary
137 If the last insn does not set TARGET, don't do anything, but return 1.
139 If a previous insn sets TARGET and TARGET is one of OP0 or OP1,
140 don't add the REG_EQUAL note but return 0. Our caller can then try
141 again, ensuring that TARGET is not one of the operands. */
/* NOTE(review): the embedded line numbers jump (134 -> 137, 144 -> 146),
   so this extraction is missing lines -- the return type, the braces and
   several return statements are absent.  Confirm against full optabs.c.  */
144 add_equal_note (rtx insns, rtx target, enum rtx_code code, rtx op0, rtx op1)
146 rtx last_insn, insn, set;
/* INSNS must be a real sequence of at least two insns.  */
149 gcc_assert (insns && INSN_P (insns) && NEXT_INSN (insns));
/* Only arithmetic, comparison and unary rtx classes get a note.  */
151 if (GET_RTX_CLASS (code) != RTX_COMM_ARITH
152 && GET_RTX_CLASS (code) != RTX_BIN_ARITH
153 && GET_RTX_CLASS (code) != RTX_COMM_COMPARE
154 && GET_RTX_CLASS (code) != RTX_COMPARE
155 && GET_RTX_CLASS (code) != RTX_UNARY)
/* A ZERO_EXTRACT destination cannot carry a meaningful REG_EQUAL note.  */
158 if (GET_CODE (target) == ZERO_EXTRACT)
/* Walk to the final insn of the sequence.  */
161 for (last_insn = insns;
162 NEXT_INSN (last_insn) != NULL_RTX;
163 last_insn = NEXT_INSN (last_insn))
166 set = single_set (last_insn);
/* The note only makes sense if the last insn actually sets TARGET.  */
170 if (! rtx_equal_p (SET_DEST (set), target)
171 /* For a STRICT_LOW_PART, the REG_NOTE applies to what is inside it. */
172 && (GET_CODE (SET_DEST (set)) != STRICT_LOW_PART
173 || ! rtx_equal_p (XEXP (SET_DEST (set), 0), target)))
176 /* If TARGET is in OP0 or OP1, check if anything in SEQ sets TARGET
177 besides the last insn. */
178 if (reg_overlap_mentioned_p (target, op0)
179 || (op1 && reg_overlap_mentioned_p (target, op1)))
181 insn = PREV_INSN (last_insn);
182 while (insn != NULL_RTX)
184 if (reg_set_p (target, insn))
187 insn = PREV_INSN (insn);
/* Build the note body: one operand for unary codes, two otherwise.  */
191 if (GET_RTX_CLASS (code) == RTX_UNARY)
192 note = gen_rtx_fmt_e (code, GET_MODE (target), copy_rtx (op0));
194 note = gen_rtx_fmt_ee (code, GET_MODE (target), copy_rtx (op0), copy_rtx (op1));
196 set_unique_reg_note (last_insn, REG_EQUAL, note);
201 /* Widen OP to MODE and return the rtx for the widened operand.  UNSIGNEDP
202 says whether OP is signed or unsigned. NO_EXTEND is nonzero if we need
203 not actually do a sign-extend or zero-extend, but can leave the
204 higher-order bits of the result rtx undefined, for example, in the case
205 of logical operations, but not right shifts. */
/* NOTE(review): lines are missing from this extraction (the return type,
   the first half of the condition before line 221, and the trailing
   return of RESULT, among others) -- verify against full optabs.c.  */
208 widen_operand (rtx op, enum machine_mode mode, enum machine_mode oldmode,
209 int unsignedp, int no_extend)
213 /* If we don't have to extend and this is a constant, return it. */
214 if (no_extend && GET_MODE (op) == VOIDmode)
217 /* If we must extend do so. If OP is a SUBREG for a promoted object, also
218 extend since it will be more efficient to do so unless the signedness of
219 a promoted object differs from our extension. */
221 || (GET_CODE (op) == SUBREG && SUBREG_PROMOTED_VAR_P (op)
222 && SUBREG_PROMOTED_UNSIGNED_P (op) == unsignedp))
223 return convert_modes (mode, oldmode, op, unsignedp);
225 /* If MODE is no wider than a single word, we return a paradoxical
227 if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
228 return gen_rtx_SUBREG (mode, force_reg (GET_MODE (op), op), 0);
230 /* Otherwise, get an object of MODE, clobber it, and set the low-order
233 result = gen_reg_rtx (mode);
234 emit_insn (gen_rtx_CLOBBER (VOIDmode, result));
235 emit_move_insn (gen_lowpart (GET_MODE (op), result), op);
239 /* Return the optab used for computing the operation given by
240 the tree code, CODE. This function is not always usable (for
241 example, it cannot give complete results for multiplication
242 or division) but probably ought to be relied on more widely
243 throughout the expander. */
/* NOTE(review): most of the `case' labels of the switch on CODE have
   been dropped by the extraction (line numbers jump 245 -> 257, 257 ->
   266, ...), as have the return type, braces and default handling.
   Each surviving `return' below belongs to a case label that is no
   longer visible -- confirm the mapping against full optabs.c.  */
245 optab_for_tree_code (enum tree_code code, tree type)
257 return one_cmpl_optab;
/* Signedness of TYPE selects between the unsigned/signed optab pairs.  */
266 return TYPE_UNSIGNED (type) ? umod_optab : smod_optab;
274 return TYPE_UNSIGNED (type) ? udiv_optab : sdiv_optab;
280 return TYPE_UNSIGNED (type) ? lshr_optab : ashr_optab;
289 return TYPE_UNSIGNED (type) ? umax_optab : smax_optab;
292 return TYPE_UNSIGNED (type) ? umin_optab : smin_optab;
294 case REALIGN_LOAD_EXPR:
295 return vec_realign_load_optab;
298 return TYPE_UNSIGNED (type) ? reduc_umax_optab : reduc_smax_optab;
301 return TYPE_UNSIGNED (type) ? reduc_umin_optab : reduc_smin_optab;
303 case REDUC_PLUS_EXPR:
304 return TYPE_UNSIGNED (type) ? reduc_uplus_optab : reduc_splus_optab;
306 case VEC_LSHIFT_EXPR:
307 return vec_shl_optab;
309 case VEC_RSHIFT_EXPR:
310 return vec_shr_optab;
/* Trapping variants are used only for signed integral types when
   -ftrapv is in effect.  */
316 trapv = flag_trapv && INTEGRAL_TYPE_P (type) && !TYPE_UNSIGNED (type);
320 return trapv ? addv_optab : add_optab;
323 return trapv ? subv_optab : sub_optab;
326 return trapv ? smulv_optab : smul_optab;
329 return trapv ? negv_optab : neg_optab;
332 return trapv ? absv_optab : abs_optab;
340 /* Generate code to perform an operation specified by TERNARY_OPTAB
341 on operands OP0, OP1 and OP2, with result having machine-mode MODE.
343 UNSIGNEDP is for the case where we have to widen the operands
344 to perform the operation. It says to use zero-extension.
346 If TARGET is nonzero, the value
347 is generated there, if it is convenient to do so.
348 In all cases an rtx is returned for the locus of the value;
349 this may or may not be TARGET. */
/* NOTE(review): extraction gaps here (return type, braces, the `else'
   branches of the convert_modes calls, emit_insn of PAT and the final
   return) -- verify against full optabs.c.  */
352 expand_ternary_op (enum machine_mode mode, optab ternary_optab, rtx op0,
353 rtx op1, rtx op2, rtx target, int unsignedp)
355 int icode = (int) ternary_optab->handlers[(int) mode].insn_code;
356 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
357 enum machine_mode mode1 = insn_data[icode].operand[2].mode;
358 enum machine_mode mode2 = insn_data[icode].operand[3].mode;
361 rtx xop0 = op0, xop1 = op1, xop2 = op2;
/* The caller must have verified that the target supports this optab.  */
363 gcc_assert (ternary_optab->handlers[(int) mode].insn_code
364 != CODE_FOR_nothing);
/* Use TARGET only if the insn's output predicate accepts it.  */
366 if (!target || !insn_data[icode].operand[0].predicate (target, mode))
367 temp = gen_reg_rtx (mode);
371 /* In case the insn wants input operands in modes different from
372 those of the actual operands, convert the operands. It would
373 seem that we don't need to convert CONST_INTs, but we do, so
374 that they're properly zero-extended, sign-extended or truncated
377 if (GET_MODE (op0) != mode0 && mode0 != VOIDmode)
378 xop0 = convert_modes (mode0,
379 GET_MODE (op0) != VOIDmode
384 if (GET_MODE (op1) != mode1 && mode1 != VOIDmode)
385 xop1 = convert_modes (mode1,
386 GET_MODE (op1) != VOIDmode
391 if (GET_MODE (op2) != mode2 && mode2 != VOIDmode)
392 xop2 = convert_modes (mode2,
393 GET_MODE (op2) != VOIDmode
398 /* Now, if insn's predicates don't allow our operands, put them into
401 if (!insn_data[icode].operand[1].predicate (xop0, mode0)
402 && mode0 != VOIDmode)
403 xop0 = copy_to_mode_reg (mode0, xop0);
405 if (!insn_data[icode].operand[2].predicate (xop1, mode1)
406 && mode1 != VOIDmode)
407 xop1 = copy_to_mode_reg (mode1, xop1);
409 if (!insn_data[icode].operand[3].predicate (xop2, mode2)
410 && mode2 != VOIDmode)
411 xop2 = copy_to_mode_reg (mode2, xop2);
/* Generate the insn pattern for the (possibly converted) operands.  */
413 pat = GEN_FCN (icode) (temp, xop0, xop1, xop2);
420 /* Like expand_binop, but return a constant rtx if the result can be
421 calculated at compile time. The arguments and return value are
422 otherwise the same as for expand_binop. */
/* NOTE(review): the return type and braces were dropped by the
   extraction; the visible logic is: constant-fold when both operands
   are constants, otherwise defer to expand_binop.  */
425 simplify_expand_binop (enum machine_mode mode, optab binoptab,
426 rtx op0, rtx op1, rtx target, int unsignedp,
427 enum optab_methods methods)
429 if (CONSTANT_P (op0) && CONSTANT_P (op1))
430 return simplify_gen_binary (binoptab->code, mode, op0, op1);
432 return expand_binop (mode, binoptab, op0, op1, target, unsignedp, methods);
435 /* Like simplify_expand_binop, but always put the result in TARGET.
436 Return true if the expansion succeeded. */
/* NOTE(review): the return type, braces, the failure check on X and the
   final return are missing from this extraction; if X lands somewhere
   other than TARGET it is copied there with emit_move_insn.  */
439 force_expand_binop (enum machine_mode mode, optab binoptab,
440 rtx op0, rtx op1, rtx target, int unsignedp,
441 enum optab_methods methods)
443 rtx x = simplify_expand_binop (mode, binoptab, op0, op1,
444 target, unsignedp, methods);
448 emit_move_insn (target, x);
452 /* Generate insns for VEC_LSHIFT_EXPR, VEC_RSHIFT_EXPR. */
/* NOTE(review): extraction gaps -- the return type, braces, `break'
   statements of the switch, the declaration of shift_optab/pat, the
   default case, emit_insn of PAT and the final return are missing.
   Verify against full optabs.c.  */
455 expand_vec_shift_expr (tree vec_shift_expr, rtx target)
457 enum insn_code icode;
458 rtx rtx_op1, rtx_op2;
459 enum machine_mode mode1;
460 enum machine_mode mode2;
461 enum machine_mode mode = TYPE_MODE (TREE_TYPE (vec_shift_expr));
462 tree vec_oprnd = TREE_OPERAND (vec_shift_expr, 0);
463 tree shift_oprnd = TREE_OPERAND (vec_shift_expr, 1);
/* Select the whole-vector shift optab from the tree code.  */
467 switch (TREE_CODE (vec_shift_expr))
469 case VEC_RSHIFT_EXPR:
470 shift_optab = vec_shr_optab;
472 case VEC_LSHIFT_EXPR:
473 shift_optab = vec_shl_optab;
479 icode = (int) shift_optab->handlers[(int) mode].insn_code;
480 gcc_assert (icode != CODE_FOR_nothing);
482 mode1 = insn_data[icode].operand[1].mode;
483 mode2 = insn_data[icode].operand[2].mode;
/* Expand both tree operands and force them into registers if the
   insn's predicates reject the expanded forms.  */
485 rtx_op1 = expand_expr (vec_oprnd, NULL_RTX, VOIDmode, EXPAND_NORMAL);
486 if (!(*insn_data[icode].operand[1].predicate) (rtx_op1, mode1)
487 && mode1 != VOIDmode)
488 rtx_op1 = force_reg (mode1, rtx_op1);
490 rtx_op2 = expand_expr (shift_oprnd, NULL_RTX, VOIDmode, EXPAND_NORMAL);
491 if (!(*insn_data[icode].operand[2].predicate) (rtx_op2, mode2)
492 && mode2 != VOIDmode)
493 rtx_op2 = force_reg (mode2, rtx_op2);
/* Use a fresh register if TARGET is unusable for the output operand.  */
496 || ! (*insn_data[icode].operand[0].predicate) (target, mode))
497 target = gen_reg_rtx (mode);
499 /* Emit instruction */
500 pat = GEN_FCN (icode) (target, rtx_op1, rtx_op2);
507 /* This subroutine of expand_doubleword_shift handles the cases in which
508 the effective shift value is >= BITS_PER_WORD. The arguments and return
509 value are the same as for the parent routine, except that SUPERWORD_OP1
510 is the shift count to use when shifting OUTOF_INPUT into INTO_TARGET.
511 INTO_TARGET may be null if the caller has decided to calculate it. */
/* NOTE(review): the return type, braces, `return false' statements and
   the final `return true' are missing from this extraction.  */
514 expand_superword_shift (optab binoptab, rtx outof_input, rtx superword_op1,
515 rtx outof_target, rtx into_target,
516 int unsignedp, enum optab_methods methods)
/* The into-half is simply OUTOF_INPUT shifted by SUPERWORD_OP1.  */
518 if (into_target != 0)
519 if (!force_expand_binop (word_mode, binoptab, outof_input, superword_op1,
520 into_target, unsignedp, methods))
523 if (outof_target != 0)
525 /* For a signed right shift, we must fill OUTOF_TARGET with copies
526 of the sign bit, otherwise we must fill it with zeros. */
527 if (binoptab != ashr_optab)
528 emit_move_insn (outof_target, CONST0_RTX (word_mode))
530 if (!force_expand_binop (word_mode, binoptab,
531 outof_input, GEN_INT (BITS_PER_WORD - 1),
532 outof_target, unsignedp, methods))
538 /* This subroutine of expand_doubleword_shift handles the cases in which
539 the effective shift value is < BITS_PER_WORD. The arguments and return
540 value are the same as for the parent routine. */
/* NOTE(review): extraction gaps -- return type, braces, declarations of
   tmp/carries, several else branches and the failure returns are
   missing.  Verify against full optabs.c before editing.  */
543 expand_subword_shift (enum machine_mode op1_mode, optab binoptab,
544 rtx outof_input, rtx into_input, rtx op1,
545 rtx outof_target, rtx into_target,
546 int unsignedp, enum optab_methods methods,
547 unsigned HOST_WIDE_INT shift_mask)
549 optab reverse_unsigned_shift, unsigned_shift;
/* The "carry" bits are recovered by shifting in the direction opposite
   to BINOPTAB; the into-half itself always uses a logical shift.  */
552 reverse_unsigned_shift = (binoptab == ashl_optab ? lshr_optab : ashl_optab);
553 unsigned_shift = (binoptab == ashl_optab ? ashl_optab : lshr_optab);
555 /* The low OP1 bits of INTO_TARGET come from the high bits of OUTOF_INPUT.
556 We therefore need to shift OUTOF_INPUT by (BITS_PER_WORD - OP1) bits in
557 the opposite direction to BINOPTAB. */
558 if (CONSTANT_P (op1) || shift_mask >= BITS_PER_WORD)
560 carries = outof_input;
561 tmp = immed_double_const (BITS_PER_WORD, 0, op1_mode);
562 tmp = simplify_expand_binop (op1_mode, sub_optab, tmp, op1,
567 /* We must avoid shifting by BITS_PER_WORD bits since that is either
568 the same as a zero shift (if shift_mask == BITS_PER_WORD - 1) or
569 has unknown behavior. Do a single shift first, then shift by the
570 remainder. It's OK to use ~OP1 as the remainder if shift counts
571 are truncated to the mode size. */
572 carries = expand_binop (word_mode, reverse_unsigned_shift,
573 outof_input, const1_rtx, 0, unsignedp, methods);
574 if (shift_mask == BITS_PER_WORD - 1)
/* Remainder is ~OP1, computed as OP1 XOR -1.  */
576 tmp = immed_double_const (-1, -1, op1_mode);
577 tmp = simplify_expand_binop (op1_mode, xor_optab, op1, tmp,
/* Otherwise remainder is (BITS_PER_WORD - 1) - OP1.  */
582 tmp = immed_double_const (BITS_PER_WORD - 1, 0, op1_mode);
583 tmp = simplify_expand_binop (op1_mode, sub_optab, tmp, op1,
587 if (tmp == 0 || carries == 0)
589 carries = expand_binop (word_mode, reverse_unsigned_shift,
590 carries, tmp, 0, unsignedp, methods);
594 /* Shift INTO_INPUT logically by OP1. This is the last use of INTO_INPUT
595 so the result can go directly into INTO_TARGET if convenient. */
596 tmp = expand_binop (word_mode, unsigned_shift, into_input, op1,
597 into_target, unsignedp, methods);
601 /* Now OR in the bits carried over from OUTOF_INPUT. */
602 if (!force_expand_binop (word_mode, ior_optab, tmp, carries,
603 into_target, unsignedp, methods))
606 /* Use a standard word_mode shift for the out-of half. */
607 if (outof_target != 0)
608 if (!force_expand_binop (word_mode, binoptab, outof_input, op1,
609 outof_target, unsignedp, methods))
616 #ifdef HAVE_conditional_move
617 /* Try implementing expand_doubleword_shift using conditional moves.
618 The shift is by < BITS_PER_WORD if (CMP_CODE CMP1 CMP2) is true,
619 otherwise it is by >= BITS_PER_WORD. SUBWORD_OP1 and SUPERWORD_OP1
620 are the shift counts to use in the former and latter case. All other
621 arguments are the same as the parent routine. */
/* NOTE(review): extraction gaps -- return type, braces, `else', the
   failure returns and the final `return true' are missing.  */
624 expand_doubleword_shift_condmove (enum machine_mode op1_mode, optab binoptab,
625 enum rtx_code cmp_code, rtx cmp1, rtx cmp2,
626 rtx outof_input, rtx into_input,
627 rtx subword_op1, rtx superword_op1,
628 rtx outof_target, rtx into_target,
629 int unsignedp, enum optab_methods methods,
630 unsigned HOST_WIDE_INT shift_mask)
632 rtx outof_superword, into_superword;
634 /* Put the superword version of the output into OUTOF_SUPERWORD and
/* The superword results are computed into scratch registers first,
   then conditionally moved into the real targets below.  */
636 outof_superword = outof_target != 0 ? gen_reg_rtx (word_mode) : 0;
637 if (outof_target != 0 && subword_op1 == superword_op1)
639 /* The value INTO_TARGET >> SUBWORD_OP1, which we later store in
640 OUTOF_TARGET, is the same as the value of INTO_SUPERWORD. */
641 into_superword = outof_target;
642 if (!expand_superword_shift (binoptab, outof_input, superword_op1,
643 outof_superword, 0, unsignedp, methods))
648 into_superword = gen_reg_rtx (word_mode);
649 if (!expand_superword_shift (binoptab, outof_input, superword_op1,
650 outof_superword, into_superword,
655 /* Put the subword version directly in OUTOF_TARGET and INTO_TARGET. */
656 if (!expand_subword_shift (op1_mode, binoptab,
657 outof_input, into_input, subword_op1,
658 outof_target, into_target,
659 unsignedp, methods, shift_mask))
662 /* Select between them. Do the INTO half first because INTO_SUPERWORD
663 might be the current value of OUTOF_TARGET. */
664 if (!emit_conditional_move (into_target, cmp_code, cmp1, cmp2, op1_mode,
665 into_target, into_superword, word_mode, false))
668 if (outof_target != 0)
669 if (!emit_conditional_move (outof_target, cmp_code, cmp1, cmp2, op1_mode,
670 outof_target, outof_superword,
678 /* Expand a doubleword shift (ashl, ashr or lshr) using word-mode shifts.
679 OUTOF_INPUT and INTO_INPUT are the two word-sized halves of the first
680 input operand; the shift moves bits in the direction OUTOF_INPUT->
681 INTO_TARGET. OUTOF_TARGET and INTO_TARGET are the equivalent words
682 of the target. OP1 is the shift count and OP1_MODE is its mode.
683 If OP1 is constant, it will have been truncated as appropriate
684 and is known to be nonzero.
686 If SHIFT_MASK is zero, the result of word shifts is undefined when the
687 shift count is outside the range [0, BITS_PER_WORD). This routine must
688 avoid generating such shifts for OP1s in the range [0, BITS_PER_WORD * 2).
690 If SHIFT_MASK is nonzero, all word-mode shift counts are effectively
691 masked by it and shifts in the range [BITS_PER_WORD, SHIFT_MASK) will
692 fill with zeros or sign bits as appropriate.
694 If SHIFT_MASK is BITS_PER_WORD - 1, this routine will synthesize
695 a doubleword shift whose equivalent mask is BITS_PER_WORD * 2 - 1.
696 Doing this preserves semantics required by SHIFT_COUNT_TRUNCATED.
697 In all other cases, shifts by values outside [0, BITS_PER_UNIT * 2)
700 BINOPTAB, UNSIGNEDP and METHODS are as for expand_binop. This function
701 may not use INTO_INPUT after modifying INTO_TARGET, and similarly for
702 OUTOF_INPUT and OUTOF_TARGET. OUTOF_TARGET can be null if the parent
703 function wants to calculate it itself.
705 Return true if the shift could be successfully synthesized. */
/* NOTE(review): extraction gaps throughout -- return type, braces, the
   setting of CMP_CODE (GEU/NE branches), several `else' branches and
   failure returns, the condmove fallthrough and the final `return true'
   are missing.  Verify against full optabs.c before editing.  */
708 expand_doubleword_shift (enum machine_mode op1_mode, optab binoptab,
709 rtx outof_input, rtx into_input, rtx op1,
710 rtx outof_target, rtx into_target,
711 int unsignedp, enum optab_methods methods,
712 unsigned HOST_WIDE_INT shift_mask)
714 rtx superword_op1, tmp, cmp1, cmp2;
715 rtx subword_label, done_label;
716 enum rtx_code cmp_code;
718 /* See if word-mode shifts by BITS_PER_WORD...BITS_PER_WORD * 2 - 1 will
719 fill the result with sign or zero bits as appropriate. If so, the value
720 of OUTOF_TARGET will always be (SHIFT OUTOF_INPUT OP1). Recursively call
721 this routine to calculate INTO_TARGET (which depends on both OUTOF_INPUT
722 and INTO_INPUT), then emit code to set up OUTOF_TARGET.
724 This isn't worthwhile for constant shifts since the optimizers will
725 cope better with in-range shift counts. */
726 if (shift_mask >= BITS_PER_WORD
728 && !CONSTANT_P (op1))
730 if (!expand_doubleword_shift (op1_mode, binoptab,
731 outof_input, into_input, op1,
733 unsignedp, methods, shift_mask))
735 if (!force_expand_binop (word_mode, binoptab, outof_input, op1,
736 outof_target, unsignedp, methods))
741 /* Set CMP_CODE, CMP1 and CMP2 so that the rtx (CMP_CODE CMP1 CMP2)
742 is true when the effective shift value is less than BITS_PER_WORD.
743 Set SUPERWORD_OP1 to the shift count that should be used to shift
744 OUTOF_INPUT into INTO_TARGET when the condition is false. */
745 tmp = immed_double_const (BITS_PER_WORD, 0, op1_mode);
746 if (!CONSTANT_P (op1) && shift_mask == BITS_PER_WORD - 1)
748 /* Set CMP1 to OP1 & BITS_PER_WORD. The result is zero iff OP1
749 is a subword shift count. */
750 cmp1 = simplify_expand_binop (op1_mode, and_optab, op1, tmp,
752 cmp2 = CONST0_RTX (op1_mode);
758 /* Set CMP1 to OP1 - BITS_PER_WORD. */
759 cmp1 = simplify_expand_binop (op1_mode, sub_optab, op1, tmp,
761 cmp2 = CONST0_RTX (op1_mode);
/* In this branch the superword shift count is OP1 - BITS_PER_WORD.  */
763 superword_op1 = cmp1;
768 /* If we can compute the condition at compile time, pick the
769 appropriate subroutine. */
770 tmp = simplify_relational_operation (cmp_code, SImode, op1_mode, cmp1, cmp2);
771 if (tmp != 0 && GET_CODE (tmp) == CONST_INT)
773 if (tmp == const0_rtx)
774 return expand_superword_shift (binoptab, outof_input, superword_op1,
775 outof_target, into_target,
778 return expand_subword_shift (op1_mode, binoptab,
779 outof_input, into_input, op1,
780 outof_target, into_target,
781 unsignedp, methods, shift_mask);
784 #ifdef HAVE_conditional_move
785 /* Try using conditional moves to generate straight-line code. */
787 rtx start = get_last_insn ();
788 if (expand_doubleword_shift_condmove (op1_mode, binoptab,
789 cmp_code, cmp1, cmp2,
790 outof_input, into_input,
792 outof_target, into_target,
793 unsignedp, methods, shift_mask))
/* Condmove expansion failed: discard anything it emitted.  */
795 delete_insns_since (start);
799 /* As a last resort, use branches to select the correct alternative. */
800 subword_label = gen_label_rtx ();
801 done_label = gen_label_rtx ();
803 do_compare_rtx_and_jump (cmp1, cmp2, cmp_code, false, op1_mode,
804 0, 0, subword_label);
806 if (!expand_superword_shift (binoptab, outof_input, superword_op1,
807 outof_target, into_target,
811 emit_jump_insn (gen_jump (done_label));
813 emit_label (subword_label);
815 if (!expand_subword_shift (op1_mode, binoptab,
816 outof_input, into_input, op1,
817 outof_target, into_target,
818 unsignedp, methods, shift_mask))
821 emit_label (done_label);
825 /* Subroutine of expand_binop. Perform a double word multiplication of
826 operands OP0 and OP1 both of mode MODE, which is exactly twice as wide
827 as the target's word_mode. This function return NULL_RTX if anything
828 goes wrong, in which case it may have already emitted instructions
829 which need to be deleted.
831 If we want to multiply two two-word values and have normal and widening
832 multiplies of single-word values, we can do this with three smaller
833 multiplications. Note that we do not make a REG_NO_CONFLICT block here
834 because we are not operating on one word at a time.
836 The multiplication proceeds as follows:
837 _______________________
838 [__op0_high_|__op0_low__]
839 _______________________
840 * [__op1_high_|__op1_low__]
841 _______________________________________________
842 _______________________
843 (1) [__op0_low__*__op1_low__]
844 _______________________
845 (2a) [__op0_low__*__op1_high_]
846 _______________________
847 (2b) [__op0_high_*__op1_low__]
848 _______________________
849 (3) [__op0_high_*__op1_high_]
852 This gives a 4-word result. Since we are only interested in the
853 lower 2 words, partial result (3) and the upper words of (2a) and
854 (2b) don't need to be calculated. Hence (2a) and (2b) can be
855 calculated using non-widening multiplication.
857 (1), however, needs to be calculated with an unsigned widening
858 multiplication. If this operation is not directly supported we
859 try using a signed widening multiplication and adjust the result.
860 This adjustment works as follows:
862 If both operands are positive then no adjustment is needed.
864 If the operands have different signs, for example op0_low < 0 and
865 op1_low >= 0, the instruction treats the most significant bit of
866 op0_low as a sign bit instead of a bit with significance
867 2**(BITS_PER_WORD-1), i.e. the instruction multiplies op1_low
868 with 2**BITS_PER_WORD - op0_low, and two's complements the
869 result. Conclusion: We need to add op1_low * 2**BITS_PER_WORD to
872 Similarly, if both operands are negative, we need to add
873 (op0_low + op1_low) * 2**BITS_PER_WORD.
875 We use a trick to adjust quickly. We logically shift op0_low right
876 (op1_low) BITS_PER_WORD-1 steps to get 0 or 1, and add this to
877 op0_high (op1_high) before it is used to calculate 2b (2a). If no
878 logical shift exists, we do an arithmetic right shift and subtract
/* NOTE(review): heavy extraction gaps -- the return type, braces, the
   `if (!umulp)' guards around the sign adjustments, NULL-checks after
   each expand_binop, the final emit/return and more are missing.
   Verify against full optabs.c before editing.  */
882 expand_doubleword_mult (enum machine_mode mode, rtx op0, rtx op1, rtx target,
883 bool umulp, enum optab_methods methods)
885 int low = (WORDS_BIG_ENDIAN ? 1 : 0);
886 int high = (WORDS_BIG_ENDIAN ? 0 : 1);
887 rtx wordm1 = umulp ? NULL_RTX : GEN_INT (BITS_PER_WORD - 1);
888 rtx product, adjust, product_high, temp;
/* Split both doubleword operands into their word-sized halves.  */
890 rtx op0_high = operand_subword_force (op0, high, mode);
891 rtx op0_low = operand_subword_force (op0, low, mode);
892 rtx op1_high = operand_subword_force (op1, high, mode);
893 rtx op1_low = operand_subword_force (op1, low, mode);
895 /* If we're using an unsigned multiply to directly compute the product
896 of the low-order words of the operands and perform any required
897 adjustments of the operands, we begin by trying two more multiplications
898 and then computing the appropriate sum.
900 We have checked above that the required addition is provided.
901 Full-word addition will normally always succeed, especially if
902 it is provided at all, so we don't worry about its failure. The
903 multiplication may well fail, however, so we do handle that. */
907 /* ??? This could be done with emit_store_flag where available. */
908 temp = expand_binop (word_mode, lshr_optab, op0_low, wordm1,
909 NULL_RTX, 1, methods);
911 op0_high = expand_binop (word_mode, add_optab, op0_high, temp,
912 NULL_RTX, 0, OPTAB_DIRECT);
/* No logical shift available: arithmetic shift yields 0 or -1, so
   subtract instead of add.  */
915 temp = expand_binop (word_mode, ashr_optab, op0_low, wordm1,
916 NULL_RTX, 0, methods);
919 op0_high = expand_binop (word_mode, sub_optab, op0_high, temp,
920 NULL_RTX, 0, OPTAB_DIRECT);
/* Partial product (2b): op0_high * op1_low, non-widening.  */
927 adjust = expand_binop (word_mode, smul_optab, op0_high, op1_low,
928 NULL_RTX, 0, OPTAB_DIRECT);
932 /* OP0_HIGH should now be dead. */
936 /* ??? This could be done with emit_store_flag where available. */
937 temp = expand_binop (word_mode, lshr_optab, op1_low, wordm1,
938 NULL_RTX, 1, methods);
940 op1_high = expand_binop (word_mode, add_optab, op1_high, temp,
941 NULL_RTX, 0, OPTAB_DIRECT);
944 temp = expand_binop (word_mode, ashr_optab, op1_low, wordm1,
945 NULL_RTX, 0, methods);
948 op1_high = expand_binop (word_mode, sub_optab, op1_high, temp,
949 NULL_RTX, 0, OPTAB_DIRECT);
/* Partial product (2a): op1_high * op0_low, non-widening.  */
956 temp = expand_binop (word_mode, smul_optab, op1_high, op0_low,
957 NULL_RTX, 0, OPTAB_DIRECT);
961 /* OP1_HIGH should now be dead. */
/* Combine (2a) and (2b) into the high-word adjustment.  */
963 adjust = expand_binop (word_mode, add_optab, adjust, temp,
964 adjust, 0, OPTAB_DIRECT);
966 if (target && !REG_P (target))
/* Partial product (1): widening multiply of the low words, unsigned
   or signed according to UMULP.  */
970 product = expand_binop (mode, umul_widen_optab, op0_low, op1_low,
971 target, 1, OPTAB_DIRECT);
973 product = expand_binop (mode, smul_widen_optab, op0_low, op1_low,
974 target, 1, OPTAB_DIRECT);
/* Fold the adjustment into the high word of the widening product.  */
979 product_high = operand_subword (product, high, 1, mode);
980 adjust = expand_binop (word_mode, add_optab, product_high, adjust,
981 REG_P (product_high) ? product_high : adjust,
983 emit_move_insn (product_high, adjust);
987 /* Wrapper around expand_binop which takes an rtx code to specify
988 the operation to perform, not an optab pointer. All other
989 arguments are the same. */
/* NOTE(review): the return type, braces and the assertion between lines
   995 and 998 are missing from this extraction.  The mapping from rtx
   code to optab goes through the global code_to_optab table.  */
991 expand_simple_binop (enum machine_mode mode, enum rtx_code code, rtx op0,
992 rtx op1, rtx target, int unsignedp,
993 enum optab_methods methods)
995 optab binop = code_to_optab[(int) code];
998 return expand_binop (mode, binop, op0, op1, target, unsignedp, methods);
1001 /* Generate code to perform an operation specified by BINOPTAB
1002 on operands OP0 and OP1, with result having machine-mode MODE.
1004 UNSIGNEDP is for the case where we have to widen the operands
1005 to perform the operation. It says to use zero-extension.
1007 If TARGET is nonzero, the value
1008 is generated there, if it is convenient to do so.
1009 In all cases an rtx is returned for the locus of the value;
1010 this may or may not be TARGET. */
1013 expand_binop (enum machine_mode mode, optab binoptab, rtx op0, rtx op1,
1014 rtx target, int unsignedp, enum optab_methods methods)
/* When the caller allows library calls, recursive attempts in other
   modes should only widen, never issue further libcalls; map the LIB
   strategies down to plain OPTAB_WIDEN for those recursions.  */
1016 enum optab_methods next_methods
1017 = (methods == OPTAB_LIB || methods == OPTAB_LIB_WIDEN
1018 ? OPTAB_WIDEN : methods);
1019 enum mode_class class;
1020 enum machine_mode wider_mode;
1022 int commutative_op = 0;
1023 int shift_op = (binoptab->code == ASHIFT
1024 || binoptab->code == ASHIFTRT
1025 || binoptab->code == LSHIFTRT
1026 || binoptab->code == ROTATE
1027 || binoptab->code == ROTATERT);
1028 rtx entry_last = get_last_insn ();
/* Cleared after the one-shot retry below that rotates by the
   complementary amount in the opposite direction, so we try it
   at most once.  */
1030 bool first_pass_p = true;
1032 class = GET_MODE_CLASS (mode);
1034 /* If subtracting an integer constant, convert this into an addition of
1035 the negated constant. */
1037 if (binoptab == sub_optab && GET_CODE (op1) == CONST_INT)
1039 op1 = negate_rtx (mode, op1);
1040 binoptab = add_optab;
1043 /* If we are inside an appropriately-short loop and we are optimizing,
1044 force expensive constants into a register. */
1045 if (CONSTANT_P (op0) && optimize
1046 && rtx_cost (op0, binoptab->code) > COSTS_N_INSNS (1))
1048 if (GET_MODE (op0) != VOIDmode)
1049 op0 = convert_modes (mode, VOIDmode, op0, unsignedp);
1050 op0 = force_reg (mode, op0);
1053 if (CONSTANT_P (op1) && optimize
1054 && ! shift_op && rtx_cost (op1, binoptab->code) > COSTS_N_INSNS (1))
1056 if (GET_MODE (op1) != VOIDmode)
1057 op1 = convert_modes (mode, VOIDmode, op1, unsignedp)
1058 op1 = force_reg (mode, op1);
1061 /* Record where to delete back to if we backtrack. */
1062 last = get_last_insn ();
1064 /* If operation is commutative,
1065 try to make the first operand a register.
1066 Even better, try to make it the same as the target.
1067 Also try to make the last operand a constant. */
1068 if (GET_RTX_CLASS (binoptab->code) == RTX_COMM_ARITH
1069 || binoptab == smul_widen_optab
1070 || binoptab == umul_widen_optab
1071 || binoptab == smul_highpart_optab
1072 || binoptab == umul_highpart_optab)
1076 if (((target == 0 || REG_P (target))
1080 : rtx_equal_p (op1, target))
1081 || GET_CODE (op0) == CONST_INT)
1091 /* If we can do it with a three-operand insn, do so. */
1093 if (methods != OPTAB_MUST_WIDEN
1094 && binoptab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
1096 int icode = (int) binoptab->handlers[(int) mode].insn_code;
1097 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
1098 enum machine_mode mode1 = insn_data[icode].operand[2].mode;
1100 rtx xop0 = op0, xop1 = op1;
1105 temp = gen_reg_rtx (mode);
1107 /* If it is a commutative operator and the modes would match
1108 if we would swap the operands, we can save the conversions. */
1111 if (GET_MODE (op0) != mode0 && GET_MODE (op1) != mode1
1112 && GET_MODE (op0) == mode1 && GET_MODE (op1) == mode0)
1116 tmp = op0; op0 = op1; op1 = tmp;
1117 tmp = xop0; xop0 = xop1; xop1 = tmp;
1121 /* In case the insn wants input operands in modes different from
1122 those of the actual operands, convert the operands. It would
1123 seem that we don't need to convert CONST_INTs, but we do, so
1124 that they're properly zero-extended, sign-extended or truncated
1127 if (GET_MODE (op0) != mode0 && mode0 != VOIDmode)
1128 xop0 = convert_modes (mode0,
1129 GET_MODE (op0) != VOIDmode
1134 if (GET_MODE (op1) != mode1 && mode1 != VOIDmode)
1135 xop1 = convert_modes (mode1,
1136 GET_MODE (op1) != VOIDmode
1141 /* Now, if insn's predicates don't allow our operands, put them into
1144 if (!insn_data[icode].operand[1].predicate (xop0, mode0)
1145 && mode0 != VOIDmode)
1146 xop0 = copy_to_mode_reg (mode0, xop0);
1148 if (!insn_data[icode].operand[2].predicate (xop1, mode1)
1149 && mode1 != VOIDmode)
1150 xop1 = copy_to_mode_reg (mode1, xop1);
1152 if (!insn_data[icode].operand[0].predicate (temp, mode))
1153 temp = gen_reg_rtx (mode);
1155 pat = GEN_FCN (icode) (temp, xop0, xop1);
1158 /* If PAT is composed of more than one insn, try to add an appropriate
1159 REG_EQUAL note to it. If we can't because TEMP conflicts with an
1160 operand, call ourselves again, this time without a target. */
1161 if (INSN_P (pat) && NEXT_INSN (pat) != NULL_RTX
1162 && ! add_equal_note (pat, temp, binoptab->code, xop0, xop1))
1164 delete_insns_since (last);
1165 return expand_binop (mode, binoptab, op0, op1, NULL_RTX,
1166 unsignedp, methods);
1173 delete_insns_since (last);
1176 /* If we were trying to rotate by a constant value, and that didn't
1177 work, try rotating the other direction before falling back to
1178 shifts and bitwise-or. */
1180 && (binoptab == rotl_optab || binoptab == rotr_optab)
1181 && class == MODE_INT
1182 && GET_CODE (op1) == CONST_INT
1184 && (unsigned int) INTVAL (op1) < GET_MODE_BITSIZE (mode))
1186 first_pass_p = false;
1187 op1 = GEN_INT (GET_MODE_BITSIZE (mode) - INTVAL (op1));
1188 binoptab = binoptab == rotl_optab ? rotr_optab : rotl_optab;
1192 /* If this is a multiply, see if we can do a widening operation that
1193 takes operands of this mode and makes a wider mode. */
1195 if (binoptab == smul_optab
1196 && GET_MODE_WIDER_MODE (mode) != VOIDmode
1197 && (((unsignedp ? umul_widen_optab : smul_widen_optab)
1198 ->handlers[(int) GET_MODE_WIDER_MODE (mode)].insn_code)
1199 != CODE_FOR_nothing))
1201 temp = expand_binop (GET_MODE_WIDER_MODE (mode),
1202 unsignedp ? umul_widen_optab : smul_widen_optab,
1203 op0, op1, NULL_RTX, unsignedp, OPTAB_DIRECT);
1207 if (GET_MODE_CLASS (mode) == MODE_INT
1208 && TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode),
1209 GET_MODE_BITSIZE (GET_MODE (temp))))
1210 return gen_lowpart (mode, temp);
1212 return convert_to_mode (mode, temp, unsignedp);
1216 /* Look for a wider mode of the same class for which we think we
1217 can open-code the operation. Check for a widening multiply at the
1218 wider mode as well. */
1220 if (CLASS_HAS_WIDER_MODES_P (class)
1221 && methods != OPTAB_DIRECT && methods != OPTAB_LIB)
1222 for (wider_mode = GET_MODE_WIDER_MODE (mode);
1223 wider_mode != VOIDmode;
1224 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
1226 if (binoptab->handlers[(int) wider_mode].insn_code != CODE_FOR_nothing
1227 || (binoptab == smul_optab
1228 && GET_MODE_WIDER_MODE (wider_mode) != VOIDmode
1229 && (((unsignedp ? umul_widen_optab : smul_widen_optab)
1230 ->handlers[(int) GET_MODE_WIDER_MODE (wider_mode)].insn_code)
1231 != CODE_FOR_nothing)))
1233 rtx xop0 = op0, xop1 = op1;
1236 /* For certain integer operations, we need not actually extend
1237 the narrow operands, as long as we will truncate
1238 the results to the same narrowness. */
1240 if ((binoptab == ior_optab || binoptab == and_optab
1241 || binoptab == xor_optab
1242 || binoptab == add_optab || binoptab == sub_optab
1243 || binoptab == smul_optab || binoptab == ashl_optab)
1244 && class == MODE_INT)
1247 xop0 = widen_operand (xop0, wider_mode, mode, unsignedp, no_extend);
1249 /* The second operand of a shift must always be extended. */
1250 xop1 = widen_operand (xop1, wider_mode, mode, unsignedp,
1251 no_extend && binoptab != ashl_optab);
1253 temp = expand_binop (wider_mode, binoptab, xop0, xop1, NULL_RTX,
1254 unsignedp, OPTAB_DIRECT);
1257 if (class != MODE_INT
1258 || !TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode),
1259 GET_MODE_BITSIZE (wider_mode)))
1262 target = gen_reg_rtx (mode);
1263 convert_move (target, temp, 0);
1267 return gen_lowpart (mode, temp);
1270 delete_insns_since (last);
1274 /* These can be done a word at a time. */
1275 if ((binoptab == and_optab || binoptab == ior_optab || binoptab == xor_optab)
1276 && class == MODE_INT
1277 && GET_MODE_SIZE (mode) > UNITS_PER_WORD
1278 && binoptab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing
1284 /* If TARGET is the same as one of the operands, the REG_EQUAL note
1285 won't be accurate, so use a new target. */
1286 if (target == 0 || target == op0 || target == op1)
1287 target = gen_reg_rtx (mode);
1291 /* Do the actual arithmetic. */
1292 for (i = 0; i < GET_MODE_BITSIZE (mode) / BITS_PER_WORD; i++)
1294 rtx target_piece = operand_subword (target, i, 1, mode);
1295 rtx x = expand_binop (word_mode, binoptab,
1296 operand_subword_force (op0, i, mode),
1297 operand_subword_force (op1, i, mode),
1298 target_piece, unsignedp, next_methods);
1303 if (target_piece != x)
1304 emit_move_insn (target_piece, x);
1307 insns = get_insns ();
/* Only succeed if every word was expanded; the loop exits early
   (i short of the word count) when a piece failed.  */
1310 if (i == GET_MODE_BITSIZE (mode) / BITS_PER_WORD)
1312 if (binoptab->code != UNKNOWN)
1314 = gen_rtx_fmt_ee (binoptab->code, mode,
1315 copy_rtx (op0), copy_rtx (op1));
1319 emit_no_conflict_block (insns, target, op0, op1, equiv_value);
1324 /* Synthesize double word shifts from single word shifts. */
1325 if ((binoptab == lshr_optab || binoptab == ashl_optab
1326 || binoptab == ashr_optab)
1327 && class == MODE_INT
1328 && (GET_CODE (op1) == CONST_INT || !optimize_size)
1329 && GET_MODE_SIZE (mode) == 2 * UNITS_PER_WORD
1330 && binoptab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing
1331 && ashl_optab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing
1332 && lshr_optab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing)
1334 unsigned HOST_WIDE_INT shift_mask, double_shift_mask;
1335 enum machine_mode op1_mode;
/* Ask the target how it truncates shift counts in the double-word
   mode and in word_mode; these masks gate which combinations
   expand_doubleword_shift can handle.  */
1337 double_shift_mask = targetm.shift_truncation_mask (mode);
1338 shift_mask = targetm.shift_truncation_mask (word_mode);
1339 op1_mode = GET_MODE (op1) != VOIDmode ? GET_MODE (op1) : word_mode;
1341 /* Apply the truncation to constant shifts. */
1342 if (double_shift_mask > 0 && GET_CODE (op1) == CONST_INT)
1343 op1 = GEN_INT (INTVAL (op1) & double_shift_mask);
1345 if (op1 == CONST0_RTX (op1_mode))
1348 /* Make sure that this is a combination that expand_doubleword_shift
1349 can handle. See the comments there for details. */
1350 if (double_shift_mask == 0
1351 || (shift_mask == BITS_PER_WORD - 1
1352 && double_shift_mask == BITS_PER_WORD * 2 - 1))
1354 rtx insns, equiv_value;
1355 rtx into_target, outof_target;
1356 rtx into_input, outof_input;
1357 int left_shift, outof_word;
1359 /* If TARGET is the same as one of the operands, the REG_EQUAL note
1360 won't be accurate, so use a new target. */
1361 if (target == 0 || target == op0 || target == op1)
1362 target = gen_reg_rtx (mode);
1366 /* OUTOF_* is the word we are shifting bits away from, and
1367 INTO_* is the word that we are shifting bits towards, thus
1368 they differ depending on the direction of the shift and
1369 WORDS_BIG_ENDIAN. */
1371 left_shift = binoptab == ashl_optab;
1372 outof_word = left_shift ^ ! WORDS_BIG_ENDIAN;
1374 outof_target = operand_subword (target, outof_word, 1, mode);
1375 into_target = operand_subword (target, 1 - outof_word, 1, mode);
1377 outof_input = operand_subword_force (op0, outof_word, mode);
1378 into_input = operand_subword_force (op0, 1 - outof_word, mode);
1380 if (expand_doubleword_shift (op1_mode, binoptab,
1381 outof_input, into_input, op1,
1382 outof_target, into_target,
1383 unsignedp, methods, shift_mask))
1385 insns = get_insns ();
1388 equiv_value = gen_rtx_fmt_ee (binoptab->code, mode, op0, op1);
1389 emit_no_conflict_block (insns, target, op0, op1, equiv_value);
1396 /* Synthesize double word rotates from single word shifts. */
1397 if ((binoptab == rotl_optab || binoptab == rotr_optab)
1398 && class == MODE_INT
1399 && GET_CODE (op1) == CONST_INT
1400 && GET_MODE_SIZE (mode) == 2 * UNITS_PER_WORD
1401 && ashl_optab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing
1402 && lshr_optab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing)
1404 rtx insns, equiv_value;
1405 rtx into_target, outof_target;
1406 rtx into_input, outof_input;
1408 int shift_count, left_shift, outof_word;
1410 /* If TARGET is the same as one of the operands, the REG_EQUAL note
1411 won't be accurate, so use a new target. Do this also if target is not
1412 a REG, first because having a register instead may open optimization
1413 opportunities, and second because if target and op0 happen to be MEMs
1414 designating the same location, we would risk clobbering it too early
1415 in the code sequence we generate below. */
1416 if (target == 0 || target == op0 || target == op1 || ! REG_P (target))
1417 target = gen_reg_rtx (mode);
1421 shift_count = INTVAL (op1);
1423 /* OUTOF_* is the word we are shifting bits away from, and
1424 INTO_* is the word that we are shifting bits towards, thus
1425 they differ depending on the direction of the shift and
1426 WORDS_BIG_ENDIAN. */
1428 left_shift = (binoptab == rotl_optab);
1429 outof_word = left_shift ^ ! WORDS_BIG_ENDIAN;
1431 outof_target = operand_subword (target, outof_word, 1, mode);
1432 into_target = operand_subword (target, 1 - outof_word, 1, mode);
1434 outof_input = operand_subword_force (op0, outof_word, mode);
1435 into_input = operand_subword_force (op0, 1 - outof_word, mode);
1437 if (shift_count == BITS_PER_WORD)
1439 /* This is just a word swap. */
1440 emit_move_insn (outof_target, into_input);
1441 emit_move_insn (into_target, outof_input);
1446 rtx into_temp1, into_temp2, outof_temp1, outof_temp2;
1447 rtx first_shift_count, second_shift_count;
1448 optab reverse_unsigned_shift, unsigned_shift;
1450 reverse_unsigned_shift = (left_shift ^ (shift_count < BITS_PER_WORD)
1451 ? lshr_optab : ashl_optab);
1453 unsigned_shift = (left_shift ^ (shift_count < BITS_PER_WORD)
1454 ? ashl_optab : lshr_optab);
1456 if (shift_count > BITS_PER_WORD)
1458 first_shift_count = GEN_INT (shift_count - BITS_PER_WORD);
1459 second_shift_count = GEN_INT (2 * BITS_PER_WORD - shift_count);
1463 first_shift_count = GEN_INT (BITS_PER_WORD - shift_count);
1464 second_shift_count = GEN_INT (shift_count);
/* Each output word is the IOR of two single-word shifts, one
   taken from each input word.  */
1467 into_temp1 = expand_binop (word_mode, unsigned_shift,
1468 outof_input, first_shift_count,
1469 NULL_RTX, unsignedp, next_methods);
1470 into_temp2 = expand_binop (word_mode, reverse_unsigned_shift,
1471 into_input, second_shift_count,
1472 NULL_RTX, unsignedp, next_methods);
1474 if (into_temp1 != 0 && into_temp2 != 0)
1475 inter = expand_binop (word_mode, ior_optab, into_temp1, into_temp2,
1476 into_target, unsignedp, next_methods);
1480 if (inter != 0 && inter != into_target)
1481 emit_move_insn (into_target, inter);
1483 outof_temp1 = expand_binop (word_mode, unsigned_shift,
1484 into_input, first_shift_count,
1485 NULL_RTX, unsignedp, next_methods);
1486 outof_temp2 = expand_binop (word_mode, reverse_unsigned_shift,
1487 outof_input, second_shift_count,
1488 NULL_RTX, unsignedp, next_methods);
1490 if (inter != 0 && outof_temp1 != 0 && outof_temp2 != 0)
1491 inter = expand_binop (word_mode, ior_optab,
1492 outof_temp1, outof_temp2,
1493 outof_target, unsignedp, next_methods);
1495 if (inter != 0 && inter != outof_target)
1496 emit_move_insn (outof_target, inter);
1499 insns = get_insns ();
1504 if (binoptab->code != UNKNOWN)
1505 equiv_value = gen_rtx_fmt_ee (binoptab->code, mode, op0, op1);
1509 /* We can't make this a no conflict block if this is a word swap,
1510 because the word swap case fails if the input and output values
1511 are in the same register. */
1512 if (shift_count != BITS_PER_WORD)
1513 emit_no_conflict_block (insns, target, op0, op1, equiv_value);
1522 /* These can be done a word at a time by propagating carries. */
1523 if ((binoptab == add_optab || binoptab == sub_optab)
1524 && class == MODE_INT
1525 && GET_MODE_SIZE (mode) >= 2 * UNITS_PER_WORD
1526 && binoptab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing
1529 optab otheroptab = binoptab == add_optab ? sub_optab : add_optab;
1530 const unsigned int nwords = GET_MODE_BITSIZE (mode) / BITS_PER_WORD;
1531 rtx carry_in = NULL_RTX, carry_out = NULL_RTX;
1532 rtx xop0, xop1, xtarget;
1534 /* We can handle either a 1 or -1 value for the carry. If STORE_FLAG
1535 value is one of those, use it. Otherwise, use 1 since it is the
1536 one easiest to get. */
1537 #if STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1
1538 int normalizep = STORE_FLAG_VALUE;
1543 /* Prepare the operands. */
1544 xop0 = force_reg (mode, op0);
1545 xop1 = force_reg (mode, op1);
1547 xtarget = gen_reg_rtx (mode);
1549 if (target == 0 || !REG_P (target))
1552 /* Indicate for flow that the entire target reg is being set. */
1554 emit_insn (gen_rtx_CLOBBER (VOIDmode, xtarget));
1556 /* Do the actual arithmetic. */
1557 for (i = 0; i < nwords; i++)
/* Process words from least to most significant so carries
   propagate upward; INDEX converts loop order to storage order.  */
1559 int index = (WORDS_BIG_ENDIAN ? nwords - i - 1 : i);
1560 rtx target_piece = operand_subword (xtarget, index, 1, mode);
1561 rtx op0_piece = operand_subword_force (xop0, index, mode);
1562 rtx op1_piece = operand_subword_force (xop1, index, mode);
1565 /* Main add/subtract of the input operands. */
1566 x = expand_binop (word_mode, binoptab,
1567 op0_piece, op1_piece,
1568 target_piece, unsignedp, next_methods);
1574 /* Store carry from main add/subtract. */
1575 carry_out = gen_reg_rtx (word_mode);
1576 carry_out = emit_store_flag_force (carry_out,
1577 (binoptab == add_optab
1580 word_mode, 1, normalizep);
1587 /* Add/subtract previous carry to main result. */
1588 newx = expand_binop (word_mode,
1589 normalizep == 1 ? binoptab : otheroptab,
1591 NULL_RTX, 1, next_methods);
1595 /* Get out carry from adding/subtracting carry in. */
1596 rtx carry_tmp = gen_reg_rtx (word_mode);
1597 carry_tmp = emit_store_flag_force (carry_tmp,
1598 (binoptab == add_optab
1601 word_mode, 1, normalizep);
1603 /* Logical-ior the two poss. carry together. */
1604 carry_out = expand_binop (word_mode, ior_optab,
1605 carry_out, carry_tmp,
1606 carry_out, 0, next_methods);
1610 emit_move_insn (target_piece, newx);
1614 if (x != target_piece)
1615 emit_move_insn (target_piece, x);
1618 carry_in = carry_out;
/* Only succeed if every word was expanded without failure.  */
1621 if (i == GET_MODE_BITSIZE (mode) / (unsigned) BITS_PER_WORD)
1623 if (mov_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing
1624 || ! rtx_equal_p (target, xtarget))
1626 rtx temp = emit_move_insn (target, xtarget);
1628 set_unique_reg_note (temp,
1630 gen_rtx_fmt_ee (binoptab->code, mode,
1641 delete_insns_since (last);
1644 /* Attempt to synthesize double word multiplies using a sequence of word
1645 mode multiplications. We first attempt to generate a sequence using a
1646 more efficient unsigned widening multiply, and if that fails we then
1647 try using a signed widening multiply. */
1649 if (binoptab == smul_optab
1650 && class == MODE_INT
1651 && GET_MODE_SIZE (mode) == 2 * UNITS_PER_WORD
1652 && smul_optab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing
1653 && add_optab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing)
1655 rtx product = NULL_RTX;
1657 if (umul_widen_optab->handlers[(int) mode].insn_code
1658 != CODE_FOR_nothing)
1660 product = expand_doubleword_mult (mode, op0, op1, target,
1663 delete_insns_since (last);
1666 if (product == NULL_RTX
1667 && smul_widen_optab->handlers[(int) mode].insn_code
1668 != CODE_FOR_nothing)
1670 product = expand_doubleword_mult (mode, op0, op1, target,
1673 delete_insns_since (last);
1676 if (product != NULL_RTX)
1678 if (mov_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
1680 temp = emit_move_insn (target ? target : product, product);
1681 set_unique_reg_note (temp,
1683 gen_rtx_fmt_ee (MULT, mode,
1691 /* It can't be open-coded in this mode.
1692 Use a library call if one is available and caller says that's ok. */
1694 if (binoptab->handlers[(int) mode].libfunc
1695 && (methods == OPTAB_LIB || methods == OPTAB_LIB_WIDEN))
1699 enum machine_mode op1_mode = mode;
1706 op1_mode = word_mode;
1707 /* Specify unsigned here,
1708 since negative shift counts are meaningless. */
1709 op1x = convert_to_mode (word_mode, op1, 1);
1712 if (GET_MODE (op0) != VOIDmode
1713 && GET_MODE (op0) != mode)
1714 op0 = convert_to_mode (mode, op0, unsignedp);
1716 /* Pass 1 for NO_QUEUE so we don't lose any increments
1717 if the libcall is cse'd or moved. */
1718 value = emit_library_call_value (binoptab->handlers[(int) mode].libfunc,
1719 NULL_RTX, LCT_CONST, mode, 2,
1720 op0, mode, op1x, op1_mode);
1722 insns = get_insns ();
1725 target = gen_reg_rtx (mode);
1726 emit_libcall_block (insns, target, value,
1727 gen_rtx_fmt_ee (binoptab->code, mode, op0, op1));
1732 delete_insns_since (last);
1734 /* It can't be done in this mode. Can we do it in a wider mode? */
1736 if (! (methods == OPTAB_WIDEN || methods == OPTAB_LIB_WIDEN
1737 || methods == OPTAB_MUST_WIDEN))
1739 /* Caller says, don't even try. */
1740 delete_insns_since (entry_last);
1744 /* Compute the value of METHODS to pass to recursive calls.
1745 Don't allow widening to be tried recursively. */
1747 methods = (methods == OPTAB_LIB_WIDEN ? OPTAB_LIB : OPTAB_DIRECT);
1749 /* Look for a wider mode of the same class for which it appears we can do
1752 if (CLASS_HAS_WIDER_MODES_P (class))
1754 for (wider_mode = GET_MODE_WIDER_MODE (mode);
1755 wider_mode != VOIDmode;
1756 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
1758 if ((binoptab->handlers[(int) wider_mode].insn_code
1759 != CODE_FOR_nothing)
1760 || (methods == OPTAB_LIB
1761 && binoptab->handlers[(int) wider_mode].libfunc)
1763 rtx xop0 = op0, xop1 = op1;
1766 /* For certain integer operations, we need not actually extend
1767 the narrow operands, as long as we will truncate
1768 the results to the same narrowness. */
1770 if ((binoptab == ior_optab || binoptab == and_optab
1771 || binoptab == xor_optab
1772 || binoptab == add_optab || binoptab == sub_optab
1773 || binoptab == smul_optab || binoptab == ashl_optab)
1774 && class == MODE_INT)
1777 xop0 = widen_operand (xop0, wider_mode, mode,
1778 unsignedp, no_extend);
1780 /* The second operand of a shift must always be extended. */
1781 xop1 = widen_operand (xop1, wider_mode, mode, unsignedp,
1782 no_extend && binoptab != ashl_optab);
1784 temp = expand_binop (wider_mode, binoptab, xop0, xop1, NULL_RTX,
1785 unsignedp, methods);
1788 if (class != MODE_INT
1789 || !TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode),
1790 GET_MODE_BITSIZE (wider_mode)))
1793 target = gen_reg_rtx (mode);
1794 convert_move (target, temp, 0);
1798 return gen_lowpart (mode, temp);
1801 delete_insns_since (last);
1806 delete_insns_since (entry_last);
1810 /* Expand a binary operator which has both signed and unsigned forms.
1811 UOPTAB is the optab for unsigned operations, and SOPTAB is for
1814 If we widen unsigned operands, we may use a signed wider operation instead
1815 of an unsigned wider operation, since the result would be the same. */
1818 sign_expand_binop (enum machine_mode mode, optab uoptab, optab soptab,
1819 rtx op0, rtx op1, rtx target, int unsignedp,
1820 enum optab_methods methods)
/* Pick the optab that matches the requested signedness for the
   direct (non-widening) attempt.  */
1823 optab direct_optab = unsignedp ? uoptab : soptab;
1824 struct optab wide_soptab;
1826 /* Do it without widening, if possible. */
1827 temp = expand_binop (mode, direct_optab, op0, op1, target,
1828 unsignedp, OPTAB_DIRECT);
1829 if (temp || methods == OPTAB_DIRECT)
1832 /* Try widening to a signed int. Make a fake signed optab that
1833 hides any signed insn for direct use. */
1834 wide_soptab = *soptab;
1835 wide_soptab.handlers[(int) mode].insn_code = CODE_FOR_nothing;
1836 wide_soptab.handlers[(int) mode].libfunc = 0;
1838 temp = expand_binop (mode, &wide_soptab, op0, op1, target,
1839 unsignedp, OPTAB_WIDEN);
1841 /* For unsigned operands, try widening to an unsigned int. */
1842 if (temp == 0 && unsignedp)
1843 temp = expand_binop (mode, uoptab, op0, op1, target,
1844 unsignedp, OPTAB_WIDEN);
1845 if (temp || methods == OPTAB_WIDEN)
1848 /* Use the right width lib call if that exists. */
1849 temp = expand_binop (mode, direct_optab, op0, op1, target, unsignedp, OPTAB_LIB);
1850 if (temp || methods == OPTAB_LIB)
1853 /* Must widen and use a lib call, use either signed or unsigned. */
1854 temp = expand_binop (mode, &wide_soptab, op0, op1, target,
1855 unsignedp, methods);
/* Last resort: let the unsigned optab try with the caller's methods.  */
1859 return expand_binop (mode, uoptab, op0, op1, target,
1860 unsignedp, methods);
1864 /* Generate code to perform an operation specified by UNOPPTAB
1865 on operand OP0, with two results to TARG0 and TARG1.
1866 We assume that the order of the operands for the instruction
1867 is TARG0, TARG1, OP0.
1869 Either TARG0 or TARG1 may be zero, but what that means is that
1870 the result is not actually wanted. We will generate it into
1871 a dummy pseudo-reg and discard it. They may not both be zero.
1873 Returns 1 if this operation can be performed; 0 if not. */
1876 expand_twoval_unop (optab unoptab, rtx op0, rtx targ0, rtx targ1,
/* MODE comes from whichever result the caller actually asked for.  */
1879 enum machine_mode mode = GET_MODE (targ0 ? targ0 : targ1);
1880 enum mode_class class;
1881 enum machine_mode wider_mode;
1882 rtx entry_last = get_last_insn ();
1885 class = GET_MODE_CLASS (mode);
1888 targ0 = gen_reg_rtx (mode);
1890 targ1 = gen_reg_rtx (mode);
1892 /* Record where to go back to if we fail. */
1893 last = get_last_insn ();
1895 if (unoptab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
1897 int icode = (int) unoptab->handlers[(int) mode].insn_code;
1898 enum machine_mode mode0 = insn_data[icode].operand[2].mode;
1902 if (GET_MODE (xop0) != VOIDmode
1903 && GET_MODE (xop0) != mode0)
1904 xop0 = convert_to_mode (mode0, xop0, unsignedp);
1906 /* Now, if insn doesn't accept these operands, put them into pseudos. */
1907 if (!insn_data[icode].operand[2].predicate (xop0, mode0))
1908 xop0 = copy_to_mode_reg (mode0, xop0);
1910 /* We could handle this, but we should always be called with a pseudo
1911 for our targets and all insns should take them as outputs. */
1912 gcc_assert (insn_data[icode].operand[0].predicate (targ0, mode));
1913 gcc_assert (insn_data[icode].operand[1].predicate (targ1, mode));
1915 pat = GEN_FCN (icode) (targ0, targ1, xop0);
1922 delete_insns_since (last);
1925 /* It can't be done in this mode. Can we do it in a wider mode? */
1927 if (CLASS_HAS_WIDER_MODES_P (class))
1929 for (wider_mode = GET_MODE_WIDER_MODE (mode);
1930 wider_mode != VOIDmode;
1931 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
1933 if (unoptab->handlers[(int) wider_mode].insn_code
1934 != CODE_FOR_nothing)
/* Do the operation in the wider mode on a widened copy of OP0,
   then narrow both results back to MODE.  */
1936 rtx t0 = gen_reg_rtx (wider_mode);
1937 rtx t1 = gen_reg_rtx (wider_mode);
1938 rtx cop0 = convert_modes (wider_mode, mode, op0, unsignedp);
1940 if (expand_twoval_unop (unoptab, cop0, t0, t1, unsignedp))
1942 convert_move (targ0, t0, unsignedp);
1943 convert_move (targ1, t1, unsignedp);
1947 delete_insns_since (last);
1952 delete_insns_since (entry_last);
1956 /* Generate code to perform an operation specified by BINOPTAB
1957 on operands OP0 and OP1, with two results to TARG0 and TARG1.
1958 We assume that the order of the operands for the instruction
1959 is TARG0, OP0, OP1, TARG1, which would fit a pattern like
1960 [(set TARG0 (operate OP0 OP1)) (set TARG1 (operate ...))].
1962 Either TARG0 or TARG1 may be zero, but what that means is that
1963 the result is not actually wanted. We will generate it into
1964 a dummy pseudo-reg and discard it. They may not both be zero.
1966 Returns 1 if this operation can be performed; 0 if not. */
1969 expand_twoval_binop (optab binoptab, rtx op0, rtx op1, rtx targ0, rtx targ1,
/* MODE comes from whichever result the caller actually asked for.  */
1972 enum machine_mode mode = GET_MODE (targ0 ? targ0 : targ1);
1973 enum mode_class class;
1974 enum machine_mode wider_mode;
1975 rtx entry_last = get_last_insn ();
1978 class = GET_MODE_CLASS (mode);
1980 /* If we are inside an appropriately-short loop and we are optimizing,
1981 force expensive constants into a register. */
1982 if (CONSTANT_P (op0) && optimize
1983 && rtx_cost (op0, binoptab->code) > COSTS_N_INSNS (1))
1984 op0 = force_reg (mode, op0);
1986 if (CONSTANT_P (op1) && optimize
1987 && rtx_cost (op1, binoptab->code) > COSTS_N_INSNS (1))
1988 op1 = force_reg (mode, op1);
1991 targ0 = gen_reg_rtx (mode);
1993 targ1 = gen_reg_rtx (mode);
1995 /* Record where to go back to if we fail. */
1996 last = get_last_insn ();
1998 if (binoptab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
2000 int icode = (int) binoptab->handlers[(int) mode].insn_code;
2001 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
2002 enum machine_mode mode1 = insn_data[icode].operand[2].mode;
2004 rtx xop0 = op0, xop1 = op1;
2006 /* In case the insn wants input operands in modes different from
2007 those of the actual operands, convert the operands. It would
2008 seem that we don't need to convert CONST_INTs, but we do, so
2009 that they're properly zero-extended, sign-extended or truncated
2012 if (GET_MODE (op0) != mode0 && mode0 != VOIDmode)
2013 xop0 = convert_modes (mode0,
2014 GET_MODE (op0) != VOIDmode
2019 if (GET_MODE (op1) != mode1 && mode1 != VOIDmode)
2020 xop1 = convert_modes (mode1,
2021 GET_MODE (op1) != VOIDmode
2026 /* Now, if insn doesn't accept these operands, put them into pseudos. */
2027 if (!insn_data[icode].operand[1].predicate (xop0, mode0))
2028 xop0 = copy_to_mode_reg (mode0, xop0);
2030 if (!insn_data[icode].operand[2].predicate (xop1, mode1))
2031 xop1 = copy_to_mode_reg (mode1, xop1);
2033 /* We could handle this, but we should always be called with a pseudo
2034 for our targets and all insns should take them as outputs. */
2035 gcc_assert (insn_data[icode].operand[0].predicate (targ0, mode));
2036 gcc_assert (insn_data[icode].operand[3].predicate (targ1, mode));
2038 pat = GEN_FCN (icode) (targ0, xop0, xop1, targ1);
2045 delete_insns_since (last);
2048 /* It can't be done in this mode. Can we do it in a wider mode? */
2050 if (CLASS_HAS_WIDER_MODES_P (class))
2052 for (wider_mode = GET_MODE_WIDER_MODE (mode);
2053 wider_mode != VOIDmode;
2054 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
2056 if (binoptab->handlers[(int) wider_mode].insn_code
2057 != CODE_FOR_nothing)
/* Do the operation in the wider mode on widened copies of the
   operands, then narrow both results back to MODE.  */
2059 rtx t0 = gen_reg_rtx (wider_mode);
2060 rtx t1 = gen_reg_rtx (wider_mode);
2061 rtx cop0 = convert_modes (wider_mode, mode, op0, unsignedp);
2062 rtx cop1 = convert_modes (wider_mode, mode, op1, unsignedp);
2064 if (expand_twoval_binop (binoptab, cop0, cop1,
2067 convert_move (targ0, t0, unsignedp);
2068 convert_move (targ1, t1, unsignedp);
2072 delete_insns_since (last);
2077 delete_insns_since (entry_last);
2081 /* Expand the two-valued library call indicated by BINOPTAB, but
2082 preserve only one of the values. If TARG0 is non-NULL, the first
2083 value is placed into TARG0; otherwise the second value is placed
2084 into TARG1. Exactly one of TARG0 and TARG1 must be non-NULL. The
2085 value stored into TARG0 or TARG1 is equivalent to (CODE OP0 OP1).
2086 This routine assumes that the value returned by the library call is
2087 as if the return value was of an integral mode twice as wide as the
2088 mode of OP0. Returns 1 if the call was successful. */
2091 expand_twoval_binop_libfunc (optab binoptab, rtx op0, rtx op1,
2092 rtx targ0, rtx targ1, enum rtx_code code)
2094 enum machine_mode mode;
2095 enum machine_mode libval_mode;
2099 /* Exactly one of TARG0 or TARG1 should be non-NULL. */
2100 gcc_assert (!targ0 != !targ1);
2102 mode = GET_MODE (op0);
2103 if (!binoptab->handlers[(int) mode].libfunc)
2106 /* The value returned by the library function will have twice as
2107 many bits as the nominal MODE. */
2108 libval_mode = smallest_mode_for_size (2 * GET_MODE_BITSIZE (mode),
2111 libval = emit_library_call_value (binoptab->handlers[(int) mode].libfunc,
2112 NULL_RTX, LCT_CONST,
2116 /* Get the part of VAL containing the value that we want. */
/* Subreg byte 0 holds the first value; the second value starts
   GET_MODE_SIZE (mode) bytes in.  */
2117 libval = simplify_gen_subreg (mode, libval, libval_mode,
2118 targ0 ? 0 : GET_MODE_SIZE (mode));
2119 insns = get_insns ();
2121 /* Move the result into the desired location. */
2122 emit_libcall_block (insns, targ0 ? targ0 : targ1, libval,
2123 gen_rtx_fmt_ee (code, mode, op0, op1));
2129 /* Wrapper around expand_unop which takes an rtx code to specify
2130 the operation to perform, not an optab pointer. All other
2131 arguments are the same. */
2133 expand_simple_unop (enum machine_mode mode, enum rtx_code code, rtx op0,
2134 rtx target, int unsignedp)
/* Map the rtx code to its optab and delegate to expand_unop.  */
2136 optab unop = code_to_optab[(int) code];
2139 return expand_unop (mode, unop, op0, target, unsignedp);
2145 (clz:wide (zero_extend:wide x)) - ((width wide) - (width narrow)). */
2147 widen_clz (enum machine_mode mode, rtx op0, rtx target)
2149 enum mode_class class = GET_MODE_CLASS (mode);
2150 if (CLASS_HAS_WIDER_MODES_P (class))
2152 enum machine_mode wider_mode;
2153 for (wider_mode = GET_MODE_WIDER_MODE (mode);
2154 wider_mode != VOIDmode;
2155 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
2157 if (clz_optab->handlers[(int) wider_mode].insn_code
2158 != CODE_FOR_nothing)
2160 rtx xop0, temp, last;
2162 last = get_last_insn ();
2165 target = gen_reg_rtx (mode);
/* Zero-extend OP0, count its leading zeros in the wider mode,
   then subtract the extra high bits introduced by widening.  */
2166 xop0 = widen_operand (op0, wider_mode, mode, true, false);
2167 temp = expand_unop (wider_mode, clz_optab, xop0, NULL_RTX, true);
2169 temp = expand_binop (wider_mode, sub_optab, temp,
2170 GEN_INT (GET_MODE_BITSIZE (wider_mode)
2171 - GET_MODE_BITSIZE (mode)),
2172 target, true, OPTAB_DIRECT);
2174 delete_insns_since (last);
2183 /* Try calculating (parity x) as (and (popcount x) 1), where
2184 popcount can also be done in a wider mode. */
2186 expand_parity (enum machine_mode mode, rtx op0, rtx target)
2188 enum mode_class class = GET_MODE_CLASS (mode);
2189 if (CLASS_HAS_WIDER_MODES_P (class))
2191 enum machine_mode wider_mode;
/* Unlike widen_clz, start the search at MODE itself: a popcount
   pattern in the original mode also works for parity.  */
2192 for (wider_mode = mode; wider_mode != VOIDmode;
2193 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
2195 if (popcount_optab->handlers[(int) wider_mode].insn_code
2196 != CODE_FOR_nothing)
2198 rtx xop0, temp, last;
2200 last = get_last_insn ();
2203 target = gen_reg_rtx (mode);
/* Parity is the low bit of the population count.  */
2204 xop0 = widen_operand (op0, wider_mode, mode, true, false);
2205 temp = expand_unop (wider_mode, popcount_optab, xop0, NULL_RTX,
2208 temp = expand_binop (wider_mode, and_optab, temp, const1_rtx,
2209 target, true, OPTAB_DIRECT);
2211 delete_insns_since (last);
2220 /* Extract the OMODE lowpart from VAL, which has IMODE. Under certain
2221 conditions, VAL may already be a SUBREG against which we cannot generate
2222 a further SUBREG. In this case, we expect forcing the value into a
2223 register will work around the situation. */
2226 lowpart_subreg_maybe_copy (enum machine_mode omode, rtx val,
2227 enum machine_mode imode)
2230 ret = lowpart_subreg (omode, val, imode);
/* If a direct SUBREG could not be formed (VAL may itself already be
   a SUBREG), force VAL into a fresh register and retry; that retry
   must succeed.  */
2233 val = force_reg (imode, val);
2234 ret = lowpart_subreg (omode, val, imode);
2235 gcc_assert (ret != NULL);
2240 /* Expand a floating point absolute value or negation operation via a
2241 logical operation on the sign bit. */
/* NOTE(review): fragment -- several lines (declarations of TEMP/INSNS,
   braces, early returns, start_sequence/end_sequence calls) are elided.
   Code left byte-identical; only comments added.  */
2244 expand_absneg_bit (enum rtx_code code, enum machine_mode mode,
2245 rtx op0, rtx target)
2247 const struct real_format *fmt;
2248 int bitpos, word, nwords, i;
2249 enum machine_mode imode;
2250 HOST_WIDE_INT hi, lo;
2253 /* The format has to have a simple sign bit. */
2254 fmt = REAL_MODE_FORMAT (mode);
/* Position of the sign bit, as writable, per the real format.  */
2258 bitpos = fmt->signbit_rw;
2262 /* Don't create negative zeros if the format doesn't support them. */
2263 if (code == NEG && !fmt->has_signed_zero)
/* Single-word case: operate on an integer mode of the same size.  */
2266 if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
2268 imode = int_mode_for_mode (mode);
2269 if (imode == BLKmode)
/* Multi-word case: locate the word holding the sign bit.  */
2278 if (FLOAT_WORDS_BIG_ENDIAN)
2279 word = (GET_MODE_BITSIZE (mode) - bitpos) / BITS_PER_WORD;
2281 word = bitpos / BITS_PER_WORD;
2282 bitpos = bitpos % BITS_PER_WORD;
2283 nwords = (GET_MODE_BITSIZE (mode) + BITS_PER_WORD - 1) / BITS_PER_WORD;
/* Build the sign-bit mask as a (lo,hi) HOST_WIDE_INT pair.  */
2286 if (bitpos < HOST_BITS_PER_WIDE_INT)
2289 lo = (HOST_WIDE_INT) 1 << bitpos;
2293 hi = (HOST_WIDE_INT) 1 << (bitpos - HOST_BITS_PER_WIDE_INT);
2299 if (target == 0 || target == op0)
2300 target = gen_reg_rtx (mode);
/* Multi-word path: process word by word; only the sign-bit word gets
   the AND (for ABS) or XOR (for NEG); other words are plain copies.  */
2306 for (i = 0; i < nwords; ++i)
2308 rtx targ_piece = operand_subword (target, i, 1, mode);
2309 rtx op0_piece = operand_subword_force (op0, i, mode);
2313 temp = expand_binop (imode, code == ABS ? and_optab : xor_optab,
2315 immed_double_const (lo, hi, imode),
2316 targ_piece, 1, OPTAB_LIB_WIDEN);
2317 if (temp != targ_piece)
2318 emit_move_insn (targ_piece, temp);
2321 emit_move_insn (targ_piece, op0_piece);
2324 insns = get_insns ();
/* Wrap the word-wise sequence in a no-conflict block with a REG_EQUAL
   equivalent of (CODE op0).  */
2327 temp = gen_rtx_fmt_e (code, mode, copy_rtx (op0));
2328 emit_no_conflict_block (insns, target, op0, NULL_RTX, temp);
/* Single-word path: one AND/XOR on the lowpart in IMODE.  */
2332 temp = expand_binop (imode, code == ABS ? and_optab : xor_optab,
2333 gen_lowpart (imode, op0),
2334 immed_double_const (lo, hi, imode),
2335 gen_lowpart (imode, target), 1, OPTAB_LIB_WIDEN);
2336 target = lowpart_subreg_maybe_copy (mode, temp, imode);
2338 set_unique_reg_note (get_last_insn (), REG_EQUAL,
2339 gen_rtx_fmt_e (code, mode, copy_rtx (op0)));
2345 /* Generate code to perform an operation specified by UNOPTAB
2346 on operand OP0, with result having machine-mode MODE.
2348 UNSIGNEDP is for the case where we have to widen the operands
2349 to perform the operation. It says to use zero-extension.
2351 If TARGET is nonzero, the value
2352 is generated there, if it is convenient to do so.
2353 In all cases an rtx is returned for the locus of the value;
2354 this may or may not be TARGET. */
/* NOTE(review): large fragment -- many interior lines (declarations of
   TEMP/PAT/XOP0, braces, returns, start/end_sequence) are elided per the
   jumps in the embedded line numbering.  Code left byte-identical.
   Visible strategy, tried in order:
     1. direct insn pattern for MODE;
     2. open-code in a wider mode (clz specially via widen_clz);
     3. word-at-a-time for one_cmpl on multiword integers;
     4. NEG via sign-bit flip or 0 - x;
     5. parity via popcount & 1;
     6. library call in MODE, then wider modes;
     7. final NEG-as-subtraction with OPTAB_LIB_WIDEN.  */
2357 expand_unop (enum machine_mode mode, optab unoptab, rtx op0, rtx target,
2360 enum mode_class class;
2361 enum machine_mode wider_mode;
2363 rtx last = get_last_insn ();
2366 class = GET_MODE_CLASS (mode);
/* Attempt 1: the target has a direct insn for this op in MODE.  */
2368 if (unoptab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
2370 int icode = (int) unoptab->handlers[(int) mode].insn_code;
2371 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
2377 temp = gen_reg_rtx (mode);
2379 if (GET_MODE (xop0) != VOIDmode
2380 && GET_MODE (xop0) != mode0)
2381 xop0 = convert_to_mode (mode0, xop0, unsignedp);
2383 /* Now, if insn doesn't accept our operand, put it into a pseudo. */
2385 if (!insn_data[icode].operand[1].predicate (xop0, mode0))
2386 xop0 = copy_to_mode_reg (mode0, xop0);
2388 if (!insn_data[icode].operand[0].predicate (temp, mode))
2389 temp = gen_reg_rtx (mode);
2391 pat = GEN_FCN (icode) (temp, xop0);
/* If a multi-insn expansion can't take a REG_EQUAL note, retry
   without a preferred target.  */
2394 if (INSN_P (pat) && NEXT_INSN (pat) != NULL_RTX
2395 && ! add_equal_note (pat, temp, unoptab->code, xop0, NULL_RTX))
2397 delete_insns_since (last);
2398 return expand_unop (mode, unoptab, op0, NULL_RTX, unsignedp);
2406 delete_insns_since (last);
2409 /* It can't be done in this mode. Can we open-code it in a wider mode? */
2411 /* Widening clz needs special treatment. */
2412 if (unoptab == clz_optab)
2414 temp = widen_clz (mode, op0, target);
/* Attempt 2: open-code the operation in each wider mode that has an
   insn, then truncate back to MODE.  */
2421 if (CLASS_HAS_WIDER_MODES_P (class))
2422 for (wider_mode = GET_MODE_WIDER_MODE (mode);
2423 wider_mode != VOIDmode;
2424 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
2426 if (unoptab->handlers[(int) wider_mode].insn_code != CODE_FOR_nothing)
2430 /* For certain operations, we need not actually extend
2431 the narrow operand, as long as we will truncate the
2432 results to the same narrowness. */
2434 xop0 = widen_operand (xop0, wider_mode, mode, unsignedp,
2435 (unoptab == neg_optab
2436 || unoptab == one_cmpl_optab)
2437 && class == MODE_INT);
2439 temp = expand_unop (wider_mode, unoptab, xop0, NULL_RTX,
/* Non-integer classes, or non-noop truncations, need an explicit
   convert_move; otherwise a lowpart suffices.  */
2444 if (class != MODE_INT
2445 || !TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode),
2446 GET_MODE_BITSIZE (wider_mode)))
2449 target = gen_reg_rtx (mode);
2450 convert_move (target, temp, 0);
2454 return gen_lowpart (mode, temp);
2457 delete_insns_since (last);
2461 /* These can be done a word at a time. */
/* Attempt 3: one's-complement on a multiword integer, one word at a
   time using the word_mode pattern.  */
2462 if (unoptab == one_cmpl_optab
2463 && class == MODE_INT
2464 && GET_MODE_SIZE (mode) > UNITS_PER_WORD
2465 && unoptab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing)
2470 if (target == 0 || target == op0)
2471 target = gen_reg_rtx (mode);
2475 /* Do the actual arithmetic. */
2476 for (i = 0; i < GET_MODE_BITSIZE (mode) / BITS_PER_WORD; i++)
2478 rtx target_piece = operand_subword (target, i, 1, mode);
2479 rtx x = expand_unop (word_mode, unoptab,
2480 operand_subword_force (op0, i, mode),
2481 target_piece, unsignedp);
2483 if (target_piece != x)
2484 emit_move_insn (target_piece, x);
2487 insns = get_insns ();
2490 emit_no_conflict_block (insns, target, op0, NULL_RTX,
2491 gen_rtx_fmt_e (unoptab->code, mode,
/* Attempt 4: special fallbacks for negation.  */
2496 if (unoptab->code == NEG)
2498 /* Try negating floating point values by flipping the sign bit. */
2499 if (SCALAR_FLOAT_MODE_P (mode))
2501 temp = expand_absneg_bit (NEG, mode, op0, target);
2506 /* If there is no negation pattern, and we have no negative zero,
2507 try subtracting from zero. */
2508 if (!HONOR_SIGNED_ZEROS (mode))
2510 temp = expand_binop (mode, (unoptab == negv_optab
2511 ? subv_optab : sub_optab),
2512 CONST0_RTX (mode), op0, target,
2513 unsignedp, OPTAB_DIRECT);
2519 /* Try calculating parity (x) as popcount (x) % 2. */
2520 if (unoptab == parity_optab)
2522 temp = expand_parity (mode, op0, target);
2528 /* Now try a library call in this mode. */
2529 if (unoptab->handlers[(int) mode].libfunc)
2533 enum machine_mode outmode = mode;
2535 /* All of these functions return small values. Thus we choose to
2536 have them return something that isn't a double-word. */
2537 if (unoptab == ffs_optab || unoptab == clz_optab || unoptab == ctz_optab
2538 || unoptab == popcount_optab || unoptab == parity_optab)
2540 = GET_MODE (hard_libcall_value (TYPE_MODE (integer_type_node)));
2544 /* Pass 1 for NO_QUEUE so we don't lose any increments
2545 if the libcall is cse'd or moved. */
2546 value = emit_library_call_value (unoptab->handlers[(int) mode].libfunc,
2547 NULL_RTX, LCT_CONST, outmode,
2549 insns = get_insns ();
2552 target = gen_reg_rtx (outmode);
2553 emit_libcall_block (insns, target, value,
2554 gen_rtx_fmt_e (unoptab->code, mode, op0));
2559 /* It can't be done in this mode. Can we do it in a wider mode? */
/* Attempt 6b: wider modes again, this time also accepting a libfunc.  */
2561 if (CLASS_HAS_WIDER_MODES_P (class))
2563 for (wider_mode = GET_MODE_WIDER_MODE (mode);
2564 wider_mode != VOIDmode;
2565 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
2567 if ((unoptab->handlers[(int) wider_mode].insn_code
2568 != CODE_FOR_nothing)
2569 || unoptab->handlers[(int) wider_mode].libfunc)
2573 /* For certain operations, we need not actually extend
2574 the narrow operand, as long as we will truncate the
2575 results to the same narrowness. */
2577 xop0 = widen_operand (xop0, wider_mode, mode, unsignedp,
2578 (unoptab == neg_optab
2579 || unoptab == one_cmpl_optab)
2580 && class == MODE_INT);
2582 temp = expand_unop (wider_mode, unoptab, xop0, NULL_RTX,
2585 /* If we are generating clz using wider mode, adjust the
2587 if (unoptab == clz_optab && temp != 0)
2588 temp = expand_binop (wider_mode, sub_optab, temp,
2589 GEN_INT (GET_MODE_BITSIZE (wider_mode)
2590 - GET_MODE_BITSIZE (mode)),
2591 target, true, OPTAB_DIRECT);
2595 if (class != MODE_INT)
2598 target = gen_reg_rtx (mode);
2599 convert_move (target, temp, 0);
2603 return gen_lowpart (mode, temp);
2606 delete_insns_since (last);
2611 /* One final attempt at implementing negation via subtraction,
2612 this time allowing widening of the operand. */
2613 if (unoptab->code == NEG && !HONOR_SIGNED_ZEROS (mode))
2616 temp = expand_binop (mode,
2617 unoptab == negv_optab ? subv_optab : sub_optab,
2618 CONST0_RTX (mode), op0,
2619 target, unsignedp, OPTAB_LIB_WIDEN);
2627 /* Emit code to compute the absolute value of OP0, with result to
2628 TARGET if convenient. (TARGET may be 0.) The return value says
2629 where the result actually is to be found.
2631 MODE is the mode of the operand; the mode of the result is
2632 different but can be deduced from MODE.
/* NOTE(review): fragment -- declaration of TEMP, braces and returns are
   elided.  Tries, in order: abs insn; sign-bit clear for FP; MAX(x,-x);
   branch-free shift/xor/sub identity for integers.  */
2637 expand_abs_nojump (enum machine_mode mode, rtx op0, rtx target,
2638 int result_unsignedp)
2643 result_unsignedp = 1;
2645 /* First try to do it with a special abs instruction. */
2646 temp = expand_unop (mode, result_unsignedp ? abs_optab : absv_optab,
2651 /* For floating point modes, try clearing the sign bit. */
2652 if (SCALAR_FLOAT_MODE_P (mode))
2654 temp = expand_absneg_bit (ABS, mode, op0, target);
2659 /* If we have a MAX insn, we can do this as MAX (x, -x). */
2660 if (smax_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing
2661 && !HONOR_SIGNED_ZEROS (mode))
2663 rtx last = get_last_insn ();
2665 temp = expand_unop (mode, neg_optab, op0, NULL_RTX, 0);
2667 temp = expand_binop (mode, smax_optab, op0, temp, target, 0,
2673 delete_insns_since (last);
2676 /* If this machine has expensive jumps, we can do integer absolute
2677 value of X as (((signed) x >> (W-1)) ^ x) - ((signed) x >> (W-1)),
2678 where W is the width of MODE. */
2680 if (GET_MODE_CLASS (mode) == MODE_INT && BRANCH_COST >= 2)
/* EXTENDED is the all-zeros/all-ones sign mask from an arithmetic
   shift right by W-1.  */
2682 rtx extended = expand_shift (RSHIFT_EXPR, mode, op0,
2683 size_int (GET_MODE_BITSIZE (mode) - 1),
2686 temp = expand_binop (mode, xor_optab, extended, op0, target, 0,
2689 temp = expand_binop (mode, result_unsignedp ? sub_optab : subv_optab,
2690 temp, extended, target, 0, OPTAB_LIB_WIDEN);
/* Compute abs(OP0) like expand_abs_nojump, but falling back to a
   compare-and-branch sequence (jump over a negation when OP0 >= 0)
   when no jumpless expansion works.  SAFE nonzero says TARGET may be
   reused across the branch.
   NOTE(review): fragment -- return type, declarations of TEMP/OP1,
   braces, label emission and returns are elided per the jumps in the
   embedded numbering.  Code left byte-identical.  */
2700 expand_abs (enum machine_mode mode, rtx op0, rtx target,
2701 int result_unsignedp, int safe)
2706 result_unsignedp = 1;
2708 temp = expand_abs_nojump (mode, op0, target, result_unsignedp);
2712 /* If that does not win, use conditional jump and negate. */
2714 /* It is safe to use the target if it is the same
2715 as the source if this is also a pseudo register */
2716 if (op0 == target && REG_P (op0)
2717 && REGNO (op0) >= FIRST_PSEUDO_REGISTER)
/* OP1 here is the label jumped to when no negation is needed.  */
2720 op1 = gen_label_rtx ();
2721 if (target == 0 || ! safe
2722 || GET_MODE (target) != mode
2723 || (MEM_P (target) && MEM_VOLATILE_P (target))
2725 && REGNO (target) < FIRST_PSEUDO_REGISTER))
2726 target = gen_reg_rtx (mode);
2728 emit_move_insn (target, op0);
2731 /* If this mode is an integer too wide to compare properly,
2732 compare word by word. Rely on CSE to optimize constant cases. */
2733 if (GET_MODE_CLASS (mode) == MODE_INT
2734 && ! can_compare_p (GE, mode, ccp_jump))
2735 do_jump_by_parts_greater_rtx (mode, 0, target, const0_rtx,
2738 do_compare_rtx_and_jump (target, CONST0_RTX (mode), GE, 0, mode,
2739 NULL_RTX, NULL_RTX, op1);
/* Fall-through path: value was negative, so negate it.  */
2741 op0 = expand_unop (mode, result_unsignedp ? neg_optab : negv_optab,
2744 emit_move_insn (target, op0);
2750 /* A subroutine of expand_copysign, perform the copysign operation using the
2751 abs and neg primitives advertised to exist on the target. The assumption
2752 is that we have a split register file, and leaving op0 in fp registers,
2753 and not playing with subregs so much, will help the register allocator. */
/* NOTE(review): fragment -- declarations of WORD/LABEL, braces, returns
   and the op0_is_abs handling around line 2769 are elided.  Visible
   shape: take |op0|, test op1's sign bit in integer mode, and negate
   the result when that bit is set, skipping via LABEL otherwise.  */
2756 expand_copysign_absneg (enum machine_mode mode, rtx op0, rtx op1, rtx target,
2757 int bitpos, bool op0_is_abs)
2759 enum machine_mode imode;
2760 HOST_WIDE_INT hi, lo;
2769 op0 = expand_unop (mode, abs_optab, op0, target, 0);
2776 if (target == NULL_RTX)
2777 target = copy_to_reg (op0);
2779 emit_move_insn (target, op0);
/* Extract the sign word of OP1 in an integer mode.  */
2782 if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
2784 imode = int_mode_for_mode (mode);
2785 if (imode == BLKmode)
2787 op1 = gen_lowpart (imode, op1);
2792 if (FLOAT_WORDS_BIG_ENDIAN)
2793 word = (GET_MODE_BITSIZE (mode) - bitpos) / BITS_PER_WORD;
2795 word = bitpos / BITS_PER_WORD;
2796 bitpos = bitpos % BITS_PER_WORD;
2797 op1 = operand_subword_force (op1, word, mode);
/* Build the single-bit sign mask as a (lo,hi) pair.  */
2800 if (bitpos < HOST_BITS_PER_WIDE_INT)
2803 lo = (HOST_WIDE_INT) 1 << bitpos;
2807 hi = (HOST_WIDE_INT) 1 << (bitpos - HOST_BITS_PER_WIDE_INT);
2811 op1 = expand_binop (imode, and_optab, op1,
2812 immed_double_const (lo, hi, imode),
2813 NULL_RTX, 1, OPTAB_LIB_WIDEN);
/* Skip the negation when OP1's sign bit is clear.  */
2815 label = gen_label_rtx ();
2816 emit_cmp_and_jump_insns (op1, const0_rtx, EQ, NULL_RTX, imode, 1, label);
2818 if (GET_CODE (op0) == CONST_DOUBLE)
2819 op0 = simplify_unary_operation (NEG, mode, op0, mode);
2821 op0 = expand_unop (mode, neg_optab, op0, target, 0);
2823 emit_move_insn (target, op0);
2831 /* A subroutine of expand_copysign, perform the entire copysign operation
2832 with integer bitmasks. BITPOS is the position of the sign bit; OP0_IS_ABS
2833 is true if op0 is known to have its sign bit clear.
/* NOTE(review): fragment -- declarations of TEMP/INSNS, braces, returns,
   the op0_is_abs short-circuits, and start/end_sequence are elided.
   Visible computation: target = (op0 & ~signmask) | (op1 & signmask),
   either word-at-a-time or in one integer-mode operation.  */
2836 expand_copysign_bit (enum machine_mode mode, rtx op0, rtx op1, rtx target,
2837 int bitpos, bool op0_is_abs)
2839 enum machine_mode imode;
2840 HOST_WIDE_INT hi, lo;
2841 int word, nwords, i;
2844 if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
2846 imode = int_mode_for_mode (mode);
2847 if (imode == BLKmode)
/* Multi-word: find which word carries the sign bit.  */
2856 if (FLOAT_WORDS_BIG_ENDIAN)
2857 word = (GET_MODE_BITSIZE (mode) - bitpos) / BITS_PER_WORD;
2859 word = bitpos / BITS_PER_WORD;
2860 bitpos = bitpos % BITS_PER_WORD;
2861 nwords = (GET_MODE_BITSIZE (mode) + BITS_PER_WORD - 1) / BITS_PER_WORD;
/* Sign-bit mask as a (lo,hi) HOST_WIDE_INT pair.  */
2864 if (bitpos < HOST_BITS_PER_WIDE_INT)
2867 lo = (HOST_WIDE_INT) 1 << bitpos;
2871 hi = (HOST_WIDE_INT) 1 << (bitpos - HOST_BITS_PER_WIDE_INT);
2875 if (target == 0 || target == op0 || target == op1)
2876 target = gen_reg_rtx (mode);
/* Word-wise path: only the sign-bit word is masked and merged; the
   other words are copied from op0 unchanged.  */
2882 for (i = 0; i < nwords; ++i)
2884 rtx targ_piece = operand_subword (target, i, 1, mode);
2885 rtx op0_piece = operand_subword_force (op0, i, mode);
2890 op0_piece = expand_binop (imode, and_optab, op0_piece,
2891 immed_double_const (~lo, ~hi, imode),
2892 NULL_RTX, 1, OPTAB_LIB_WIDEN);
2894 op1 = expand_binop (imode, and_optab,
2895 operand_subword_force (op1, i, mode),
2896 immed_double_const (lo, hi, imode),
2897 NULL_RTX, 1, OPTAB_LIB_WIDEN);
2899 temp = expand_binop (imode, ior_optab, op0_piece, op1,
2900 targ_piece, 1, OPTAB_LIB_WIDEN);
2901 if (temp != targ_piece)
2902 emit_move_insn (targ_piece, temp);
2905 emit_move_insn (targ_piece, op0_piece);
2908 insns = get_insns ();
2911 emit_no_conflict_block (insns, target, op0, op1, NULL_RTX);
/* Single-word path: mask both lowparts and OR them together.  */
2915 op1 = expand_binop (imode, and_optab, gen_lowpart (imode, op1),
2916 immed_double_const (lo, hi, imode),
2917 NULL_RTX, 1, OPTAB_LIB_WIDEN);
2919 op0 = gen_lowpart (imode, op0);
2921 op0 = expand_binop (imode, and_optab, op0,
2922 immed_double_const (~lo, ~hi, imode),
2923 NULL_RTX, 1, OPTAB_LIB_WIDEN);
2925 temp = expand_binop (imode, ior_optab, op0, op1,
2926 gen_lowpart (imode, target), 1, OPTAB_LIB_WIDEN);
2927 target = lowpart_subreg_maybe_copy (mode, temp, imode);
2933 /* Expand the C99 copysign operation. OP0 and OP1 must be the same
2934 scalar floating point mode. Return NULL if we do not know how to
2935 expand the operation inline. */
/* NOTE(review): fragment -- return type, OP0_IS_ABS declaration/initial
   value, braces and some returns are elided.  Dispatch order: direct
   copysign insn; abs/neg-based subroutine; integer-bitmask subroutine.  */
2938 expand_copysign (rtx op0, rtx op1, rtx target)
2940 enum machine_mode mode = GET_MODE (op0);
2941 const struct real_format *fmt;
2945 gcc_assert (SCALAR_FLOAT_MODE_P (mode));
2946 gcc_assert (GET_MODE (op1) == mode);
2948 /* First try to do it with a special instruction. */
2949 temp = expand_binop (mode, copysign_optab, op0, op1,
2950 target, 0, OPTAB_DIRECT);
/* Bail out for formats without a signed zero: copysign is then
   not expressible via sign-bit manipulation.  */
2954 fmt = REAL_MODE_FORMAT (mode);
2955 if (fmt == NULL || !fmt->has_signed_zero)
/* Fold away a negative constant op0 up front.  */
2959 if (GET_CODE (op0) == CONST_DOUBLE)
2961 if (real_isneg (CONST_DOUBLE_REAL_VALUE (op0)))
2962 op0 = simplify_unary_operation (ABS, mode, op0, mode);
/* abs/neg route requires a readable sign bit and both primitives.  */
2966 if (fmt->signbit_ro >= 0
2967 && (GET_CODE (op0) == CONST_DOUBLE
2968 || (neg_optab->handlers[mode].insn_code != CODE_FOR_nothing
2969 && abs_optab->handlers[mode].insn_code != CODE_FOR_nothing)))
2971 temp = expand_copysign_absneg (mode, op0, op1, target,
2972 fmt->signbit_ro, op0_is_abs);
/* Bitmask route requires a writable sign-bit position.  */
2977 if (fmt->signbit_rw < 0)
2979 return expand_copysign_bit (mode, op0, op1, target,
2980 fmt->signbit_rw, op0_is_abs);
2983 /* Generate an instruction whose insn-code is INSN_CODE,
2984 with two operands: an output TARGET and an input OP0.
2985 TARGET *must* be nonzero, and the output is always stored there.
2986 CODE is an rtx code such that (CODE OP0) is an rtx that describes
2987 the value that is stored into TARGET. */
/* NOTE(review): fragment -- return type, declarations of PAT/TEMP,
   braces, emit_insn of PAT and the final return are elided.  */
2990 emit_unop_insn (int icode, rtx target, rtx op0, enum rtx_code code)
2993 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
2998 /* Now, if insn does not accept our operands, put them into pseudos. */
3000 if (!insn_data[icode].operand[1].predicate (op0, mode0))
3001 op0 = copy_to_mode_reg (mode0, op0);
3003 if (!insn_data[icode].operand[0].predicate (temp, GET_MODE (temp)))
3004 temp = gen_reg_rtx (GET_MODE (temp));
3006 pat = GEN_FCN (icode) (temp, op0);
/* Attach a REG_EQUAL note on multi-insn expansions (unless the caller
   passed UNKNOWN for CODE).  */
3008 if (INSN_P (pat) && NEXT_INSN (pat) != NULL_RTX && code != UNKNOWN)
3009 add_equal_note (pat, temp, code, op0, NULL_RTX)
/* Copy into the caller-required TARGET when the pattern used TEMP.  */
3014 emit_move_insn (target, temp);
/* State threaded through no_conflict_move_test via note_stores.
   NOTE(review): fragment -- braces and at least one member (a must_stay
   flag is read/written by no_conflict_move_test below) are elided.  */
3017 struct no_conflict_data
/* target: output reg of the block; first/insn: window of insns kept.  */
3019 rtx target, first, insn;
3023 /* Called via note_stores by emit_no_conflict_block and emit_libcall_block.
3024 Set P->must_stay if the currently examined clobber / store has to stay
3025 in the list of insns that constitute the actual no_conflict block /
/* NOTE(review): fragment -- return type (void) and braces are elided.
   DEST/SET come from note_stores; P0 is the struct no_conflict_data.  */
3028 no_conflict_move_test (rtx dest, rtx set, void *p0)
3030 struct no_conflict_data *p= p0;
3032 /* If this inns directly contributes to setting the target, it must stay. */
3033 if (reg_overlap_mentioned_p (p->target, dest))
3034 p->must_stay = true;
3035 /* If we haven't committed to keeping any other insns in the list yet,
3036 there is nothing more to check. */
3037 else if (p->insn == p->first)
3039 /* If this insn sets / clobbers a register that feeds one of the insns
3040 already in the list, this insn has to stay too. */
3041 else if (reg_overlap_mentioned_p (dest, PATTERN (p->first))
3042 || (CALL_P (p->first) && (find_reg_fusage (p->first, USE, dest)))
3043 || reg_used_between_p (dest, p->first, p->insn)
3044 /* Likewise if this insn depends on a register set by a previous
3045 insn in the list, or if it sets a result (presumably a hard
3046 register) that is set or clobbered by a previous insn.
3047 N.B. the modified_*_p (SET_DEST...) tests applied to a MEM
3048 SET_DEST perform the former check on the address, and the latter
3049 check on the MEM. */
3050 || (GET_CODE (set) == SET
3051 && (modified_in_p (SET_SRC (set), p->first)
3052 || modified_in_p (SET_DEST (set), p->first)
3053 || modified_between_p (SET_SRC (set), p->first, p->insn)
3054 || modified_between_p (SET_DEST (set), p->first, p->insn))))
3055 p->must_stay = true;
3058 /* Emit code to perform a series of operations on a multi-word quantity, one
3061 Such a block is preceded by a CLOBBER of the output, consists of multiple
3062 insns, each setting one word of the output, and followed by a SET copying
3063 the output to itself.
3065 Each of the insns setting words of the output receives a REG_NO_CONFLICT
3066 note indicating that it doesn't conflict with the (also multi-word)
3067 inputs. The entire block is surrounded by REG_LIBCALL and REG_RETVAL
3070 INSNS is a block of code generated to perform the operation, not including
3071 the CLOBBER and final copy. All insns that compute intermediate values
3072 are first emitted, followed by the block as described above.
3074 TARGET, OP0, and OP1 are the output and inputs of the operations,
3075 respectively. OP1 may be zero for a unary operation.
3077 EQUIV, if nonzero, is an expression to be placed into a REG_EQUAL note
3080 If TARGET is not a register, INSNS is simply emitted with no special
3081 processing. Likewise if anything in INSNS is not an INSN or if
3082 there is a libcall block inside INSNS.
3084 The final insn emitted is returned.
/* NOTE(review): fragment -- braces, NOTE declaration, several emit_insn
   calls and the final return are elided per jumps in the embedded
   numbering.  Code left byte-identical.  */
3087 emit_no_conflict_block (rtx insns, rtx target, rtx op0, rtx op1, rtx equiv)
3089 rtx prev, next, first, last, insn;
/* Degenerate cases: just emit the list unchanged.  */
3091 if (!REG_P (target) || reload_in_progress)
3092 return emit_insn (insns);
3094 for (insn = insns; insn; insn = NEXT_INSN (insn))
3095 if (!NONJUMP_INSN_P (insn)
3096 || find_reg_note (insn, REG_LIBCALL, NULL_RTX))
3097 return emit_insn (insns);
3099 /* First emit all insns that do not store into words of the output and remove
3100 these from the list. */
3101 for (insn = insns; insn; insn = next)
3104 struct no_conflict_data data;
3106 next = NEXT_INSN (insn);
3108 /* Some ports (cris) create a libcall regions at their own. We must
3109 avoid any potential nesting of LIBCALLs. */
3110 if ((note = find_reg_note (insn, REG_LIBCALL, NULL)) != NULL)
3111 remove_note (insn, note);
3112 if ((note = find_reg_note (insn, REG_RETVAL, NULL)) != NULL)
3113 remove_note (insn, note);
3115 data.target = target;
3119 note_stores (PATTERN (insn), no_conflict_move_test, &data);
3120 if (! data.must_stay)
/* Unlink INSN from the chain (it was emitted ahead of the block).  */
3122 if (PREV_INSN (insn))
3123 NEXT_INSN (PREV_INSN (insn)) = next;
3128 PREV_INSN (next) = PREV_INSN (insn);
3134 prev = get_last_insn ();
3136 /* Now write the CLOBBER of the output, followed by the setting of each
3137 of the words, followed by the final copy. */
3138 if (target != op0 && target != op1)
3139 emit_insn (gen_rtx_CLOBBER (VOIDmode, target));
3141 for (insn = insns; insn; insn = next)
3143 next = NEXT_INSN (insn);
/* Tag each word-setting insn with REG_NO_CONFLICT notes for the
   register inputs.  */
3146 if (op1 && REG_P (op1))
3147 REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_NO_CONFLICT, op1,
3150 if (op0 && REG_P (op0))
3151 REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_NO_CONFLICT, op0,
/* Close the block with a self-copy carrying the REG_EQUAL note, when
   a move pattern exists for TARGET's mode.  */
3155 if (mov_optab->handlers[(int) GET_MODE (target)].insn_code
3156 != CODE_FOR_nothing)
3158 last = emit_move_insn (target, target);
3160 set_unique_reg_note (last, REG_EQUAL, equiv);
3164 last = get_last_insn ();
3166 /* Remove any existing REG_EQUAL note from "last", or else it will
3167 be mistaken for a note referring to the full contents of the
3168 alleged libcall value when found together with the REG_RETVAL
3169 note added below. An existing note can come from an insn
3170 expansion at "last". */
3171 remove_note (last, find_reg_note (last, REG_EQUAL, NULL_RTX));
3175 first = get_insns ();
3177 first = NEXT_INSN (prev);
3179 /* Encapsulate the block so it gets manipulated as a unit. */
3180 REG_NOTES (first) = gen_rtx_INSN_LIST (REG_LIBCALL, last,
3182 REG_NOTES (last) = gen_rtx_INSN_LIST (REG_RETVAL, first, REG_NOTES (last));
3187 /* Emit code to make a call to a constant function or a library call.
3189 INSNS is a list containing all insns emitted in the call.
3190 These insns leave the result in RESULT. Our block is to copy RESULT
3191 to TARGET, which is logically equivalent to EQUIV.
3193 We first emit any insns that set a pseudo on the assumption that these are
3194 loading constants into registers; doing so allows them to be safely cse'ed
3195 between blocks. Then we emit all the other insns in the block, followed by
3196 an insn to move RESULT to TARGET. This last insn will have a REQ_EQUAL
3197 note with an operand of EQUIV.
3199 Moving assignments to pseudos outside of the block is done to improve
3200 the generated code, but is not required to generate correct code,
3201 hence being unable to move an assignment is not grounds for not making
3202 a libcall block. There are two reasons why it is safe to leave these
3203 insns inside the block: First, we know that these pseudos cannot be
3204 used in generated RTL outside the block since they are created for
3205 temporary purposes within the block. Second, CSE will not record the
3206 values of anything set inside a libcall block, so we know they must
3207 be dead at the end of the block.
3209 Except for the first group of insns (the ones setting pseudos), the
3210 block is delimited by REG_RETVAL and REG_LIBCALL notes.
/* NOTE(review): fragment -- braces, NOTE declaration, emit_insn calls
   and some control-flow lines are elided per the jumps in the embedded
   line numbering.  Code left byte-identical.  */
3213 emit_libcall_block (rtx insns, rtx target, rtx result, rtx equiv)
3215 rtx final_dest = target;
3216 rtx prev, next, first, last, insn;
3218 /* If this is a reg with REG_USERVAR_P set, then it could possibly turn
3219 into a MEM later. Protect the libcall block from this change. */
3220 if (! REG_P (target) || REG_USERVAR_P (target))
3221 target = gen_reg_rtx (GET_MODE (target));
3223 /* If we're using non-call exceptions, a libcall corresponding to an
3224 operation that may trap may also trap. */
3225 if (flag_non_call_exceptions && may_trap_p (equiv))
/* Drop "cannot throw" EH-region notes from calls, since the libcall
   may in fact trap.  */
3227 for (insn = insns; insn; insn = NEXT_INSN (insn))
3230 rtx note = find_reg_note (insn, REG_EH_REGION, NULL_RTX);
3232 if (note != 0 && INTVAL (XEXP (note, 0)) <= 0)
3233 remove_note (insn, note);
3237 /* look for any CALL_INSNs in this sequence, and attach a REG_EH_REGION
3238 reg note to indicate that this call cannot throw or execute a nonlocal
3239 goto (unless there is already a REG_EH_REGION note, in which case
3241 for (insn = insns; insn; insn = NEXT_INSN (insn))
3244 rtx note = find_reg_note (insn, REG_EH_REGION, NULL_RTX);
3247 XEXP (note, 0) = constm1_rtx;
3249 REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_EH_REGION, constm1_rtx,
3253 /* First emit all insns that set pseudos. Remove them from the list as
3254 we go. Avoid insns that set pseudos which were referenced in previous
3255 insns. These can be generated by move_by_pieces, for example,
3256 to update an address. Similarly, avoid insns that reference things
3257 set in previous insns. */
3259 for (insn = insns; insn; insn = next)
3261 rtx set = single_set (insn);
3264 /* Some ports (cris) create a libcall regions at their own. We must
3265 avoid any potential nesting of LIBCALLs. */
3266 if ((note = find_reg_note (insn, REG_LIBCALL, NULL)) != NULL)
3267 remove_note (insn, note);
3268 if ((note = find_reg_note (insn, REG_RETVAL, NULL)) != NULL)
3269 remove_note (insn, note);
3271 next = NEXT_INSN (insn);
3273 if (set != 0 && REG_P (SET_DEST (set))
3274 && REGNO (SET_DEST (set)) >= FIRST_PSEUDO_REGISTER)
3276 struct no_conflict_data data;
/* const0_rtx as "target" means nothing overlaps it, so only the
   feeds/dependency tests in no_conflict_move_test apply.  */
3278 data.target = const0_rtx;
3282 note_stores (PATTERN (insn), no_conflict_move_test, &data);
3283 if (! data.must_stay)
/* Unlink INSN from the chain (it gets emitted before the block).  */
3285 if (PREV_INSN (insn))
3286 NEXT_INSN (PREV_INSN (insn)) = next;
3291 PREV_INSN (next) = PREV_INSN (insn);
3297 /* Some ports use a loop to copy large arguments onto the stack.
3298 Don't move anything outside such a loop. */
3303 prev = get_last_insn ();
3305 /* Write the remaining insns followed by the final copy. */
3307 for (insn = insns; insn; insn = next)
3309 next = NEXT_INSN (insn);
3314 last = emit_move_insn (target, result);
3315 if (mov_optab->handlers[(int) GET_MODE (target)].insn_code
3316 != CODE_FOR_nothing)
3317 set_unique_reg_note (last, REG_EQUAL, copy_rtx (equiv));
3320 /* Remove any existing REG_EQUAL note from "last", or else it will
3321 be mistaken for a note referring to the full contents of the
3322 libcall value when found together with the REG_RETVAL note added
3323 below. An existing note can come from an insn expansion at
3325 remove_note (last, find_reg_note (last, REG_EQUAL, NULL_RTX));
3328 if (final_dest != target)
3329 emit_move_insn (final_dest, target);
3332 first = get_insns ();
3334 first = NEXT_INSN (prev);
3336 /* Encapsulate the block so it gets manipulated as a unit. */
3337 if (!flag_non_call_exceptions || !may_trap_p (equiv))
3339 /* We can't attach the REG_LIBCALL and REG_RETVAL notes
3340 when the encapsulated region would not be in one basic block,
3341 i.e. when there is a control_flow_insn_p insn between FIRST and LAST.
3343 bool attach_libcall_retval_notes = true;
3344 next = NEXT_INSN (last);
3345 for (insn = first; insn != next; insn = NEXT_INSN (insn))
3346 if (control_flow_insn_p (insn))
3348 attach_libcall_retval_notes = false;
3352 if (attach_libcall_retval_notes)
3354 REG_NOTES (first) = gen_rtx_INSN_LIST (REG_LIBCALL, last,
3356 REG_NOTES (last) = gen_rtx_INSN_LIST (REG_RETVAL, first,
3362 /* Nonzero if we can perform a comparison of mode MODE straightforwardly.
3363 PURPOSE describes how this comparison will be used. CODE is the rtx
3364 comparison code we will be using.
3366 ??? Actually, CODE is slightly weaker than that. A target is still
3367 required to implement all of the normal bcc operations, but not
3368 required to implement all (or any) of the unordered bcc operations.
/* NOTE(review): fragment -- return type, the do{...}while loop opener,
   several "return 1" lines and the final return are elided.  Visible
   shape: loop over MODE and its wider modes, checking cmp/cbranch/
   cmov/cstore availability for the requested PURPOSE.  */
3371 can_compare_p (enum rtx_code code, enum machine_mode mode,
3372 enum can_compare_purpose purpose)
3376 if (cmp_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
3378 if (purpose == ccp_jump)
3379 return bcc_gen_fctn[(int) code] != NULL;
3380 else if (purpose == ccp_store_flag)
3381 return setcc_gen_code[(int) code] != CODE_FOR_nothing;
3383 /* There's only one cmov entry point, and it's allowed to fail. */
3386 if (purpose == ccp_jump
3387 && cbranch_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
3389 if (purpose == ccp_cmov
3390 && cmov_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
3392 if (purpose == ccp_store_flag
3393 && cstore_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
/* Try the next wider mode before giving up.  */
3395 mode = GET_MODE_WIDER_MODE (mode);
3397 while (mode != VOIDmode);
3402 /* This function is called when we are going to emit a compare instruction that
3403 compares the values found in *PX and *PY, using the rtl operator COMPARISON.
3405 *PMODE is the mode of the inputs (in case they are const_int).
3406 *PUNSIGNEDP nonzero says that the operands are unsigned;
3407 this matters if they need to be widened.
3409 If they have mode BLKmode, then SIZE specifies the size of both operands.
3411 This function performs all the setup necessary so that the caller only has
3412 to emit a single comparison insn. This setup can involve doing a BLKmode
3413 comparison or emitting a library call to perform the comparison if no insn
3414 is available to handle it.
3415 The values which are passed in through pointers can be modified; the caller
3416 should perform the comparison on the modified values. Constant
3417 comparisons must have already been folded. */
3420 prepare_cmp_insn (rtx *px, rtx *py, enum rtx_code *pcomparison, rtx size,
3421 enum machine_mode *pmode, int *punsignedp,
3422 enum can_compare_purpose purpose)
3424 enum machine_mode mode = *pmode;
3425 rtx x = *px, y = *py;
3426 int unsignedp = *punsignedp;
3428 /* If we are inside an appropriately-short loop and we are optimizing,
3429 force expensive constants into a register. */
3430 if (CONSTANT_P (x) && optimize
3431 && rtx_cost (x, COMPARE) > COSTS_N_INSNS (1))
3432 x = force_reg (mode, x);
3434 if (CONSTANT_P (y) && optimize
3435 && rtx_cost (y, COMPARE) > COSTS_N_INSNS (1))
3436 y = force_reg (mode, y);
3439 /* Make sure we have a canonical comparison.  The RTL
3440 documentation states that canonical comparisons are required only
3441 for targets which have cc0. */
3442 gcc_assert (!CONSTANT_P (x) || CONSTANT_P (y))
3445 /* Don't let both operands fail to indicate the mode. */
3446 if (GET_MODE (x) == VOIDmode && GET_MODE (y) == VOIDmode)
3447 x = force_reg (mode, x);
3449 /* Handle all BLKmode compares. */
3451 if (mode == BLKmode)
3453 enum machine_mode cmp_mode, result_mode;
3454 enum insn_code cmp_code;
/* Alignment of the two blocks, in units, as required by the
   cmpmem/cmpstr/cmpstrn patterns below.  */
3459 = GEN_INT (MIN (MEM_ALIGN (x), MEM_ALIGN (y)) / BITS_PER_UNIT);
3463 /* Try to use a memory block compare insn - either cmpstr
3464 or cmpmem will do.  Walk the integer modes from narrowest to
3465 widest looking for one the target implements. */
3465 for (cmp_mode = GET_CLASS_NARROWEST_MODE (MODE_INT);
3466 cmp_mode != VOIDmode;
3467 cmp_mode = GET_MODE_WIDER_MODE (cmp_mode))
3469 cmp_code = cmpmem_optab[cmp_mode];
3470 if (cmp_code == CODE_FOR_nothing)
3471 cmp_code = cmpstr_optab[cmp_mode];
3472 if (cmp_code == CODE_FOR_nothing)
3473 cmp_code = cmpstrn_optab[cmp_mode];
3474 if (cmp_code == CODE_FOR_nothing)
3477 /* Must make sure the size fits the insn's mode. */
3478 if ((GET_CODE (size) == CONST_INT
3479 && INTVAL (size) >= (1 << GET_MODE_BITSIZE (cmp_mode)))
3480 || (GET_MODE_BITSIZE (GET_MODE (size))
3481 > GET_MODE_BITSIZE (cmp_mode)))
3484 result_mode = insn_data[cmp_code].operand[0].mode;
3485 result = gen_reg_rtx (result_mode);
3486 size = convert_to_mode (cmp_mode, size, 1);
3487 emit_insn (GEN_FCN (cmp_code) (result, x, y, size, opalign));
/* Hand the block-compare result back to the caller: it is now an
   ordinary integer compare against zero in RESULT_MODE.  */
3491 *pmode = result_mode;
3495 /* Otherwise call a library function, memcmp. */
3496 libfunc = memcmp_libfunc;
3497 length_type = sizetype;
3498 result_mode = TYPE_MODE (integer_type_node);
3499 cmp_mode = TYPE_MODE (length_type);
3500 size = convert_to_mode (TYPE_MODE (length_type), size,
3501 TYPE_UNSIGNED (length_type));
3503 result = emit_library_call_value (libfunc, 0, LCT_PURE_MAKE_BLOCK,
3510 *pmode = result_mode;
3514 /* Don't allow operands to the compare to trap, as that can put the
3515 compare and branch in different basic blocks. */
3516 if (flag_non_call_exceptions)
3519 x = force_reg (mode, x);
3521 y = force_reg (mode, y);
3526 if (can_compare_p (*pcomparison, mode, purpose))
3529 /* Handle a lib call just for the mode we are using. */
3531 if (cmp_optab->handlers[(int) mode].libfunc && !SCALAR_FLOAT_MODE_P (mode))
3533 rtx libfunc = cmp_optab->handlers[(int) mode].libfunc;
3536 /* If we want unsigned, and this mode has a distinct unsigned
3537 comparison routine, use that. */
3538 if (unsignedp && ucmp_optab->handlers[(int) mode].libfunc)
3539 libfunc = ucmp_optab->handlers[(int) mode].libfunc;
3541 result = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST_MAKE_BLOCK,
3542 word_mode, 2, x, mode, y, mode);
3546 if (TARGET_LIB_INT_CMP_BIASED)
3547 /* Integer comparison returns a result that must be compared
3548 against 1, so that even if we do an unsigned compare
3549 afterward, there is still a value that can represent the
3550 result "less than". */
/* No integer compare insn or libcall: must be a scalar float mode,
   which is handled by the float library-call helper.  */
3560 gcc_assert (SCALAR_FLOAT_MODE_P (mode));
3561 prepare_float_lib_cmp (px, py, pcomparison, pmode, punsignedp);
3564 /* Before emitting an insn with code ICODE, make sure that X, which is going
3565 to be used for operand OPNUM of the insn, is converted from mode MODE to
3566 WIDER_MODE (UNSIGNEDP determines whether it is an unsigned conversion), and
3567 that it is accepted by the operand predicate. Return the new value. */
3570 prepare_operand (int icode, rtx x, int opnum, enum machine_mode mode,
3571 enum machine_mode wider_mode, int unsignedp)
3573 if (mode != wider_mode)
3574 x = convert_modes (wider_mode, mode, x, unsignedp);
3576 if (!insn_data[icode].operand[opnum].predicate
3577 (x, insn_data[icode].operand[opnum].mode))
/* The operand predicate rejected X; copy it into a fresh pseudo of
   the mode the insn pattern expects.  */
3581 x = copy_to_mode_reg (insn_data[icode].operand[opnum].mode, x);
3587 /* Subroutine of emit_cmp_and_jump_insns; this function is called when we know
3588 we can do the comparison.
3589 The arguments are the same as for emit_cmp_and_jump_insns; but LABEL may
3590 be NULL_RTX which indicates that only a comparison is to be generated. */
3593 emit_cmp_and_jump_insn_1 (rtx x, rtx y, enum machine_mode mode,
3594 enum rtx_code comparison, int unsignedp, rtx label)
3596 rtx test = gen_rtx_fmt_ee (comparison, mode, x, y);
3597 enum mode_class class = GET_MODE_CLASS (mode);
3598 enum machine_mode wider_mode = mode;
3600 /* Try combined insns first.  The loop widens WIDER_MODE each
3601 iteration, attempting in order: a cbranch insn, a tst-against-zero
3602 insn followed by a bcc, and a cmp insn followed by a bcc.  */
3603 enum insn_code icode;
3604 PUT_MODE (test, wider_mode);
3608 icode = cbranch_optab->handlers[(int) wider_mode].insn_code;
3610 if (icode != CODE_FOR_nothing
3611 && insn_data[icode].operand[0].predicate (test, wider_mode))
3613 x = prepare_operand (icode, x, 1, mode, wider_mode, unsignedp);
3614 y = prepare_operand (icode, y, 2, mode, wider_mode, unsignedp);
3615 emit_jump_insn (GEN_FCN (icode) (test, x, y, label));
3620 /* Handle some compares against zero. */
3621 icode = (int) tst_optab->handlers[(int) wider_mode].insn_code;
3622 if (y == CONST0_RTX (mode) && icode != CODE_FOR_nothing)
3624 x = prepare_operand (icode, x, 0, mode, wider_mode, unsignedp);
3625 emit_insn (GEN_FCN (icode) (x));
3627 emit_jump_insn (bcc_gen_fctn[(int) comparison] (label));
3631 /* Handle compares for which there is a directly suitable insn. */
3633 icode = (int) cmp_optab->handlers[(int) wider_mode].insn_code;
3634 if (icode != CODE_FOR_nothing)
3636 x = prepare_operand (icode, x, 0, mode, wider_mode, unsignedp);
3637 y = prepare_operand (icode, y, 1, mode, wider_mode, unsignedp);
3638 emit_insn (GEN_FCN (icode) (x, y));
3640 emit_jump_insn (bcc_gen_fctn[(int) comparison] (label));
/* Nothing worked at this width; try the next wider mode in the
   same class, if any.  */
3644 if (!CLASS_HAS_WIDER_MODES_P (class))
3647 wider_mode = GET_MODE_WIDER_MODE (wider_mode);
3649 while (wider_mode != VOIDmode);
3654 /* Generate code to compare X with Y so that the condition codes are
3655 set and to jump to LABEL if the condition is true. If X is a
3656 constant and Y is not a constant, then the comparison is swapped to
3657 ensure that the comparison RTL has the canonical form.
3659 UNSIGNEDP nonzero says that X and Y are unsigned; this matters if they
3660 need to be widened by emit_cmp_insn. UNSIGNEDP is also used to select
3661 the proper branch condition code.
3663 If X and Y have mode BLKmode, then SIZE specifies the size of both X and Y.
3665 MODE is the mode of the inputs (in case they are const_int).
3667 COMPARISON is the rtl operator to compare with (EQ, NE, GT, etc.). It will
3668 be passed unchanged to emit_cmp_insn, then potentially converted into an
3669 unsigned variant based on UNSIGNEDP to select a proper jump instruction. */
3672 emit_cmp_and_jump_insns (rtx x, rtx y, enum rtx_code comparison, rtx size,
3673 enum machine_mode mode, int unsignedp, rtx label)
3675 rtx op0 = x, op1 = y;
3677 /* Swap operands and condition to ensure canonical RTL. */
3678 if (swap_commutative_operands_p (x, y))
3680 /* If we're not emitting a branch, this means some caller
3685 comparison = swap_condition (comparison);
3689 /* If OP0 is still a constant, then both X and Y must be constants.
3690 Force X into a register to create canonical RTL. */
3691 if (CONSTANT_P (op0))
3692 op0 = force_reg (mode, op0);
/* Pick the unsigned variant of the condition when the operands are
   unsigned, so the right branch insn is chosen.  */
3696 comparison = unsigned_condition (comparison);
3698 prepare_cmp_insn (&op0, &op1, &comparison, size, &mode, &unsignedp,
3700 emit_cmp_and_jump_insn_1 (op0, op1, mode, comparison, unsignedp, label);
3703 /* Like emit_cmp_and_jump_insns, but generate only the comparison. */
3706 emit_cmp_insn (rtx x, rtx y, enum rtx_code comparison, rtx size,
3707 enum machine_mode mode, int unsignedp)
/* A null LABEL tells the worker to emit the compare without a branch. */
3709 emit_cmp_and_jump_insns (x, y, comparison, size, mode, unsignedp, 0);
3712 /* Emit a library call comparison between floating point X and Y.
3713 COMPARISON is the rtl operator to compare with (EQ, NE, GT, etc.). */
3716 prepare_float_lib_cmp (rtx *px, rtx *py, enum rtx_code *pcomparison,
3717 enum machine_mode *pmode, int *punsignedp)
3719 enum rtx_code comparison = *pcomparison;
3720 enum rtx_code swapped = swap_condition (comparison);
3721 enum rtx_code reversed = reverse_condition_maybe_unordered (comparison);
3724 enum machine_mode orig_mode = GET_MODE (x);
3725 enum machine_mode mode;
3726 rtx value, target, insns, equiv;
3728 bool reversed_p = false;
3730 /* Search widening modes for a libfunc matching the comparison as
3731 given, as swapped, or as reversed (the latter only when the
3732 libfunc returns a bool).  */
3730 for (mode = orig_mode;
3732 mode = GET_MODE_WIDER_MODE (mode))
3734 if ((libfunc = code_to_optab[comparison]->handlers[mode].libfunc))
3737 if ((libfunc = code_to_optab[swapped]->handlers[mode].libfunc))
3740 tmp = x; x = y; y = tmp;
3741 comparison = swapped;
3745 if ((libfunc = code_to_optab[reversed]->handlers[mode].libfunc)
3746 && FLOAT_LIB_COMPARE_RETURNS_BOOL (mode, reversed))
3748 comparison = reversed;
3754 gcc_assert (mode != VOIDmode);
3756 if (mode != orig_mode)
3758 x = convert_to_mode (mode, x, 0);
3759 y = convert_to_mode (mode, y, 0);
3762 /* Attach a REG_EQUAL note describing the semantics of the libcall to
3763 the RTL.  This allows the RTL optimizers to delete the libcall if the
3764 condition can be determined at compile-time. */
3765 if (comparison == UNORDERED)
3767 rtx temp = simplify_gen_relational (NE, word_mode, mode, x, x);
3768 equiv = simplify_gen_relational (NE, word_mode, mode, y, y);
3769 equiv = simplify_gen_ternary (IF_THEN_ELSE, word_mode, word_mode,
3770 temp, const_true_rtx, equiv);
3774 equiv = simplify_gen_relational (comparison, word_mode, mode, x, y);
3775 if (! FLOAT_LIB_COMPARE_RETURNS_BOOL (mode, comparison))
3777 rtx true_rtx, false_rtx;
/* Select the pair of constants the libfunc is documented to return
   for true/false under each comparison code.  */
3782 true_rtx = const0_rtx;
3783 false_rtx = const_true_rtx;
3787 true_rtx = const_true_rtx;
3788 false_rtx = const0_rtx;
3792 true_rtx = const1_rtx;
3793 false_rtx = const0_rtx;
3797 true_rtx = const0_rtx;
3798 false_rtx = constm1_rtx;
3802 true_rtx = constm1_rtx;
3803 false_rtx = const0_rtx;
3807 true_rtx = const0_rtx;
3808 false_rtx = const1_rtx;
3814 equiv = simplify_gen_ternary (IF_THEN_ELSE, word_mode, word_mode,
3815 equiv, true_rtx, false_rtx);
3820 value = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
3821 word_mode, 2, x, mode, y, mode);
3822 insns = get_insns ();
3825 target = gen_reg_rtx (word_mode);
3826 emit_libcall_block (insns, target, value, equiv);
3828 if (comparison == UNORDERED
3829 || FLOAT_LIB_COMPARE_RETURNS_BOOL (mode, comparison))
3830 comparison = reversed_p ? EQ : NE;
3835 *pcomparison = comparison;
3839 /* Generate code to indirectly jump to a location given in the rtx LOC. */
3842 emit_indirect_jump (rtx loc)
/* Copy LOC into a Pmode pseudo if the indirect_jump pattern's operand
   predicate will not accept it as-is.  */
3844 if (!insn_data[(int) CODE_FOR_indirect_jump].operand[0].predicate
3846 loc = copy_to_mode_reg (Pmode, loc);
3848 emit_jump_insn (gen_indirect_jump (loc));
3852 #ifdef HAVE_conditional_move
3854 /* Emit a conditional move instruction if the machine supports one for that
3855 condition and machine mode.
3857 OP0 and OP1 are the operands that should be compared using CODE. CMODE is
3858 the mode to use should they be constants. If it is VOIDmode, they cannot
3861 OP2 should be stored in TARGET if the comparison is true, otherwise OP3
3862 should be stored there. MODE is the mode to use should they be constants.
3863 If it is VOIDmode, they cannot both be constants.
3865 The result is either TARGET (perhaps modified) or NULL_RTX if the operation
3866 is not supported. */
3869 emit_conditional_move (rtx target, enum rtx_code code, rtx op0, rtx op1,
3870 enum machine_mode cmode, rtx op2, rtx op3,
3871 enum machine_mode mode, int unsignedp)
3873 rtx tem, subtarget, comparison, insn;
3874 enum insn_code icode;
3875 enum rtx_code reversed;
3877 /* If one operand is constant, make it the second one. Only do this
3878 if the other operand is not constant as well. */
3880 if (swap_commutative_operands_p (op0, op1))
3885 code = swap_condition (code);
3888 /* get_condition will prefer to generate LT and GT even if the old
3889 comparison was against zero, so undo that canonicalization here since
3890 comparisons against zero are cheaper. */
3891 if (code == LT && op1 == const1_rtx)
3892 code = LE, op1 = const0_rtx;
3893 else if (code == GT && op1 == constm1_rtx)
3894 code = GE, op1 = const0_rtx;
3896 if (cmode == VOIDmode)
3897 cmode = GET_MODE (op0);
/* If OP2/OP3 would be cheaper swapped and the condition can be
   reversed, exchange them and use the reversed condition.  */
3899 if (swap_commutative_operands_p (op2, op3)
3900 && ((reversed = reversed_comparison_code_parts (code, op0, op1, NULL))
3909 if (mode == VOIDmode)
3910 mode = GET_MODE (op2);
3912 icode = movcc_gen_code[mode];
3914 if (icode == CODE_FOR_nothing)
3918 target = gen_reg_rtx (mode);
3922 /* If the insn doesn't accept these operands, put them in pseudos. */
3924 if (!insn_data[icode].operand[0].predicate
3925 (subtarget, insn_data[icode].operand[0].mode))
3926 subtarget = gen_reg_rtx (insn_data[icode].operand[0].mode);
3928 if (!insn_data[icode].operand[2].predicate
3929 (op2, insn_data[icode].operand[2].mode))
3930 op2 = copy_to_mode_reg (insn_data[icode].operand[2].mode, op2);
3932 if (!insn_data[icode].operand[3].predicate
3933 (op3, insn_data[icode].operand[3].mode))
3934 op3 = copy_to_mode_reg (insn_data[icode].operand[3].mode, op3);
3936 /* Everything should now be in the suitable form, so emit the compare insn
3937 and then the conditional move. */
3940 = compare_from_rtx (op0, op1, code, unsignedp, cmode, NULL_RTX);
3942 /* ??? Watch for const0_rtx (nop) and const_true_rtx (unconditional)? */
3943 /* We can get const0_rtx or const_true_rtx in some circumstances. Just
3944 return NULL and let the caller figure out how best to deal with this
3946 if (GET_CODE (comparison) != code)
3949 insn = GEN_FCN (icode) (subtarget, comparison, op2, op3);
3951 /* If that failed, then give up. */
/* SUBTARGET may differ from TARGET in mode; move the value across.  */
3957 if (subtarget != target)
3958 convert_move (target, subtarget, 0);
3963 /* Return nonzero if a conditional move of mode MODE is supported.
3965 This function is for combine so it can tell whether an insn that looks
3966 like a conditional move is actually supported by the hardware. If we
3967 guess wrong we lose a bit on optimization, but that's it. */
3968 /* ??? sparc64 supports conditionally moving integer values based on fp
3969 comparisons, and vice versa. How do we handle them? */
3972 can_conditionally_move_p (enum machine_mode mode)
/* A recorded movcc pattern for MODE means the target supports it.  */
3974 if (movcc_gen_code[mode] != CODE_FOR_nothing)
3980 #endif /* HAVE_conditional_move */
3982 /* Emit a conditional addition instruction if the machine supports one for that
3983 condition and machine mode.
3985 OP0 and OP1 are the operands that should be compared using CODE. CMODE is
3986 the mode to use should they be constants. If it is VOIDmode, they cannot
3989 OP2 should be stored in TARGET if the comparison is true, otherwise OP2+OP3
3990 should be stored there. MODE is the mode to use should they be constants.
3991 If it is VOIDmode, they cannot both be constants.
3993 The result is either TARGET (perhaps modified) or NULL_RTX if the operation
3994 is not supported. */
3997 emit_conditional_add (rtx target, enum rtx_code code, rtx op0, rtx op1,
3998 enum machine_mode cmode, rtx op2, rtx op3,
3999 enum machine_mode mode, int unsignedp)
4001 rtx tem, subtarget, comparison, insn;
4002 enum insn_code icode;
4003 enum rtx_code reversed;
4005 /* If one operand is constant, make it the second one. Only do this
4006 if the other operand is not constant as well. */
4008 if (swap_commutative_operands_p (op0, op1))
4013 code = swap_condition (code);
4016 /* get_condition will prefer to generate LT and GT even if the old
4017 comparison was against zero, so undo that canonicalization here since
4018 comparisons against zero are cheaper. */
4019 if (code == LT && op1 == const1_rtx)
4020 code = LE, op1 = const0_rtx;
4021 else if (code == GT && op1 == constm1_rtx)
4022 code = GE, op1 = const0_rtx;
4024 if (cmode == VOIDmode)
4025 cmode = GET_MODE (op0);
4027 if (swap_commutative_operands_p (op2, op3)
4028 && ((reversed = reversed_comparison_code_parts (code, op0, op1, NULL))
4037 if (mode == VOIDmode)
4038 mode = GET_MODE (op2);
/* Unlike conditional moves, conditional adds are looked up through
   the addcc optab.  */
4040 icode = addcc_optab->handlers[(int) mode].insn_code;
4042 if (icode == CODE_FOR_nothing)
4046 target = gen_reg_rtx (mode);
4048 /* If the insn doesn't accept these operands, put them in pseudos. */
4050 if (!insn_data[icode].operand[0].predicate
4051 (target, insn_data[icode].operand[0].mode))
4052 subtarget = gen_reg_rtx (insn_data[icode].operand[0].mode);
4056 if (!insn_data[icode].operand[2].predicate
4057 (op2, insn_data[icode].operand[2].mode))
4058 op2 = copy_to_mode_reg (insn_data[icode].operand[2].mode, op2);
4060 if (!insn_data[icode].operand[3].predicate
4061 (op3, insn_data[icode].operand[3].mode))
4062 op3 = copy_to_mode_reg (insn_data[icode].operand[3].mode, op3);
4064 /* Everything should now be in the suitable form, so emit the compare insn
4065 and then the conditional move. */
4068 = compare_from_rtx (op0, op1, code, unsignedp, cmode, NULL_RTX);
4070 /* ??? Watch for const0_rtx (nop) and const_true_rtx (unconditional)? */
4071 /* We can get const0_rtx or const_true_rtx in some circumstances. Just
4072 return NULL and let the caller figure out how best to deal with this
4074 if (GET_CODE (comparison) != code)
4077 insn = GEN_FCN (icode) (subtarget, comparison, op2, op3);
4079 /* If that failed, then give up. */
4085 if (subtarget != target)
4086 convert_move (target, subtarget, 0);
4091 /* These functions attempt to generate an insn body, rather than
4092 emitting the insn, but if the gen function already emits them, we
4093 make no attempt to turn them back into naked patterns. */
4095 /* Generate and return an insn body to add Y to X. */
4098 gen_add2_insn (rtx x, rtx y)
4100 int icode = (int) add_optab->handlers[(int) GET_MODE (x)].insn_code;
/* The caller must pass operands the pattern already accepts; this is
   asserted rather than fixed up.  */
4102 gcc_assert (insn_data[icode].operand[0].predicate
4103 (x, insn_data[icode].operand[0].mode));
4104 gcc_assert (insn_data[icode].operand[1].predicate
4105 (x, insn_data[icode].operand[1].mode));
4106 gcc_assert (insn_data[icode].operand[2].predicate
4107 (y, insn_data[icode].operand[2].mode));
/* X is used both as destination and as first source operand.  */
4109 return GEN_FCN (icode) (x, x, y);
4112 /* Generate and return an insn body to add r1 and c,
4113 storing the result in r0.  Returns 0 (rather than asserting) if the
4114 target has no suitable add pattern or an operand is rejected. */
4115 gen_add3_insn (rtx r0, rtx r1, rtx c)
4117 int icode = (int) add_optab->handlers[(int) GET_MODE (r0)].insn_code;
4119 if (icode == CODE_FOR_nothing
4120 || !(insn_data[icode].operand[0].predicate
4121 (r0, insn_data[icode].operand[0].mode))
4122 || !(insn_data[icode].operand[1].predicate
4123 (r1, insn_data[icode].operand[1].mode))
4124 || !(insn_data[icode].operand[2].predicate
4125 (c, insn_data[icode].operand[2].mode)))
4128 return GEN_FCN (icode) (r0, r1, c);
/* Return nonzero if the target has an add pattern for the mode of X
   that accepts X as destination/first operand and Y as addend.  */
4132 have_add2_insn (rtx x, rtx y)
4136 gcc_assert (GET_MODE (x) != VOIDmode);
4138 icode = (int) add_optab->handlers[(int) GET_MODE (x)].insn_code;
4140 if (icode == CODE_FOR_nothing)
4143 if (!(insn_data[icode].operand[0].predicate
4144 (x, insn_data[icode].operand[0].mode))
4145 || !(insn_data[icode].operand[1].predicate
4146 (x, insn_data[icode].operand[1].mode))
4147 || !(insn_data[icode].operand[2].predicate
4148 (y, insn_data[icode].operand[2].mode)))
4154 /* Generate and return an insn body to subtract Y from X. */
4157 gen_sub2_insn (rtx x, rtx y)
4159 int icode = (int) sub_optab->handlers[(int) GET_MODE (x)].insn_code;
/* As in gen_add2_insn, operands must already satisfy the pattern's
   predicates; X doubles as destination and minuend.  */
4161 gcc_assert (insn_data[icode].operand[0].predicate
4162 (x, insn_data[icode].operand[0].mode));
4163 gcc_assert (insn_data[icode].operand[1].predicate
4164 (x, insn_data[icode].operand[1].mode));
4165 gcc_assert (insn_data[icode].operand[2].predicate
4166 (y, insn_data[icode].operand[2].mode));
4168 return GEN_FCN (icode) (x, x, y);
4171 /* Generate and return an insn body to subtract r1 and c,
4172 storing the result in r0.  Returns 0 (rather than asserting) if the
4173 target has no suitable sub pattern or an operand is rejected. */
4174 gen_sub3_insn (rtx r0, rtx r1, rtx c)
4176 int icode = (int) sub_optab->handlers[(int) GET_MODE (r0)].insn_code;
4178 if (icode == CODE_FOR_nothing
4179 || !(insn_data[icode].operand[0].predicate
4180 (r0, insn_data[icode].operand[0].mode))
4181 || !(insn_data[icode].operand[1].predicate
4182 (r1, insn_data[icode].operand[1].mode))
4183 || !(insn_data[icode].operand[2].predicate
4184 (c, insn_data[icode].operand[2].mode)))
4187 return GEN_FCN (icode) (r0, r1, c);
/* Return nonzero if the target has a sub pattern for the mode of X
   that accepts X as destination/minuend and Y as subtrahend.  */
4191 have_sub2_insn (rtx x, rtx y)
4195 gcc_assert (GET_MODE (x) != VOIDmode);
4197 icode = (int) sub_optab->handlers[(int) GET_MODE (x)].insn_code;
4199 if (icode == CODE_FOR_nothing)
4202 if (!(insn_data[icode].operand[0].predicate
4203 (x, insn_data[icode].operand[0].mode))
4204 || !(insn_data[icode].operand[1].predicate
4205 (x, insn_data[icode].operand[1].mode))
4206 || !(insn_data[icode].operand[2].predicate
4207 (y, insn_data[icode].operand[2].mode)))
4213 /* Generate the body of an instruction to copy Y into X.
4214 It may be a list of insns, if one insn isn't enough. */
4217 gen_move_insn (rtx x, rtx y)
/* Delegate to the generic move expander, which handles multi-insn
   moves as well.  */
4222 emit_move_insn_1 (x, y);
4228 /* Return the insn code used to extend FROM_MODE to TO_MODE.
4229 UNSIGNEDP specifies zero-extension instead of sign-extension. If
4230 no such operation exists, CODE_FOR_nothing will be returned. */
4233 can_extend_p (enum machine_mode to_mode, enum machine_mode from_mode,
/* Some targets provide a special pointer-extension pattern; prefer it
   when configured.  */
4237 #ifdef HAVE_ptr_extend
4239 return CODE_FOR_ptr_extend;
4242 tab = unsignedp ? zext_optab : sext_optab;
4243 return tab->handlers[to_mode][from_mode].insn_code;
4246 /* Generate the body of an insn to extend Y (with mode MFROM)
4247 into X (with mode MTO). Do zero-extension if UNSIGNEDP is nonzero. */
4250 gen_extend_insn (rtx x, rtx y, enum machine_mode mto,
4251 enum machine_mode mfrom, int unsignedp)
/* Caller is responsible for ensuring the extension exists; the icode
   is used unchecked here.  */
4253 enum insn_code icode = can_extend_p (mto, mfrom, unsignedp);
4254 return GEN_FCN (icode) (x, y);
4257 /* can_fix_p and can_float_p say whether the target machine
4258 can directly convert a given fixed point type to
4259 a given floating point type, or vice versa.
4260 The returned value is the CODE_FOR_... value to use,
4261 or CODE_FOR_nothing if these modes cannot be directly converted.
4263 *TRUNCP_PTR is set to 1 if it is necessary to output
4264 an explicit FTRUNC insn before the fix insn; otherwise 0. */
4266 static enum insn_code
4267 can_fix_p (enum machine_mode fixmode, enum machine_mode fltmode,
4268 int unsignedp, int *truncp_ptr)
4271 enum insn_code icode;
/* First choice: a fix-truncate pattern, which needs no separate
   FTRUNC step.  */
4273 tab = unsignedp ? ufixtrunc_optab : sfixtrunc_optab;
4274 icode = tab->handlers[fixmode][fltmode].insn_code;
4275 if (icode != CODE_FOR_nothing)
4281 /* FIXME: This requires a port to define both FIX and FTRUNC pattern
4282 for this to work. We need to rework the fix* and ftrunc* patterns
4283 and documentation. */
4284 tab = unsignedp ? ufix_optab : sfix_optab;
4285 icode = tab->handlers[fixmode][fltmode].insn_code;
4286 if (icode != CODE_FOR_nothing
4287 && ftrunc_optab->handlers[fltmode].insn_code != CODE_FOR_nothing)
4294 return CODE_FOR_nothing;
/* Return the insn code for converting fixed-point mode FIXMODE to
   floating mode FLTMODE (zero-extending if UNSIGNEDP), or
   CODE_FOR_nothing if no direct pattern exists.  */
4297 static enum insn_code
4298 can_float_p (enum machine_mode fltmode, enum machine_mode fixmode,
4303 tab = unsignedp ? ufloat_optab : sfloat_optab;
4304 return tab->handlers[fltmode][fixmode].insn_code;
4307 /* Generate code to convert FROM to floating point
4308 and store in TO. FROM must be fixed point and not VOIDmode.
4309 UNSIGNEDP nonzero means regard FROM as unsigned.
4310 Normally this is done by correcting the final value
4311 if it is negative. */
4314 expand_float (rtx to, rtx from, int unsignedp)
4316 enum insn_code icode;
4318 enum machine_mode fmode, imode;
4319 bool can_do_signed = false;
4321 /* Crash now, because we won't be able to decide which mode to use. */
4322 gcc_assert (GET_MODE (from) != VOIDmode);
4324 /* Look for an insn to do the conversion. Do it in the specified
4325 modes if possible; otherwise convert either input, output or both to
4326 wider mode. If the integer mode is wider than the mode of FROM,
4327 we can do the conversion signed even if the input is unsigned. */
4329 for (fmode = GET_MODE (to); fmode != VOIDmode;
4330 fmode = GET_MODE_WIDER_MODE (fmode))
4331 for (imode = GET_MODE (from); imode != VOIDmode;
4332 imode = GET_MODE_WIDER_MODE (imode))
4334 int doing_unsigned = unsignedp;
/* Skip float modes too narrow to represent the source exactly.  */
4336 if (fmode != GET_MODE (to)
4337 && significand_size (fmode) < GET_MODE_BITSIZE (GET_MODE (from)))
4340 icode = can_float_p (fmode, imode, unsignedp);
4341 if (icode == CODE_FOR_nothing && unsignedp)
4343 enum insn_code scode = can_float_p (fmode, imode, 0);
4344 if (scode != CODE_FOR_nothing)
4345 can_do_signed = true;
4346 if (imode != GET_MODE (from))
4347 icode = scode, doing_unsigned = 0;
4350 if (icode != CODE_FOR_nothing)
4352 if (imode != GET_MODE (from))
4353 from = convert_to_mode (imode, from, unsignedp);
4355 if (fmode != GET_MODE (to))
4356 target = gen_reg_rtx (fmode);
4358 emit_unop_insn (icode, target, from,
4359 doing_unsigned ? UNSIGNED_FLOAT : FLOAT);
4362 convert_move (to, target, 0);
4367 /* Unsigned integer, and no way to convert directly.
4368 Convert as signed, then conditionally adjust the result. */
4369 if (unsignedp && can_do_signed)
4371 rtx label = gen_label_rtx ();
4373 REAL_VALUE_TYPE offset;
4375 /* Look for a usable floating mode FMODE wider than the source and at
4376 least as wide as the target. Using FMODE will avoid rounding woes
4377 with unsigned values greater than the signed maximum value. */
4379 for (fmode = GET_MODE (to); fmode != VOIDmode;
4380 fmode = GET_MODE_WIDER_MODE (fmode))
4381 if (GET_MODE_BITSIZE (GET_MODE (from)) < GET_MODE_BITSIZE (fmode)
4382 && can_float_p (fmode, GET_MODE (from), 0) != CODE_FOR_nothing)
4385 if (fmode == VOIDmode)
4387 /* There is no such mode. Pretend the target is wide enough. */
4388 fmode = GET_MODE (to);
4390 /* Avoid double-rounding when TO is narrower than FROM. */
4391 if ((significand_size (fmode) + 1)
4392 < GET_MODE_BITSIZE (GET_MODE (from)))
4395 rtx neglabel = gen_label_rtx ();
4397 /* Don't use TARGET if it isn't a register, is a hard register,
4398 or is the wrong mode. */
4400 || REGNO (target) < FIRST_PSEUDO_REGISTER
4401 || GET_MODE (target) != fmode)
4402 target = gen_reg_rtx (fmode);
4404 imode = GET_MODE (from);
4405 do_pending_stack_adjust ();
4407 /* Test whether the sign bit is set. */
4408 emit_cmp_and_jump_insns (from, const0_rtx, LT, NULL_RTX, imode,
4411 /* The sign bit is not set. Convert as signed. */
4412 expand_float (target, from, 0);
4413 emit_jump_insn (gen_jump (label));
4416 /* The sign bit is set.
4417 Convert to a usable (positive signed) value by shifting right
4418 one bit, while remembering if a nonzero bit was shifted
4419 out; i.e., compute (from & 1) | (from >> 1). */
4421 emit_label (neglabel);
4422 temp = expand_binop (imode, and_optab, from, const1_rtx,
4423 NULL_RTX, 1, OPTAB_LIB_WIDEN);
4424 temp1 = expand_shift (RSHIFT_EXPR, imode, from, integer_one_node,
4426 temp = expand_binop (imode, ior_optab, temp, temp1, temp, 1,
4428 expand_float (target, temp, 0);
4430 /* Multiply by 2 to undo the shift above. */
4431 temp = expand_binop (fmode, add_optab, target, target,
4432 target, 0, OPTAB_LIB_WIDEN);
4434 emit_move_insn (target, temp);
4436 do_pending_stack_adjust ();
4442 /* If we are about to do some arithmetic to correct for an
4443 unsigned operand, do it in a pseudo-register. */
4445 if (GET_MODE (to) != fmode
4446 || !REG_P (to) || REGNO (to) < FIRST_PSEUDO_REGISTER)
4447 target = gen_reg_rtx (fmode);
4449 /* Convert as signed integer to floating. */
4450 expand_float (target, from, 0);
4452 /* If FROM is negative (and therefore TO is negative),
4453 correct its value by 2**bitwidth. */
4455 do_pending_stack_adjust ();
4456 emit_cmp_and_jump_insns (from, const0_rtx, GE, NULL_RTX, GET_MODE (from),
4460 real_2expN (&offset, GET_MODE_BITSIZE (GET_MODE (from)));
4461 temp = expand_binop (fmode, add_optab, target,
4462 CONST_DOUBLE_FROM_REAL_VALUE (offset, fmode),
4463 target, 0, OPTAB_LIB_WIDEN);
4465 emit_move_insn (target, temp);
4467 do_pending_stack_adjust ();
4472 /* No hardware instruction available; call a library routine. */
4477 convert_optab tab = unsignedp ? ufloat_optab : sfloat_optab;
/* Libcalls only exist for SImode and wider sources; widen first.  */
4479 if (GET_MODE_SIZE (GET_MODE (from)) < GET_MODE_SIZE (SImode))
4480 from = convert_to_mode (SImode, from, unsignedp);
4482 libfunc = tab->handlers[GET_MODE (to)][GET_MODE (from)].libfunc;
4483 gcc_assert (libfunc);
4487 value = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
4488 GET_MODE (to), 1, from,
4490 insns = get_insns ();
4493 emit_libcall_block (insns, target, value,
4494 gen_rtx_FLOAT (GET_MODE (to), from));
4499 /* Copy result to requested destination
4500 if we have been computing in a temp location. */
4504 if (GET_MODE (target) == GET_MODE (to))
4505 emit_move_insn (to, target);
4507 convert_move (to, target, 0);
4511 /* Generate code to convert FROM to fixed point and store in TO. FROM
4512 must be floating point. */
4515 expand_fix (rtx to, rtx from, int unsignedp)
4517 enum insn_code icode;
4519 enum machine_mode fmode, imode;
4522 /* We first try to find a pair of modes, one real and one integer, at
4523 least as wide as FROM and TO, respectively, in which we can open-code
4524 this conversion. If the integer mode is wider than the mode of TO,
4525 we can do the conversion either signed or unsigned. */
4527 for (fmode = GET_MODE (from); fmode != VOIDmode;
4528 fmode = GET_MODE_WIDER_MODE (fmode))
4529 for (imode = GET_MODE (to); imode != VOIDmode;
4530 imode = GET_MODE_WIDER_MODE (imode))
4532 int doing_unsigned = unsignedp;
4534 icode = can_fix_p (imode, fmode, unsignedp, &must_trunc);
4535 if (icode == CODE_FOR_nothing && imode != GET_MODE (to) && unsignedp)
4536 icode = can_fix_p (imode, fmode, 0, &must_trunc), doing_unsigned = 0;
4538 if (icode != CODE_FOR_nothing)
4540 if (fmode != GET_MODE (from))
4541 from = convert_to_mode (fmode, from, 0);
/* can_fix_p reported that an explicit truncation toward zero is
   needed before the fix insn.  */
4545 rtx temp = gen_reg_rtx (GET_MODE (from));
4546 from = expand_unop (GET_MODE (from), ftrunc_optab, from,
4550 if (imode != GET_MODE (to))
4551 target = gen_reg_rtx (imode);
4553 emit_unop_insn (icode, target, from,
4554 doing_unsigned ? UNSIGNED_FIX : FIX);
4556 convert_move (to, target, unsignedp);
4561 /* For an unsigned conversion, there is one more way to do it.
4562 If we have a signed conversion, we generate code that compares
4563 the real value to the largest representable positive number. If it
4564 is smaller, the conversion is done normally. Otherwise, subtract
4565 one plus the highest signed number, convert, and add it back.
4567 We only need to check all real modes, since we know we didn't find
4568 anything with a wider integer mode.
4570 This code used to extend FP value into mode wider than the destination.
4571 This is not needed. Consider, for instance conversion from SFmode
4574 The hot path through the code is dealing with inputs smaller than 2^63
4575 and doing just the conversion, so there are no bits to lose.
4577 In the other path we know the value is positive in the range 2^63..2^64-1
4578 inclusive. (as for other input overflow happens and result is undefined)
4579 So we know that the most important bit set in mantissa corresponds to
4580 2^63. The subtraction of 2^63 should not generate any rounding as it
4581 simply clears out that bit. The rest is trivial. */
4583 if (unsignedp && GET_MODE_BITSIZE (GET_MODE (to)) <= HOST_BITS_PER_WIDE_INT)
4584 for (fmode = GET_MODE (from); fmode != VOIDmode;
4585 fmode = GET_MODE_WIDER_MODE (fmode))
4586 if (CODE_FOR_nothing != can_fix_p (GET_MODE (to), fmode, 0,
4590 REAL_VALUE_TYPE offset;
4591 rtx limit, lab1, lab2, insn;
/* LIMIT is 2^(N-1), the smallest input needing the fixup path.  */
4593 bitsize = GET_MODE_BITSIZE (GET_MODE (to));
4594 real_2expN (&offset, bitsize - 1);
4595 limit = CONST_DOUBLE_FROM_REAL_VALUE (offset, fmode);
4596 lab1 = gen_label_rtx ();
4597 lab2 = gen_label_rtx ();
4599 if (fmode != GET_MODE (from))
4600 from = convert_to_mode (fmode, from, 0);
4602 /* See if we need to do the subtraction. */
4603 do_pending_stack_adjust ();
4604 emit_cmp_and_jump_insns (from, limit, GE, NULL_RTX, GET_MODE (from),
4607 /* If not, do the signed "fix" and branch around fixup code. */
4608 expand_fix (to, from, 0);
4609 emit_jump_insn (gen_jump (lab2));
4612 /* Otherwise, subtract 2**(N-1), convert to signed number,
4613 then add 2**(N-1). Do the addition using XOR since this
4614 will often generate better code. */
4616 target = expand_binop (GET_MODE (from), sub_optab, from, limit,
4617 NULL_RTX, 0, OPTAB_LIB_WIDEN);
4618 expand_fix (to, target, 0);
4619 target = expand_binop (GET_MODE (to), xor_optab, to,
4621 ((HOST_WIDE_INT) 1 << (bitsize - 1),
4623 to, 1, OPTAB_LIB_WIDEN);
4626 emit_move_insn (to, target);
4630 if (mov_optab->handlers[(int) GET_MODE (to)].insn_code
4631 != CODE_FOR_nothing)
4633 /* Make a place for a REG_NOTE and add it. */
4634 insn = emit_move_insn (to, to);
4635 set_unique_reg_note (insn,
4637 gen_rtx_fmt_e (UNSIGNED_FIX,
4645 /* We can't do it with an insn, so use a library call. But first ensure
4646 that the mode of TO is at least as wide as SImode, since those are the
4647 only library calls we know about. */
4649 if (GET_MODE_SIZE (GET_MODE (to)) < GET_MODE_SIZE (SImode))
4651 target = gen_reg_rtx (SImode);
4653 expand_fix (target, from, unsignedp);
4661 convert_optab tab = unsignedp ? ufix_optab : sfix_optab;
4662 libfunc = tab->handlers[GET_MODE (to)][GET_MODE (from)].libfunc;
4663 gcc_assert (libfunc);
4667 value = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
4668 GET_MODE (to), 1, from,
4670 insns = get_insns ();
4673 emit_libcall_block (insns, target, value,
4674 gen_rtx_fmt_e (unsignedp ? UNSIGNED_FIX : FIX,
4675 GET_MODE (to), from));
4680 if (GET_MODE (to) == GET_MODE (target))
4681 emit_move_insn (to, target);
4683 convert_move (to, target, 0);
4687 /* Report whether we have an instruction to perform the operation
4688 specified by CODE on operands of mode MODE. */
4690 have_insn_for (enum rtx_code code, enum machine_mode mode)
4692 return (code_to_optab[(int) code] != 0
4693 && (code_to_optab[(int) code]->handlers[(int) mode].insn_code
4694 != CODE_FOR_nothing));
4697 /* Create a blank optab. */
4702 optab op = ggc_alloc (sizeof (struct optab));
4703 for (i = 0; i < NUM_MACHINE_MODES; i++)
4705 op->handlers[i].insn_code = CODE_FOR_nothing;
4706 op->handlers[i].libfunc = 0;
4712 static convert_optab
4713 new_convert_optab (void)
4716 convert_optab op = ggc_alloc (sizeof (struct convert_optab));
4717 for (i = 0; i < NUM_MACHINE_MODES; i++)
4718 for (j = 0; j < NUM_MACHINE_MODES; j++)
4720 op->handlers[i][j].insn_code = CODE_FOR_nothing;
4721 op->handlers[i][j].libfunc = 0;
4726 /* Same, but fill in its code as CODE, and write it into the
4727 code_to_optab table. */
4729 init_optab (enum rtx_code code)
4731 optab op = new_optab ();
4733 code_to_optab[(int) code] = op;
4737 /* Same, but fill in its code as CODE, and do _not_ write it into
4738 the code_to_optab table. */
4740 init_optabv (enum rtx_code code)
4742 optab op = new_optab ();
4747 /* Conversion optabs never go in the code_to_optab table. */
4748 static inline convert_optab
4749 init_convert_optab (enum rtx_code code)
4751 convert_optab op = new_convert_optab ();
4756 /* Initialize the libfunc fields of an entire group of entries in some
4757 optab. Each entry is set equal to a string consisting of a leading
4758 pair of underscores followed by a generic operation name followed by
4759 a mode name (downshifted to lowercase) followed by a single character
4760 representing the number of operands for the given operation (which is
4761 usually one of the characters '2', '3', or '4').
4763 OPTABLE is the table in which libfunc fields are to be initialized.
4764 FIRST_MODE is the first machine mode index in the given optab to
4766 LAST_MODE is the last machine mode index in the given optab to
4768 OPNAME is the generic (string) name of the operation.
4769 SUFFIX is the character which specifies the number of operands for
4770 the given generic operation.
4774 init_libfuncs (optab optable, int first_mode, int last_mode,
4775 const char *opname, int suffix)
4778 unsigned opname_len = strlen (opname);
4780 for (mode = first_mode; (int) mode <= (int) last_mode;
4781 mode = (enum machine_mode) ((int) mode + 1))
4783 const char *mname = GET_MODE_NAME (mode);
4784 unsigned mname_len = strlen (mname);
4785 char *libfunc_name = alloca (2 + opname_len + mname_len + 1 + 1);
4792 for (q = opname; *q; )
4794 for (q = mname; *q; q++)
4795 *p++ = TOLOWER (*q);
4799 optable->handlers[(int) mode].libfunc
4800 = init_one_libfunc (ggc_alloc_string (libfunc_name, p - libfunc_name));
4804 /* Initialize the libfunc fields of an entire group of entries in some
4805 optab which correspond to all integer mode operations. The parameters
4806 have the same meaning as similarly named ones for the `init_libfuncs'
4807 routine. (See above). */
4810 init_integral_libfuncs (optab optable, const char *opname, int suffix)
4812 int maxsize = 2*BITS_PER_WORD;
4813 if (maxsize < LONG_LONG_TYPE_SIZE)
4814 maxsize = LONG_LONG_TYPE_SIZE;
4815 init_libfuncs (optable, word_mode,
4816 mode_for_size (maxsize, MODE_INT, 0),
4820 /* Initialize the libfunc fields of an entire group of entries in some
4821 optab which correspond to all real mode operations. The parameters
4822 have the same meaning as similarly named ones for the `init_libfuncs'
4823 routine. (See above). */
4826 init_floating_libfuncs (optab optable, const char *opname, int suffix)
4828 init_libfuncs (optable, MIN_MODE_FLOAT, MAX_MODE_FLOAT, opname, suffix);
4831 /* Initialize the libfunc fields of an entire group of entries of an
4832 inter-mode-class conversion optab. The string formation rules are
4833 similar to the ones for init_libfuncs, above, but instead of having
4834 a mode name and an operand count these functions have two mode names
4835 and no operand count. */
4837 init_interclass_conv_libfuncs (convert_optab tab, const char *opname,
4838 enum mode_class from_class,
4839 enum mode_class to_class)
4841 enum machine_mode first_from_mode = GET_CLASS_NARROWEST_MODE (from_class);
4842 enum machine_mode first_to_mode = GET_CLASS_NARROWEST_MODE (to_class);
4843 size_t opname_len = strlen (opname);
4844 size_t max_mname_len = 0;
4846 enum machine_mode fmode, tmode;
4847 const char *fname, *tname;
4849 char *libfunc_name, *suffix;
4852 for (fmode = first_from_mode;
4854 fmode = GET_MODE_WIDER_MODE (fmode))
4855 max_mname_len = MAX (max_mname_len, strlen (GET_MODE_NAME (fmode)));
4857 for (tmode = first_to_mode;
4859 tmode = GET_MODE_WIDER_MODE (tmode))
4860 max_mname_len = MAX (max_mname_len, strlen (GET_MODE_NAME (tmode)));
4862 libfunc_name = alloca (2 + opname_len + 2*max_mname_len + 1 + 1);
4863 libfunc_name[0] = '_';
4864 libfunc_name[1] = '_';
4865 memcpy (&libfunc_name[2], opname, opname_len);
4866 suffix = libfunc_name + opname_len + 2;
4868 for (fmode = first_from_mode; fmode != VOIDmode;
4869 fmode = GET_MODE_WIDER_MODE (fmode))
4870 for (tmode = first_to_mode; tmode != VOIDmode;
4871 tmode = GET_MODE_WIDER_MODE (tmode))
4873 fname = GET_MODE_NAME (fmode);
4874 tname = GET_MODE_NAME (tmode);
4877 for (q = fname; *q; p++, q++)
4879 for (q = tname; *q; p++, q++)
4884 tab->handlers[tmode][fmode].libfunc
4885 = init_one_libfunc (ggc_alloc_string (libfunc_name,
4890 /* Initialize the libfunc fields of an entire group of entries of an
4891 intra-mode-class conversion optab. The string formation rules are
4892 similar to the ones for init_libfunc, above. WIDENING says whether
4893 the optab goes from narrow to wide modes or vice versa. These functions
4894 have two mode names _and_ an operand count. */
4896 init_intraclass_conv_libfuncs (convert_optab tab, const char *opname,
4897 enum mode_class class, bool widening)
4899 enum machine_mode first_mode = GET_CLASS_NARROWEST_MODE (class);
4900 size_t opname_len = strlen (opname);
4901 size_t max_mname_len = 0;
4903 enum machine_mode nmode, wmode;
4904 const char *nname, *wname;
4906 char *libfunc_name, *suffix;
4909 for (nmode = first_mode; nmode != VOIDmode;
4910 nmode = GET_MODE_WIDER_MODE (nmode))
4911 max_mname_len = MAX (max_mname_len, strlen (GET_MODE_NAME (nmode)));
4913 libfunc_name = alloca (2 + opname_len + 2*max_mname_len + 1 + 1);
4914 libfunc_name[0] = '_';
4915 libfunc_name[1] = '_';
4916 memcpy (&libfunc_name[2], opname, opname_len);
4917 suffix = libfunc_name + opname_len + 2;
4919 for (nmode = first_mode; nmode != VOIDmode;
4920 nmode = GET_MODE_WIDER_MODE (nmode))
4921 for (wmode = GET_MODE_WIDER_MODE (nmode); wmode != VOIDmode;
4922 wmode = GET_MODE_WIDER_MODE (wmode))
4924 nname = GET_MODE_NAME (nmode);
4925 wname = GET_MODE_NAME (wmode);
4928 for (q = widening ? nname : wname; *q; p++, q++)
4930 for (q = widening ? wname : nname; *q; p++, q++)
4936 tab->handlers[widening ? wmode : nmode]
4937 [widening ? nmode : wmode].libfunc
4938 = init_one_libfunc (ggc_alloc_string (libfunc_name,
4945 init_one_libfunc (const char *name)
4949 /* Create a FUNCTION_DECL that can be passed to
4950 targetm.encode_section_info. */
4951 /* ??? We don't have any type information except for this is
4952 a function. Pretend this is "int foo()". */
4953 tree decl = build_decl (FUNCTION_DECL, get_identifier (name),
4954 build_function_type (integer_type_node, NULL_TREE));
4955 DECL_ARTIFICIAL (decl) = 1;
4956 DECL_EXTERNAL (decl) = 1;
4957 TREE_PUBLIC (decl) = 1;
4959 symbol = XEXP (DECL_RTL (decl), 0);
4961 /* Zap the nonsensical SYMBOL_REF_DECL for this. What we're left with
4962 are the flags assigned by targetm.encode_section_info. */
4963 SYMBOL_REF_DECL (symbol) = 0;
4968 /* Call this to reset the function entry for one optab (OPTABLE) in mode
4969 MODE to NAME, which should be either 0 or a string constant. */
4971 set_optab_libfunc (optab optable, enum machine_mode mode, const char *name)
4974 optable->handlers[mode].libfunc = init_one_libfunc (name);
4976 optable->handlers[mode].libfunc = 0;
4979 /* Call this to reset the function entry for one conversion optab
4980 (OPTABLE) from mode FMODE to mode TMODE to NAME, which should be
4981 either 0 or a string constant. */
4983 set_conv_libfunc (convert_optab optable, enum machine_mode tmode,
4984 enum machine_mode fmode, const char *name)
4987 optable->handlers[tmode][fmode].libfunc = init_one_libfunc (name);
4989 optable->handlers[tmode][fmode].libfunc = 0;
4992 /* Call this once to initialize the contents of the optabs
4993 appropriately for the current target machine. */
5000 /* Start by initializing all tables to contain CODE_FOR_nothing. */
5002 for (i = 0; i < NUM_RTX_CODE; i++)
5003 setcc_gen_code[i] = CODE_FOR_nothing;
5005 #ifdef HAVE_conditional_move
5006 for (i = 0; i < NUM_MACHINE_MODES; i++)
5007 movcc_gen_code[i] = CODE_FOR_nothing;
5010 for (i = 0; i < NUM_MACHINE_MODES; i++)
5012 vcond_gen_code[i] = CODE_FOR_nothing;
5013 vcondu_gen_code[i] = CODE_FOR_nothing;
5016 add_optab = init_optab (PLUS);
5017 addv_optab = init_optabv (PLUS);
5018 sub_optab = init_optab (MINUS);
5019 subv_optab = init_optabv (MINUS);
5020 smul_optab = init_optab (MULT);
5021 smulv_optab = init_optabv (MULT);
5022 smul_highpart_optab = init_optab (UNKNOWN);
5023 umul_highpart_optab = init_optab (UNKNOWN);
5024 smul_widen_optab = init_optab (UNKNOWN);
5025 umul_widen_optab = init_optab (UNKNOWN);
5026 usmul_widen_optab = init_optab (UNKNOWN);
5027 sdiv_optab = init_optab (DIV);
5028 sdivv_optab = init_optabv (DIV);
5029 sdivmod_optab = init_optab (UNKNOWN);
5030 udiv_optab = init_optab (UDIV);
5031 udivmod_optab = init_optab (UNKNOWN);
5032 smod_optab = init_optab (MOD);
5033 umod_optab = init_optab (UMOD);
5034 fmod_optab = init_optab (UNKNOWN);
5035 drem_optab = init_optab (UNKNOWN);
5036 ftrunc_optab = init_optab (UNKNOWN);
5037 and_optab = init_optab (AND);
5038 ior_optab = init_optab (IOR);
5039 xor_optab = init_optab (XOR);
5040 ashl_optab = init_optab (ASHIFT);
5041 ashr_optab = init_optab (ASHIFTRT);
5042 lshr_optab = init_optab (LSHIFTRT);
5043 rotl_optab = init_optab (ROTATE);
5044 rotr_optab = init_optab (ROTATERT);
5045 smin_optab = init_optab (SMIN);
5046 smax_optab = init_optab (SMAX);
5047 umin_optab = init_optab (UMIN);
5048 umax_optab = init_optab (UMAX);
5049 pow_optab = init_optab (UNKNOWN);
5050 atan2_optab = init_optab (UNKNOWN);
5052 /* These three have codes assigned exclusively for the sake of
5054 mov_optab = init_optab (SET);
5055 movstrict_optab = init_optab (STRICT_LOW_PART);
5056 cmp_optab = init_optab (COMPARE);
5058 ucmp_optab = init_optab (UNKNOWN);
5059 tst_optab = init_optab (UNKNOWN);
5061 eq_optab = init_optab (EQ);
5062 ne_optab = init_optab (NE);
5063 gt_optab = init_optab (GT);
5064 ge_optab = init_optab (GE);
5065 lt_optab = init_optab (LT);
5066 le_optab = init_optab (LE);
5067 unord_optab = init_optab (UNORDERED);
5069 neg_optab = init_optab (NEG);
5070 negv_optab = init_optabv (NEG);
5071 abs_optab = init_optab (ABS);
5072 absv_optab = init_optabv (ABS);
5073 addcc_optab = init_optab (UNKNOWN);
5074 one_cmpl_optab = init_optab (NOT);
5075 ffs_optab = init_optab (FFS);
5076 clz_optab = init_optab (CLZ);
5077 ctz_optab = init_optab (CTZ);
5078 popcount_optab = init_optab (POPCOUNT);
5079 parity_optab = init_optab (PARITY);
5080 sqrt_optab = init_optab (SQRT);
5081 floor_optab = init_optab (UNKNOWN);
5082 lfloor_optab = init_optab (UNKNOWN);
5083 ceil_optab = init_optab (UNKNOWN);
5084 lceil_optab = init_optab (UNKNOWN);
5085 round_optab = init_optab (UNKNOWN);
5086 btrunc_optab = init_optab (UNKNOWN);
5087 nearbyint_optab = init_optab (UNKNOWN);
5088 rint_optab = init_optab (UNKNOWN);
5089 lrint_optab = init_optab (UNKNOWN);
5090 sincos_optab = init_optab (UNKNOWN);
5091 sin_optab = init_optab (UNKNOWN);
5092 asin_optab = init_optab (UNKNOWN);
5093 cos_optab = init_optab (UNKNOWN);
5094 acos_optab = init_optab (UNKNOWN);
5095 exp_optab = init_optab (UNKNOWN);
5096 exp10_optab = init_optab (UNKNOWN);
5097 exp2_optab = init_optab (UNKNOWN);
5098 expm1_optab = init_optab (UNKNOWN);
5099 ldexp_optab = init_optab (UNKNOWN);
5100 logb_optab = init_optab (UNKNOWN);
5101 ilogb_optab = init_optab (UNKNOWN);
5102 log_optab = init_optab (UNKNOWN);
5103 log10_optab = init_optab (UNKNOWN);
5104 log2_optab = init_optab (UNKNOWN);
5105 log1p_optab = init_optab (UNKNOWN);
5106 tan_optab = init_optab (UNKNOWN);
5107 atan_optab = init_optab (UNKNOWN);
5108 copysign_optab = init_optab (UNKNOWN);
5110 strlen_optab = init_optab (UNKNOWN);
5111 cbranch_optab = init_optab (UNKNOWN);
5112 cmov_optab = init_optab (UNKNOWN);
5113 cstore_optab = init_optab (UNKNOWN);
5114 push_optab = init_optab (UNKNOWN);
5116 reduc_smax_optab = init_optab (UNKNOWN);
5117 reduc_umax_optab = init_optab (UNKNOWN);
5118 reduc_smin_optab = init_optab (UNKNOWN);
5119 reduc_umin_optab = init_optab (UNKNOWN);
5120 reduc_splus_optab = init_optab (UNKNOWN);
5121 reduc_uplus_optab = init_optab (UNKNOWN);
5123 vec_extract_optab = init_optab (UNKNOWN);
5124 vec_set_optab = init_optab (UNKNOWN);
5125 vec_init_optab = init_optab (UNKNOWN);
5126 vec_shl_optab = init_optab (UNKNOWN);
5127 vec_shr_optab = init_optab (UNKNOWN);
5128 vec_realign_load_optab = init_optab (UNKNOWN);
5129 movmisalign_optab = init_optab (UNKNOWN);
5131 powi_optab = init_optab (UNKNOWN);
5134 sext_optab = init_convert_optab (SIGN_EXTEND);
5135 zext_optab = init_convert_optab (ZERO_EXTEND);
5136 trunc_optab = init_convert_optab (TRUNCATE);
5137 sfix_optab = init_convert_optab (FIX);
5138 ufix_optab = init_convert_optab (UNSIGNED_FIX);
5139 sfixtrunc_optab = init_convert_optab (UNKNOWN);
5140 ufixtrunc_optab = init_convert_optab (UNKNOWN);
5141 sfloat_optab = init_convert_optab (FLOAT);
5142 ufloat_optab = init_convert_optab (UNSIGNED_FLOAT);
5144 for (i = 0; i < NUM_MACHINE_MODES; i++)
5146 movmem_optab[i] = CODE_FOR_nothing;
5147 cmpstr_optab[i] = CODE_FOR_nothing;
5148 cmpstrn_optab[i] = CODE_FOR_nothing;
5149 cmpmem_optab[i] = CODE_FOR_nothing;
5150 setmem_optab[i] = CODE_FOR_nothing;
5152 sync_add_optab[i] = CODE_FOR_nothing;
5153 sync_sub_optab[i] = CODE_FOR_nothing;
5154 sync_ior_optab[i] = CODE_FOR_nothing;
5155 sync_and_optab[i] = CODE_FOR_nothing;
5156 sync_xor_optab[i] = CODE_FOR_nothing;
5157 sync_nand_optab[i] = CODE_FOR_nothing;
5158 sync_old_add_optab[i] = CODE_FOR_nothing;
5159 sync_old_sub_optab[i] = CODE_FOR_nothing;
5160 sync_old_ior_optab[i] = CODE_FOR_nothing;
5161 sync_old_and_optab[i] = CODE_FOR_nothing;
5162 sync_old_xor_optab[i] = CODE_FOR_nothing;
5163 sync_old_nand_optab[i] = CODE_FOR_nothing;
5164 sync_new_add_optab[i] = CODE_FOR_nothing;
5165 sync_new_sub_optab[i] = CODE_FOR_nothing;
5166 sync_new_ior_optab[i] = CODE_FOR_nothing;
5167 sync_new_and_optab[i] = CODE_FOR_nothing;
5168 sync_new_xor_optab[i] = CODE_FOR_nothing;
5169 sync_new_nand_optab[i] = CODE_FOR_nothing;
5170 sync_compare_and_swap[i] = CODE_FOR_nothing;
5171 sync_compare_and_swap_cc[i] = CODE_FOR_nothing;
5172 sync_lock_test_and_set[i] = CODE_FOR_nothing;
5173 sync_lock_release[i] = CODE_FOR_nothing;
5175 #ifdef HAVE_SECONDARY_RELOADS
5176 reload_in_optab[i] = reload_out_optab[i] = CODE_FOR_nothing;
5180 /* Fill in the optabs with the insns we support. */
5183 /* Initialize the optabs with the names of the library functions. */
5184 init_integral_libfuncs (add_optab, "add", '3');
5185 init_floating_libfuncs (add_optab, "add", '3');
5186 init_integral_libfuncs (addv_optab, "addv", '3');
5187 init_floating_libfuncs (addv_optab, "add", '3');
5188 init_integral_libfuncs (sub_optab, "sub", '3');
5189 init_floating_libfuncs (sub_optab, "sub", '3');
5190 init_integral_libfuncs (subv_optab, "subv", '3');
5191 init_floating_libfuncs (subv_optab, "sub", '3');
5192 init_integral_libfuncs (smul_optab, "mul", '3');
5193 init_floating_libfuncs (smul_optab, "mul", '3');
5194 init_integral_libfuncs (smulv_optab, "mulv", '3');
5195 init_floating_libfuncs (smulv_optab, "mul", '3');
5196 init_integral_libfuncs (sdiv_optab, "div", '3');
5197 init_floating_libfuncs (sdiv_optab, "div", '3');
5198 init_integral_libfuncs (sdivv_optab, "divv", '3');
5199 init_integral_libfuncs (udiv_optab, "udiv", '3');
5200 init_integral_libfuncs (sdivmod_optab, "divmod", '4');
5201 init_integral_libfuncs (udivmod_optab, "udivmod", '4');
5202 init_integral_libfuncs (smod_optab, "mod", '3');
5203 init_integral_libfuncs (umod_optab, "umod", '3');
5204 init_floating_libfuncs (ftrunc_optab, "ftrunc", '2');
5205 init_integral_libfuncs (and_optab, "and", '3');
5206 init_integral_libfuncs (ior_optab, "ior", '3');
5207 init_integral_libfuncs (xor_optab, "xor", '3');
5208 init_integral_libfuncs (ashl_optab, "ashl", '3');
5209 init_integral_libfuncs (ashr_optab, "ashr", '3');
5210 init_integral_libfuncs (lshr_optab, "lshr", '3');
5211 init_integral_libfuncs (smin_optab, "min", '3');
5212 init_floating_libfuncs (smin_optab, "min", '3');
5213 init_integral_libfuncs (smax_optab, "max", '3');
5214 init_floating_libfuncs (smax_optab, "max", '3');
5215 init_integral_libfuncs (umin_optab, "umin", '3');
5216 init_integral_libfuncs (umax_optab, "umax", '3');
5217 init_integral_libfuncs (neg_optab, "neg", '2');
5218 init_floating_libfuncs (neg_optab, "neg", '2');
5219 init_integral_libfuncs (negv_optab, "negv", '2');
5220 init_floating_libfuncs (negv_optab, "neg", '2');
5221 init_integral_libfuncs (one_cmpl_optab, "one_cmpl", '2');
5222 init_integral_libfuncs (ffs_optab, "ffs", '2');
5223 init_integral_libfuncs (clz_optab, "clz", '2');
5224 init_integral_libfuncs (ctz_optab, "ctz", '2');
5225 init_integral_libfuncs (popcount_optab, "popcount", '2');
5226 init_integral_libfuncs (parity_optab, "parity", '2');
5228 /* Comparison libcalls for integers MUST come in pairs,
5230 init_integral_libfuncs (cmp_optab, "cmp", '2');
5231 init_integral_libfuncs (ucmp_optab, "ucmp", '2');
5232 init_floating_libfuncs (cmp_optab, "cmp", '2');
5234 /* EQ etc are floating point only. */
5235 init_floating_libfuncs (eq_optab, "eq", '2');
5236 init_floating_libfuncs (ne_optab, "ne", '2');
5237 init_floating_libfuncs (gt_optab, "gt", '2');
5238 init_floating_libfuncs (ge_optab, "ge", '2');
5239 init_floating_libfuncs (lt_optab, "lt", '2');
5240 init_floating_libfuncs (le_optab, "le", '2');
5241 init_floating_libfuncs (unord_optab, "unord", '2');
5243 init_floating_libfuncs (powi_optab, "powi", '2');
5246 init_interclass_conv_libfuncs (sfloat_optab, "float",
5247 MODE_INT, MODE_FLOAT);
5248 init_interclass_conv_libfuncs (ufloat_optab, "floatun",
5249 MODE_INT, MODE_FLOAT);
5250 init_interclass_conv_libfuncs (sfix_optab, "fix",
5251 MODE_FLOAT, MODE_INT);
5252 init_interclass_conv_libfuncs (ufix_optab, "fixuns",
5253 MODE_FLOAT, MODE_INT);
5255 /* sext_optab is also used for FLOAT_EXTEND. */
5256 init_intraclass_conv_libfuncs (sext_optab, "extend", MODE_FLOAT, true);
5257 init_intraclass_conv_libfuncs (trunc_optab, "trunc", MODE_FLOAT, false);
5259 /* Use cabs for double complex abs, since systems generally have cabs.
5260 Don't define any libcall for float complex, so that cabs will be used. */
5261 if (complex_double_type_node)
5262 abs_optab->handlers[TYPE_MODE (complex_double_type_node)].libfunc
5263 = init_one_libfunc ("cabs");
5265 /* The ffs function operates on `int'. */
5266 ffs_optab->handlers[(int) mode_for_size (INT_TYPE_SIZE, MODE_INT, 0)].libfunc
5267 = init_one_libfunc ("ffs");
5269 abort_libfunc = init_one_libfunc ("abort");
5270 memcpy_libfunc = init_one_libfunc ("memcpy");
5271 memmove_libfunc = init_one_libfunc ("memmove");
5272 memcmp_libfunc = init_one_libfunc ("memcmp");
5273 memset_libfunc = init_one_libfunc ("memset");
5274 setbits_libfunc = init_one_libfunc ("__setbits");
5276 #ifndef DONT_USE_BUILTIN_SETJMP
5277 setjmp_libfunc = init_one_libfunc ("__builtin_setjmp");
5278 longjmp_libfunc = init_one_libfunc ("__builtin_longjmp");
5280 setjmp_libfunc = init_one_libfunc ("setjmp");
5281 longjmp_libfunc = init_one_libfunc ("longjmp");
5283 unwind_sjlj_register_libfunc = init_one_libfunc ("_Unwind_SjLj_Register");
5284 unwind_sjlj_unregister_libfunc
5285 = init_one_libfunc ("_Unwind_SjLj_Unregister");
5287 /* For function entry/exit instrumentation. */
5288 profile_function_entry_libfunc
5289 = init_one_libfunc ("__cyg_profile_func_enter");
5290 profile_function_exit_libfunc
5291 = init_one_libfunc ("__cyg_profile_func_exit");
5293 gcov_flush_libfunc = init_one_libfunc ("__gcov_flush");
5295 if (HAVE_conditional_trap)
5296 trap_rtx = gen_rtx_fmt_ee (EQ, VOIDmode, NULL_RTX, NULL_RTX);
5298 /* Allow the target to add more libcalls or rename some, etc. */
5299 targetm.init_libfuncs ();
5304 /* Print information about the current contents of the optabs on
5308 debug_optab_libfuncs (void)
5314 /* Dump the arithmetic optabs. */
5315 for (i = 0; i != (int) OTI_MAX; i++)
5316 for (j = 0; j < NUM_MACHINE_MODES; ++j)
5319 struct optab_handlers *h;
5322 h = &o->handlers[j];
5325 gcc_assert (GET_CODE (h->libfunc) = SYMBOL_REF);
5326 fprintf (stderr, "%s\t%s:\t%s\n",
5327 GET_RTX_NAME (o->code),
5329 XSTR (h->libfunc, 0));
5333 /* Dump the conversion optabs. */
5334 for (i = 0; i < (int) COI_MAX; ++i)
5335 for (j = 0; j < NUM_MACHINE_MODES; ++j)
5336 for (k = 0; k < NUM_MACHINE_MODES; ++k)
5339 struct optab_handlers *h;
5341 o = &convert_optab_table[i];
5342 h = &o->handlers[j][k];
5345 gcc_assert (GET_CODE (h->libfunc) = SYMBOL_REF);
5346 fprintf (stderr, "%s\t%s\t%s:\t%s\n",
5347 GET_RTX_NAME (o->code),
5350 XSTR (h->libfunc, 0));
5358 /* Generate insns to trap with code TCODE if OP1 and OP2 satisfy condition
5359 CODE. Return 0 on failure. */
5362 gen_cond_trap (enum rtx_code code ATTRIBUTE_UNUSED, rtx op1,
5363 rtx op2 ATTRIBUTE_UNUSED, rtx tcode ATTRIBUTE_UNUSED)
5365 enum machine_mode mode = GET_MODE (op1);
5366 enum insn_code icode;
5369 if (!HAVE_conditional_trap)
5372 if (mode == VOIDmode)
5375 icode = cmp_optab->handlers[(int) mode].insn_code;
5376 if (icode == CODE_FOR_nothing)
5380 op1 = prepare_operand (icode, op1, 0, mode, mode, 0);
5381 op2 = prepare_operand (icode, op2, 1, mode, mode, 0);
5387 emit_insn (GEN_FCN (icode) (op1, op2));
5389 PUT_CODE (trap_rtx, code);
5390 gcc_assert (HAVE_conditional_trap);
5391 insn = gen_conditional_trap (trap_rtx, tcode);
5395 insn = get_insns ();
5402 /* Return rtx code for TCODE. Use UNSIGNEDP to select signed
5403 or unsigned operation code. */
5405 static enum rtx_code
5406 get_rtx_code (enum tree_code tcode, bool unsignedp)
5418 code = unsignedp ? LTU : LT;
5421 code = unsignedp ? LEU : LE;
5424 code = unsignedp ? GTU : GT;
5427 code = unsignedp ? GEU : GE;
5430 case UNORDERED_EXPR:
5461 /* Return comparison rtx for COND. Use UNSIGNEDP to select signed or
5462 unsigned operators. Do not generate compare instruction. */
5465 vector_compare_rtx (tree cond, bool unsignedp, enum insn_code icode)
5467 enum rtx_code rcode;
5469 rtx rtx_op0, rtx_op1;
5471 /* This is unlikely. While generating VEC_COND_EXPR, auto vectorizer
5472 ensures that condition is a relational operation. */
5473 gcc_assert (COMPARISON_CLASS_P (cond));
5475 rcode = get_rtx_code (TREE_CODE (cond), unsignedp);
5476 t_op0 = TREE_OPERAND (cond, 0);
5477 t_op1 = TREE_OPERAND (cond, 1);
5479 /* Expand operands. */
5480 rtx_op0 = expand_expr (t_op0, NULL_RTX, TYPE_MODE (TREE_TYPE (t_op0)), 1);
5481 rtx_op1 = expand_expr (t_op1, NULL_RTX, TYPE_MODE (TREE_TYPE (t_op1)), 1);
5483 if (!insn_data[icode].operand[4].predicate (rtx_op0, GET_MODE (rtx_op0))
5484 && GET_MODE (rtx_op0) != VOIDmode)
5485 rtx_op0 = force_reg (GET_MODE (rtx_op0), rtx_op0);
5487 if (!insn_data[icode].operand[5].predicate (rtx_op1, GET_MODE (rtx_op1))
5488 && GET_MODE (rtx_op1) != VOIDmode)
5489 rtx_op1 = force_reg (GET_MODE (rtx_op1), rtx_op1);
5491 return gen_rtx_fmt_ee (rcode, VOIDmode, rtx_op0, rtx_op1);
5494 /* Return insn code for VEC_COND_EXPR EXPR. */
5496 static inline enum insn_code
5497 get_vcond_icode (tree expr, enum machine_mode mode)
5499 enum insn_code icode = CODE_FOR_nothing;
5501 if (TYPE_UNSIGNED (TREE_TYPE (expr)))
5502 icode = vcondu_gen_code[mode];
5504 icode = vcond_gen_code[mode];
5508 /* Return TRUE iff, appropriate vector insns are available
5509 for vector cond expr expr in VMODE mode. */
5512 expand_vec_cond_expr_p (tree expr, enum machine_mode vmode)
5514 if (get_vcond_icode (expr, vmode) == CODE_FOR_nothing)
5519 /* Generate insns for VEC_COND_EXPR. */
5522 expand_vec_cond_expr (tree vec_cond_expr, rtx target)
5524 enum insn_code icode;
5525 rtx comparison, rtx_op1, rtx_op2, cc_op0, cc_op1;
5526 enum machine_mode mode = TYPE_MODE (TREE_TYPE (vec_cond_expr));
5527 bool unsignedp = TYPE_UNSIGNED (TREE_TYPE (vec_cond_expr));
5529 icode = get_vcond_icode (vec_cond_expr, mode);
5530 if (icode == CODE_FOR_nothing)
5533 if (!target || !insn_data[icode].operand[0].predicate (target, mode))
5534 target = gen_reg_rtx (mode);
5536 /* Get comparison rtx. First expand both cond expr operands. */
5537 comparison = vector_compare_rtx (TREE_OPERAND (vec_cond_expr, 0),
5539 cc_op0 = XEXP (comparison, 0);
5540 cc_op1 = XEXP (comparison, 1);
5541 /* Expand both operands and force them in reg, if required. */
5542 rtx_op1 = expand_expr (TREE_OPERAND (vec_cond_expr, 1),
5543 NULL_RTX, VOIDmode, 1);
5544 if (!insn_data[icode].operand[1].predicate (rtx_op1, mode)
5545 && mode != VOIDmode)
5546 rtx_op1 = force_reg (mode, rtx_op1);
5548 rtx_op2 = expand_expr (TREE_OPERAND (vec_cond_expr, 2),
5549 NULL_RTX, VOIDmode, 1);
5550 if (!insn_data[icode].operand[2].predicate (rtx_op2, mode)
5551 && mode != VOIDmode)
5552 rtx_op2 = force_reg (mode, rtx_op2);
5554 /* Emit instruction! */
5555 emit_insn (GEN_FCN (icode) (target, rtx_op1, rtx_op2,
5556 comparison, cc_op0, cc_op1));
5562 /* This is an internal subroutine of the other compare_and_swap expanders.
5563 MEM, OLD_VAL and NEW_VAL are as you'd expect for a compare-and-swap
5564 operation. TARGET is an optional place to store the value result of
5565 the operation. ICODE is the particular instruction to expand. Return
5566 the result of the operation. */
5569 expand_val_compare_and_swap_1 (rtx mem, rtx old_val, rtx new_val,
5570 rtx target, enum insn_code icode)
5572 enum machine_mode mode = GET_MODE (mem);
5575 if (!target || !insn_data[icode].operand[0].predicate (target, mode))
5576 target = gen_reg_rtx (mode);
5578 if (GET_MODE (old_val) != VOIDmode && GET_MODE (old_val) != mode)
5579 old_val = convert_modes (mode, GET_MODE (old_val), old_val, 1);
5580 if (!insn_data[icode].operand[2].predicate (old_val, mode))
5581 old_val = force_reg (mode, old_val);
5583 if (GET_MODE (new_val) != VOIDmode && GET_MODE (new_val) != mode)
5584 new_val = convert_modes (mode, GET_MODE (new_val), new_val, 1);
5585 if (!insn_data[icode].operand[3].predicate (new_val, mode))
5586 new_val = force_reg (mode, new_val);
5588 insn = GEN_FCN (icode) (target, mem, old_val, new_val);
5589 if (insn == NULL_RTX)
5596 /* Expand a compare-and-swap operation and return its value. */
5599 expand_val_compare_and_swap (rtx mem, rtx old_val, rtx new_val, rtx target)
5601 enum machine_mode mode = GET_MODE (mem);
5602 enum insn_code icode = sync_compare_and_swap[mode];
5604 if (icode == CODE_FOR_nothing)
5607 return expand_val_compare_and_swap_1 (mem, old_val, new_val, target, icode);
5610 /* Expand a compare-and-swap operation and store true into the result if
5611 the operation was successful and false otherwise. Return the result.
5612 Unlike other routines, TARGET is not optional. */
5615 expand_bool_compare_and_swap (rtx mem, rtx old_val, rtx new_val, rtx target)
5617 enum machine_mode mode = GET_MODE (mem);
5618 enum insn_code icode;
5619 rtx subtarget, label0, label1;
5621 /* If the target supports a compare-and-swap pattern that simultaneously
5622 sets some flag for success, then use it. Otherwise use the regular
5623 compare-and-swap and follow that immediately with a compare insn. */
5624 icode = sync_compare_and_swap_cc[mode];
5628 subtarget = expand_val_compare_and_swap_1 (mem, old_val, new_val,
5630 if (subtarget != NULL_RTX)
5634 case CODE_FOR_nothing:
5635 icode = sync_compare_and_swap[mode];
5636 if (icode == CODE_FOR_nothing)
5639 /* Ensure that if old_val == mem, that we're not comparing
5640 against an old value. */
5641 if (MEM_P (old_val))
5642 old_val = force_reg (mode, old_val);
5644 subtarget = expand_val_compare_and_swap_1 (mem, old_val, new_val,
5646 if (subtarget == NULL_RTX)
5649 emit_cmp_insn (subtarget, old_val, EQ, const0_rtx, mode, true);
5652 /* If the target has a sane STORE_FLAG_VALUE, then go ahead and use a
5653 setcc instruction from the beginning. We don't work too hard here,
5654 but it's nice to not be stupid about initial code gen either. */
5655 if (STORE_FLAG_VALUE == 1)
5657 icode = setcc_gen_code[EQ];
5658 if (icode != CODE_FOR_nothing)
5660 enum machine_mode cmode = insn_data[icode].operand[0].mode;
5664 if (!insn_data[icode].operand[0].predicate (target, cmode))
5665 subtarget = gen_reg_rtx (cmode);
5667 insn = GEN_FCN (icode) (subtarget);
5671 if (GET_MODE (target) != GET_MODE (subtarget))
5673 convert_move (target, subtarget, 1);
5681 /* Without an appropriate setcc instruction, use a set of branches to
5682 get 1 and 0 stored into target. Presumably if the target has a
5683 STORE_FLAG_VALUE that isn't 1, then this will get cleaned up by ifcvt. */
5685 label0 = gen_label_rtx ();
5686 label1 = gen_label_rtx ();
5688 emit_jump_insn (bcc_gen_fctn[EQ] (label0));
5689 emit_move_insn (target, const0_rtx);
5690 emit_jump_insn (gen_jump (label1));
5692 emit_label (label0);
5693 emit_move_insn (target, const1_rtx);
5694 emit_label (label1);
5699 /* This is a helper function for the other atomic operations. This function
5700 emits a loop that contains SEQ that iterates until a compare-and-swap
5701 operation at the end succeeds. MEM is the memory to be modified. SEQ is
5702 a set of instructions that takes a value from OLD_REG as an input and
5703 produces a value in NEW_REG as an output. Before SEQ, OLD_REG will be
5704 set to the current contents of MEM. After SEQ, a compare-and-swap will
5705 attempt to update MEM with NEW_REG. The function returns true when the
5706 loop was generated successfully. */
5709 expand_compare_and_swap_loop (rtx mem, rtx old_reg, rtx new_reg, rtx seq)
5711 enum machine_mode mode = GET_MODE (mem);
5712 enum insn_code icode;
5713 rtx label, cmp_reg, subtarget;
5715 /* The loop we want to generate looks like
5721 cmp_reg = compare-and-swap(mem, old_reg, new_reg)
5722 if (cmp_reg != old_reg)
5725 Note that we only do the plain load from memory once. Subsequent
5726 iterations use the value loaded by the compare-and-swap pattern. */
5728 label = gen_label_rtx ();
5729 cmp_reg = gen_reg_rtx (mode);
5731 emit_move_insn (cmp_reg, mem);
5733 emit_move_insn (old_reg, cmp_reg);
5737 /* If the target supports a compare-and-swap pattern that simultaneously
5738 sets some flag for success, then use it. Otherwise use the regular
5739 compare-and-swap and follow that immediately with a compare insn. */
5740 icode = sync_compare_and_swap_cc[mode];
5744 subtarget = expand_val_compare_and_swap_1 (mem, old_reg, new_reg,
5746 if (subtarget != NULL_RTX)
5748 gcc_assert (subtarget == cmp_reg);
5753 case CODE_FOR_nothing:
5754 icode = sync_compare_and_swap[mode];
5755 if (icode == CODE_FOR_nothing)
5758 subtarget = expand_val_compare_and_swap_1 (mem, old_reg, new_reg,
5760 if (subtarget == NULL_RTX)
5762 if (subtarget != cmp_reg)
5763 emit_move_insn (cmp_reg, subtarget);
5765 emit_cmp_insn (cmp_reg, old_reg, EQ, const0_rtx, mode, true);
5768 /* ??? Mark this jump predicted not taken? */
5769 emit_jump_insn (bcc_gen_fctn[NE] (label));
5774 /* This function generates the atomic operation MEM CODE= VAL. In this
5775 case, we do not care about any resulting value. Returns NULL if we
5776 cannot generate the operation. */
/* NOTE(review): the excerpt omits the return type, the `switch (code)'
   wrapper whose `case' labels select the optab lookups below, brace
   lines, and the returns.  Comments are limited to the visible code.  */
5779 expand_sync_operation (rtx mem, rtx val, enum rtx_code code)
5781 enum machine_mode mode = GET_MODE (mem);
5782 enum insn_code icode;
5785 /* Look to see if the target supports the operation directly. */
/* Each lookup below appears to be one arm of a switch on CODE,
   mapping the rtx code to the matching no-result sync optab.  */
5789 icode = sync_add_optab[mode];
5792 icode = sync_ior_optab[mode];
5795 icode = sync_xor_optab[mode];
5798 icode = sync_and_optab[mode];
5801 icode = sync_nand_optab[mode];
/* MINUS: if the target has no atomic subtract, fall back to an atomic
   add of the negated value.  */
5805 icode = sync_sub_optab[mode];
5806 if (icode == CODE_FOR_nothing)
5808 icode = sync_add_optab[mode];
5809 if (icode != CODE_FOR_nothing)
5811 val = expand_simple_unop (mode, NEG, val, NULL_RTX, 1);
5821 /* Generate the direct operation, if present. */
5822 if (icode != CODE_FOR_nothing)
/* Widen/convert VAL to the memory mode and satisfy the insn's
   operand predicate before emitting the pattern.  */
5824 if (GET_MODE (val) != VOIDmode && GET_MODE (val) != mode)
5825 val = convert_modes (mode, GET_MODE (val), val, 1);
5826 if (!insn_data[icode].operand[1].predicate (val, mode))
5827 val = force_reg (mode, val);
5829 insn = GEN_FCN (icode) (mem, val);
5837 /* Failing that, generate a compare-and-swap loop in which we perform the
5838 operation with normal arithmetic instructions. */
5839 if (sync_compare_and_swap[mode] != CODE_FOR_nothing)
/* T0 is the loop's "old value" register; T1 receives old-value CODE VAL
   (with an extra NOT for the NAND case, per the line below).  */
5841 rtx t0 = gen_reg_rtx (mode), t1;
5848 t1 = expand_simple_unop (mode, NOT, t1, NULL_RTX, true);
5851 t1 = expand_simple_binop (mode, code, t1, val, NULL_RTX,
5852 true, OPTAB_LIB_WIDEN);
/* Capture the arithmetic as a sequence and hand it to the generic
   compare-and-swap loop expander.  */
5854 insn = get_insns ();
5857 if (t1 != NULL && expand_compare_and_swap_loop (mem, t0, t1, insn))
5864 /* This function generates the atomic operation MEM CODE= VAL. In this
5865 case, we do care about the resulting value: if AFTER is true then
5866 return the value MEM holds after the operation, if AFTER is false
5867 then return the value MEM holds before the operation. TARGET is an
5868 optional place for the result value to be stored. */
/* NOTE(review): excerpt is missing the return type, the `switch (code)'
   structure around the optab lookups, the old/new icode selection
   logic (lines 5928-5950 region), brace lines, and returns.  */
5871 expand_sync_fetch_operation (rtx mem, rtx val, enum rtx_code code,
5872 bool after, rtx target)
5874 enum machine_mode mode = GET_MODE (mem);
5875 enum insn_code old_code, new_code, icode;
5879 /* Look to see if the target supports the operation directly. */
/* For each CODE, fetch both the fetch-old and fetch-new optabs;
   which one is preferred depends on AFTER (selection code not
   visible in this excerpt).  */
5883 old_code = sync_old_add_optab[mode];
5884 new_code = sync_new_add_optab[mode];
5887 old_code = sync_old_ior_optab[mode];
5888 new_code = sync_new_ior_optab[mode];
5891 old_code = sync_old_xor_optab[mode];
5892 new_code = sync_new_xor_optab[mode];
5895 old_code = sync_old_and_optab[mode];
5896 new_code = sync_new_and_optab[mode];
5899 old_code = sync_old_nand_optab[mode];
5900 new_code = sync_new_nand_optab[mode];
/* MINUS: if neither subtract form exists, retry with the add forms
   on the negated VAL.  */
5904 old_code = sync_old_sub_optab[mode];
5905 new_code = sync_new_sub_optab[mode];
5906 if (old_code == CODE_FOR_nothing && new_code == CODE_FOR_nothing)
5908 old_code = sync_old_add_optab[mode];
5909 new_code = sync_new_add_optab[mode];
5910 if (old_code != CODE_FOR_nothing || new_code != CODE_FOR_nothing)
5912 val = expand_simple_unop (mode, NEG, val, NULL_RTX, 1);
5922 /* If the target does supports the proper new/old operation, great. But
5923 if we only support the opposite old/new operation, check to see if we
5924 can compensate. In the case in which the old value is supported, then
5925 we can always perform the operation again with normal arithmetic. In
5926 the case in which the new value is supported, then we can only handle
5927 this in the case the operation is reversible. */
5932 if (icode == CODE_FOR_nothing)
5935 if (icode != CODE_FOR_nothing)
/* Reversible ops (PLUS/MINUS/XOR) let us recover the other value
   from the one the insn produces.  */
5942 if (icode == CODE_FOR_nothing
5943 && (code == PLUS || code == MINUS || code == XOR))
5946 if (icode != CODE_FOR_nothing)
5951 /* If we found something supported, great. */
5952 if (icode != CODE_FOR_nothing)
5954 if (!target || !insn_data[icode].operand[0].predicate (target, mode))
5955 target = gen_reg_rtx (mode);
/* Convert VAL to the memory mode and legitimize it for operand 2.  */
5957 if (GET_MODE (val) != VOIDmode && GET_MODE (val) != mode)
5958 val = convert_modes (mode, GET_MODE (val), val, 1);
5959 if (!insn_data[icode].operand[2].predicate (val, mode))
5960 val = force_reg (mode, val);
5962 insn = GEN_FCN (icode) (target, mem, val);
5967 /* If we need to compensate for using an operation with the
5968 wrong return value, do so now. */
/* Visible compensation fragment: MINUS is undone/redone specially,
   NAND needs a NOT, everything else reapplies CODE to TARGET.  */
5975 else if (code == MINUS)
5980 target = expand_simple_unop (mode, NOT, target, NULL_RTX, true);
5981 target = expand_simple_binop (mode, code, target, val, NULL_RTX,
5982 true, OPTAB_LIB_WIDEN);
5989 /* Failing that, generate a compare-and-swap loop in which we perform the
5990 operation with normal arithmetic instructions. */
5991 if (sync_compare_and_swap[mode] != CODE_FOR_nothing)
5993 rtx t0 = gen_reg_rtx (mode), t1;
5995 if (!target || !register_operand (target, mode))
5996 target = gen_reg_rtx (mode);
/* !AFTER: the pre-operation value (T0) is the result.  */
6001 emit_move_insn (target, t0);
6005 t1 = expand_simple_unop (mode, NOT, t1, NULL_RTX, true);
6008 t1 = expand_simple_binop (mode, code, t1, val, NULL_RTX,
6009 true, OPTAB_LIB_WIDEN);
/* AFTER: the post-operation value (T1) is the result.  */
6011 emit_move_insn (target, t1);
6013 insn = get_insns ();
6016 if (t1 != NULL && expand_compare_and_swap_loop (mem, t0, t1, insn))
6023 /* This function expands a test-and-set operation. Ideally we atomically
6024 store VAL in MEM and return the previous value in MEM. Some targets
6025 may not support this operation and only support VAL with the constant 1;
6026 in this case the return value will be 0/1, but the exact value
6027 stored in MEM is target defined. TARGET is an optional place to stick
6028 the return value. */
/* NOTE(review): excerpt omits the return type, brace lines, the
   emit/return tail of each branch, and the final failure return.  */
6031 expand_sync_lock_test_and_set (rtx mem, rtx val, rtx target)
6033 enum machine_mode mode = GET_MODE (mem);
6034 enum insn_code icode;
6037 /* If the target supports the test-and-set directly, great. */
6038 icode = sync_lock_test_and_set[mode];
6039 if (icode != CODE_FOR_nothing)
6041 if (!target || !insn_data[icode].operand[0].predicate (target, mode))
6042 target = gen_reg_rtx (mode);
/* Convert VAL to the memory mode and legitimize it for operand 2.  */
6044 if (GET_MODE (val) != VOIDmode && GET_MODE (val) != mode)
6045 val = convert_modes (mode, GET_MODE (val), val, 1);
6046 if (!insn_data[icode].operand[2].predicate (val, mode))
6047 val = force_reg (mode, val);
6049 insn = GEN_FCN (icode) (target, mem, val);
6057 /* Otherwise, use a compare-and-swap loop for the exchange. */
6058 if (sync_compare_and_swap[mode] != CODE_FOR_nothing)
6060 if (!target || !register_operand (target, mode))
6061 target = gen_reg_rtx (mode);
6062 if (GET_MODE (val) != VOIDmode && GET_MODE (val) != mode)
6063 val = convert_modes (mode, GET_MODE (val), val, 1);
/* NULL_RTX sequence: the loop body is just "new value = VAL", i.e. a
   plain atomic exchange; TARGET receives the old value.  */
6064 if (expand_compare_and_swap_loop (mem, target, val, NULL_RTX))
6071 #include "gt-optabs.h"