1 /* Expand the basic unary and binary arithmetic operations, for GNU compiler.
2 Copyright (C) 1987, 1988, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
3 1999, 2000, 2001, 2002, 2003, 2004, 2005 Free Software Foundation, Inc.
5 This file is part of GCC.
7 GCC is free software; you can redistribute it and/or modify it under
8 the terms of the GNU General Public License as published by the Free
9 Software Foundation; either version 2, or (at your option) any later
12 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
13 WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING. If not, write to the Free
19 Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
25 #include "coretypes.h"
29 /* Include insn-config.h before expr.h so that HAVE_conditional_move
30 is properly defined. */
31 #include "insn-config.h"
45 #include "basic-block.h"
48 /* Each optab contains info on how this target machine
49 can perform a particular operation
50 for all sizes and kinds of operands.
52 The operation to be performed is often specified
53 by passing one of these optabs as an argument.
55 See expr.h for documentation of these optabs. */
57 optab optab_table[OTI_MAX];
/* NOTE(review): table of libcall rtxes; presumably indexed by the LTI_*
   codes declared in expr.h -- confirm against that header.  */
59 rtx libfunc_table[LTI_MAX];
61 /* Tables of patterns for converting one mode to another. */
62 convert_optab convert_optab_table[COI_MAX];
64 /* Contains the optab used for each rtx code. */
65 optab code_to_optab[NUM_RTX_CODE + 1];
67 /* Indexed by the rtx-code for a conditional (eg. EQ, LT,...)
68 gives the gen_function to make a branch to test that condition. */
70 rtxfun bcc_gen_fctn[NUM_RTX_CODE];
72 /* Indexed by the rtx-code for a conditional (eg. EQ, LT,...)
73 gives the insn code to make a store-condition insn
74 to test that condition. */
76 enum insn_code setcc_gen_code[NUM_RTX_CODE];
78 #ifdef HAVE_conditional_move
79 /* Indexed by the machine mode, gives the insn code to make a conditional
80 move insn. This is not indexed by the rtx-code like bcc_gen_fctn and
81 setcc_gen_code to cut down on the number of named patterns. Consider a day
82 when a lot more rtx codes are conditional (eg: for the ARM). */
84 enum insn_code movcc_gen_code[NUM_MACHINE_MODES];
87 /* Indexed by the machine mode, gives the insn code for vector conditional
   operations.  (vcondu is presumably the unsigned variant -- confirm.)  */
90 enum insn_code vcond_gen_code[NUM_MACHINE_MODES];
91 enum insn_code vcondu_gen_code[NUM_MACHINE_MODES];
93 /* The insn generating function can not take an rtx_code argument.
94 TRAP_RTX is used as an rtx argument. Its code is replaced with
95 the code to be used in the trap insn and all other fields are ignored. */
96 static GTY(()) rtx trap_rtx;
/* Forward declarations for the static helpers defined later in this file.  */
98 static int add_equal_note (rtx, rtx, enum rtx_code, rtx, rtx);
99 static rtx widen_operand (rtx, enum machine_mode, enum machine_mode, int,
101 static void prepare_cmp_insn (rtx *, rtx *, enum rtx_code *, rtx,
102 enum machine_mode *, int *,
103 enum can_compare_purpose);
104 static enum insn_code can_fix_p (enum machine_mode, enum machine_mode, int,
106 static enum insn_code can_float_p (enum machine_mode, enum machine_mode, int);
107 static optab new_optab (void);
108 static convert_optab new_convert_optab (void);
109 static inline optab init_optab (enum rtx_code);
110 static inline optab init_optabv (enum rtx_code);
111 static inline convert_optab init_convert_optab (enum rtx_code);
112 static void init_libfuncs (optab, int, int, const char *, int);
113 static void init_integral_libfuncs (optab, const char *, int);
114 static void init_floating_libfuncs (optab, const char *, int);
115 static void init_interclass_conv_libfuncs (convert_optab, const char *,
116 enum mode_class, enum mode_class);
117 static void init_intraclass_conv_libfuncs (convert_optab, const char *,
118 enum mode_class, bool);
119 static void emit_cmp_and_jump_insn_1 (rtx, rtx, enum machine_mode,
120 enum rtx_code, int, rtx);
121 static void prepare_float_lib_cmp (rtx *, rtx *, enum rtx_code *,
122 enum machine_mode *, int *);
123 static rtx widen_clz (enum machine_mode, rtx, rtx);
124 static rtx expand_parity (enum machine_mode, rtx, rtx);
125 static enum rtx_code get_rtx_code (enum tree_code, bool);
126 static rtx vector_compare_rtx (tree, bool, enum insn_code);
/* Provide fallbacks when the target has no conditional trap pattern.
   The generator stub must never actually be reached (gcc_unreachable).  */
128 #ifndef HAVE_conditional_trap
129 #define HAVE_conditional_trap 0
130 #define gen_conditional_trap(a,b) (gcc_unreachable (), NULL_RTX)
133 /* Add a REG_EQUAL note to the last insn in INSNS. TARGET is being set to
134 the result of operation CODE applied to OP0 (and OP1 if it is a binary
137 If the last insn does not set TARGET, don't do anything, but return 1.
139 If a previous insn sets TARGET and TARGET is one of OP0 or OP1,
140 don't add the REG_EQUAL note but return 0. Our caller can then try
141 again, ensuring that TARGET is not one of the operands. */
144 add_equal_note (rtx insns, rtx target, enum rtx_code code, rtx op0, rtx op1)
146 rtx last_insn, insn, set;
149 gcc_assert (insns && INSN_P (insns) && NEXT_INSN (insns));
/* A REG_EQUAL note only makes sense for arithmetic, compare and
   unary rtx codes.  */
151 if (GET_RTX_CLASS (code) != RTX_COMM_ARITH
152 && GET_RTX_CLASS (code) != RTX_BIN_ARITH
153 && GET_RTX_CLASS (code) != RTX_COMM_COMPARE
154 && GET_RTX_CLASS (code) != RTX_COMPARE
155 && GET_RTX_CLASS (code) != RTX_UNARY)
158 if (GET_CODE (target) == ZERO_EXTRACT)
/* Walk forward to the last insn of the sequence.  */
161 for (last_insn = insns;
162 NEXT_INSN (last_insn) != NULL_RTX;
163 last_insn = NEXT_INSN (last_insn))
166 set = single_set (last_insn);
170 if (! rtx_equal_p (SET_DEST (set), target)
171 /* For a STRICT_LOW_PART, the REG_NOTE applies to what is inside it. */
172 && (GET_CODE (SET_DEST (set)) != STRICT_LOW_PART
173 || ! rtx_equal_p (XEXP (SET_DEST (set), 0), target)))
176 /* If TARGET is in OP0 or OP1, check if anything in SEQ sets TARGET
177 besides the last insn. */
178 if (reg_overlap_mentioned_p (target, op0)
179 || (op1 && reg_overlap_mentioned_p (target, op1)))
181 insn = PREV_INSN (last_insn);
182 while (insn != NULL_RTX)
184 if (reg_set_p (target, insn))
187 insn = PREV_INSN (insn);
/* Build the note rtx: unary codes take one operand, all other
   accepted codes take two.  */
191 if (GET_RTX_CLASS (code) == RTX_UNARY)
192 note = gen_rtx_fmt_e (code, GET_MODE (target), copy_rtx (op0));
194 note = gen_rtx_fmt_ee (code, GET_MODE (target), copy_rtx (op0), copy_rtx (op1));
196 set_unique_reg_note (last_insn, REG_EQUAL, note);
201 /* Widen OP to MODE and return the rtx for the widened operand. UNSIGNEDP
202 says whether OP is signed or unsigned. NO_EXTEND is nonzero if we need
203 not actually do a sign-extend or zero-extend, but can leave the
204 higher-order bits of the result rtx undefined, for example, in the case
205 of logical operations, but not right shifts. */
208 widen_operand (rtx op, enum machine_mode mode, enum machine_mode oldmode,
209 int unsignedp, int no_extend)
213 /* If we don't have to extend and this is a constant, return it. */
214 if (no_extend && GET_MODE (op) == VOIDmode)
217 /* If we must extend do so. If OP is a SUBREG for a promoted object, also
218 extend since it will be more efficient to do so unless the signedness of
219 a promoted object differs from our extension. */
221 || (GET_CODE (op) == SUBREG && SUBREG_PROMOTED_VAR_P (op)
222 && SUBREG_PROMOTED_UNSIGNED_P (op) == unsignedp))
223 return convert_modes (mode, oldmode, op, unsignedp);
225 /* If MODE is no wider than a single word, we return a paradoxical
   SUBREG.  */
227 if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
228 return gen_rtx_SUBREG (mode, force_reg (GET_MODE (op), op), 0);
230 /* Otherwise, get an object of MODE, clobber it, and set the low-order
   part to OP.  */
233 result = gen_reg_rtx (mode);
234 emit_insn (gen_rtx_CLOBBER (VOIDmode, result));
235 emit_move_insn (gen_lowpart (GET_MODE (op), result), op);
239 /* Return the optab used for computing the operation given by
240 the tree code, CODE. This function is not always usable (for
241 example, it cannot give complete results for multiplication
242 or division) but probably ought to be relied on more widely
243 throughout the expander. */
245 optab_for_tree_code (enum tree_code code, tree type)
257 return one_cmpl_optab;
266 return TYPE_UNSIGNED (type) ? umod_optab : smod_optab;
274 return TYPE_UNSIGNED (type) ? udiv_optab : sdiv_optab;
280 return TYPE_UNSIGNED (type) ? lshr_optab : ashr_optab;
289 return TYPE_UNSIGNED (type) ? umax_optab : smax_optab;
292 return TYPE_UNSIGNED (type) ? umin_optab : smin_optab;
294 case REALIGN_LOAD_EXPR:
295 return vec_realign_load_optab;
298 return TYPE_UNSIGNED (type) ? reduc_umax_optab : reduc_smax_optab;
301 return TYPE_UNSIGNED (type) ? reduc_umin_optab : reduc_smin_optab;
303 case REDUC_PLUS_EXPR:
304 return TYPE_UNSIGNED (type) ? reduc_uplus_optab : reduc_splus_optab;
306 case VEC_LSHIFT_EXPR:
307 return vec_shl_optab;
309 case VEC_RSHIFT_EXPR:
310 return vec_shr_optab;
/* Trapping ("v") optab variants are only used for signed integral
   types when -ftrapv is in effect.  */
316 trapv = flag_trapv && INTEGRAL_TYPE_P (type) && !TYPE_UNSIGNED (type);
320 return trapv ? addv_optab : add_optab;
323 return trapv ? subv_optab : sub_optab;
326 return trapv ? smulv_optab : smul_optab;
329 return trapv ? negv_optab : neg_optab;
332 return trapv ? absv_optab : abs_optab;
340 /* Generate code to perform an operation specified by TERNARY_OPTAB
341 on operands OP0, OP1 and OP2, with result having machine-mode MODE.
343 UNSIGNEDP is for the case where we have to widen the operands
344 to perform the operation. It says to use zero-extension.
346 If TARGET is nonzero, the value
347 is generated there, if it is convenient to do so.
348 In all cases an rtx is returned for the locus of the value;
349 this may or may not be TARGET. */
352 expand_ternary_op (enum machine_mode mode, optab ternary_optab, rtx op0,
353 rtx op1, rtx op2, rtx target, int unsignedp)
355 int icode = (int) ternary_optab->handlers[(int) mode].insn_code;
356 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
357 enum machine_mode mode1 = insn_data[icode].operand[2].mode;
358 enum machine_mode mode2 = insn_data[icode].operand[3].mode;
361 rtx xop0 = op0, xop1 = op1, xop2 = op2;
/* The caller is expected to have verified that the pattern exists.  */
363 gcc_assert (ternary_optab->handlers[(int) mode].insn_code
364 != CODE_FOR_nothing);
366 if (!target || !insn_data[icode].operand[0].predicate (target, mode))
367 temp = gen_reg_rtx (mode);
371 /* In case the insn wants input operands in modes different from
372 those of the actual operands, convert the operands. It would
373 seem that we don't need to convert CONST_INTs, but we do, so
374 that they're properly zero-extended, sign-extended or truncated
   for their mode.  */
377 if (GET_MODE (op0) != mode0 && mode0 != VOIDmode)
378 xop0 = convert_modes (mode0,
379 GET_MODE (op0) != VOIDmode
384 if (GET_MODE (op1) != mode1 && mode1 != VOIDmode)
385 xop1 = convert_modes (mode1,
386 GET_MODE (op1) != VOIDmode
391 if (GET_MODE (op2) != mode2 && mode2 != VOIDmode)
392 xop2 = convert_modes (mode2,
393 GET_MODE (op2) != VOIDmode
398 /* Now, if insn's predicates don't allow our operands, put them into
   pseudo registers.  */
401 if (!insn_data[icode].operand[1].predicate (xop0, mode0)
402 && mode0 != VOIDmode)
403 xop0 = copy_to_mode_reg (mode0, xop0);
405 if (!insn_data[icode].operand[2].predicate (xop1, mode1)
406 && mode1 != VOIDmode)
407 xop1 = copy_to_mode_reg (mode1, xop1);
409 if (!insn_data[icode].operand[3].predicate (xop2, mode2)
410 && mode2 != VOIDmode)
411 xop2 = copy_to_mode_reg (mode2, xop2);
/* Generate and emit the actual insn pattern.  */
413 pat = GEN_FCN (icode) (temp, xop0, xop1, xop2);
420 /* Like expand_binop, but return a constant rtx if the result can be
421 calculated at compile time. The arguments and return value are
422 otherwise the same as for expand_binop. */
425 simplify_expand_binop (enum machine_mode mode, optab binoptab,
426 rtx op0, rtx op1, rtx target, int unsignedp,
427 enum optab_methods methods)
/* Fold the operation entirely at compile time when both operands
   are constant; no insns are emitted in that case.  */
429 if (CONSTANT_P (op0) && CONSTANT_P (op1))
430 return simplify_gen_binary (binoptab->code, mode, op0, op1);
432 return expand_binop (mode, binoptab, op0, op1, target, unsignedp, methods);
435 /* Like simplify_expand_binop, but always put the result in TARGET.
436 Return true if the expansion succeeded. */
439 force_expand_binop (enum machine_mode mode, optab binoptab,
440 rtx op0, rtx op1, rtx target, int unsignedp,
441 enum optab_methods methods)
443 rtx x = simplify_expand_binop (mode, binoptab, op0, op1,
444 target, unsignedp, methods);
/* The result may have landed somewhere other than TARGET;
   copy it there if so.  */
448 emit_move_insn (target, x);
452 /* Generate insns for VEC_LSHIFT_EXPR, VEC_RSHIFT_EXPR. */
455 expand_vec_shift_expr (tree vec_shift_expr, rtx target)
457 enum insn_code icode;
458 rtx rtx_op1, rtx_op2;
459 enum machine_mode mode1;
460 enum machine_mode mode2;
461 enum machine_mode mode = TYPE_MODE (TREE_TYPE (vec_shift_expr));
462 tree vec_oprnd = TREE_OPERAND (vec_shift_expr, 0);
463 tree shift_oprnd = TREE_OPERAND (vec_shift_expr, 1);
/* Choose the whole-vector shift optab matching the tree code.  */
467 switch (TREE_CODE (vec_shift_expr))
469 case VEC_RSHIFT_EXPR:
470 shift_optab = vec_shr_optab;
472 case VEC_LSHIFT_EXPR:
473 shift_optab = vec_shl_optab;
/* Look up the target's insn pattern for this shift in MODE; the
   caller must only reach here when the target supports it.  */
479 icode = (int) shift_optab->handlers[(int) mode].insn_code;
480 gcc_assert (icode != CODE_FOR_nothing);
482 mode1 = insn_data[icode].operand[1].mode;
483 mode2 = insn_data[icode].operand[2].mode;
485 rtx_op1 = expand_expr (vec_oprnd, NULL_RTX, VOIDmode, EXPAND_NORMAL);
486 if (!(*insn_data[icode].operand[1].predicate) (rtx_op1, mode1)
487 && mode1 != VOIDmode)
488 rtx_op1 = force_reg (mode1, rtx_op1);
490 rtx_op2 = expand_expr (shift_oprnd, NULL_RTX, VOIDmode, EXPAND_NORMAL);
491 if (!(*insn_data[icode].operand[2].predicate) (rtx_op2, mode2)
492 && mode2 != VOIDmode)
493 rtx_op2 = force_reg (mode2, rtx_op2);
/* Make sure the target is acceptable to the pattern's output
   predicate, otherwise use a fresh pseudo.  */
496 || ! (*insn_data[icode].operand[0].predicate) (target, mode))
497 target = gen_reg_rtx (mode);
499 /* Emit instruction */
500 pat = GEN_FCN (icode) (target, rtx_op1, rtx_op2);
507 /* This subroutine of expand_doubleword_shift handles the cases in which
508 the effective shift value is >= BITS_PER_WORD. The arguments and return
509 value are the same as for the parent routine, except that SUPERWORD_OP1
510 is the shift count to use when shifting OUTOF_INPUT into INTO_TARGET.
511 INTO_TARGET may be null if the caller has decided to calculate it. */
514 expand_superword_shift (optab binoptab, rtx outof_input, rtx superword_op1,
515 rtx outof_target, rtx into_target,
516 int unsignedp, enum optab_methods methods)
518 if (into_target != 0)
519 if (!force_expand_binop (word_mode, binoptab, outof_input, superword_op1,
520 into_target, unsignedp, methods))
523 if (outof_target != 0)
525 /* For a signed right shift, we must fill OUTOF_TARGET with copies
526 of the sign bit, otherwise we must fill it with zeros. */
527 if (binoptab != ashr_optab)
528 emit_move_insn (outof_target, CONST0_RTX (word_mode));
/* Arithmetic right shift by BITS_PER_WORD - 1 replicates the sign
   bit across the whole word.  */
530 if (!force_expand_binop (word_mode, binoptab,
531 outof_input, GEN_INT (BITS_PER_WORD - 1),
532 outof_target, unsignedp, methods))
538 /* This subroutine of expand_doubleword_shift handles the cases in which
539 the effective shift value is < BITS_PER_WORD. The arguments and return
540 value are the same as for the parent routine. */
543 expand_subword_shift (enum machine_mode op1_mode, optab binoptab,
544 rtx outof_input, rtx into_input, rtx op1,
545 rtx outof_target, rtx into_target,
546 int unsignedp, enum optab_methods methods,
547 unsigned HOST_WIDE_INT shift_mask)
549 optab reverse_unsigned_shift, unsigned_shift;
/* The "carry" bits move in the opposite direction to the main shift,
   and the into-half shift is always logical.  */
552 reverse_unsigned_shift = (binoptab == ashl_optab ? lshr_optab : ashl_optab);
553 unsigned_shift = (binoptab == ashl_optab ? ashl_optab : lshr_optab);
555 /* The low OP1 bits of INTO_TARGET come from the high bits of OUTOF_INPUT.
556 We therefore need to shift OUTOF_INPUT by (BITS_PER_WORD - OP1) bits in
557 the opposite direction to BINOPTAB. */
558 if (CONSTANT_P (op1) || shift_mask >= BITS_PER_WORD)
560 carries = outof_input;
561 tmp = immed_double_const (BITS_PER_WORD, 0, op1_mode);
562 tmp = simplify_expand_binop (op1_mode, sub_optab, tmp, op1,
567 /* We must avoid shifting by BITS_PER_WORD bits since that is either
568 the same as a zero shift (if shift_mask == BITS_PER_WORD - 1) or
569 has unknown behavior. Do a single shift first, then shift by the
570 remainder. It's OK to use ~OP1 as the remainder if shift counts
571 are truncated to the mode size. */
572 carries = expand_binop (word_mode, reverse_unsigned_shift,
573 outof_input, const1_rtx, 0, unsignedp, methods);
574 if (shift_mask == BITS_PER_WORD - 1)
576 tmp = immed_double_const (-1, -1, op1_mode);
577 tmp = simplify_expand_binop (op1_mode, xor_optab, op1, tmp,
582 tmp = immed_double_const (BITS_PER_WORD - 1, 0, op1_mode);
583 tmp = simplify_expand_binop (op1_mode, sub_optab, tmp, op1,
/* Bail out if either subexpression failed to expand.  */
587 if (tmp == 0 || carries == 0)
589 carries = expand_binop (word_mode, reverse_unsigned_shift,
590 carries, tmp, 0, unsignedp, methods);
594 /* Shift INTO_INPUT logically by OP1. This is the last use of INTO_INPUT
595 so the result can go directly into INTO_TARGET if convenient. */
596 tmp = expand_binop (word_mode, unsigned_shift, into_input, op1,
597 into_target, unsignedp, methods);
601 /* Now OR in the bits carried over from OUTOF_INPUT. */
602 if (!force_expand_binop (word_mode, ior_optab, tmp, carries,
603 into_target, unsignedp, methods))
606 /* Use a standard word_mode shift for the out-of half. */
607 if (outof_target != 0)
608 if (!force_expand_binop (word_mode, binoptab, outof_input, op1,
609 outof_target, unsignedp, methods))
616 #ifdef HAVE_conditional_move
617 /* Try implementing expand_doubleword_shift using conditional moves.
618 The shift is by < BITS_PER_WORD if (CMP_CODE CMP1 CMP2) is true,
619 otherwise it is by >= BITS_PER_WORD. SUBWORD_OP1 and SUPERWORD_OP1
620 are the shift counts to use in the former and latter case. All other
621 arguments are the same as the parent routine. */
624 expand_doubleword_shift_condmove (enum machine_mode op1_mode, optab binoptab,
625 enum rtx_code cmp_code, rtx cmp1, rtx cmp2,
626 rtx outof_input, rtx into_input,
627 rtx subword_op1, rtx superword_op1,
628 rtx outof_target, rtx into_target,
629 int unsignedp, enum optab_methods methods,
630 unsigned HOST_WIDE_INT shift_mask)
632 rtx outof_superword, into_superword;
634 /* Put the superword version of the output into OUTOF_SUPERWORD and
   INTO_SUPERWORD.  */
636 outof_superword = outof_target != 0 ? gen_reg_rtx (word_mode) : 0;
637 if (outof_target != 0 && subword_op1 == superword_op1)
639 /* The value INTO_TARGET >> SUBWORD_OP1, which we later store in
640 OUTOF_TARGET, is the same as the value of INTO_SUPERWORD. */
641 into_superword = outof_target;
642 if (!expand_superword_shift (binoptab, outof_input, superword_op1,
643 outof_superword, 0, unsignedp, methods))
648 into_superword = gen_reg_rtx (word_mode);
649 if (!expand_superword_shift (binoptab, outof_input, superword_op1,
650 outof_superword, into_superword,
655 /* Put the subword version directly in OUTOF_TARGET and INTO_TARGET. */
656 if (!expand_subword_shift (op1_mode, binoptab,
657 outof_input, into_input, subword_op1,
658 outof_target, into_target,
659 unsignedp, methods, shift_mask))
662 /* Select between them. Do the INTO half first because INTO_SUPERWORD
663 might be the current value of OUTOF_TARGET. */
664 if (!emit_conditional_move (into_target, cmp_code, cmp1, cmp2, op1_mode,
665 into_target, into_superword, word_mode, false))
668 if (outof_target != 0)
669 if (!emit_conditional_move (outof_target, cmp_code, cmp1, cmp2, op1_mode,
670 outof_target, outof_superword,
678 /* Expand a doubleword shift (ashl, ashr or lshr) using word-mode shifts.
679 OUTOF_INPUT and INTO_INPUT are the two word-sized halves of the first
680 input operand; the shift moves bits in the direction OUTOF_INPUT->
681 INTO_TARGET. OUTOF_TARGET and INTO_TARGET are the equivalent words
682 of the target. OP1 is the shift count and OP1_MODE is its mode.
683 If OP1 is constant, it will have been truncated as appropriate
684 and is known to be nonzero.
686 If SHIFT_MASK is zero, the result of word shifts is undefined when the
687 shift count is outside the range [0, BITS_PER_WORD). This routine must
688 avoid generating such shifts for OP1s in the range [0, BITS_PER_WORD * 2).
690 If SHIFT_MASK is nonzero, all word-mode shift counts are effectively
691 masked by it and shifts in the range [BITS_PER_WORD, SHIFT_MASK) will
692 fill with zeros or sign bits as appropriate.
694 If SHIFT_MASK is BITS_PER_WORD - 1, this routine will synthesize
695 a doubleword shift whose equivalent mask is BITS_PER_WORD * 2 - 1.
696 Doing this preserves semantics required by SHIFT_COUNT_TRUNCATED.
697 In all other cases, shifts by values outside [0, BITS_PER_UNIT * 2)
700 BINOPTAB, UNSIGNEDP and METHODS are as for expand_binop. This function
701 may not use INTO_INPUT after modifying INTO_TARGET, and similarly for
702 OUTOF_INPUT and OUTOF_TARGET. OUTOF_TARGET can be null if the parent
703 function wants to calculate it itself.
705 Return true if the shift could be successfully synthesized. */
708 expand_doubleword_shift (enum machine_mode op1_mode, optab binoptab,
709 rtx outof_input, rtx into_input, rtx op1,
710 rtx outof_target, rtx into_target,
711 int unsignedp, enum optab_methods methods,
712 unsigned HOST_WIDE_INT shift_mask)
714 rtx superword_op1, tmp, cmp1, cmp2;
715 rtx subword_label, done_label;
716 enum rtx_code cmp_code;
718 /* See if word-mode shifts by BITS_PER_WORD...BITS_PER_WORD * 2 - 1 will
719 fill the result with sign or zero bits as appropriate. If so, the value
720 of OUTOF_TARGET will always be (SHIFT OUTOF_INPUT OP1). Recursively call
721 this routine to calculate INTO_TARGET (which depends on both OUTOF_INPUT
722 and INTO_INPUT), then emit code to set up OUTOF_TARGET.
724 This isn't worthwhile for constant shifts since the optimizers will
725 cope better with in-range shift counts. */
726 if (shift_mask >= BITS_PER_WORD
728 && !CONSTANT_P (op1))
730 if (!expand_doubleword_shift (op1_mode, binoptab,
731 outof_input, into_input, op1,
733 unsignedp, methods, shift_mask))
735 if (!force_expand_binop (word_mode, binoptab, outof_input, op1,
736 outof_target, unsignedp, methods))
741 /* Set CMP_CODE, CMP1 and CMP2 so that the rtx (CMP_CODE CMP1 CMP2)
742 is true when the effective shift value is less than BITS_PER_WORD.
743 Set SUPERWORD_OP1 to the shift count that should be used to shift
744 OUTOF_INPUT into INTO_TARGET when the condition is false. */
/* TMP holds the constant BITS_PER_WORD in OP1_MODE.  */
745 tmp = immed_double_const (BITS_PER_WORD, 0, op1_mode);
746 if (!CONSTANT_P (op1) && shift_mask == BITS_PER_WORD - 1)
748 /* Set CMP1 to OP1 & BITS_PER_WORD. The result is zero iff OP1
749 is a subword shift count. */
750 cmp1 = simplify_expand_binop (op1_mode, and_optab, op1, tmp,
752 cmp2 = CONST0_RTX (op1_mode);
758 /* Set CMP1 to OP1 - BITS_PER_WORD. */
759 cmp1 = simplify_expand_binop (op1_mode, sub_optab, op1, tmp,
761 cmp2 = CONST0_RTX (op1_mode);
763 superword_op1 = cmp1;
768 /* If we can compute the condition at compile time, pick the
769 appropriate subroutine. */
770 tmp = simplify_relational_operation (cmp_code, SImode, op1_mode, cmp1, cmp2);
771 if (tmp != 0 && GET_CODE (tmp) == CONST_INT)
773 if (tmp == const0_rtx)
774 return expand_superword_shift (binoptab, outof_input, superword_op1,
775 outof_target, into_target,
778 return expand_subword_shift (op1_mode, binoptab,
779 outof_input, into_input, op1,
780 outof_target, into_target,
781 unsignedp, methods, shift_mask);
784 #ifdef HAVE_conditional_move
785 /* Try using conditional moves to generate straight-line code. */
787 rtx start = get_last_insn ();
788 if (expand_doubleword_shift_condmove (op1_mode, binoptab,
789 cmp_code, cmp1, cmp2,
790 outof_input, into_input,
792 outof_target, into_target,
793 unsignedp, methods, shift_mask))
795 delete_insns_since (start);
799 /* As a last resort, use branches to select the correct alternative. */
800 subword_label = gen_label_rtx ();
801 done_label = gen_label_rtx ();
803 do_compare_rtx_and_jump (cmp1, cmp2, cmp_code, false, op1_mode,
804 0, 0, subword_label);
806 if (!expand_superword_shift (binoptab, outof_input, superword_op1,
807 outof_target, into_target,
811 emit_jump_insn (gen_jump (done_label));
813 emit_label (subword_label);
815 if (!expand_subword_shift (op1_mode, binoptab,
816 outof_input, into_input, op1,
817 outof_target, into_target,
818 unsignedp, methods, shift_mask))
821 emit_label (done_label);
825 /* Subroutine of expand_binop. Perform a double word multiplication of
826 operands OP0 and OP1 both of mode MODE, which is exactly twice as wide
827 as the target's word_mode. This function return NULL_RTX if anything
828 goes wrong, in which case it may have already emitted instructions
829 which need to be deleted.
831 If we want to multiply two two-word values and have normal and widening
832 multiplies of single-word values, we can do this with three smaller
833 multiplications. Note that we do not make a REG_NO_CONFLICT block here
834 because we are not operating on one word at a time.
836 The multiplication proceeds as follows:
837 _______________________
838 [__op0_high_|__op0_low__]
839 _______________________
840 * [__op1_high_|__op1_low__]
841 _______________________________________________
842 _______________________
843 (1) [__op0_low__*__op1_low__]
844 _______________________
845 (2a) [__op0_low__*__op1_high_]
846 _______________________
847 (2b) [__op0_high_*__op1_low__]
848 _______________________
849 (3) [__op0_high_*__op1_high_]
852 This gives a 4-word result. Since we are only interested in the
853 lower 2 words, partial result (3) and the upper words of (2a) and
854 (2b) don't need to be calculated. Hence (2a) and (2b) can be
855 calculated using non-widening multiplication.
857 (1), however, needs to be calculated with an unsigned widening
858 multiplication. If this operation is not directly supported we
859 try using a signed widening multiplication and adjust the result.
860 This adjustment works as follows:
862 If both operands are positive then no adjustment is needed.
864 If the operands have different signs, for example op0_low < 0 and
865 op1_low >= 0, the instruction treats the most significant bit of
866 op0_low as a sign bit instead of a bit with significance
867 2**(BITS_PER_WORD-1), i.e. the instruction multiplies op1_low
868 with 2**BITS_PER_WORD - op0_low, and two's complements the
869 result. Conclusion: We need to add op1_low * 2**BITS_PER_WORD to
872 Similarly, if both operands are negative, we need to add
873 (op0_low + op1_low) * 2**BITS_PER_WORD.
875 We use a trick to adjust quickly. We logically shift op0_low right
876 (op1_low) BITS_PER_WORD-1 steps to get 0 or 1, and add this to
877 op0_high (op1_high) before it is used to calculate 2b (2a). If no
878 logical shift exists, we do an arithmetic right shift and subtract
   accordingly.  */
882 expand_doubleword_mult (enum machine_mode mode, rtx op0, rtx op1, rtx target,
883 bool umulp, enum optab_methods methods)
885 int low = (WORDS_BIG_ENDIAN ? 1 : 0);
886 int high = (WORDS_BIG_ENDIAN ? 0 : 1);
887 rtx wordm1 = umulp ? NULL_RTX : GEN_INT (BITS_PER_WORD - 1);
888 rtx product, adjust, product_high, temp;
/* Split both operands into their high and low word_mode subwords.  */
890 rtx op0_high = operand_subword_force (op0, high, mode);
891 rtx op0_low = operand_subword_force (op0, low, mode);
892 rtx op1_high = operand_subword_force (op1, high, mode);
893 rtx op1_low = operand_subword_force (op1, low, mode);
895 /* If we're using an unsigned multiply to directly compute the product
896 of the low-order words of the operands and perform any required
897 adjustments of the operands, we begin by trying two more multiplications
898 and then computing the appropriate sum.
900 We have checked above that the required addition is provided.
901 Full-word addition will normally always succeed, especially if
902 it is provided at all, so we don't worry about its failure. The
903 multiplication may well fail, however, so we do handle that. */
907 /* ??? This could be done with emit_store_flag where available. */
908 temp = expand_binop (word_mode, lshr_optab, op0_low, wordm1,
909 NULL_RTX, 1, methods);
911 op0_high = expand_binop (word_mode, add_optab, op0_high, temp,
912 NULL_RTX, 0, OPTAB_DIRECT);
915 temp = expand_binop (word_mode, ashr_optab, op0_low, wordm1,
916 NULL_RTX, 0, methods);
919 op0_high = expand_binop (word_mode, sub_optab, op0_high, temp,
920 NULL_RTX, 0, OPTAB_DIRECT);
/* Partial product (2b): op0_high * op1_low, non-widening.  */
927 adjust = expand_binop (word_mode, smul_optab, op0_high, op1_low,
928 NULL_RTX, 0, OPTAB_DIRECT);
932 /* OP0_HIGH should now be dead. */
936 /* ??? This could be done with emit_store_flag where available. */
937 temp = expand_binop (word_mode, lshr_optab, op1_low, wordm1,
938 NULL_RTX, 1, methods);
940 op1_high = expand_binop (word_mode, add_optab, op1_high, temp,
941 NULL_RTX, 0, OPTAB_DIRECT);
944 temp = expand_binop (word_mode, ashr_optab, op1_low, wordm1,
945 NULL_RTX, 0, methods);
948 op1_high = expand_binop (word_mode, sub_optab, op1_high, temp,
949 NULL_RTX, 0, OPTAB_DIRECT);
/* Partial product (2a): op1_high * op0_low, non-widening.  */
956 temp = expand_binop (word_mode, smul_optab, op1_high, op0_low,
957 NULL_RTX, 0, OPTAB_DIRECT);
961 /* OP1_HIGH should now be dead. */
963 adjust = expand_binop (word_mode, add_optab, adjust, temp,
964 adjust, 0, OPTAB_DIRECT);
966 if (target && !REG_P (target))
/* Partial product (1): the widening low*low multiply, unsigned or
   signed depending on UMULP.  */
970 product = expand_binop (mode, umul_widen_optab, op0_low, op1_low,
971 target, 1, OPTAB_DIRECT);
973 product = expand_binop (mode, smul_widen_optab, op0_low, op1_low,
974 target, 1, OPTAB_DIRECT);
/* Fold the accumulated adjustment into the high word of the product.  */
979 product_high = operand_subword (product, high, 1, mode);
980 adjust = expand_binop (word_mode, add_optab, product_high, adjust,
981 REG_P (product_high) ? product_high : adjust,
983 emit_move_insn (product_high, adjust);
987 /* Wrapper around expand_binop which takes an rtx code to specify
988 the operation to perform, not an optab pointer. All other
989 arguments are the same. */
991 expand_simple_binop (enum machine_mode mode, enum rtx_code code, rtx op0,
992 rtx op1, rtx target, int unsignedp,
993 enum optab_methods methods)
/* Map the rtx code to its optab via the global code_to_optab table.  */
995 optab binop = code_to_optab[(int) code];
998 return expand_binop (mode, binop, op0, op1, target, unsignedp, methods);
1002 /* Return whether OP0 and OP1 should be swapped when expanding a commutative
1003 binop. Order them according to commutative_operand_precedence and, if
1004 possible, try to put TARGET first. */
1006 swap_commutative_operands_with_target (rtx target, rtx op0, rtx op1)
1008 int op0_prec = commutative_operand_precedence (op0);
1009 int op1_prec = commutative_operand_precedence (op1);
1011 if (op0_prec < op1_prec)
1014 if (op0_prec > op1_prec)
1017 /* With equal precedence, both orders are ok, but try to put the
   target operand first.  */
1019 return target && rtx_equal_p (op1, target);
1023 /* Generate code to perform an operation specified by BINOPTAB
1024 on operands OP0 and OP1, with result having machine-mode MODE.
1026 UNSIGNEDP is for the case where we have to widen the operands
1027 to perform the operation. It says to use zero-extension.
1029 If TARGET is nonzero, the value
1030 is generated there, if it is convenient to do so.
1031 In all cases an rtx is returned for the locus of the value;
1032 this may or may not be TARGET. */
/* METHODS restricts which strategies may be tried below: a direct insn
   in MODE, widening to a wider mode, word-at-a-time synthesis, or a
   library call (OPTAB_LIB / OPTAB_LIB_WIDEN).  */
1035 expand_binop (enum machine_mode mode, optab binoptab, rtx op0, rtx op1,
1036 rtx target, int unsignedp, enum optab_methods methods)
/* NEXT_METHODS is passed to recursive word-mode sub-expansions; library
   methods are demoted to OPTAB_WIDEN so sub-pieces never make libcalls.  */
1038 enum optab_methods next_methods
1039 = (methods == OPTAB_LIB || methods == OPTAB_LIB_WIDEN
1040 ? OPTAB_WIDEN : methods);
1041 enum mode_class class;
1042 enum machine_mode wider_mode;
1044 int commutative_op = 0;
1045 int shift_op = (binoptab->code == ASHIFT
1046 || binoptab->code == ASHIFTRT
1047 || binoptab->code == LSHIFTRT
1048 || binoptab->code == ROTATE
1049 || binoptab->code == ROTATERT);
/* Remember the insn-stream position on entry; everything emitted is
   deleted back to here if no strategy succeeds.  */
1050 rtx entry_last = get_last_insn ();
1053 class = GET_MODE_CLASS (mode);
1055 /* If subtracting an integer constant, convert this into an addition of
1056 the negated constant. */
1058 if (binoptab == sub_optab && GET_CODE (op1) == CONST_INT)
1060 op1 = negate_rtx (mode, op1);
1061 binoptab = add_optab;
1064 /* If we are inside an appropriately-short loop and we are optimizing,
1065 force expensive constants into a register. */
1066 if (CONSTANT_P (op0) && optimize
1067 && rtx_cost (op0, binoptab->code) > COSTS_N_INSNS (1))
1069 if (GET_MODE (op0) != VOIDmode)
1070 op0 = convert_modes (mode, VOIDmode, op0, unsignedp)
1071 op0 = force_reg (mode, op0);
/* Shift counts are deliberately left as constants (! shift_op).  */
1074 if (CONSTANT_P (op1) && optimize
1075 && ! shift_op && rtx_cost (op1, binoptab->code) > COSTS_N_INSNS (1))
1077 if (GET_MODE (op1) != VOIDmode)
1078 op1 = convert_modes (mode, VOIDmode, op1, unsignedp);
1079 op1 = force_reg (mode, op1);
1082 /* Record where to delete back to if we backtrack. */
1083 last = get_last_insn ();
1085 /* If operation is commutative, canonicalize the order of the operands. */
1086 if (GET_RTX_CLASS (binoptab->code) == RTX_COMM_ARITH
1087 || binoptab == smul_widen_optab
1088 || binoptab == umul_widen_optab
1089 || binoptab == smul_highpart_optab
1090 || binoptab == umul_highpart_optab)
1093 if (swap_commutative_operands_with_target (target, op0, op1))
1101 /* If we can do it with a three-operand insn, do so. */
1103 if (methods != OPTAB_MUST_WIDEN
1104 && binoptab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
1106 int icode = (int) binoptab->handlers[(int) mode].insn_code;
1107 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
1108 enum machine_mode mode1 = insn_data[icode].operand[2].mode;
1110 rtx xop0 = op0, xop1 = op1;
1115 temp = gen_reg_rtx (mode);
1117 /* If it is a commutative operator and the modes would match
1118 if we would swap the operands, we can save the conversions. */
1121 if (GET_MODE (op0) != mode0 && GET_MODE (op1) != mode1
1122 && GET_MODE (op0) == mode1 && GET_MODE (op1) == mode0)
1126 tmp = op0; op0 = op1; op1 = tmp;
1127 tmp = xop0; xop0 = xop1; xop1 = tmp;
1131 /* In case the insn wants input operands in modes different from
1132 those of the actual operands, convert the operands. It would
1133 seem that we don't need to convert CONST_INTs, but we do, so
1134 that they're properly zero-extended, sign-extended or truncated
1137 if (GET_MODE (op0) != mode0 && mode0 != VOIDmode)
1138 xop0 = convert_modes (mode0,
1139 GET_MODE (op0) != VOIDmode
1144 if (GET_MODE (op1) != mode1 && mode1 != VOIDmode)
1145 xop1 = convert_modes (mode1,
1146 GET_MODE (op1) != VOIDmode
1151 /* Now, if insn's predicates don't allow our operands, put them into
1154 if (!insn_data[icode].operand[1].predicate (xop0, mode0)
1155 && mode0 != VOIDmode)
1156 xop0 = copy_to_mode_reg (mode0, xop0);
1158 if (!insn_data[icode].operand[2].predicate (xop1, mode1)
1159 && mode1 != VOIDmode)
1160 xop1 = copy_to_mode_reg (mode1, xop1);
1162 if (!insn_data[icode].operand[0].predicate (temp, mode))
1163 temp = gen_reg_rtx (mode);
1165 pat = GEN_FCN (icode) (temp, xop0, xop1);
1168 /* If PAT is composed of more than one insn, try to add an appropriate
1169 REG_EQUAL note to it. If we can't because TEMP conflicts with an
1170 operand, call ourselves again, this time without a target. */
1171 if (INSN_P (pat) && NEXT_INSN (pat) != NULL_RTX
1172 && ! add_equal_note (pat, temp, binoptab->code, xop0, xop1))
1174 delete_insns_since (last);
1175 return expand_binop (mode, binoptab, op0, op1, NULL_RTX,
1176 unsignedp, methods);
1183 delete_insns_since (last);
1186 /* If this is a multiply, see if we can do a widening operation that
1187 takes operands of this mode and makes a wider mode. */
1189 if (binoptab == smul_optab && GET_MODE_WIDER_MODE (mode) != VOIDmode
1190 && (((unsignedp ? umul_widen_optab : smul_widen_optab)
1191 ->handlers[(int) GET_MODE_WIDER_MODE (mode)].insn_code)
1192 != CODE_FOR_nothing))
1194 temp = expand_binop (GET_MODE_WIDER_MODE (mode),
1195 unsignedp ? umul_widen_optab : smul_widen_optab,
1196 op0, op1, NULL_RTX, unsignedp, OPTAB_DIRECT);
/* Truncate the widened product back to MODE; use a cheap lowpart
   when truncation is a no-op on this target.  */
1200 if (GET_MODE_CLASS (mode) == MODE_INT
1201 && TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode),
1202 GET_MODE_BITSIZE (GET_MODE (temp))))
1203 return gen_lowpart (mode, temp);
1205 return convert_to_mode (mode, temp, unsignedp);
1209 /* Look for a wider mode of the same class for which we think we
1210 can open-code the operation. Check for a widening multiply at the
1211 wider mode as well. */
1213 if ((class == MODE_INT || class == MODE_FLOAT || class == MODE_COMPLEX_FLOAT)
1214 && methods != OPTAB_DIRECT && methods != OPTAB_LIB)
1215 for (wider_mode = GET_MODE_WIDER_MODE (mode); wider_mode != VOIDmode;
1216 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
1218 if (binoptab->handlers[(int) wider_mode].insn_code != CODE_FOR_nothing
1219 || (binoptab == smul_optab
1220 && GET_MODE_WIDER_MODE (wider_mode) != VOIDmode
1221 && (((unsignedp ? umul_widen_optab : smul_widen_optab)
1222 ->handlers[(int) GET_MODE_WIDER_MODE (wider_mode)].insn_code)
1223 != CODE_FOR_nothing)))
1225 rtx xop0 = op0, xop1 = op1;
1228 /* For certain integer operations, we need not actually extend
1229 the narrow operands, as long as we will truncate
1230 the results to the same narrowness. */
1232 if ((binoptab == ior_optab || binoptab == and_optab
1233 || binoptab == xor_optab
1234 || binoptab == add_optab || binoptab == sub_optab
1235 || binoptab == smul_optab || binoptab == ashl_optab)
1236 && class == MODE_INT)
1239 xop0 = widen_operand (xop0, wider_mode, mode, unsignedp, no_extend);
1241 /* The second operand of a shift must always be extended. */
1242 xop1 = widen_operand (xop1, wider_mode, mode, unsignedp,
1243 no_extend && binoptab != ashl_optab);
1245 temp = expand_binop (wider_mode, binoptab, xop0, xop1, NULL_RTX,
1246 unsignedp, OPTAB_DIRECT);
1249 if (class != MODE_INT
1250 || !TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode),
1251 GET_MODE_BITSIZE (wider_mode)))
1254 target = gen_reg_rtx (mode);
1255 convert_move (target, temp, 0);
1259 return gen_lowpart (mode, temp);
1262 delete_insns_since (last);
1266 /* These can be done a word at a time. */
1267 if ((binoptab == and_optab || binoptab == ior_optab || binoptab == xor_optab)
1268 && class == MODE_INT
1269 && GET_MODE_SIZE (mode) > UNITS_PER_WORD
1270 && binoptab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing)
1276 /* If TARGET is the same as one of the operands, the REG_EQUAL note
1277 won't be accurate, so use a new target. */
1278 if (target == 0 || target == op0 || target == op1)
1279 target = gen_reg_rtx (mode);
1283 /* Do the actual arithmetic. */
1284 for (i = 0; i < GET_MODE_BITSIZE (mode) / BITS_PER_WORD; i++)
1286 rtx target_piece = operand_subword (target, i, 1, mode);
1287 rtx x = expand_binop (word_mode, binoptab,
1288 operand_subword_force (op0, i, mode),
1289 operand_subword_force (op1, i, mode),
1290 target_piece, unsignedp, next_methods);
1295 if (target_piece != x)
1296 emit_move_insn (target_piece, x);
1299 insns = get_insns ();
/* Only emit the block if every word succeeded (loop ran to completion).  */
1302 if (i == GET_MODE_BITSIZE (mode) / BITS_PER_WORD)
1304 if (binoptab->code != UNKNOWN)
1306 = gen_rtx_fmt_ee (binoptab->code, mode,
1307 copy_rtx (op0), copy_rtx (op1));
1311 emit_no_conflict_block (insns, target, op0, op1, equiv_value);
1316 /* Synthesize double word shifts from single word shifts. */
1317 if ((binoptab == lshr_optab || binoptab == ashl_optab
1318 || binoptab == ashr_optab)
1319 && class == MODE_INT
1320 && (GET_CODE (op1) == CONST_INT || !optimize_size)
1321 && GET_MODE_SIZE (mode) == 2 * UNITS_PER_WORD
1322 && binoptab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing
1323 && ashl_optab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing
1324 && lshr_optab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing)
1326 unsigned HOST_WIDE_INT shift_mask, double_shift_mask;
1327 enum machine_mode op1_mode;
1329 double_shift_mask = targetm.shift_truncation_mask (mode);
1330 shift_mask = targetm.shift_truncation_mask (word_mode);
1331 op1_mode = GET_MODE (op1) != VOIDmode ? GET_MODE (op1) : word_mode;
1333 /* Apply the truncation to constant shifts. */
1334 if (double_shift_mask > 0 && GET_CODE (op1) == CONST_INT)
1335 op1 = GEN_INT (INTVAL (op1) & double_shift_mask);
1337 if (op1 == CONST0_RTX (op1_mode))
1340 /* Make sure that this is a combination that expand_doubleword_shift
1341 can handle. See the comments there for details. */
1342 if (double_shift_mask == 0
1343 || (shift_mask == BITS_PER_WORD - 1
1344 && double_shift_mask == BITS_PER_WORD * 2 - 1))
1346 rtx insns, equiv_value;
1347 rtx into_target, outof_target;
1348 rtx into_input, outof_input;
1349 int left_shift, outof_word;
1351 /* If TARGET is the same as one of the operands, the REG_EQUAL note
1352 won't be accurate, so use a new target. */
1353 if (target == 0 || target == op0 || target == op1)
1354 target = gen_reg_rtx (mode);
1358 /* OUTOF_* is the word we are shifting bits away from, and
1359 INTO_* is the word that we are shifting bits towards, thus
1360 they differ depending on the direction of the shift and
1361 WORDS_BIG_ENDIAN. */
1363 left_shift = binoptab == ashl_optab;
1364 outof_word = left_shift ^ ! WORDS_BIG_ENDIAN;
1366 outof_target = operand_subword (target, outof_word, 1, mode);
1367 into_target = operand_subword (target, 1 - outof_word, 1, mode);
1369 outof_input = operand_subword_force (op0, outof_word, mode);
1370 into_input = operand_subword_force (op0, 1 - outof_word, mode);
1372 if (expand_doubleword_shift (op1_mode, binoptab,
1373 outof_input, into_input, op1,
1374 outof_target, into_target,
1375 unsignedp, methods, shift_mask))
1377 insns = get_insns ();
1380 equiv_value = gen_rtx_fmt_ee (binoptab->code, mode, op0, op1);
1381 emit_no_conflict_block (insns, target, op0, op1, equiv_value);
1388 /* Synthesize double word rotates from single word shifts. */
1389 if ((binoptab == rotl_optab || binoptab == rotr_optab)
1390 && class == MODE_INT
1391 && GET_CODE (op1) == CONST_INT
1392 && GET_MODE_SIZE (mode) == 2 * UNITS_PER_WORD
1393 && ashl_optab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing
1394 && lshr_optab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing)
1396 rtx insns, equiv_value;
1397 rtx into_target, outof_target;
1398 rtx into_input, outof_input;
1400 int shift_count, left_shift, outof_word;
1402 /* If TARGET is the same as one of the operands, the REG_EQUAL note
1403 won't be accurate, so use a new target. Do this also if target is not
1404 a REG, first because having a register instead may open optimization
1405 opportunities, and second because if target and op0 happen to be MEMs
1406 designating the same location, we would risk clobbering it too early
1407 in the code sequence we generate below. */
1408 if (target == 0 || target == op0 || target == op1 || ! REG_P (target))
1409 target = gen_reg_rtx (mode);
1413 shift_count = INTVAL (op1);
1415 /* OUTOF_* is the word we are shifting bits away from, and
1416 INTO_* is the word that we are shifting bits towards, thus
1417 they differ depending on the direction of the shift and
1418 WORDS_BIG_ENDIAN. */
1420 left_shift = (binoptab == rotl_optab);
1421 outof_word = left_shift ^ ! WORDS_BIG_ENDIAN;
1423 outof_target = operand_subword (target, outof_word, 1, mode);
1424 into_target = operand_subword (target, 1 - outof_word, 1, mode);
1426 outof_input = operand_subword_force (op0, outof_word, mode);
1427 into_input = operand_subword_force (op0, 1 - outof_word, mode);
1429 if (shift_count == BITS_PER_WORD)
1431 /* This is just a word swap. */
1432 emit_move_insn (outof_target, into_input);
1433 emit_move_insn (into_target, outof_input);
1438 rtx into_temp1, into_temp2, outof_temp1, outof_temp2;
1439 rtx first_shift_count, second_shift_count;
1440 optab reverse_unsigned_shift, unsigned_shift;
/* Each output word is the IOR of pieces from both input words; the
   direction of each single-word shift depends on whether the rotate
   count crosses the word boundary.  */
1442 reverse_unsigned_shift = (left_shift ^ (shift_count < BITS_PER_WORD)
1443 ? lshr_optab : ashl_optab);
1445 unsigned_shift = (left_shift ^ (shift_count < BITS_PER_WORD)
1446 ? ashl_optab : lshr_optab);
1448 if (shift_count > BITS_PER_WORD)
1450 first_shift_count = GEN_INT (shift_count - BITS_PER_WORD);
1451 second_shift_count = GEN_INT (2 * BITS_PER_WORD - shift_count);
1455 first_shift_count = GEN_INT (BITS_PER_WORD - shift_count);
1456 second_shift_count = GEN_INT (shift_count);
1459 into_temp1 = expand_binop (word_mode, unsigned_shift,
1460 outof_input, first_shift_count,
1461 NULL_RTX, unsignedp, next_methods);
1462 into_temp2 = expand_binop (word_mode, reverse_unsigned_shift,
1463 into_input, second_shift_count,
1464 NULL_RTX, unsignedp, next_methods);
1466 if (into_temp1 != 0 && into_temp2 != 0)
1467 inter = expand_binop (word_mode, ior_optab, into_temp1, into_temp2,
1468 into_target, unsignedp, next_methods);
1472 if (inter != 0 && inter != into_target)
1473 emit_move_insn (into_target, inter);
1475 outof_temp1 = expand_binop (word_mode, unsigned_shift,
1476 into_input, first_shift_count,
1477 NULL_RTX, unsignedp, next_methods);
1478 outof_temp2 = expand_binop (word_mode, reverse_unsigned_shift,
1479 outof_input, second_shift_count,
1480 NULL_RTX, unsignedp, next_methods);
1482 if (inter != 0 && outof_temp1 != 0 && outof_temp2 != 0)
1483 inter = expand_binop (word_mode, ior_optab,
1484 outof_temp1, outof_temp2,
1485 outof_target, unsignedp, next_methods);
1487 if (inter != 0 && inter != outof_target)
1488 emit_move_insn (outof_target, inter);
1491 insns = get_insns ();
1496 if (binoptab->code != UNKNOWN)
1497 equiv_value = gen_rtx_fmt_ee (binoptab->code, mode, op0, op1);
1501 /* We can't make this a no conflict block if this is a word swap,
1502 because the word swap case fails if the input and output values
1503 are in the same register. */
1504 if (shift_count != BITS_PER_WORD)
1505 emit_no_conflict_block (insns, target, op0, op1, equiv_value);
1514 /* These can be done a word at a time by propagating carries. */
1515 if ((binoptab == add_optab || binoptab == sub_optab)
1516 && class == MODE_INT
1517 && GET_MODE_SIZE (mode) >= 2 * UNITS_PER_WORD
1518 && binoptab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing)
1521 optab otheroptab = binoptab == add_optab ? sub_optab : add_optab;
1522 const unsigned int nwords = GET_MODE_BITSIZE (mode) / BITS_PER_WORD;
1523 rtx carry_in = NULL_RTX, carry_out = NULL_RTX;
1524 rtx xop0, xop1, xtarget;
1526 /* We can handle either a 1 or -1 value for the carry. If STORE_FLAG
1527 value is one of those, use it. Otherwise, use 1 since it is the
1528 one easiest to get. */
1529 #if STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1
1530 int normalizep = STORE_FLAG_VALUE;
1535 /* Prepare the operands. */
1536 xop0 = force_reg (mode, op0);
1537 xop1 = force_reg (mode, op1);
1539 xtarget = gen_reg_rtx (mode);
1541 if (target == 0 || !REG_P (target))
1544 /* Indicate for flow that the entire target reg is being set. */
1546 emit_insn (gen_rtx_CLOBBER (VOIDmode, xtarget));
/* Words are processed from least to most significant so that each
   iteration can consume the carry produced by the previous one.  */
1548 /* Do the actual arithmetic. */
1549 for (i = 0; i < nwords; i++)
1551 int index = (WORDS_BIG_ENDIAN ? nwords - i - 1 : i);
1552 rtx target_piece = operand_subword (xtarget, index, 1, mode);
1553 rtx op0_piece = operand_subword_force (xop0, index, mode);
1554 rtx op1_piece = operand_subword_force (xop1, index, mode);
1557 /* Main add/subtract of the input operands. */
1558 x = expand_binop (word_mode, binoptab,
1559 op0_piece, op1_piece,
1560 target_piece, unsignedp, next_methods);
1566 /* Store carry from main add/subtract. */
1567 carry_out = gen_reg_rtx (word_mode);
1568 carry_out = emit_store_flag_force (carry_out,
1569 (binoptab == add_optab
1572 word_mode, 1, normalizep);
1579 /* Add/subtract previous carry to main result. */
1580 newx = expand_binop (word_mode,
1581 normalizep == 1 ? binoptab : otheroptab,
1583 NULL_RTX, 1, next_methods);
1587 /* Get out carry from adding/subtracting carry in. */
1588 rtx carry_tmp = gen_reg_rtx (word_mode);
1589 carry_tmp = emit_store_flag_force (carry_tmp,
1590 (binoptab == add_optab
1593 word_mode, 1, normalizep);
1595 /* Logical-ior the two poss. carry together. */
1596 carry_out = expand_binop (word_mode, ior_optab,
1597 carry_out, carry_tmp,
1598 carry_out, 0, next_methods);
1602 emit_move_insn (target_piece, newx);
1606 if (x != target_piece)
1607 emit_move_insn (target_piece, x);
1610 carry_in = carry_out;
/* Succeeded only if the loop completed for every word of MODE.  */
1613 if (i == GET_MODE_BITSIZE (mode) / (unsigned) BITS_PER_WORD)
1615 if (mov_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing
1616 || ! rtx_equal_p (target, xtarget))
1618 rtx temp = emit_move_insn (target, xtarget);
1620 set_unique_reg_note (temp,
1622 gen_rtx_fmt_ee (binoptab->code, mode,
1633 delete_insns_since (last);
1636 /* Attempt to synthesize double word multiplies using a sequence of word
1637 mode multiplications. We first attempt to generate a sequence using a
1638 more efficient unsigned widening multiply, and if that fails we then
1639 try using a signed widening multiply. */
1641 if (binoptab == smul_optab
1642 && class == MODE_INT
1643 && GET_MODE_SIZE (mode) == 2 * UNITS_PER_WORD
1644 && smul_optab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing
1645 && add_optab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing)
1647 rtx product = NULL_RTX;
1649 if (umul_widen_optab->handlers[(int) mode].insn_code
1650 != CODE_FOR_nothing)
1652 product = expand_doubleword_mult (mode, op0, op1, target,
1655 delete_insns_since (last);
1658 if (product == NULL_RTX
1659 && smul_widen_optab->handlers[(int) mode].insn_code
1660 != CODE_FOR_nothing)
1662 product = expand_doubleword_mult (mode, op0, op1, target,
1665 delete_insns_since (last);
1668 if (product != NULL_RTX)
1670 if (mov_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
1672 temp = emit_move_insn (target ? target : product, product);
1673 set_unique_reg_note (temp,
1675 gen_rtx_fmt_ee (MULT, mode,
1683 /* It can't be open-coded in this mode.
1684 Use a library call if one is available and caller says that's ok. */
1686 if (binoptab->handlers[(int) mode].libfunc
1687 && (methods == OPTAB_LIB || methods == OPTAB_LIB_WIDEN))
1691 enum machine_mode op1_mode = mode;
1698 op1_mode = word_mode;
1699 /* Specify unsigned here,
1700 since negative shift counts are meaningless. */
1701 op1x = convert_to_mode (word_mode, op1, 1);
1704 if (GET_MODE (op0) != VOIDmode
1705 && GET_MODE (op0) != mode)
1706 op0 = convert_to_mode (mode, op0, unsignedp);
1708 /* Pass 1 for NO_QUEUE so we don't lose any increments
1709 if the libcall is cse'd or moved. */
1710 value = emit_library_call_value (binoptab->handlers[(int) mode].libfunc,
1711 NULL_RTX, LCT_CONST, mode, 2,
1712 op0, mode, op1x, op1_mode);
1714 insns = get_insns ();
1717 target = gen_reg_rtx (mode);
1718 emit_libcall_block (insns, target, value,
1719 gen_rtx_fmt_ee (binoptab->code, mode, op0, op1));
1724 delete_insns_since (last);
1726 /* It can't be done in this mode. Can we do it in a wider mode? */
1728 if (! (methods == OPTAB_WIDEN || methods == OPTAB_LIB_WIDEN
1729 || methods == OPTAB_MUST_WIDEN))
1731 /* Caller says, don't even try. */
1732 delete_insns_since (entry_last);
1736 /* Compute the value of METHODS to pass to recursive calls.
1737 Don't allow widening to be tried recursively. */
1739 methods = (methods == OPTAB_LIB_WIDEN ? OPTAB_LIB : OPTAB_DIRECT);
1741 /* Look for a wider mode of the same class for which it appears we can do
1744 if (class == MODE_INT || class == MODE_FLOAT || class == MODE_COMPLEX_FLOAT)
1746 for (wider_mode = GET_MODE_WIDER_MODE (mode); wider_mode != VOIDmode;
1747 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
1749 if ((binoptab->handlers[(int) wider_mode].insn_code
1750 != CODE_FOR_nothing)
1751 || (methods == OPTAB_LIB
1752 && binoptab->handlers[(int) wider_mode].libfunc))
1754 rtx xop0 = op0, xop1 = op1;
1757 /* For certain integer operations, we need not actually extend
1758 the narrow operands, as long as we will truncate
1759 the results to the same narrowness. */
1761 if ((binoptab == ior_optab || binoptab == and_optab
1762 || binoptab == xor_optab
1763 || binoptab == add_optab || binoptab == sub_optab
1764 || binoptab == smul_optab || binoptab == ashl_optab)
1765 && class == MODE_INT)
1768 xop0 = widen_operand (xop0, wider_mode, mode,
1769 unsignedp, no_extend);
1771 /* The second operand of a shift must always be extended. */
1772 xop1 = widen_operand (xop1, wider_mode, mode, unsignedp,
1773 no_extend && binoptab != ashl_optab);
1775 temp = expand_binop (wider_mode, binoptab, xop0, xop1, NULL_RTX,
1776 unsignedp, methods);
1779 if (class != MODE_INT
1780 || !TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode),
1781 GET_MODE_BITSIZE (wider_mode)))
1784 target = gen_reg_rtx (mode);
1785 convert_move (target, temp, 0);
1789 return gen_lowpart (mode, temp);
1792 delete_insns_since (last);
/* Every strategy failed: discard all insns emitted since entry.  */
1797 delete_insns_since (entry_last);
1801 /* Expand a binary operator which has both signed and unsigned forms.
1802 UOPTAB is the optab for unsigned operations, and SOPTAB is for
1805 If we widen unsigned operands, we may use a signed wider operation instead
1806 of an unsigned wider operation, since the result would be the same. */
/* Strategy: try the natural-width insn first, then widening (preferring
   the signed form, with the natural-width handlers hidden), then a
   natural-width library call, and finally a widened library call.  */
1809 sign_expand_binop (enum machine_mode mode, optab uoptab, optab soptab,
1810 rtx op0, rtx op1, rtx target, int unsignedp,
1811 enum optab_methods methods)
1814 optab direct_optab = unsignedp ? uoptab : soptab;
1815 struct optab wide_soptab;
1817 /* Do it without widening, if possible. */
1818 temp = expand_binop (mode, direct_optab, op0, op1, target,
1819 unsignedp, OPTAB_DIRECT);
1820 if (temp || methods == OPTAB_DIRECT)
1823 /* Try widening to a signed int. Make a fake signed optab that
1824 hides any signed insn for direct use. */
1825 wide_soptab = *soptab;
1826 wide_soptab.handlers[(int) mode].insn_code = CODE_FOR_nothing;
1827 wide_soptab.handlers[(int) mode].libfunc = 0;
1829 temp = expand_binop (mode, &wide_soptab, op0, op1, target,
1830 unsignedp, OPTAB_WIDEN);
1832 /* For unsigned operands, try widening to an unsigned int. */
1833 if (temp == 0 && unsignedp)
1834 temp = expand_binop (mode, uoptab, op0, op1, target,
1835 unsignedp, OPTAB_WIDEN);
1836 if (temp || methods == OPTAB_WIDEN)
1839 /* Use the right width lib call if that exists. */
1840 temp = expand_binop (mode, direct_optab, op0, op1, target, unsignedp, OPTAB_LIB);
1841 if (temp || methods == OPTAB_LIB)
1844 /* Must widen and use a lib call, use either signed or unsigned. */
1845 temp = expand_binop (mode, &wide_soptab, op0, op1, target,
1846 unsignedp, methods);
/* Last resort for unsigned operands: the unsigned optab with the
   caller's METHODS.  */
1850 return expand_binop (mode, uoptab, op0, op1, target,
1851 unsignedp, methods);
1855 /* Generate code to perform an operation specified by UNOPPTAB
1856 on operand OP0, with two results to TARG0 and TARG1.
1857 We assume that the order of the operands for the instruction
1858 is TARG0, TARG1, OP0.
1860 Either TARG0 or TARG1 may be zero, but what that means is that
1861 the result is not actually wanted. We will generate it into
1862 a dummy pseudo-reg and discard it. They may not both be zero.
1864 Returns 1 if this operation can be performed; 0 if not. */
1867 expand_twoval_unop (optab unoptab, rtx op0, rtx targ0, rtx targ1,
/* MODE is taken from whichever target the caller supplied.  */
1870 enum machine_mode mode = GET_MODE (targ0 ? targ0 : targ1);
1871 enum mode_class class;
1872 enum machine_mode wider_mode;
1873 rtx entry_last = get_last_insn ();
1876 class = GET_MODE_CLASS (mode);
/* Supply dummy pseudos for any result the caller does not want.  */
1879 targ0 = gen_reg_rtx (mode);
1881 targ1 = gen_reg_rtx (mode);
1883 /* Record where to go back to if we fail. */
1884 last = get_last_insn ();
1886 if (unoptab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
1888 int icode = (int) unoptab->handlers[(int) mode].insn_code;
1889 enum machine_mode mode0 = insn_data[icode].operand[2].mode;
1893 if (GET_MODE (xop0) != VOIDmode
1894 && GET_MODE (xop0) != mode0)
1895 xop0 = convert_to_mode (mode0, xop0, unsignedp);
1897 /* Now, if insn doesn't accept these operands, put them into pseudos. */
1898 if (!insn_data[icode].operand[2].predicate (xop0, mode0))
1899 xop0 = copy_to_mode_reg (mode0, xop0);
1901 /* We could handle this, but we should always be called with a pseudo
1902 for our targets and all insns should take them as outputs. */
1903 gcc_assert (insn_data[icode].operand[0].predicate (targ0, mode));
1904 gcc_assert (insn_data[icode].operand[1].predicate (targ1, mode));
1906 pat = GEN_FCN (icode) (targ0, targ1, xop0);
1913 delete_insns_since (last);
1916 /* It can't be done in this mode. Can we do it in a wider mode? */
1918 if (class == MODE_INT || class == MODE_FLOAT || class == MODE_COMPLEX_FLOAT)
1920 for (wider_mode = GET_MODE_WIDER_MODE (mode); wider_mode != VOIDmode;
1921 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
1923 if (unoptab->handlers[(int) wider_mode].insn_code
1924 != CODE_FOR_nothing)
1926 rtx t0 = gen_reg_rtx (wider_mode);
1927 rtx t1 = gen_reg_rtx (wider_mode);
1928 rtx cop0 = convert_modes (wider_mode, mode, op0, unsignedp);
/* Recurse in the wider mode, then narrow both results back.  */
1930 if (expand_twoval_unop (unoptab, cop0, t0, t1, unsignedp))
1932 convert_move (targ0, t0, unsignedp);
1933 convert_move (targ1, t1, unsignedp);
1937 delete_insns_since (last);
/* Nothing worked: discard all insns emitted since entry.  */
1942 delete_insns_since (entry_last);
1946 /* Generate code to perform an operation specified by BINOPTAB
1947 on operands OP0 and OP1, with two results to TARG1 and TARG2.
1948 We assume that the order of the operands for the instruction
1949 is TARG0, OP0, OP1, TARG1, which would fit a pattern like
1950 [(set TARG0 (operate OP0 OP1)) (set TARG1 (operate ...))].
1952 Either TARG0 or TARG1 may be zero, but what that means is that
1953 the result is not actually wanted. We will generate it into
1954 a dummy pseudo-reg and discard it. They may not both be zero.
1956 Returns 1 if this operation can be performed; 0 if not. */
1959 expand_twoval_binop (optab binoptab, rtx op0, rtx op1, rtx targ0, rtx targ1,
/* MODE is taken from whichever target the caller supplied.  */
1962 enum machine_mode mode = GET_MODE (targ0 ? targ0 : targ1);
1963 enum mode_class class;
1964 enum machine_mode wider_mode;
1965 rtx entry_last = get_last_insn ();
1968 class = GET_MODE_CLASS (mode);
1970 /* If we are inside an appropriately-short loop and we are optimizing,
1971 force expensive constants into a register. */
1972 if (CONSTANT_P (op0) && optimize
1973 && rtx_cost (op0, binoptab->code) > COSTS_N_INSNS (1))
1974 op0 = force_reg (mode, op0);
1976 if (CONSTANT_P (op1) && optimize
1977 && rtx_cost (op1, binoptab->code) > COSTS_N_INSNS (1))
1978 op1 = force_reg (mode, op1);
/* Supply dummy pseudos for any result the caller does not want.  */
1981 targ0 = gen_reg_rtx (mode);
1983 targ1 = gen_reg_rtx (mode);
1985 /* Record where to go back to if we fail. */
1986 last = get_last_insn ();
1988 if (binoptab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
1990 int icode = (int) binoptab->handlers[(int) mode].insn_code;
1991 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
1992 enum machine_mode mode1 = insn_data[icode].operand[2].mode;
1994 rtx xop0 = op0, xop1 = op1;
1996 /* In case the insn wants input operands in modes different from
1997 those of the actual operands, convert the operands. It would
1998 seem that we don't need to convert CONST_INTs, but we do, so
1999 that they're properly zero-extended, sign-extended or truncated
2002 if (GET_MODE (op0) != mode0 && mode0 != VOIDmode)
2003 xop0 = convert_modes (mode0,
2004 GET_MODE (op0) != VOIDmode
2009 if (GET_MODE (op1) != mode1 && mode1 != VOIDmode)
2010 xop1 = convert_modes (mode1,
2011 GET_MODE (op1) != VOIDmode
2016 /* Now, if insn doesn't accept these operands, put them into pseudos. */
2017 if (!insn_data[icode].operand[1].predicate (xop0, mode0))
2018 xop0 = copy_to_mode_reg (mode0, xop0);
2020 if (!insn_data[icode].operand[2].predicate (xop1, mode1))
2021 xop1 = copy_to_mode_reg (mode1, xop1);
2023 /* We could handle this, but we should always be called with a pseudo
2024 for our targets and all insns should take them as outputs. */
2025 gcc_assert (insn_data[icode].operand[0].predicate (targ0, mode));
2026 gcc_assert (insn_data[icode].operand[3].predicate (targ1, mode));
2028 pat = GEN_FCN (icode) (targ0, xop0, xop1, targ1);
2035 delete_insns_since (last);
2038 /* It can't be done in this mode. Can we do it in a wider mode? */
2040 if (class == MODE_INT || class == MODE_FLOAT || class == MODE_COMPLEX_FLOAT)
2042 for (wider_mode = GET_MODE_WIDER_MODE (mode); wider_mode != VOIDmode;
2043 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
2045 if (binoptab->handlers[(int) wider_mode].insn_code
2046 != CODE_FOR_nothing)
2048 rtx t0 = gen_reg_rtx (wider_mode);
2049 rtx t1 = gen_reg_rtx (wider_mode);
2050 rtx cop0 = convert_modes (wider_mode, mode, op0, unsignedp);
2051 rtx cop1 = convert_modes (wider_mode, mode, op1, unsignedp);
/* Recurse in the wider mode, then narrow both results back.  */
2053 if (expand_twoval_binop (binoptab, cop0, cop1,
2056 convert_move (targ0, t0, unsignedp);
2057 convert_move (targ1, t1, unsignedp);
2061 delete_insns_since (last);
/* Nothing worked: discard all insns emitted since entry.  */
2066 delete_insns_since (entry_last);
2070 /* Expand the two-valued library call indicated by BINOPTAB, but
2071 preserve only one of the values. If TARG0 is non-NULL, the first
2072 value is placed into TARG0; otherwise the second value is placed
2073 into TARG1. Exactly one of TARG0 and TARG1 must be non-NULL. The
2074 value stored into TARG0 or TARG1 is equivalent to (CODE OP0 OP1).
2075 This routine assumes that the value returned by the library call is
2076 as if the return value was of an integral mode twice as wide as the
2077 mode of OP0. Returns 1 if the call was successful. */
2080 expand_twoval_binop_libfunc (optab binoptab, rtx op0, rtx op1,
2081 rtx targ0, rtx targ1, enum rtx_code code)
2083 enum machine_mode mode;
2084 enum machine_mode libval_mode;
2088 /* Exactly one of TARG0 or TARG1 should be non-NULL. */
2089 gcc_assert (!targ0 != !targ1);
/* Fail if no library routine is registered for this mode.  */
2091 mode = GET_MODE (op0);
2092 if (!binoptab->handlers[(int) mode].libfunc)
2095 /* The value returned by the library function will have twice as
2096 many bits as the nominal MODE. */
2097 libval_mode = smallest_mode_for_size (2 * GET_MODE_BITSIZE (mode),
2100 libval = emit_library_call_value (binoptab->handlers[(int) mode].libfunc,
2101 NULL_RTX, LCT_CONST,
2105 /* Get the part of VAL containing the value that we want. */
/* The first value lives in the low half, the second in the high half.  */
2106 libval = simplify_gen_subreg (mode, libval, libval_mode,
2107 targ0 ? 0 : GET_MODE_SIZE (mode));
2108 insns = get_insns ();
2110 /* Move the result into the desired location. */
2111 emit_libcall_block (insns, targ0 ? targ0 : targ1, libval,
2112 gen_rtx_fmt_ee (code, mode, op0, op1));
2118 /* Wrapper around expand_unop which takes an rtx code to specify
2119 the operation to perform, not an optab pointer. All other
2120 arguments are the same. */
2122 expand_simple_unop (enum machine_mode mode, enum rtx_code code, rtx op0,
2123 rtx target, int unsignedp)
/* code_to_optab maps each rtx code to its optab (see the table at the
   top of this file).  */
2125 optab unop = code_to_optab[(int) code];
2128 return expand_unop (mode, unop, op0, target, unsignedp);
2134 (clz:wide (zero_extend:wide x)) - ((width wide) - (width narrow)). */
2136 widen_clz (enum machine_mode mode, rtx op0, rtx target)
2138 enum mode_class class = GET_MODE_CLASS (mode);
2139 if (class == MODE_INT || class == MODE_FLOAT || class == MODE_COMPLEX_FLOAT)
2141 enum machine_mode wider_mode;
/* Search successively wider modes for one with a native clz insn.  */
2142 for (wider_mode = GET_MODE_WIDER_MODE (mode); wider_mode != VOIDmode;
2143 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
2145 if (clz_optab->handlers[(int) wider_mode].insn_code
2146 != CODE_FOR_nothing)
2148 rtx xop0, temp, last;
/* Remember where to back out to if this attempt fails.  */
2150 last = get_last_insn ();
2153 target = gen_reg_rtx (mode);
/* Zero-extend so the wide clz counts only the extra leading zeros,
   which the subtraction below then removes.  */
2154 xop0 = widen_operand (op0, wider_mode, mode, true, false);
2155 temp = expand_unop (wider_mode, clz_optab, xop0, NULL_RTX, true);
2157 temp = expand_binop (wider_mode, sub_optab, temp,
2158 GEN_INT (GET_MODE_BITSIZE (wider_mode)
2159 - GET_MODE_BITSIZE (mode)),
2160 target, true, OPTAB_DIRECT);
2162 delete_insns_since (last);
2171 /* Try calculating (parity x) as (and (popcount x) 1), where
2172 popcount can also be done in a wider mode. */
2174 expand_parity (enum machine_mode mode, rtx op0, rtx target)
2176 enum mode_class class = GET_MODE_CLASS (mode);
2177 if (class == MODE_INT || class == MODE_FLOAT || class == MODE_COMPLEX_FLOAT)
2179 enum machine_mode wider_mode;
/* Unlike widen_clz, the search starts at MODE itself, so a native
   popcount in the original mode is used when available.  */
2180 for (wider_mode = mode; wider_mode != VOIDmode;
2181 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
2183 if (popcount_optab->handlers[(int) wider_mode].insn_code
2184 != CODE_FOR_nothing)
2186 rtx xop0, temp, last;
/* Remember where to back out to if this attempt fails.  */
2188 last = get_last_insn ();
2191 target = gen_reg_rtx (mode);
/* Zero-extend: the extra high zeros do not change the popcount.  */
2192 xop0 = widen_operand (op0, wider_mode, mode, true, false);
2193 temp = expand_unop (wider_mode, popcount_optab, xop0, NULL_RTX,
/* Mask down to the low bit to turn the count into a parity.  */
2196 temp = expand_binop (wider_mode, and_optab, temp, const1_rtx,
2197 target, true, OPTAB_DIRECT);
2199 delete_insns_since (last);
2208 /* Extract the OMODE lowpart from VAL, which has IMODE. Under certain
2209 conditions, VAL may already be a SUBREG against which we cannot generate
2210 a further SUBREG. In this case, we expect forcing the value into a
2211 register will work around the situation. */
2214 lowpart_subreg_maybe_copy (enum machine_mode omode, rtx val,
2215 enum machine_mode imode)
2218 ret = lowpart_subreg (omode, val, imode);
/* Could not take a SUBREG of VAL directly; copy it into a fresh
   register and retry, which must succeed.  */
2221 val = force_reg (imode, val);
2222 ret = lowpart_subreg (omode, val, imode);
2223 gcc_assert (ret != NULL);
2228 /* Expand a floating point absolute value or negation operation via a
2229 logical operation on the sign bit. */
/* CODE is ABS or NEG (selects and_optab vs. xor_optab below).  MODE is
   the floating point mode of OP0; TARGET is a suggested result
   location and may be 0.  */
2232 expand_absneg_bit (enum rtx_code code, enum machine_mode mode,
2233 rtx op0, rtx target)
2235 const struct real_format *fmt;
2236 int bitpos, word, nwords, i;
2237 enum machine_mode imode;
2238 HOST_WIDE_INT hi, lo;
2241 /* The format has to have a simple sign bit. */
2242 fmt = REAL_MODE_FORMAT (mode);
/* signbit_rw is the read-write sign bit position within the format.  */
2246 bitpos = fmt->signbit_rw;
2250 /* Don't create negative zeros if the format doesn't support them. */
2251 if (code == NEG && !fmt->has_signed_zero)
/* Single-word values are handled through an integer view of the whole
   value; wider values are processed word by word below.  */
2254 if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
2256 imode = int_mode_for_mode (mode);
2257 if (imode == BLKmode)
2266 if (FLOAT_WORDS_BIG_ENDIAN)
2267 word = (GET_MODE_BITSIZE (mode) - bitpos) / BITS_PER_WORD;
2269 word = bitpos / BITS_PER_WORD;
2270 bitpos = bitpos % BITS_PER_WORD;
2271 nwords = (GET_MODE_BITSIZE (mode) + BITS_PER_WORD - 1) / BITS_PER_WORD;
/* Build the sign-bit mask as a (lo, hi) HOST_WIDE_INT pair.  */
2274 if (bitpos < HOST_BITS_PER_WIDE_INT)
2277 lo = (HOST_WIDE_INT) 1 << bitpos;
2281 hi = (HOST_WIDE_INT) 1 << (bitpos - HOST_BITS_PER_WIDE_INT);
2287 if (target == 0 || target == op0)
2288 target = gen_reg_rtx (mode);
2294 for (i = 0; i < nwords; ++i)
2296 rtx targ_piece = operand_subword (target, i, 1, mode);
2297 rtx op0_piece = operand_subword_force (op0, i, mode);
/* NOTE(review): presumably only the word holding the sign bit is
   transformed (condition elided); the other words are copied below.  */
2301 temp = expand_binop (imode, code == ABS ? and_optab : xor_optab,
2303 immed_double_const (lo, hi, imode),
2304 targ_piece, 1, OPTAB_LIB_WIDEN);
2305 if (temp != targ_piece)
2306 emit_move_insn (targ_piece, temp);
2309 emit_move_insn (targ_piece, op0_piece);
2312 insns = get_insns ();
/* Emit as a no-conflict block so the word stores are seen as one
   multi-word operation with an equivalent (CODE OP0) expression.  */
2315 temp = gen_rtx_fmt_e (code, mode, copy_rtx (op0));
2316 emit_no_conflict_block (insns, target, op0, NULL_RTX, temp);
/* Single-word case: do the logical op on the integer view, then take
   the lowpart back in MODE.  */
2320 temp = expand_binop (imode, code == ABS ? and_optab : xor_optab,
2321 gen_lowpart (imode, op0),
2322 immed_double_const (lo, hi, imode),
2323 gen_lowpart (imode, target), 1, OPTAB_LIB_WIDEN);
2324 target = lowpart_subreg_maybe_copy (mode, temp, imode);
2326 set_unique_reg_note (get_last_insn (), REG_EQUAL,
2327 gen_rtx_fmt_e (code, mode, copy_rtx (op0)));
2333 /* Generate code to perform an operation specified by UNOPTAB
2334 on operand OP0, with result having machine-mode MODE.
2336 UNSIGNEDP is for the case where we have to widen the operands
2337 to perform the operation. It says to use zero-extension.
2339 If TARGET is nonzero, the value
2340 is generated there, if it is convenient to do so.
2341 In all cases an rtx is returned for the locus of the value;
2342 this may or may not be TARGET. */
2345 expand_unop (enum machine_mode mode, optab unoptab, rtx op0, rtx target,
2348 enum mode_class class;
2349 enum machine_mode wider_mode;
2351 rtx last = get_last_insn ();
2354 class = GET_MODE_CLASS (mode);
/* Strategy 1: a direct insn pattern for this exact mode.  */
2356 if (unoptab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
2358 int icode = (int) unoptab->handlers[(int) mode].insn_code;
2359 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
2365 temp = gen_reg_rtx (mode);
2367 if (GET_MODE (xop0) != VOIDmode
2368 && GET_MODE (xop0) != mode0)
2369 xop0 = convert_to_mode (mode0, xop0, unsignedp);
2371 /* Now, if insn doesn't accept our operand, put it into a pseudo. */
2373 if (!insn_data[icode].operand[1].predicate (xop0, mode0))
2374 xop0 = copy_to_mode_reg (mode0, xop0);
2376 if (!insn_data[icode].operand[0].predicate (temp, mode))
2377 temp = gen_reg_rtx (mode);
2379 pat = GEN_FCN (icode) (temp, xop0);
/* If attaching a REG_EQUAL note fails, scrap this expansion and
   retry from scratch with a fresh (NULL_RTX) target.  */
2382 if (INSN_P (pat) && NEXT_INSN (pat) != NULL_RTX
2383 && ! add_equal_note (pat, temp, unoptab->code, xop0, NULL_RTX)
2385 delete_insns_since (last);
2386 return expand_unop (mode, unoptab, op0, NULL_RTX, unsignedp);
2394 delete_insns_since (last);
2397 /* It can't be done in this mode. Can we open-code it in a wider mode? */
2399 /* Widening clz needs special treatment. */
2400 if (unoptab == clz_optab)
2402 temp = widen_clz (mode, op0, target);
/* Strategy 2: open-code the operation in a wider mode that has a
   direct insn, then narrow the result back.  */
2409 if (class == MODE_INT || class == MODE_FLOAT || class == MODE_COMPLEX_FLOAT)
2410 for (wider_mode = GET_MODE_WIDER_MODE (mode); wider_mode != VOIDmode;
2411 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
2413 if (unoptab->handlers[(int) wider_mode].insn_code != CODE_FOR_nothing)
2417 /* For certain operations, we need not actually extend
2418 the narrow operand, as long as we will truncate the
2419 results to the same narrowness. */
2421 xop0 = widen_operand (xop0, wider_mode, mode, unsignedp,
2422 (unoptab == neg_optab
2423 || unoptab == one_cmpl_optab)
2424 && class == MODE_INT);
2426 temp = expand_unop (wider_mode, unoptab, xop0, NULL_RTX,
2431 if (class != MODE_INT)
2434 target = gen_reg_rtx (mode);
2435 convert_move (target, temp, 0);
2439 return gen_lowpart (mode, temp);
2442 delete_insns_since (last);
2446 /* These can be done a word at a time. */
/* Strategy 3: one's complement of a multi-word integer is computed
   one word_mode piece at a time inside a no-conflict block.  */
2447 if (unoptab == one_cmpl_optab
2448 && class == MODE_INT
2449 && GET_MODE_SIZE (mode) > UNITS_PER_WORD
2450 && unoptab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing)
2455 if (target == 0 || target == op0)
2456 target = gen_reg_rtx (mode);
2460 /* Do the actual arithmetic. */
2461 for (i = 0; i < GET_MODE_BITSIZE (mode) / BITS_PER_WORD; i++)
2463 rtx target_piece = operand_subword (target, i, 1, mode);
2464 rtx x = expand_unop (word_mode, unoptab,
2465 operand_subword_force (op0, i, mode),
2466 target_piece, unsignedp);
2468 if (target_piece != x)
2469 emit_move_insn (target_piece, x);
2472 insns = get_insns ();
2475 emit_no_conflict_block (insns, target, op0, NULL_RTX,
2476 gen_rtx_fmt_e (unoptab->code, mode,
/* Strategy 4 (NEG only): flip the FP sign bit, or subtract from 0.  */
2481 if (unoptab->code == NEG)
2483 /* Try negating floating point values by flipping the sign bit. */
2484 if (class == MODE_FLOAT)
2486 temp = expand_absneg_bit (NEG, mode, op0, target);
2491 /* If there is no negation pattern, and we have no negative zero,
2492 try subtracting from zero. */
2493 if (!HONOR_SIGNED_ZEROS (mode))
2495 temp = expand_binop (mode, (unoptab == negv_optab
2496 ? subv_optab : sub_optab),
2497 CONST0_RTX (mode), op0, target,
2498 unsignedp, OPTAB_DIRECT);
2504 /* Try calculating parity (x) as popcount (x) % 2. */
2505 if (unoptab == parity_optab)
2507 temp = expand_parity (mode, op0, target);
2513 /* Now try a library call in this mode. */
2514 if (unoptab->handlers[(int) mode].libfunc)
2518 enum machine_mode outmode = mode;
2520 /* All of these functions return small values. Thus we choose to
2521 have them return something that isn't a double-word. */
2522 if (unoptab == ffs_optab || unoptab == clz_optab || unoptab == ctz_optab
2523 || unoptab == popcount_optab || unoptab == parity_optab)
2525 = GET_MODE (hard_libcall_value (TYPE_MODE (integer_type_node)));
2529 /* Pass 1 for NO_QUEUE so we don't lose any increments
2530 if the libcall is cse'd or moved. */
2531 value = emit_library_call_value (unoptab->handlers[(int) mode].libfunc,
2532 NULL_RTX, LCT_CONST, outmode,
2534 insns = get_insns ();
2537 target = gen_reg_rtx (outmode);
2538 emit_libcall_block (insns, target, value,
2539 gen_rtx_fmt_e (unoptab->code, mode, op0));
2544 /* It can't be done in this mode. Can we do it in a wider mode? */
/* Strategy 5: widen again, this time also accepting modes that only
   have a library function.  */
2546 if (class == MODE_INT || class == MODE_FLOAT || class == MODE_COMPLEX_FLOAT)
2548 for (wider_mode = GET_MODE_WIDER_MODE (mode); wider_mode != VOIDmode;
2549 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
2551 if ((unoptab->handlers[(int) wider_mode].insn_code
2552 != CODE_FOR_nothing)
2553 || unoptab->handlers[(int) wider_mode].libfunc)
2557 /* For certain operations, we need not actually extend
2558 the narrow operand, as long as we will truncate the
2559 results to the same narrowness. */
2561 xop0 = widen_operand (xop0, wider_mode, mode, unsignedp,
2562 (unoptab == neg_optab
2563 || unoptab == one_cmpl_optab)
2564 && class == MODE_INT);
2566 temp = expand_unop (wider_mode, unoptab, xop0, NULL_RTX,
2569 /* If we are generating clz using wider mode, adjust the
/* A clz computed in a wider mode counts the extra leading zero bits
   introduced by widening; subtract the bit-size difference.  */
2571 if (unoptab == clz_optab && temp != 0)
2572 temp = expand_binop (wider_mode, sub_optab, temp,
2573 GEN_INT (GET_MODE_BITSIZE (wider_mode)
2574 - GET_MODE_BITSIZE (mode)),
2575 target, true, OPTAB_DIRECT);
2579 if (class != MODE_INT)
2582 target = gen_reg_rtx (mode);
2583 convert_move (target, temp, 0);
2587 return gen_lowpart (mode, temp);
2590 delete_insns_since (last);
2595 /* One final attempt at implementing negation via subtraction,
2596 this time allowing widening of the operand. */
2597 if (unoptab->code == NEG && !HONOR_SIGNED_ZEROS (mode))
2600 temp = expand_binop (mode,
2601 unoptab == negv_optab ? subv_optab : sub_optab,
2602 CONST0_RTX (mode), op0,
2603 target, unsignedp, OPTAB_LIB_WIDEN);
2611 /* Emit code to compute the absolute value of OP0, with result to
2612 TARGET if convenient. (TARGET may be 0.) The return value says
2613 where the result actually is to be found.
2615 MODE is the mode of the operand; the mode of the result is
2616 different but can be deduced from MODE.
/* This variant emits no conditional jumps; it returns 0-ish on
   failure paths (elided) so the caller can fall back to a jump.  */
2621 expand_abs_nojump (enum machine_mode mode, rtx op0, rtx target,
2622 int result_unsignedp)
2627 result_unsignedp = 1;
2629 /* First try to do it with a special abs instruction. */
2630 temp = expand_unop (mode, result_unsignedp ? abs_optab : absv_optab,
2635 /* For floating point modes, try clearing the sign bit. */
2636 if (GET_MODE_CLASS (mode) == MODE_FLOAT)
2638 temp = expand_absneg_bit (ABS, mode, op0, target);
2643 /* If we have a MAX insn, we can do this as MAX (x, -x). */
2644 if (smax_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing
2645 && !HONOR_SIGNED_ZEROS (mode))
2647 rtx last = get_last_insn ();
2649 temp = expand_unop (mode, neg_optab, op0, NULL_RTX, 0);
2651 temp = expand_binop (mode, smax_optab, op0, temp, target, 0,
2657 delete_insns_since (last);
2660 /* If this machine has expensive jumps, we can do integer absolute
2661 value of X as (((signed) x >> (W-1)) ^ x) - ((signed) x >> (W-1)),
2662 where W is the width of MODE. */
2664 if (GET_MODE_CLASS (mode) == MODE_INT && BRANCH_COST >= 2)
/* EXTENDED is the sign mask: all ones when OP0 < 0, else zero.  */
2666 rtx extended = expand_shift (RSHIFT_EXPR, mode, op0,
2667 size_int (GET_MODE_BITSIZE (mode) - 1),
2670 temp = expand_binop (mode, xor_optab, extended, op0, target, 0,
2673 temp = expand_binop (mode, result_unsignedp ? sub_optab : subv_optab,
2674 temp, extended, target, 0, OPTAB_LIB_WIDEN);
/* Emit code for the absolute value of OP0, falling back to a
   compare-and-jump-around-negate sequence when the jump-free
   expansion (expand_abs_nojump) fails.  SAFE nonzero means TARGET
   may be reused for intermediate results.  */
2684 expand_abs (enum machine_mode mode, rtx op0, rtx target,
2685 int result_unsignedp, int safe)
2690 result_unsignedp = 1;
2692 temp = expand_abs_nojump (mode, op0, target, result_unsignedp);
2696 /* If that does not win, use conditional jump and negate. */
2698 /* It is safe to use the target if it is the same
2699 as the source if this is also a pseudo register */
2700 if (op0 == target && REG_P (op0)
2701 && REGNO (op0) >= FIRST_PSEUDO_REGISTER)
/* OP1 here is (re)used as the label jumped to when OP0 >= 0.  */
2704 op1 = gen_label_rtx ();
2705 if (target == 0 || ! safe
2706 || GET_MODE (target) != mode
2707 || (MEM_P (target) && MEM_VOLATILE_P (target))
2709 && REGNO (target) < FIRST_PSEUDO_REGISTER))
2710 target = gen_reg_rtx (mode);
2712 emit_move_insn (target, op0);
2715 /* If this mode is an integer too wide to compare properly,
2716 compare word by word. Rely on CSE to optimize constant cases. */
2717 if (GET_MODE_CLASS (mode) == MODE_INT
2718 && ! can_compare_p (GE, mode, ccp_jump))
2719 do_jump_by_parts_greater_rtx (mode, 0, target, const0_rtx,
2722 do_compare_rtx_and_jump (target, CONST0_RTX (mode), GE, 0, mode,
2723 NULL_RTX, NULL_RTX, op1);
/* Negative path: negate in place and store back into TARGET.  */
2725 op0 = expand_unop (mode, result_unsignedp ? neg_optab : negv_optab,
2728 emit_move_insn (target, op0);
2734 /* A subroutine of expand_copysign, perform the copysign operation using the
2735 abs and neg primitives advertised to exist on the target. The assumption
2736 is that we have a split register file, and leaving op0 in fp registers,
2737 and not playing with subregs so much, will help the register allocator. */
/* BITPOS is the read-only sign bit position (fmt->signbit_ro);
   OP0_IS_ABS is true when OP0's sign bit is known clear.  */
2740 expand_copysign_absneg (enum machine_mode mode, rtx op0, rtx op1, rtx target,
2741 int bitpos, bool op0_is_abs)
2743 enum machine_mode imode;
2744 HOST_WIDE_INT hi, lo;
/* Establish |op0| in TARGET first.  */
2753 op0 = expand_unop (mode, abs_optab, op0, target, 0);
2760 if (target == NULL_RTX)
2761 target = copy_to_reg (op0);
2763 emit_move_insn (target, op0);
/* Extract the word of OP1 that contains the sign bit, via an integer
   view for single-word modes or subwords for multi-word modes.  */
2766 if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
2768 imode = int_mode_for_mode (mode);
2769 if (imode == BLKmode)
2771 op1 = gen_lowpart (imode, op1);
2776 if (FLOAT_WORDS_BIG_ENDIAN)
2777 word = (GET_MODE_BITSIZE (mode) - bitpos) / BITS_PER_WORD;
2779 word = bitpos / BITS_PER_WORD;
2780 bitpos = bitpos % BITS_PER_WORD;
2781 op1 = operand_subword_force (op1, word, mode);
2784 if (bitpos < HOST_BITS_PER_WIDE_INT)
2787 lo = (HOST_WIDE_INT) 1 << bitpos;
2791 hi = (HOST_WIDE_INT) 1 << (bitpos - HOST_BITS_PER_WIDE_INT);
/* Isolate OP1's sign bit; skip the negation when it is clear.  */
2795 op1 = expand_binop (imode, and_optab, op1,
2796 immed_double_const (lo, hi, imode),
2797 NULL_RTX, 1, OPTAB_LIB_WIDEN);
2799 label = gen_label_rtx ();
2800 emit_cmp_and_jump_insns (op1, const0_rtx, EQ, NULL_RTX, imode, 1, label);
2802 if (GET_CODE (op0) == CONST_DOUBLE)
2803 op0 = simplify_unary_operation (NEG, mode, op0, mode);
2805 op0 = expand_unop (mode, neg_optab, op0, target, 0);
2807 emit_move_insn (target, op0);
2815 /* A subroutine of expand_copysign, perform the entire copysign operation
2816 with integer bitmasks. BITPOS is the position of the sign bit; OP0_IS_ABS
2817 is true if op0 is known to have its sign bit clear. */
/* Computes (op0 & ~signmask) | (op1 & signmask) on an integer view of
   the operands, one word at a time for multi-word modes.  */
2820 expand_copysign_bit (enum machine_mode mode, rtx op0, rtx op1, rtx target,
2821 int bitpos, bool op0_is_abs)
2823 enum machine_mode imode;
2824 HOST_WIDE_INT hi, lo;
2825 int word, nwords, i;
2828 if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
2830 imode = int_mode_for_mode (mode);
2831 if (imode == BLKmode)
2840 if (FLOAT_WORDS_BIG_ENDIAN)
2841 word = (GET_MODE_BITSIZE (mode) - bitpos) / BITS_PER_WORD;
2843 word = bitpos / BITS_PER_WORD;
2844 bitpos = bitpos % BITS_PER_WORD;
2845 nwords = (GET_MODE_BITSIZE (mode) + BITS_PER_WORD - 1) / BITS_PER_WORD;
/* (lo, hi) is the sign-bit mask within the word that holds it.  */
2848 if (bitpos < HOST_BITS_PER_WIDE_INT)
2851 lo = (HOST_WIDE_INT) 1 << bitpos;
2855 hi = (HOST_WIDE_INT) 1 << (bitpos - HOST_BITS_PER_WIDE_INT);
2859 if (target == 0 || target == op0 || target == op1)
2860 target = gen_reg_rtx (mode);
2866 for (i = 0; i < nwords; ++i)
2868 rtx targ_piece = operand_subword (target, i, 1, mode);
2869 rtx op0_piece = operand_subword_force (op0, i, mode);
/* Clear OP0's sign bit, isolate OP1's, and combine with IOR.
   NOTE(review): presumably done only for the sign-carrying word
   (condition elided); other words are copied through below.  */
2874 op0_piece = expand_binop (imode, and_optab, op0_piece,
2875 immed_double_const (~lo, ~hi, imode),
2876 NULL_RTX, 1, OPTAB_LIB_WIDEN);
2878 op1 = expand_binop (imode, and_optab,
2879 operand_subword_force (op1, i, mode),
2880 immed_double_const (lo, hi, imode),
2881 NULL_RTX, 1, OPTAB_LIB_WIDEN);
2883 temp = expand_binop (imode, ior_optab, op0_piece, op1,
2884 targ_piece, 1, OPTAB_LIB_WIDEN);
2885 if (temp != targ_piece)
2886 emit_move_insn (targ_piece, temp);
2889 emit_move_insn (targ_piece, op0_piece);
2892 insns = get_insns ();
2895 emit_no_conflict_block (insns, target, op0, op1, NULL_RTX);
/* Single-word case: same mask/combine on the integer lowpart.  */
2899 op1 = expand_binop (imode, and_optab, gen_lowpart (imode, op1),
2900 immed_double_const (lo, hi, imode),
2901 NULL_RTX, 1, OPTAB_LIB_WIDEN);
2903 op0 = gen_lowpart (imode, op0);
2905 op0 = expand_binop (imode, and_optab, op0,
2906 immed_double_const (~lo, ~hi, imode),
2907 NULL_RTX, 1, OPTAB_LIB_WIDEN);
2909 temp = expand_binop (imode, ior_optab, op0, op1,
2910 gen_lowpart (imode, target), 1, OPTAB_LIB_WIDEN);
2911 target = lowpart_subreg_maybe_copy (mode, temp, imode);
2917 /* Expand the C99 copysign operation. OP0 and OP1 must be the same
2918 scalar floating point mode. Return NULL if we do not know how to
2919 expand the operation inline. */
2922 expand_copysign (rtx op0, rtx op1, rtx target)
2924 enum machine_mode mode = GET_MODE (op0);
2925 const struct real_format *fmt;
2929 gcc_assert (SCALAR_FLOAT_MODE_P (mode));
2930 gcc_assert (GET_MODE (op1) == mode);
2932 /* First try to do it with a special instruction. */
2933 temp = expand_binop (mode, copysign_optab, op0, op1,
2934 target, 0, OPTAB_DIRECT);
/* Bit-twiddling fallbacks require a format with a sign bit.  */
2938 fmt = REAL_MODE_FORMAT (mode);
2939 if (fmt == NULL || !fmt->has_signed_zero)
/* Fold a constant OP0 to its absolute value up front.  */
2943 if (GET_CODE (op0) == CONST_DOUBLE)
2945 if (real_isneg (CONST_DOUBLE_REAL_VALUE (op0)))
2946 op0 = simplify_unary_operation (ABS, mode, op0, mode);
/* Prefer the abs/neg strategy when the sign bit is readable
   (signbit_ro) and the target advertises both primitives.  */
2950 if (fmt->signbit_ro >= 0
2951 && (GET_CODE (op0) == CONST_DOUBLE
2952 || (neg_optab->handlers[mode].insn_code != CODE_FOR_nothing
2953 && abs_optab->handlers[mode].insn_code != CODE_FOR_nothing)))
2955 temp = expand_copysign_absneg (mode, op0, op1, target,
2956 fmt->signbit_ro, op0_is_abs);
/* Last resort: full integer bitmask implementation; requires a
   writable sign bit (signbit_rw).  */
2961 if (fmt->signbit_rw < 0)
2963 return expand_copysign_bit (mode, op0, op1, target,
2964 fmt->signbit_rw, op0_is_abs);
2967 /* Generate an instruction whose insn-code is INSN_CODE,
2968 with two operands: an output TARGET and an input OP0.
2969 TARGET *must* be nonzero, and the output is always stored there.
2970 CODE is an rtx code such that (CODE OP0) is an rtx that describes
2971 the value that is stored into TARGET. */
2974 emit_unop_insn (int icode, rtx target, rtx op0, enum rtx_code code)
2977 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
2982 /* Now, if insn does not accept our operands, put them into pseudos. */
2984 if (!insn_data[icode].operand[1].predicate (op0, mode0))
2985 op0 = copy_to_mode_reg (mode0, op0);
2987 if (!insn_data[icode].operand[0].predicate (temp, GET_MODE (temp)))
2988 temp = gen_reg_rtx (GET_MODE (temp));
2990 pat = GEN_FCN (icode) (temp, op0);
/* Multi-insn expansions get a REG_EQUAL note describing the result,
   unless the caller passed CODE == UNKNOWN.  */
2992 if (INSN_P (pat) && NEXT_INSN (pat) != NULL_RTX && code != UNKNOWN)
2993 add_equal_note (pat, temp, code, op0, NULL_RTX);
/* Copy into TARGET if the insn produced the value elsewhere.  */
2998 emit_move_insn (target, temp);
/* State passed (via note_stores) from emit_no_conflict_block to
   no_conflict_move_test.  TARGET is the multi-word output being
   built; FIRST is the first insn kept in the block; INSN is the
   insn currently being examined.  */
3001 struct no_conflict_data
3003 rtx target, first, insn;
3007 /* Called via note_stores by emit_no_conflict_block. Set P->must_stay
3008 if the currently examined clobber / store has to stay in the list of
3009 insns that constitute the actual no_conflict block. */
3011 no_conflict_move_test (rtx dest, rtx set, void *p0)
3013 struct no_conflict_data *p= p0;
3015 /* If this insn directly contributes to setting the target, it must stay. */
3016 if (reg_overlap_mentioned_p (p->target, dest))
3017 p->must_stay = true;
3018 /* If we haven't committed to keeping any other insns in the list yet,
3019 there is nothing more to check. */
3020 else if (p->insn == p->first)
3022 /* If this insn sets / clobbers a register that feeds one of the insns
3023 already in the list, this insn has to stay too. */
3024 else if (reg_overlap_mentioned_p (dest, PATTERN (p->first))
3025 || (CALL_P (p->first) && (find_reg_fusage (p->first, USE, dest)))
3026 || reg_used_between_p (dest, p->first, p->insn)
3027 /* Likewise if this insn depends on a register set by a previous
3028 insn in the list. */
3029 || (GET_CODE (set) == SET
3030 && (modified_in_p (SET_SRC (set), p->first)
3031 || modified_between_p (SET_SRC (set), p->first, p->insn))))
3032 p->must_stay = true;
3035 /* Emit code to perform a series of operations on a multi-word quantity, one
3038 Such a block is preceded by a CLOBBER of the output, consists of multiple
3039 insns, each setting one word of the output, and followed by a SET copying
3040 the output to itself.
3042 Each of the insns setting words of the output receives a REG_NO_CONFLICT
3043 note indicating that it doesn't conflict with the (also multi-word)
3044 inputs. The entire block is surrounded by REG_LIBCALL and REG_RETVAL
3047 INSNS is a block of code generated to perform the operation, not including
3048 the CLOBBER and final copy. All insns that compute intermediate values
3049 are first emitted, followed by the block as described above.
3051 TARGET, OP0, and OP1 are the output and inputs of the operations,
3052 respectively. OP1 may be zero for a unary operation.
3054 EQUIV, if nonzero, is an expression to be placed into a REG_EQUAL note
3057 If TARGET is not a register, INSNS is simply emitted with no special
3058 processing. Likewise if anything in INSNS is not an INSN or if
3059 there is a libcall block inside INSNS.
3061 The final insn emitted is returned. */
3064 emit_no_conflict_block (rtx insns, rtx target, rtx op0, rtx op1, rtx equiv)
3066 rtx prev, next, first, last, insn;
/* Bail out to a plain emit when the block structure cannot apply.  */
3068 if (!REG_P (target) || reload_in_progress)
3069 return emit_insn (insns);
3071 for (insn = insns; insn; insn = NEXT_INSN (insn))
3072 if (!NONJUMP_INSN_P (insn)
3073 || find_reg_note (insn, REG_LIBCALL, NULL_RTX))
3074 return emit_insn (insns);
3076 /* First emit all insns that do not store into words of the output and remove
3077 these from the list. */
3078 for (insn = insns; insn; insn = next)
3081 struct no_conflict_data data;
3083 next = NEXT_INSN (insn);
3085 /* Some ports (cris) create libcall regions on their own. We must
3086 avoid any potential nesting of LIBCALLs. */
3087 if ((note = find_reg_note (insn, REG_LIBCALL, NULL)) != NULL)
3088 remove_note (insn, note);
3089 if ((note = find_reg_note (insn, REG_RETVAL, NULL)) != NULL)
3090 remove_note (insn, note);
3092 data.target = target;
3096 note_stores (PATTERN (insn), no_conflict_move_test, &data);
/* Unlink INSN from the list and emit it ahead of the block.  */
3097 if (! data.must_stay)
3099 if (PREV_INSN (insn))
3100 NEXT_INSN (PREV_INSN (insn)) = next;
3105 PREV_INSN (next) = PREV_INSN (insn);
3111 prev = get_last_insn ();
3113 /* Now write the CLOBBER of the output, followed by the setting of each
3114 of the words, followed by the final copy. */
3115 if (target != op0 && target != op1)
3116 emit_insn (gen_rtx_CLOBBER (VOIDmode, target));
3118 for (insn = insns; insn; insn = next)
3120 next = NEXT_INSN (insn);
/* Tag each word-store with REG_NO_CONFLICT notes for the inputs.  */
3123 if (op1 && REG_P (op1))
3124 REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_NO_CONFLICT, op1,
3127 if (op0 && REG_P (op0))
3128 REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_NO_CONFLICT, op0,
3132 if (mov_optab->handlers[(int) GET_MODE (target)].insn_code
3133 != CODE_FOR_nothing)
/* The self-copy marks the end of the block and carries EQUIV.  */
3135 last = emit_move_insn (target, target);
3137 set_unique_reg_note (last, REG_EQUAL, equiv);
3141 last = get_last_insn ();
3143 /* Remove any existing REG_EQUAL note from "last", or else it will
3144 be mistaken for a note referring to the full contents of the
3145 alleged libcall value when found together with the REG_RETVAL
3146 note added below. An existing note can come from an insn
3147 expansion at "last". */
3148 remove_note (last, find_reg_note (last, REG_EQUAL, NULL_RTX));
3152 first = get_insns ();
3154 first = NEXT_INSN (prev);
3156 /* Encapsulate the block so it gets manipulated as a unit. */
3157 REG_NOTES (first) = gen_rtx_INSN_LIST (REG_LIBCALL, last,
3159 REG_NOTES (last) = gen_rtx_INSN_LIST (REG_RETVAL, first, REG_NOTES (last));
3164 /* Emit code to make a call to a constant function or a library call.
3166 INSNS is a list containing all insns emitted in the call.
3167 These insns leave the result in RESULT. Our block is to copy RESULT
3168 to TARGET, which is logically equivalent to EQUIV.
3170 We first emit any insns that set a pseudo on the assumption that these are
3171 loading constants into registers; doing so allows them to be safely cse'ed
3172 between blocks. Then we emit all the other insns in the block, followed by
3173 an insn to move RESULT to TARGET. This last insn will have a REG_EQUAL
3174 note with an operand of EQUIV.
3176 Moving assignments to pseudos outside of the block is done to improve
3177 the generated code, but is not required to generate correct code,
3178 hence being unable to move an assignment is not grounds for not making
3179 a libcall block. There are two reasons why it is safe to leave these
3180 insns inside the block: First, we know that these pseudos cannot be
3181 used in generated RTL outside the block since they are created for
3182 temporary purposes within the block. Second, CSE will not record the
3183 values of anything set inside a libcall block, so we know they must
3184 be dead at the end of the block.
3186 Except for the first group of insns (the ones setting pseudos), the
3187 block is delimited by REG_RETVAL and REG_LIBCALL notes. */
3190 emit_libcall_block (rtx insns, rtx target, rtx result, rtx equiv)
3192 rtx final_dest = target;
3193 rtx prev, next, first, last, insn;
3195 /* If this is a reg with REG_USERVAR_P set, then it could possibly turn
3196 into a MEM later. Protect the libcall block from this change. */
3197 if (! REG_P (target) || REG_USERVAR_P (target))
3198 target = gen_reg_rtx (GET_MODE (target));
3200 /* If we're using non-call exceptions, a libcall corresponding to an
3201 operation that may trap may also trap. */
/* Strip no-throw EH notes (value <= 0) so the call can trap.  */
3202 if (flag_non_call_exceptions && may_trap_p (equiv))
3204 for (insn = insns; insn; insn = NEXT_INSN (insn))
3207 rtx note = find_reg_note (insn, REG_EH_REGION, NULL_RTX);
3209 if (note != 0 && INTVAL (XEXP (note, 0)) <= 0)
3210 remove_note (insn, note);
3214 /* Look for any CALL_INSNs in this sequence, and attach a REG_EH_REGION
3215 reg note to indicate that this call cannot throw or execute a nonlocal
3216 goto (unless there is already a REG_EH_REGION note, in which case
3218 for (insn = insns; insn; insn = NEXT_INSN (insn))
3221 rtx note = find_reg_note (insn, REG_EH_REGION, NULL_RTX);
3224 XEXP (note, 0) = constm1_rtx;
3226 REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_EH_REGION, constm1_rtx,
3230 /* First emit all insns that set pseudos. Remove them from the list as
3231 we go. Avoid insns that set pseudos which were referenced in previous
3232 insns. These can be generated by move_by_pieces, for example,
3233 to update an address. Similarly, avoid insns that reference things
3234 set in previous insns. */
3236 for (insn = insns; insn; insn = next)
3238 rtx set = single_set (insn);
3241 /* Some ports (cris) create libcall regions on their own. We must
3242 avoid any potential nesting of LIBCALLs. */
3243 if ((note = find_reg_note (insn, REG_LIBCALL, NULL)) != NULL)
3244 remove_note (insn, note);
3245 if ((note = find_reg_note (insn, REG_RETVAL, NULL)) != NULL)
3246 remove_note (insn, note);
3248 next = NEXT_INSN (insn);
/* Hoistable: a single-set of a pseudo with no dependency on, or
   interference with, earlier insns in the sequence.  */
3250 if (set != 0 && REG_P (SET_DEST (set))
3251 && REGNO (SET_DEST (set)) >= FIRST_PSEUDO_REGISTER
3253 || ((! INSN_P(insns)
3254 || ! reg_mentioned_p (SET_DEST (set), PATTERN (insns)))
3255 && ! reg_used_between_p (SET_DEST (set), insns, insn)
3256 && ! modified_in_p (SET_SRC (set), insns)
3257 && ! modified_between_p (SET_SRC (set), insns, insn))))
3259 if (PREV_INSN (insn))
3260 NEXT_INSN (PREV_INSN (insn)) = next;
3265 PREV_INSN (next) = PREV_INSN (insn);
3270 /* Some ports use a loop to copy large arguments onto the stack.
3271 Don't move anything outside such a loop. */
3276 prev = get_last_insn ();
3278 /* Write the remaining insns followed by the final copy. */
3280 for (insn = insns; insn; insn = next)
3282 next = NEXT_INSN (insn);
3287 last = emit_move_insn (target, result);
3288 if (mov_optab->handlers[(int) GET_MODE (target)].insn_code
3289 != CODE_FOR_nothing)
3290 set_unique_reg_note (last, REG_EQUAL, copy_rtx (equiv));
3293 /* Remove any existing REG_EQUAL note from "last", or else it will
3294 be mistaken for a note referring to the full contents of the
3295 libcall value when found together with the REG_RETVAL note added
3296 below. An existing note can come from an insn expansion at
3298 remove_note (last, find_reg_note (last, REG_EQUAL, NULL_RTX));
3301 if (final_dest != target)
3302 emit_move_insn (final_dest, target);
3305 first = get_insns ();
3307 first = NEXT_INSN (prev);
3309 /* Encapsulate the block so it gets manipulated as a unit. */
3310 if (!flag_non_call_exceptions || !may_trap_p (equiv))
3312 /* We can't attach the REG_LIBCALL and REG_RETVAL notes
3313 when the encapsulated region would not be in one basic block,
3314 i.e. when there is a control_flow_insn_p insn between FIRST and LAST.
3316 bool attach_libcall_retval_notes = true;
3317 next = NEXT_INSN (last);
3318 for (insn = first; insn != next; insn = NEXT_INSN (insn))
3319 if (control_flow_insn_p (insn))
3321 attach_libcall_retval_notes = false;
3325 if (attach_libcall_retval_notes)
3327 REG_NOTES (first) = gen_rtx_INSN_LIST (REG_LIBCALL, last,
3329 REG_NOTES (last) = gen_rtx_INSN_LIST (REG_RETVAL, first,
3335 /* Nonzero if we can perform a comparison of mode MODE straightforwardly.
3336 PURPOSE describes how this comparison will be used. CODE is the rtx
3337 comparison code we will be using.
3339 ??? Actually, CODE is slightly weaker than that. A target is still
3340 required to implement all of the normal bcc operations, but not
3341 required to implement all (or any) of the unordered bcc operations. */
3344 can_compare_p (enum rtx_code code, enum machine_mode mode,
3345 enum can_compare_purpose purpose)
/* Loop over MODE and successively wider modes (do/while below) until
   some pattern can handle the comparison.  */
3349 if (cmp_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
3351 if (purpose == ccp_jump)
3352 return bcc_gen_fctn[(int) code] != NULL;
3353 else if (purpose == ccp_store_flag)
3354 return setcc_gen_code[(int) code] != CODE_FOR_nothing;
3356 /* There's only one cmov entry point, and it's allowed to fail. */
/* Combined compare-and-branch / compare-and-store patterns.  */
3359 if (purpose == ccp_jump
3360 && cbranch_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
3362 if (purpose == ccp_cmov
3363 && cmov_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
3365 if (purpose == ccp_store_flag
3366 && cstore_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
3368 mode = GET_MODE_WIDER_MODE (mode);
3370 while (mode != VOIDmode);
3375 /* This function is called when we are going to emit a compare instruction that
3376 compares the values found in *PX and *PY, using the rtl operator COMPARISON.
3378 *PMODE is the mode of the inputs (in case they are const_int).
3379 *PUNSIGNEDP nonzero says that the operands are unsigned;
3380 this matters if they need to be widened.
3382 If they have mode BLKmode, then SIZE specifies the size of both operands.
3384 This function performs all the setup necessary so that the caller only has
3385 to emit a single comparison insn. This setup can involve doing a BLKmode
3386 comparison or emitting a library call to perform the comparison if no insn
3387 is available to handle it.
3388 The values which are passed in through pointers can be modified; the caller
3389 should perform the comparison on the modified values. Constant
3390 comparisons must have already been folded. */
3393 prepare_cmp_insn (rtx *px, rtx *py, enum rtx_code *pcomparison, rtx size,
3394 enum machine_mode *pmode, int *punsignedp,
3395 enum can_compare_purpose purpose)
3397 enum machine_mode mode = *pmode;
3398 rtx x = *px, y = *py;
3399 int unsignedp = *punsignedp;
3400 enum mode_class class;
3402 class = GET_MODE_CLASS (mode);
3404 /* If we are inside an appropriately-short loop and we are optimizing,
3405 force expensive constants into a register. */
3406 if (CONSTANT_P (x) && optimize
3407 && rtx_cost (x, COMPARE) > COSTS_N_INSNS (1))
3408 x = force_reg (mode, x);
3410 if (CONSTANT_P (y) && optimize
3411 && rtx_cost (y, COMPARE) > COSTS_N_INSNS (1))
3412 y = force_reg (mode, y);
3415 /* Make sure we have a canonical comparison. The RTL
3416 documentation states that canonical comparisons are required only
3417 for targets which have cc0. */
3418 gcc_assert (!CONSTANT_P (x) || CONSTANT_P (y));
3421 /* Don't let both operands fail to indicate the mode. */
3422 if (GET_MODE (x) == VOIDmode && GET_MODE (y) == VOIDmode)
3423 x = force_reg (mode, x);
3425 /* Handle all BLKmode compares. */
3427 if (mode == BLKmode)
3429 enum machine_mode cmp_mode, result_mode;
3430 enum insn_code cmp_code;
/* Alignment operand for the block-compare patterns, in bytes.  */
3435 = GEN_INT (MIN (MEM_ALIGN (x), MEM_ALIGN (y)) / BITS_PER_UNIT);
3439 /* Try to use a memory block compare insn - either cmpstr
3440 or cmpmem will do. */
3441 for (cmp_mode = GET_CLASS_NARROWEST_MODE (MODE_INT);
3442 cmp_mode != VOIDmode;
3443 cmp_mode = GET_MODE_WIDER_MODE (cmp_mode))
3445 cmp_code = cmpmem_optab[cmp_mode];
3446 if (cmp_code == CODE_FOR_nothing)
3447 cmp_code = cmpstr_optab[cmp_mode];
3448 if (cmp_code == CODE_FOR_nothing)
3449 cmp_code = cmpstrn_optab[cmp_mode];
3450 if (cmp_code == CODE_FOR_nothing)
3453 /* Must make sure the size fits the insn's mode. */
3454 if ((GET_CODE (size) == CONST_INT
3455 && INTVAL (size) >= (1 << GET_MODE_BITSIZE (cmp_mode)))
3456 || (GET_MODE_BITSIZE (GET_MODE (size))
3457 > GET_MODE_BITSIZE (cmp_mode)))
3460 result_mode = insn_data[cmp_code].operand[0].mode;
3461 result = gen_reg_rtx (result_mode);
3462 size = convert_to_mode (cmp_mode, size, 1);
3463 emit_insn (GEN_FCN (cmp_code) (result, x, y, size, opalign));
3467 *pmode = result_mode;
3471 /* Otherwise call a library function, memcmp. */
3472 libfunc = memcmp_libfunc;
3473 length_type = sizetype;
3474 result_mode = TYPE_MODE (integer_type_node);
3475 cmp_mode = TYPE_MODE (length_type);
3476 size = convert_to_mode (TYPE_MODE (length_type), size,
3477 TYPE_UNSIGNED (length_type));
3479 result = emit_library_call_value (libfunc, 0, LCT_PURE_MAKE_BLOCK,
3486 *pmode = result_mode;
3490 /* Don't allow operands to the compare to trap, as that can put the
3491 compare and branch in different basic blocks. */
3492 if (flag_non_call_exceptions)
3495 x = force_reg (mode, x);
3497 y = force_reg (mode, y);
/* If the target can compare in this (or a wider) mode, we are done.  */
3502 if (can_compare_p (*pcomparison, mode, purpose))
3505 /* Handle a lib call just for the mode we are using. */
3507 if (cmp_optab->handlers[(int) mode].libfunc && class != MODE_FLOAT)
3509 rtx libfunc = cmp_optab->handlers[(int) mode].libfunc;
3512 /* If we want unsigned, and this mode has a distinct unsigned
3513 comparison routine, use that. */
3514 if (unsignedp && ucmp_optab->handlers[(int) mode].libfunc)
3515 libfunc = ucmp_optab->handlers[(int) mode].libfunc;
3517 result = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST_MAKE_BLOCK,
3518 word_mode, 2, x, mode, y, mode);
3522 if (TARGET_LIB_INT_CMP_BIASED)
3523 /* Integer comparison returns a result that must be compared
3524 against 1, so that even if we do an unsigned compare
3525 afterward, there is still a value that can represent the
3526 result "less than". */
/* Only float modes remain; hand them to the float-compare helper.  */
3536 gcc_assert (class == MODE_FLOAT);
3537 prepare_float_lib_cmp (px, py, pcomparison, pmode, punsignedp);
3540 /* Before emitting an insn with code ICODE, make sure that X, which is going
3541 to be used for operand OPNUM of the insn, is converted from mode MODE to
3542 WIDER_MODE (UNSIGNEDP determines whether it is an unsigned conversion), and
3543 that it is accepted by the operand predicate. Return the new value. */
3546 prepare_operand (int icode, rtx x, int opnum, enum machine_mode mode,
3547 enum machine_mode wider_mode, int unsignedp)
/* Widen first if needed ... */
3549 if (mode != wider_mode)
3550 x = convert_modes (wider_mode, mode, x, unsignedp);
/* ... then, if the insn's operand predicate still rejects X, copy it
   into a register of the operand's required mode.  */
3552 if (!insn_data[icode].operand[opnum].predicate
3553 (x, insn_data[icode].operand[opnum].mode))
3557 x = copy_to_mode_reg (insn_data[icode].operand[opnum].mode, x);
3563 /* Subroutine of emit_cmp_and_jump_insns; this function is called when we know
3564 we can do the comparison.
3565 The arguments are the same as for emit_cmp_and_jump_insns; but LABEL may
3566 be NULL_RTX which indicates that only a comparison is to be generated. */
3569 emit_cmp_and_jump_insn_1 (rtx x, rtx y, enum machine_mode mode,
3570 enum rtx_code comparison, int unsignedp, rtx label)
3572 rtx test = gen_rtx_fmt_ee (comparison, mode, x, y);
3573 enum mode_class class = GET_MODE_CLASS (mode);
3574 enum machine_mode wider_mode = mode;
/* Walk from MODE through successively wider modes (loop tail at the
   bottom) until some strategy succeeds.  NOTE(review): braces and the
   returns that exit after each successful strategy are elided from this
   dump.  */
3576 /* Try combined insns first. */
3579 enum insn_code icode;
3580 PUT_MODE (test, wider_mode);
/* Strategy 1: a combined compare-and-branch (cbranch) pattern.  */
3584 icode = cbranch_optab->handlers[(int) wider_mode].insn_code;
3586 if (icode != CODE_FOR_nothing
3587 && insn_data[icode].operand[0].predicate (test, wider_mode))
3589 x = prepare_operand (icode, x, 1, mode, wider_mode, unsignedp);
3590 y = prepare_operand (icode, y, 2, mode, wider_mode, unsignedp);
3591 emit_jump_insn (GEN_FCN (icode) (test, x, y, label));
/* Strategy 2: a test-against-zero insn plus a conditional branch.  */
3596 /* Handle some compares against zero. */
3597 icode = (int) tst_optab->handlers[(int) wider_mode].insn_code;
3598 if (y == CONST0_RTX (mode) && icode != CODE_FOR_nothing)
3600 x = prepare_operand (icode, x, 0, mode, wider_mode, unsignedp);
3601 emit_insn (GEN_FCN (icode) (x));
3603 emit_jump_insn (bcc_gen_fctn[(int) comparison] (label));
/* Strategy 3: a plain compare insn plus a conditional branch.  */
3607 /* Handle compares for which there is a directly suitable insn. */
3609 icode = (int) cmp_optab->handlers[(int) wider_mode].insn_code;
3610 if (icode != CODE_FOR_nothing)
3612 x = prepare_operand (icode, x, 0, mode, wider_mode, unsignedp);
3613 y = prepare_operand (icode, y, 1, mode, wider_mode, unsignedp);
3614 emit_insn (GEN_FCN (icode) (x, y));
3616 emit_jump_insn (bcc_gen_fctn[(int) comparison] (label));
/* Widening is only attempted for integer and (complex) float classes.  */
3620 if (class != MODE_INT && class != MODE_FLOAT
3621 && class != MODE_COMPLEX_FLOAT)
3624 wider_mode = GET_MODE_WIDER_MODE (wider_mode);
3626 while (wider_mode != VOIDmode);
3631 /* Generate code to compare X with Y so that the condition codes are
3632 set and to jump to LABEL if the condition is true. If X is a
3633 constant and Y is not a constant, then the comparison is swapped to
3634 ensure that the comparison RTL has the canonical form.
3636 UNSIGNEDP nonzero says that X and Y are unsigned; this matters if they
3637 need to be widened by emit_cmp_insn. UNSIGNEDP is also used to select
3638 the proper branch condition code.
3640 If X and Y have mode BLKmode, then SIZE specifies the size of both X and Y.
3642 MODE is the mode of the inputs (in case they are const_int).
3644 COMPARISON is the rtl operator to compare with (EQ, NE, GT, etc.). It will
3645 be passed unchanged to emit_cmp_insn, then potentially converted into an
3646 unsigned variant based on UNSIGNEDP to select a proper jump instruction. */
3649 emit_cmp_and_jump_insns (rtx x, rtx y, enum rtx_code comparison, rtx size,
3650 enum machine_mode mode, int unsignedp, rtx label)
3652 rtx op0 = x, op1 = y;
3654 /* Swap operands and condition to ensure canonical RTL. */
3655 if (swap_commutative_operands_p (x, y))
3657 /* If we're not emitting a branch, this means some caller
/* Swapping the operands requires the mirrored condition code.  */
3662 comparison = swap_condition (comparison);
3666 /* If OP0 is still a constant, then both X and Y must be constants.
3667 Force X into a register to create canonical RTL. */
3668 if (CONSTANT_P (op0))
3669 op0 = force_reg (mode, op0);
/* NOTE(review): the `if (unsignedp)` guard for this conversion is
   elided from the dump.  */
3673 comparison = unsigned_condition (comparison);
3675 prepare_cmp_insn (&op0, &op1, &comparison, size, &mode, &unsignedp,
3677 emit_cmp_and_jump_insn_1 (op0, op1, mode, comparison, unsignedp, label);
3680 /* Like emit_cmp_and_jump_insns, but generate only the comparison. */
3683 emit_cmp_insn (rtx x, rtx y, enum rtx_code comparison, rtx size,
3684 enum machine_mode mode, int unsignedp)
/* A null LABEL tells emit_cmp_and_jump_insns to emit no branch.  */
3686 emit_cmp_and_jump_insns (x, y, comparison, size, mode, unsignedp, 0);
3689 /* Emit a library call comparison between floating point X and Y.
3690 COMPARISON is the rtl operator to compare with (EQ, NE, GT, etc.). */
/* Searches wider FP modes for a libfunc matching COMPARISON, its swapped
   form (operands exchanged) or its reversed form; emits the libcall with
   a REG_EQUAL note describing its semantics, and rewrites *PCOMPARISON
   into the compare-against-constant the caller must perform on the
   libcall's word_mode result.  */
3693 prepare_float_lib_cmp (rtx *px, rtx *py, enum rtx_code *pcomparison,
3694 enum machine_mode *pmode, int *punsignedp)
3696 enum rtx_code comparison = *pcomparison;
3697 enum rtx_code swapped = swap_condition (comparison);
3698 enum rtx_code reversed = reverse_condition_maybe_unordered (comparison);
3701 enum machine_mode orig_mode = GET_MODE (x);
3702 enum machine_mode mode;
3703 rtx value, target, insns, equiv;
3705 bool reversed_p = false;
3707 for (mode = orig_mode; mode != VOIDmode; mode = GET_MODE_WIDER_MODE (mode))
3709 if ((libfunc = code_to_optab[comparison]->handlers[mode].libfunc))
3712 if ((libfunc = code_to_optab[swapped]->handlers[mode].libfunc))
/* Exchange the operands to match the swapped-condition libfunc.  */
3715 tmp = x; x = y; y = tmp;
3716 comparison = swapped;
3720 if ((libfunc = code_to_optab[reversed]->handlers[mode].libfunc)
3721 && FLOAT_LIB_COMPARE_RETURNS_BOOL (mode, reversed))
3723 comparison = reversed;
3729 gcc_assert (mode != VOIDmode);
3731 if (mode != orig_mode)
3733 x = convert_to_mode (mode, x, 0);
3734 y = convert_to_mode (mode, y, 0);
3737 /* Attach a REG_EQUAL note describing the semantics of the libcall to
3738 the RTL. The allows the RTL optimizers to delete the libcall if the
3739 condition can be determined at compile-time. */
3740 if (comparison == UNORDERED)
/* UNORDERED(x,y) == (x != x) || (y != y), i.e. either is a NaN.  */
3742 rtx temp = simplify_gen_relational (NE, word_mode, mode, x, x);
3743 equiv = simplify_gen_relational (NE, word_mode, mode, y, y);
3744 equiv = simplify_gen_ternary (IF_THEN_ELSE, word_mode, word_mode,
3745 temp, const_true_rtx, equiv);
3749 equiv = simplify_gen_relational (comparison, word_mode, mode, x, y);
3750 if (! FLOAT_LIB_COMPARE_RETURNS_BOOL (mode, comparison))
/* Pick constants matching the libfunc's documented return convention
   for this condition.  NOTE(review): the switch/case labels selecting
   among these arms are elided from the dump.  */
3752 rtx true_rtx, false_rtx;
3757 true_rtx = const0_rtx;
3758 false_rtx = const_true_rtx;
3762 true_rtx = const_true_rtx;
3763 false_rtx = const0_rtx;
3767 true_rtx = const1_rtx;
3768 false_rtx = const0_rtx;
3772 true_rtx = const0_rtx;
3773 false_rtx = constm1_rtx;
3777 true_rtx = constm1_rtx;
3778 false_rtx = const0_rtx;
3782 true_rtx = const0_rtx;
3783 false_rtx = const1_rtx;
3789 equiv = simplify_gen_ternary (IF_THEN_ELSE, word_mode, word_mode,
3790 equiv, true_rtx, false_rtx);
3795 value = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
3796 word_mode, 2, x, mode, y, mode);
3797 insns = get_insns ();
3800 target = gen_reg_rtx (word_mode);
3801 emit_libcall_block (insns, target, value, equiv);
3803 if (comparison == UNORDERED
3804 || FLOAT_LIB_COMPARE_RETURNS_BOOL (mode, comparison))
3805 comparison = reversed_p ? EQ : NE;
3810 *pcomparison = comparison;
3814 /* Generate code to indirectly jump to a location given in the rtx LOC. */
3817 emit_indirect_jump (rtx loc)
/* Copy LOC into a Pmode register if the indirect_jump pattern's operand
   predicate rejects it.  */
3819 if (!insn_data[(int) CODE_FOR_indirect_jump].operand[0].predicate
3821 loc = copy_to_mode_reg (Pmode, loc);
3823 emit_jump_insn (gen_indirect_jump (loc));
3827 #ifdef HAVE_conditional_move
3829 /* Emit a conditional move instruction if the machine supports one for that
3830 condition and machine mode.
3832 OP0 and OP1 are the operands that should be compared using CODE. CMODE is
3833 the mode to use should they be constants. If it is VOIDmode, they cannot
3836 OP2 should be stored in TARGET if the comparison is true, otherwise OP3
3837 should be stored there. MODE is the mode to use should they be constants.
3838 If it is VOIDmode, they cannot both be constants.
3840 The result is either TARGET (perhaps modified) or NULL_RTX if the operation
3841 is not supported. */
3844 emit_conditional_move (rtx target, enum rtx_code code, rtx op0, rtx op1,
3845 enum machine_mode cmode, rtx op2, rtx op3,
3846 enum machine_mode mode, int unsignedp)
3848 rtx tem, subtarget, comparison, insn;
3849 enum insn_code icode;
3850 enum rtx_code reversed;
3852 /* If one operand is constant, make it the second one. Only do this
3853 if the other operand is not constant as well. */
3855 if (swap_commutative_operands_p (op0, op1))
3860 code = swap_condition (code);
3863 /* get_condition will prefer to generate LT and GT even if the old
3864 comparison was against zero, so undo that canonicalization here since
3865 comparisons against zero are cheaper. */
3866 if (code == LT && op1 == const1_rtx)
3867 code = LE, op1 = const0_rtx;
3868 else if (code == GT && op1 == constm1_rtx)
3869 code = GE, op1 = const0_rtx;
3871 if (cmode == VOIDmode)
3872 cmode = GET_MODE (op0);
/* If swapping OP2/OP3 gives canonical RTL and the condition can be
   reversed, do so.  NOTE(review): the body of this swap is elided.  */
3874 if (swap_commutative_operands_p (op2, op3)
3875 && ((reversed = reversed_comparison_code_parts (code, op0, op1, NULL))
3884 if (mode == VOIDmode)
3885 mode = GET_MODE (op2);
3887 icode = movcc_gen_code[mode];
3889 if (icode == CODE_FOR_nothing)
3893 target = gen_reg_rtx (mode);
3897 /* If the insn doesn't accept these operands, put them in pseudos. */
3899 if (!insn_data[icode].operand[0].predicate
3900 (subtarget, insn_data[icode].operand[0].mode))
3901 subtarget = gen_reg_rtx (insn_data[icode].operand[0].mode);
3903 if (!insn_data[icode].operand[2].predicate
3904 (op2, insn_data[icode].operand[2].mode))
3905 op2 = copy_to_mode_reg (insn_data[icode].operand[2].mode, op2);
3907 if (!insn_data[icode].operand[3].predicate
3908 (op3, insn_data[icode].operand[3].mode))
3909 op3 = copy_to_mode_reg (insn_data[icode].operand[3].mode, op3);
3911 /* Everything should now be in the suitable form, so emit the compare insn
3912 and then the conditional move. */
3915 = compare_from_rtx (op0, op1, code, unsignedp, cmode, NULL_RTX);
3917 /* ??? Watch for const0_rtx (nop) and const_true_rtx (unconditional)? */
3918 /* We can get const0_rtx or const_true_rtx in some circumstances. Just
3919 return NULL and let the caller figure out how best to deal with this
3921 if (GET_CODE (comparison) != code)
3924 insn = GEN_FCN (icode) (subtarget, comparison, op2, op3);
3926 /* If that failed, then give up. */
/* Copy the result back into the caller's TARGET if we used a pseudo.  */
3932 if (subtarget != target)
3933 convert_move (target, subtarget, 0);
3938 /* Return nonzero if a conditional move of mode MODE is supported.
3940 This function is for combine so it can tell whether an insn that looks
3941 like a conditional move is actually supported by the hardware. If we
3942 guess wrong we lose a bit on optimization, but that's it. */
3943 /* ??? sparc64 supports conditionally moving integers values based on fp
3944 comparisons, and vice versa. How do we handle them? */
3947 can_conditionally_move_p (enum machine_mode mode)
/* True iff a movcc pattern exists for MODE.  */
3949 if (movcc_gen_code[mode] != CODE_FOR_nothing)
3955 #endif /* HAVE_conditional_move */
3957 /* Emit a conditional addition instruction if the machine supports one for that
3958 condition and machine mode.
3960 OP0 and OP1 are the operands that should be compared using CODE. CMODE is
3961 the mode to use should they be constants. If it is VOIDmode, they cannot
3964 OP2 should be stored in TARGET if the comparison is true, otherwise OP2+OP3
3965 should be stored there. MODE is the mode to use should they be constants.
3966 If it is VOIDmode, they cannot both be constants.
3968 The result is either TARGET (perhaps modified) or NULL_RTX if the operation
3969 is not supported. */
/* Structurally parallel to emit_conditional_move, but driven by the
   addcc optab instead of movcc_gen_code.  */
3972 emit_conditional_add (rtx target, enum rtx_code code, rtx op0, rtx op1,
3973 enum machine_mode cmode, rtx op2, rtx op3,
3974 enum machine_mode mode, int unsignedp)
3976 rtx tem, subtarget, comparison, insn;
3977 enum insn_code icode;
3978 enum rtx_code reversed;
3980 /* If one operand is constant, make it the second one. Only do this
3981 if the other operand is not constant as well. */
3983 if (swap_commutative_operands_p (op0, op1))
3988 code = swap_condition (code);
3991 /* get_condition will prefer to generate LT and GT even if the old
3992 comparison was against zero, so undo that canonicalization here since
3993 comparisons against zero are cheaper. */
3994 if (code == LT && op1 == const1_rtx)
3995 code = LE, op1 = const0_rtx;
3996 else if (code == GT && op1 == constm1_rtx)
3997 code = GE, op1 = const0_rtx;
3999 if (cmode == VOIDmode)
4000 cmode = GET_MODE (op0);
4002 if (swap_commutative_operands_p (op2, op3)
4003 && ((reversed = reversed_comparison_code_parts (code, op0, op1, NULL))
4012 if (mode == VOIDmode)
4013 mode = GET_MODE (op2);
4015 icode = addcc_optab->handlers[(int) mode].insn_code;
4017 if (icode == CODE_FOR_nothing)
4021 target = gen_reg_rtx (mode);
4023 /* If the insn doesn't accept these operands, put them in pseudos. */
4025 if (!insn_data[icode].operand[0].predicate
4026 (target, insn_data[icode].operand[0].mode))
4027 subtarget = gen_reg_rtx (insn_data[icode].operand[0].mode);
4031 if (!insn_data[icode].operand[2].predicate
4032 (op2, insn_data[icode].operand[2].mode))
4033 op2 = copy_to_mode_reg (insn_data[icode].operand[2].mode, op2);
4035 if (!insn_data[icode].operand[3].predicate
4036 (op3, insn_data[icode].operand[3].mode))
4037 op3 = copy_to_mode_reg (insn_data[icode].operand[3].mode, op3);
4039 /* Everything should now be in the suitable form, so emit the compare insn
4040 and then the conditional move. */
4043 = compare_from_rtx (op0, op1, code, unsignedp, cmode, NULL_RTX);
4045 /* ??? Watch for const0_rtx (nop) and const_true_rtx (unconditional)? */
4046 /* We can get const0_rtx or const_true_rtx in some circumstances. Just
4047 return NULL and let the caller figure out how best to deal with this
4049 if (GET_CODE (comparison) != code)
4052 insn = GEN_FCN (icode) (subtarget, comparison, op2, op3);
4054 /* If that failed, then give up. */
/* Copy the result back into the caller's TARGET if we used a pseudo.  */
4060 if (subtarget != target)
4061 convert_move (target, subtarget, 0);
4066 /* These functions attempt to generate an insn body, rather than
4067 emitting the insn, but if the gen function already emits them, we
4068 make no attempt to turn them back into naked patterns. */
4070 /* Generate and return an insn body to add Y to X. */
4073 gen_add2_insn (rtx x, rtx y)
4075 int icode = (int) add_optab->handlers[(int) GET_MODE (x)].insn_code;
/* The caller is required to have checked have_add2_insn first: all
   three operand predicates are asserted, not handled.  */
4077 gcc_assert (insn_data[icode].operand[0].predicate
4078 (x, insn_data[icode].operand[0].mode));
4079 gcc_assert (insn_data[icode].operand[1].predicate
4080 (x, insn_data[icode].operand[1].mode));
4081 gcc_assert (insn_data[icode].operand[2].predicate
4082 (y, insn_data[icode].operand[2].mode));
/* Two-operand form: X receives X + Y.  */
4084 return GEN_FCN (icode) (x, x, y);
4087 /* Generate and return an insn body to add r1 and c,
4088 storing the result in r0. */
4090 gen_add3_insn (rtx r0, rtx r1, rtx c)
4092 int icode = (int) add_optab->handlers[(int) GET_MODE (r0)].insn_code;
/* Unlike gen_add2_insn, predicate failures here return NULL rather
   than assert (the NULL return is elided from this dump).  */
4094 if (icode == CODE_FOR_nothing
4095 || !(insn_data[icode].operand[0].predicate
4096 (r0, insn_data[icode].operand[0].mode))
4097 || !(insn_data[icode].operand[1].predicate
4098 (r1, insn_data[icode].operand[1].mode))
4099 || !(insn_data[icode].operand[2].predicate
4100 (c, insn_data[icode].operand[2].mode)))
4103 return GEN_FCN (icode) (r0, r1, c);
/* Return nonzero if the target has an add pattern whose predicates
   accept X (as both input and output) and Y.  */
4107 have_add2_insn (rtx x, rtx y)
4111 gcc_assert (GET_MODE (x) != VOIDmode);
4113 icode = (int) add_optab->handlers[(int) GET_MODE (x)].insn_code;
4115 if (icode == CODE_FOR_nothing)
4118 if (!(insn_data[icode].operand[0].predicate
4119 (x, insn_data[icode].operand[0].mode))
4120 || !(insn_data[icode].operand[1].predicate
4121 (x, insn_data[icode].operand[1].mode))
4122 || !(insn_data[icode].operand[2].predicate
4123 (y, insn_data[icode].operand[2].mode)))
4129 /* Generate and return an insn body to subtract Y from X. */
4132 gen_sub2_insn (rtx x, rtx y)
4134 int icode = (int) sub_optab->handlers[(int) GET_MODE (x)].insn_code;
/* Mirror of gen_add2_insn using sub_optab; operand validity is
   asserted, so callers must check have_sub2_insn first.  */
4136 gcc_assert (insn_data[icode].operand[0].predicate
4137 (x, insn_data[icode].operand[0].mode));
4138 gcc_assert (insn_data[icode].operand[1].predicate
4139 (x, insn_data[icode].operand[1].mode));
4140 gcc_assert (insn_data[icode].operand[2].predicate
4141 (y, insn_data[icode].operand[2].mode));
/* Two-operand form: X receives X - Y.  */
4143 return GEN_FCN (icode) (x, x, y);
4146 /* Generate and return an insn body to subtract r1 and c,
4147 storing the result in r0. */
4149 gen_sub3_insn (rtx r0, rtx r1, rtx c)
4151 int icode = (int) sub_optab->handlers[(int) GET_MODE (r0)].insn_code;
/* Mirror of gen_add3_insn using sub_optab; predicate failure returns
   NULL (the NULL return is elided from this dump).  */
4153 if (icode == CODE_FOR_nothing
4154 || !(insn_data[icode].operand[0].predicate
4155 (r0, insn_data[icode].operand[0].mode))
4156 || !(insn_data[icode].operand[1].predicate
4157 (r1, insn_data[icode].operand[1].mode))
4158 || !(insn_data[icode].operand[2].predicate
4159 (c, insn_data[icode].operand[2].mode)))
4162 return GEN_FCN (icode) (r0, r1, c);
/* Return nonzero if the target has a subtract pattern whose predicates
   accept X (as both input and output) and Y.  */
4166 have_sub2_insn (rtx x, rtx y)
4170 gcc_assert (GET_MODE (x) != VOIDmode);
4172 icode = (int) sub_optab->handlers[(int) GET_MODE (x)].insn_code;
4174 if (icode == CODE_FOR_nothing)
4177 if (!(insn_data[icode].operand[0].predicate
4178 (x, insn_data[icode].operand[0].mode))
4179 || !(insn_data[icode].operand[1].predicate
4180 (x, insn_data[icode].operand[1].mode))
4181 || !(insn_data[icode].operand[2].predicate
4182 (y, insn_data[icode].operand[2].mode)))
4188 /* Generate the body of an instruction to copy Y into X.
4189 It may be a list of insns, if one insn isn't enough. */
4192 gen_move_insn (rtx x, rtx y)
/* Emit into a temporary sequence (start/get/end sequence calls are
   elided from this dump) and return the collected insns.  */
4197 emit_move_insn_1 (x, y);
4203 /* Return the insn code used to extend FROM_MODE to TO_MODE.
4204 UNSIGNEDP specifies zero-extension instead of sign-extension. If
4205 no such operation exists, CODE_FOR_nothing will be returned. */
4208 can_extend_p (enum machine_mode to_mode, enum machine_mode from_mode,
/* Targets with a ptr_extend pattern get it for the pointer-extension
   case (the guarding condition is elided from this dump).  */
4212 #ifdef HAVE_ptr_extend
4214 return CODE_FOR_ptr_extend;
4217 tab = unsignedp ? zext_optab : sext_optab;
4218 return tab->handlers[to_mode][from_mode].insn_code;
4221 /* Generate the body of an insn to extend Y (with mode MFROM)
4222 into X (with mode MTO). Do zero-extension if UNSIGNEDP is nonzero. */
4225 gen_extend_insn (rtx x, rtx y, enum machine_mode mto,
4226 enum machine_mode mfrom, int unsignedp)
/* No fallback: the caller must ensure can_extend_p succeeded.  */
4228 enum insn_code icode = can_extend_p (mto, mfrom, unsignedp);
4229 return GEN_FCN (icode) (x, y);
4232 /* can_fix_p and can_float_p say whether the target machine
4233 can directly convert a given fixed point type to
4234 a given floating point type, or vice versa.
4235 The returned value is the CODE_FOR_... value to use,
4236 or CODE_FOR_nothing if these modes cannot be directly converted.
4238 *TRUNCP_PTR is set to 1 if it is necessary to output
4239 an explicit FTRUNC insn before the fix insn; otherwise 0. */
4241 static enum insn_code
4242 can_fix_p (enum machine_mode fixmode, enum machine_mode fltmode,
4243 int unsignedp, int *truncp_ptr)
4246 enum insn_code icode;
/* First choice: a fix-with-truncation pattern needing no FTRUNC.  */
4248 tab = unsignedp ? ufixtrunc_optab : sfixtrunc_optab;
4249 icode = tab->handlers[fixmode][fltmode].insn_code;
4250 if (icode != CODE_FOR_nothing)
4256 /* FIXME: This requires a port to define both FIX and FTRUNC pattern
4257 for this to work. We need to rework the fix* and ftrunc* patterns
4258 and documentation. */
/* Second choice: a plain fix pattern, provided an ftrunc pattern also
   exists to round toward zero first (sets *TRUNCP_PTR, elided here).  */
4259 tab = unsignedp ? ufix_optab : sfix_optab;
4260 icode = tab->handlers[fixmode][fltmode].insn_code;
4261 if (icode != CODE_FOR_nothing
4262 && ftrunc_optab->handlers[fltmode].insn_code != CODE_FOR_nothing)
4269 return CODE_FOR_nothing;
/* Return the insn code for converting integer mode FIXMODE to float
   mode FLTMODE (UNSIGNEDP selects the unsigned-source variant), or
   CODE_FOR_nothing.  */
4272 static enum insn_code
4273 can_float_p (enum machine_mode fltmode, enum machine_mode fixmode,
4278 tab = unsignedp ? ufloat_optab : sfloat_optab;
4279 return tab->handlers[fltmode][fixmode].insn_code;
4282 /* Generate code to convert FROM to floating point
4283 and store in TO. FROM must be fixed point and not VOIDmode.
4284 UNSIGNEDP nonzero means regard FROM as unsigned.
4285 Normally this is done by correcting the final value
4286 if it is negative. */
/* NOTE(review): this listing is elided -- braces, returns, `goto done`
   style control transfers and some declarations are missing from the
   dump; the three strategies below (direct insn, signed-convert-plus-
   fixup for unsigned, libcall) are annotated from what is visible.  */
4289 expand_float (rtx to, rtx from, int unsignedp)
4291 enum insn_code icode;
4293 enum machine_mode fmode, imode;
4295 /* Crash now, because we won't be able to decide which mode to use. */
4296 gcc_assert (GET_MODE (from) != VOIDmode);
4298 /* Look for an insn to do the conversion. Do it in the specified
4299 modes if possible; otherwise convert either input, output or both to
4300 wider mode. If the integer mode is wider than the mode of FROM,
4301 we can do the conversion signed even if the input is unsigned. */
4303 for (fmode = GET_MODE (to); fmode != VOIDmode;
4304 fmode = GET_MODE_WIDER_MODE (fmode))
4305 for (imode = GET_MODE (from); imode != VOIDmode;
4306 imode = GET_MODE_WIDER_MODE (imode))
4308 int doing_unsigned = unsignedp;
/* Skip float modes whose significand cannot hold all of FROM's bits
   (unless FMODE is the requested mode itself).  */
4310 if (fmode != GET_MODE (to)
4311 && significand_size (fmode) < GET_MODE_BITSIZE (GET_MODE (from)))
4314 icode = can_float_p (fmode, imode, unsignedp);
4315 if (icode == CODE_FOR_nothing && imode != GET_MODE (from) && unsignedp)
4316 icode = can_float_p (fmode, imode, 0), doing_unsigned = 0;
4318 if (icode != CODE_FOR_nothing)
4320 if (imode != GET_MODE (from))
4321 from = convert_to_mode (imode, from, unsignedp);
4323 if (fmode != GET_MODE (to))
4324 target = gen_reg_rtx (fmode);
4326 emit_unop_insn (icode, target, from,
4327 doing_unsigned ? UNSIGNED_FLOAT : FLOAT);
4330 convert_move (to, target, 0);
4335 /* Unsigned integer, and no way to convert directly.
4336 Convert as signed, then conditionally adjust the result. */
4339 rtx label = gen_label_rtx ();
4341 REAL_VALUE_TYPE offset;
4343 /* Look for a usable floating mode FMODE wider than the source and at
4344 least as wide as the target. Using FMODE will avoid rounding woes
4345 with unsigned values greater than the signed maximum value. */
4347 for (fmode = GET_MODE (to); fmode != VOIDmode;
4348 fmode = GET_MODE_WIDER_MODE (fmode))
4349 if (GET_MODE_BITSIZE (GET_MODE (from)) < GET_MODE_BITSIZE (fmode)
4350 && can_float_p (fmode, GET_MODE (from), 0) != CODE_FOR_nothing)
4353 if (fmode == VOIDmode)
4355 /* There is no such mode. Pretend the target is wide enough. */
4356 fmode = GET_MODE (to);
4358 /* Avoid double-rounding when TO is narrower than FROM. */
4359 if ((significand_size (fmode) + 1)
4360 < GET_MODE_BITSIZE (GET_MODE (from)))
4363 rtx neglabel = gen_label_rtx ();
4365 /* Don't use TARGET if it isn't a register, is a hard register,
4366 or is the wrong mode. */
4368 || REGNO (target) < FIRST_PSEUDO_REGISTER
4369 || GET_MODE (target) != fmode)
4370 target = gen_reg_rtx (fmode);
4372 imode = GET_MODE (from);
4373 do_pending_stack_adjust ();
4375 /* Test whether the sign bit is set. */
4376 emit_cmp_and_jump_insns (from, const0_rtx, LT, NULL_RTX, imode,
4379 /* The sign bit is not set. Convert as signed. */
4380 expand_float (target, from, 0);
4381 emit_jump_insn (gen_jump (label));
4384 /* The sign bit is set.
4385 Convert to a usable (positive signed) value by shifting right
4386 one bit, while remembering if a nonzero bit was shifted
4387 out; i.e., compute (from & 1) | (from >> 1). */
4389 emit_label (neglabel);
4390 temp = expand_binop (imode, and_optab, from, const1_rtx,
4391 NULL_RTX, 1, OPTAB_LIB_WIDEN);
4392 temp1 = expand_shift (RSHIFT_EXPR, imode, from, integer_one_node,
4394 temp = expand_binop (imode, ior_optab, temp, temp1, temp, 1,
4396 expand_float (target, temp, 0);
4398 /* Multiply by 2 to undo the shift above. */
4399 temp = expand_binop (fmode, add_optab, target, target,
4400 target, 0, OPTAB_LIB_WIDEN);
4402 emit_move_insn (target, temp);
4404 do_pending_stack_adjust ();
4410 /* If we are about to do some arithmetic to correct for an
4411 unsigned operand, do it in a pseudo-register. */
4413 if (GET_MODE (to) != fmode
4414 || !REG_P (to) || REGNO (to) < FIRST_PSEUDO_REGISTER)
4415 target = gen_reg_rtx (fmode);
4417 /* Convert as signed integer to floating. */
4418 expand_float (target, from, 0);
4420 /* If FROM is negative (and therefore TO is negative),
4421 correct its value by 2**bitwidth. */
4423 do_pending_stack_adjust ();
4424 emit_cmp_and_jump_insns (from, const0_rtx, GE, NULL_RTX, GET_MODE (from),
/* Add 2**bitwidth(FROM) to the signed result to recover the unsigned
   value.  */
4428 real_2expN (&offset, GET_MODE_BITSIZE (GET_MODE (from)));
4429 temp = expand_binop (fmode, add_optab, target,
4430 CONST_DOUBLE_FROM_REAL_VALUE (offset, fmode),
4431 target, 0, OPTAB_LIB_WIDEN);
4433 emit_move_insn (target, temp);
4435 do_pending_stack_adjust ();
4440 /* No hardware instruction available; call a library routine. */
4445 convert_optab tab = unsignedp ? ufloat_optab : sfloat_optab;
/* Library routines only exist for SImode and wider sources.  */
4447 if (GET_MODE_SIZE (GET_MODE (from)) < GET_MODE_SIZE (SImode))
4448 from = convert_to_mode (SImode, from, unsignedp);
4450 libfunc = tab->handlers[GET_MODE (to)][GET_MODE (from)].libfunc;
4451 gcc_assert (libfunc);
4455 value = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
4456 GET_MODE (to), 1, from,
4458 insns = get_insns ();
4461 emit_libcall_block (insns, target, value,
4462 gen_rtx_FLOAT (GET_MODE (to), from));
4467 /* Copy result to requested destination
4468 if we have been computing in a temp location. */
4472 if (GET_MODE (target) == GET_MODE (to))
4473 emit_move_insn (to, target);
4475 convert_move (to, target, 0);
4479 /* Generate code to convert FROM to fixed point and store in TO. FROM
4480 must be floating point. */
/* NOTE(review): this listing is elided -- braces, returns/gotos and
   some declarations (target, must_trunc, libfunc, value, insns) are
   missing from the dump.  Three strategies are visible: a direct fix
   insn, a subtract-2^(N-1)/convert/xor fixup for unsigned targets,
   and a library call.  */
4483 expand_fix (rtx to, rtx from, int unsignedp)
4485 enum insn_code icode;
4487 enum machine_mode fmode, imode;
4490 /* We first try to find a pair of modes, one real and one integer, at
4491 least as wide as FROM and TO, respectively, in which we can open-code
4492 this conversion. If the integer mode is wider than the mode of TO,
4493 we can do the conversion either signed or unsigned. */
4495 for (fmode = GET_MODE (from); fmode != VOIDmode;
4496 fmode = GET_MODE_WIDER_MODE (fmode))
4497 for (imode = GET_MODE (to); imode != VOIDmode;
4498 imode = GET_MODE_WIDER_MODE (imode))
4500 int doing_unsigned = unsignedp;
4502 icode = can_fix_p (imode, fmode, unsignedp, &must_trunc);
4503 if (icode == CODE_FOR_nothing && imode != GET_MODE (to) && unsignedp)
4504 icode = can_fix_p (imode, fmode, 0, &must_trunc), doing_unsigned = 0;
4506 if (icode != CODE_FOR_nothing)
4508 if (fmode != GET_MODE (from))
4509 from = convert_to_mode (fmode, from, 0);
/* If can_fix_p said an explicit truncation is required, round toward
   zero with ftrunc first.  */
4513 rtx temp = gen_reg_rtx (GET_MODE (from));
4514 from = expand_unop (GET_MODE (from), ftrunc_optab, from,
4518 if (imode != GET_MODE (to))
4519 target = gen_reg_rtx (imode);
4521 emit_unop_insn (icode, target, from,
4522 doing_unsigned ? UNSIGNED_FIX : FIX);
4524 convert_move (to, target, unsignedp);
4529 /* For an unsigned conversion, there is one more way to do it.
4530 If we have a signed conversion, we generate code that compares
4531 the real value to the largest representable positive number. If if
4532 is smaller, the conversion is done normally. Otherwise, subtract
4533 one plus the highest signed number, convert, and add it back.
4535 We only need to check all real modes, since we know we didn't find
4536 anything with a wider integer mode.
4538 This code used to extend FP value into mode wider than the destination.
4539 This is not needed. Consider, for instance conversion from SFmode
4542 The hot path trought the code is dealing with inputs smaller than 2^63
4543 and doing just the conversion, so there is no bits to lose.
4545 In the other path we know the value is positive in the range 2^63..2^64-1
4546 inclusive. (as for other imput overflow happens and result is undefined)
4547 So we know that the most important bit set in mantissa corresponds to
4548 2^63. The subtraction of 2^63 should not generate any rounding as it
4549 simply clears out that bit. The rest is trivial. */
4551 if (unsignedp && GET_MODE_BITSIZE (GET_MODE (to)) <= HOST_BITS_PER_WIDE_INT)
4552 for (fmode = GET_MODE (from); fmode != VOIDmode;
4553 fmode = GET_MODE_WIDER_MODE (fmode))
4554 if (CODE_FOR_nothing != can_fix_p (GET_MODE (to), fmode, 0,
4558 REAL_VALUE_TYPE offset;
4559 rtx limit, lab1, lab2, insn;
/* LIMIT = 2^(N-1) where N is the bit width of TO.  */
4561 bitsize = GET_MODE_BITSIZE (GET_MODE (to));
4562 real_2expN (&offset, bitsize - 1);
4563 limit = CONST_DOUBLE_FROM_REAL_VALUE (offset, fmode);
4564 lab1 = gen_label_rtx ();
4565 lab2 = gen_label_rtx ();
4567 if (fmode != GET_MODE (from))
4568 from = convert_to_mode (fmode, from, 0);
4570 /* See if we need to do the subtraction. */
4571 do_pending_stack_adjust ();
4572 emit_cmp_and_jump_insns (from, limit, GE, NULL_RTX, GET_MODE (from),
4575 /* If not, do the signed "fix" and branch around fixup code. */
4576 expand_fix (to, from, 0);
4577 emit_jump_insn (gen_jump (lab2));
4580 /* Otherwise, subtract 2**(N-1), convert to signed number,
4581 then add 2**(N-1). Do the addition using XOR since this
4582 will often generate better code. */
4584 target = expand_binop (GET_MODE (from), sub_optab, from, limit,
4585 NULL_RTX, 0, OPTAB_LIB_WIDEN);
4586 expand_fix (to, target, 0);
4587 target = expand_binop (GET_MODE (to), xor_optab, to,
4589 ((HOST_WIDE_INT) 1 << (bitsize - 1),
4591 to, 1, OPTAB_LIB_WIDEN);
4594 emit_move_insn (to, target);
/* Attach an UNSIGNED_FIX REG_EQUAL note so later passes can see what
   this sequence computes, when TO's mode has a move pattern.  */
4598 if (mov_optab->handlers[(int) GET_MODE (to)].insn_code
4599 != CODE_FOR_nothing)
4601 /* Make a place for a REG_NOTE and add it. */
4602 insn = emit_move_insn (to, to);
4603 set_unique_reg_note (insn,
4605 gen_rtx_fmt_e (UNSIGNED_FIX,
4613 /* We can't do it with an insn, so use a library call. But first ensure
4614 that the mode of TO is at least as wide as SImode, since those are the
4615 only library calls we know about. */
4617 if (GET_MODE_SIZE (GET_MODE (to)) < GET_MODE_SIZE (SImode))
4619 target = gen_reg_rtx (SImode);
/* Recurse with an SImode target, then narrow afterwards.  */
4621 expand_fix (target, from, unsignedp);
4629 convert_optab tab = unsignedp ? ufix_optab : sfix_optab;
4630 libfunc = tab->handlers[GET_MODE (to)][GET_MODE (from)].libfunc;
4631 gcc_assert (libfunc);
4635 value = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
4636 GET_MODE (to), 1, from,
4638 insns = get_insns ();
4641 emit_libcall_block (insns, target, value,
4642 gen_rtx_fmt_e (unsignedp ? UNSIGNED_FIX : FIX,
4643 GET_MODE (to), from));
4648 if (GET_MODE (to) == GET_MODE (target))
4649 emit_move_insn (to, target);
4651 convert_move (to, target, 0);
4655 /* Report whether we have an instruction to perform the operation
4656 specified by CODE on operands of mode MODE. */
4658 have_insn_for (enum rtx_code code, enum machine_mode mode)
/* True iff CODE maps to an optab and that optab has an insn for MODE.  */
4660 return (code_to_optab[(int) code] != 0
4661 && (code_to_optab[(int) code]->handlers[(int) mode].insn_code
4662 != CODE_FOR_nothing));
4665 /* Create a blank optab. */
4670 optab op = ggc_alloc (sizeof (struct optab));
4671 for (i = 0; i < NUM_MACHINE_MODES; i++)
4673 op->handlers[i].insn_code = CODE_FOR_nothing;
4674 op->handlers[i].libfunc = 0;
4680 static convert_optab
4681 new_convert_optab (void)
4684 convert_optab op = ggc_alloc (sizeof (struct convert_optab));
4685 for (i = 0; i < NUM_MACHINE_MODES; i++)
4686 for (j = 0; j < NUM_MACHINE_MODES; j++)
4688 op->handlers[i][j].insn_code = CODE_FOR_nothing;
4689 op->handlers[i][j].libfunc = 0;
4694 /* Same, but fill in its code as CODE, and write it into the
4695 code_to_optab table. */
4697 init_optab (enum rtx_code code)
4699 optab op = new_optab ();
4701 code_to_optab[(int) code] = op;
4705 /* Same, but fill in its code as CODE, and do _not_ write it into
4706 the code_to_optab table. */
4708 init_optabv (enum rtx_code code)
4710 optab op = new_optab ();
4715 /* Conversion optabs never go in the code_to_optab table. */
4716 static inline convert_optab
4717 init_convert_optab (enum rtx_code code)
4719 convert_optab op = new_convert_optab ();
4724 /* Initialize the libfunc fields of an entire group of entries in some
4725 optab. Each entry is set equal to a string consisting of a leading
4726 pair of underscores followed by a generic operation name followed by
4727 a mode name (downshifted to lowercase) followed by a single character
4728 representing the number of operands for the given operation (which is
4729 usually one of the characters '2', '3', or '4').
4731 OPTABLE is the table in which libfunc fields are to be initialized.
4732 FIRST_MODE is the first machine mode index in the given optab to
4734 LAST_MODE is the last machine mode index in the given optab to
4736 OPNAME is the generic (string) name of the operation.
4737 SUFFIX is the character which specifies the number of operands for
4738 the given generic operation.
4742 init_libfuncs (optab optable, int first_mode, int last_mode,
4743 const char *opname, int suffix)
4746 unsigned opname_len = strlen (opname);
4748 for (mode = first_mode; (int) mode <= (int) last_mode;
4749 mode = (enum machine_mode) ((int) mode + 1))
4751 const char *mname = GET_MODE_NAME (mode);
4752 unsigned mname_len = strlen (mname);
4753 char *libfunc_name = alloca (2 + opname_len + mname_len + 1 + 1);
4760 for (q = opname; *q; )
4762 for (q = mname; *q; q++)
4763 *p++ = TOLOWER (*q);
4767 optable->handlers[(int) mode].libfunc
4768 = init_one_libfunc (ggc_alloc_string (libfunc_name, p - libfunc_name));
4772 /* Initialize the libfunc fields of an entire group of entries in some
4773 optab which correspond to all integer mode operations. The parameters
4774 have the same meaning as similarly named ones for the `init_libfuncs'
4775 routine. (See above). */
4778 init_integral_libfuncs (optab optable, const char *opname, int suffix)
4780 int maxsize = 2*BITS_PER_WORD;
4781 if (maxsize < LONG_LONG_TYPE_SIZE)
4782 maxsize = LONG_LONG_TYPE_SIZE;
4783 init_libfuncs (optable, word_mode,
4784 mode_for_size (maxsize, MODE_INT, 0),
4788 /* Initialize the libfunc fields of an entire group of entries in some
4789 optab which correspond to all real mode operations. The parameters
4790 have the same meaning as similarly named ones for the `init_libfuncs'
4791 routine. (See above). */
4794 init_floating_libfuncs (optab optable, const char *opname, int suffix)
4796 init_libfuncs (optable, MIN_MODE_FLOAT, MAX_MODE_FLOAT, opname, suffix);
4799 /* Initialize the libfunc fields of an entire group of entries of an
4800 inter-mode-class conversion optab. The string formation rules are
4801 similar to the ones for init_libfuncs, above, but instead of having
4802 a mode name and an operand count these functions have two mode names
4803 and no operand count. */
4805 init_interclass_conv_libfuncs (convert_optab tab, const char *opname,
4806 enum mode_class from_class,
4807 enum mode_class to_class)
4809 enum machine_mode first_from_mode = GET_CLASS_NARROWEST_MODE (from_class);
4810 enum machine_mode first_to_mode = GET_CLASS_NARROWEST_MODE (to_class);
4811 size_t opname_len = strlen (opname);
4812 size_t max_mname_len = 0;
4814 enum machine_mode fmode, tmode;
4815 const char *fname, *tname;
4817 char *libfunc_name, *suffix;
4820 for (fmode = first_from_mode;
4822 fmode = GET_MODE_WIDER_MODE (fmode))
4823 max_mname_len = MAX (max_mname_len, strlen (GET_MODE_NAME (fmode)));
4825 for (tmode = first_to_mode;
4827 tmode = GET_MODE_WIDER_MODE (tmode))
4828 max_mname_len = MAX (max_mname_len, strlen (GET_MODE_NAME (tmode)));
4830 libfunc_name = alloca (2 + opname_len + 2*max_mname_len + 1 + 1);
4831 libfunc_name[0] = '_';
4832 libfunc_name[1] = '_';
4833 memcpy (&libfunc_name[2], opname, opname_len);
4834 suffix = libfunc_name + opname_len + 2;
4836 for (fmode = first_from_mode; fmode != VOIDmode;
4837 fmode = GET_MODE_WIDER_MODE (fmode))
4838 for (tmode = first_to_mode; tmode != VOIDmode;
4839 tmode = GET_MODE_WIDER_MODE (tmode))
4841 fname = GET_MODE_NAME (fmode);
4842 tname = GET_MODE_NAME (tmode);
4845 for (q = fname; *q; p++, q++)
4847 for (q = tname; *q; p++, q++)
4852 tab->handlers[tmode][fmode].libfunc
4853 = init_one_libfunc (ggc_alloc_string (libfunc_name,
4858 /* Initialize the libfunc fields of an entire group of entries of an
4859 intra-mode-class conversion optab. The string formation rules are
4860 similar to the ones for init_libfunc, above. WIDENING says whether
4861 the optab goes from narrow to wide modes or vice versa. These functions
4862 have two mode names _and_ an operand count. */
4864 init_intraclass_conv_libfuncs (convert_optab tab, const char *opname,
4865 enum mode_class class, bool widening)
4867 enum machine_mode first_mode = GET_CLASS_NARROWEST_MODE (class);
4868 size_t opname_len = strlen (opname);
4869 size_t max_mname_len = 0;
4871 enum machine_mode nmode, wmode;
4872 const char *nname, *wname;
4874 char *libfunc_name, *suffix;
4877 for (nmode = first_mode; nmode != VOIDmode;
4878 nmode = GET_MODE_WIDER_MODE (nmode))
4879 max_mname_len = MAX (max_mname_len, strlen (GET_MODE_NAME (nmode)));
4881 libfunc_name = alloca (2 + opname_len + 2*max_mname_len + 1 + 1);
4882 libfunc_name[0] = '_';
4883 libfunc_name[1] = '_';
4884 memcpy (&libfunc_name[2], opname, opname_len);
4885 suffix = libfunc_name + opname_len + 2;
4887 for (nmode = first_mode; nmode != VOIDmode;
4888 nmode = GET_MODE_WIDER_MODE (nmode))
4889 for (wmode = GET_MODE_WIDER_MODE (nmode); wmode != VOIDmode;
4890 wmode = GET_MODE_WIDER_MODE (wmode))
4892 nname = GET_MODE_NAME (nmode);
4893 wname = GET_MODE_NAME (wmode);
4896 for (q = widening ? nname : wname; *q; p++, q++)
4898 for (q = widening ? wname : nname; *q; p++, q++)
4904 tab->handlers[widening ? wmode : nmode]
4905 [widening ? nmode : wmode].libfunc
4906 = init_one_libfunc (ggc_alloc_string (libfunc_name,
4913 init_one_libfunc (const char *name)
4917 /* Create a FUNCTION_DECL that can be passed to
4918 targetm.encode_section_info. */
4919 /* ??? We don't have any type information except for this is
4920 a function. Pretend this is "int foo()". */
4921 tree decl = build_decl (FUNCTION_DECL, get_identifier (name),
4922 build_function_type (integer_type_node, NULL_TREE));
4923 DECL_ARTIFICIAL (decl) = 1;
4924 DECL_EXTERNAL (decl) = 1;
4925 TREE_PUBLIC (decl) = 1;
4927 symbol = XEXP (DECL_RTL (decl), 0);
4929 /* Zap the nonsensical SYMBOL_REF_DECL for this. What we're left with
4930 are the flags assigned by targetm.encode_section_info. */
4931 SYMBOL_REF_DECL (symbol) = 0;
4936 /* Call this to reset the function entry for one optab (OPTABLE) in mode
4937 MODE to NAME, which should be either 0 or a string constant. */
4939 set_optab_libfunc (optab optable, enum machine_mode mode, const char *name)
4942 optable->handlers[mode].libfunc = init_one_libfunc (name);
4944 optable->handlers[mode].libfunc = 0;
4947 /* Call this to reset the function entry for one conversion optab
4948 (OPTABLE) from mode FMODE to mode TMODE to NAME, which should be
4949 either 0 or a string constant. */
4951 set_conv_libfunc (convert_optab optable, enum machine_mode tmode,
4952 enum machine_mode fmode, const char *name)
4955 optable->handlers[tmode][fmode].libfunc = init_one_libfunc (name);
4957 optable->handlers[tmode][fmode].libfunc = 0;
4960 /* Call this once to initialize the contents of the optabs
4961 appropriately for the current target machine. */
4968 /* Start by initializing all tables to contain CODE_FOR_nothing. */
4970 for (i = 0; i < NUM_RTX_CODE; i++)
4971 setcc_gen_code[i] = CODE_FOR_nothing;
4973 #ifdef HAVE_conditional_move
4974 for (i = 0; i < NUM_MACHINE_MODES; i++)
4975 movcc_gen_code[i] = CODE_FOR_nothing;
4978 for (i = 0; i < NUM_MACHINE_MODES; i++)
4980 vcond_gen_code[i] = CODE_FOR_nothing;
4981 vcondu_gen_code[i] = CODE_FOR_nothing;
4984 add_optab = init_optab (PLUS);
4985 addv_optab = init_optabv (PLUS);
4986 sub_optab = init_optab (MINUS);
4987 subv_optab = init_optabv (MINUS);
4988 smul_optab = init_optab (MULT);
4989 smulv_optab = init_optabv (MULT);
4990 smul_highpart_optab = init_optab (UNKNOWN);
4991 umul_highpart_optab = init_optab (UNKNOWN);
4992 smul_widen_optab = init_optab (UNKNOWN);
4993 umul_widen_optab = init_optab (UNKNOWN);
4994 sdiv_optab = init_optab (DIV);
4995 sdivv_optab = init_optabv (DIV);
4996 sdivmod_optab = init_optab (UNKNOWN);
4997 udiv_optab = init_optab (UDIV);
4998 udivmod_optab = init_optab (UNKNOWN);
4999 smod_optab = init_optab (MOD);
5000 umod_optab = init_optab (UMOD);
5001 fmod_optab = init_optab (UNKNOWN);
5002 drem_optab = init_optab (UNKNOWN);
5003 ftrunc_optab = init_optab (UNKNOWN);
5004 and_optab = init_optab (AND);
5005 ior_optab = init_optab (IOR);
5006 xor_optab = init_optab (XOR);
5007 ashl_optab = init_optab (ASHIFT);
5008 ashr_optab = init_optab (ASHIFTRT);
5009 lshr_optab = init_optab (LSHIFTRT);
5010 rotl_optab = init_optab (ROTATE);
5011 rotr_optab = init_optab (ROTATERT);
5012 smin_optab = init_optab (SMIN);
5013 smax_optab = init_optab (SMAX);
5014 umin_optab = init_optab (UMIN);
5015 umax_optab = init_optab (UMAX);
5016 pow_optab = init_optab (UNKNOWN);
5017 atan2_optab = init_optab (UNKNOWN);
5019 /* These three have codes assigned exclusively for the sake of
5021 mov_optab = init_optab (SET);
5022 movstrict_optab = init_optab (STRICT_LOW_PART);
5023 cmp_optab = init_optab (COMPARE);
5025 ucmp_optab = init_optab (UNKNOWN);
5026 tst_optab = init_optab (UNKNOWN);
5028 eq_optab = init_optab (EQ);
5029 ne_optab = init_optab (NE);
5030 gt_optab = init_optab (GT);
5031 ge_optab = init_optab (GE);
5032 lt_optab = init_optab (LT);
5033 le_optab = init_optab (LE);
5034 unord_optab = init_optab (UNORDERED);
5036 neg_optab = init_optab (NEG);
5037 negv_optab = init_optabv (NEG);
5038 abs_optab = init_optab (ABS);
5039 absv_optab = init_optabv (ABS);
5040 addcc_optab = init_optab (UNKNOWN);
5041 one_cmpl_optab = init_optab (NOT);
5042 ffs_optab = init_optab (FFS);
5043 clz_optab = init_optab (CLZ);
5044 ctz_optab = init_optab (CTZ);
5045 popcount_optab = init_optab (POPCOUNT);
5046 parity_optab = init_optab (PARITY);
5047 sqrt_optab = init_optab (SQRT);
5048 floor_optab = init_optab (UNKNOWN);
5049 lfloor_optab = init_optab (UNKNOWN);
5050 ceil_optab = init_optab (UNKNOWN);
5051 lceil_optab = init_optab (UNKNOWN);
5052 round_optab = init_optab (UNKNOWN);
5053 btrunc_optab = init_optab (UNKNOWN);
5054 nearbyint_optab = init_optab (UNKNOWN);
5055 rint_optab = init_optab (UNKNOWN);
5056 lrint_optab = init_optab (UNKNOWN);
5057 sincos_optab = init_optab (UNKNOWN);
5058 sin_optab = init_optab (UNKNOWN);
5059 asin_optab = init_optab (UNKNOWN);
5060 cos_optab = init_optab (UNKNOWN);
5061 acos_optab = init_optab (UNKNOWN);
5062 exp_optab = init_optab (UNKNOWN);
5063 exp10_optab = init_optab (UNKNOWN);
5064 exp2_optab = init_optab (UNKNOWN);
5065 expm1_optab = init_optab (UNKNOWN);
5066 ldexp_optab = init_optab (UNKNOWN);
5067 logb_optab = init_optab (UNKNOWN);
5068 ilogb_optab = init_optab (UNKNOWN);
5069 log_optab = init_optab (UNKNOWN);
5070 log10_optab = init_optab (UNKNOWN);
5071 log2_optab = init_optab (UNKNOWN);
5072 log1p_optab = init_optab (UNKNOWN);
5073 tan_optab = init_optab (UNKNOWN);
5074 atan_optab = init_optab (UNKNOWN);
5075 copysign_optab = init_optab (UNKNOWN);
5077 strlen_optab = init_optab (UNKNOWN);
5078 cbranch_optab = init_optab (UNKNOWN);
5079 cmov_optab = init_optab (UNKNOWN);
5080 cstore_optab = init_optab (UNKNOWN);
5081 push_optab = init_optab (UNKNOWN);
5083 reduc_smax_optab = init_optab (UNKNOWN);
5084 reduc_umax_optab = init_optab (UNKNOWN);
5085 reduc_smin_optab = init_optab (UNKNOWN);
5086 reduc_umin_optab = init_optab (UNKNOWN);
5087 reduc_splus_optab = init_optab (UNKNOWN);
5088 reduc_uplus_optab = init_optab (UNKNOWN);
5090 vec_extract_optab = init_optab (UNKNOWN);
5091 vec_set_optab = init_optab (UNKNOWN);
5092 vec_init_optab = init_optab (UNKNOWN);
5093 vec_shl_optab = init_optab (UNKNOWN);
5094 vec_shr_optab = init_optab (UNKNOWN);
5095 vec_realign_load_optab = init_optab (UNKNOWN);
5096 movmisalign_optab = init_optab (UNKNOWN);
5098 powi_optab = init_optab (UNKNOWN);
5101 sext_optab = init_convert_optab (SIGN_EXTEND);
5102 zext_optab = init_convert_optab (ZERO_EXTEND);
5103 trunc_optab = init_convert_optab (TRUNCATE);
5104 sfix_optab = init_convert_optab (FIX);
5105 ufix_optab = init_convert_optab (UNSIGNED_FIX);
5106 sfixtrunc_optab = init_convert_optab (UNKNOWN);
5107 ufixtrunc_optab = init_convert_optab (UNKNOWN);
5108 sfloat_optab = init_convert_optab (FLOAT);
5109 ufloat_optab = init_convert_optab (UNSIGNED_FLOAT);
5111 for (i = 0; i < NUM_MACHINE_MODES; i++)
5113 movmem_optab[i] = CODE_FOR_nothing;
5114 cmpstr_optab[i] = CODE_FOR_nothing;
5115 cmpstrn_optab[i] = CODE_FOR_nothing;
5116 cmpmem_optab[i] = CODE_FOR_nothing;
5117 setmem_optab[i] = CODE_FOR_nothing;
5119 sync_add_optab[i] = CODE_FOR_nothing;
5120 sync_sub_optab[i] = CODE_FOR_nothing;
5121 sync_ior_optab[i] = CODE_FOR_nothing;
5122 sync_and_optab[i] = CODE_FOR_nothing;
5123 sync_xor_optab[i] = CODE_FOR_nothing;
5124 sync_nand_optab[i] = CODE_FOR_nothing;
5125 sync_old_add_optab[i] = CODE_FOR_nothing;
5126 sync_old_sub_optab[i] = CODE_FOR_nothing;
5127 sync_old_ior_optab[i] = CODE_FOR_nothing;
5128 sync_old_and_optab[i] = CODE_FOR_nothing;
5129 sync_old_xor_optab[i] = CODE_FOR_nothing;
5130 sync_old_nand_optab[i] = CODE_FOR_nothing;
5131 sync_new_add_optab[i] = CODE_FOR_nothing;
5132 sync_new_sub_optab[i] = CODE_FOR_nothing;
5133 sync_new_ior_optab[i] = CODE_FOR_nothing;
5134 sync_new_and_optab[i] = CODE_FOR_nothing;
5135 sync_new_xor_optab[i] = CODE_FOR_nothing;
5136 sync_new_nand_optab[i] = CODE_FOR_nothing;
5137 sync_compare_and_swap[i] = CODE_FOR_nothing;
5138 sync_compare_and_swap_cc[i] = CODE_FOR_nothing;
5139 sync_lock_test_and_set[i] = CODE_FOR_nothing;
5140 sync_lock_release[i] = CODE_FOR_nothing;
5142 #ifdef HAVE_SECONDARY_RELOADS
5143 reload_in_optab[i] = reload_out_optab[i] = CODE_FOR_nothing;
5147 /* Fill in the optabs with the insns we support. */
5150 /* Initialize the optabs with the names of the library functions. */
5151 init_integral_libfuncs (add_optab, "add", '3');
5152 init_floating_libfuncs (add_optab, "add", '3');
5153 init_integral_libfuncs (addv_optab, "addv", '3');
5154 init_floating_libfuncs (addv_optab, "add", '3');
5155 init_integral_libfuncs (sub_optab, "sub", '3');
5156 init_floating_libfuncs (sub_optab, "sub", '3');
5157 init_integral_libfuncs (subv_optab, "subv", '3');
5158 init_floating_libfuncs (subv_optab, "sub", '3');
5159 init_integral_libfuncs (smul_optab, "mul", '3');
5160 init_floating_libfuncs (smul_optab, "mul", '3');
5161 init_integral_libfuncs (smulv_optab, "mulv", '3');
5162 init_floating_libfuncs (smulv_optab, "mul", '3');
5163 init_integral_libfuncs (sdiv_optab, "div", '3');
5164 init_floating_libfuncs (sdiv_optab, "div", '3');
5165 init_integral_libfuncs (sdivv_optab, "divv", '3');
5166 init_integral_libfuncs (udiv_optab, "udiv", '3');
5167 init_integral_libfuncs (sdivmod_optab, "divmod", '4');
5168 init_integral_libfuncs (udivmod_optab, "udivmod", '4');
5169 init_integral_libfuncs (smod_optab, "mod", '3');
5170 init_integral_libfuncs (umod_optab, "umod", '3');
5171 init_floating_libfuncs (ftrunc_optab, "ftrunc", '2');
5172 init_integral_libfuncs (and_optab, "and", '3');
5173 init_integral_libfuncs (ior_optab, "ior", '3');
5174 init_integral_libfuncs (xor_optab, "xor", '3');
5175 init_integral_libfuncs (ashl_optab, "ashl", '3');
5176 init_integral_libfuncs (ashr_optab, "ashr", '3');
5177 init_integral_libfuncs (lshr_optab, "lshr", '3');
5178 init_integral_libfuncs (smin_optab, "min", '3');
5179 init_floating_libfuncs (smin_optab, "min", '3');
5180 init_integral_libfuncs (smax_optab, "max", '3');
5181 init_floating_libfuncs (smax_optab, "max", '3');
5182 init_integral_libfuncs (umin_optab, "umin", '3');
5183 init_integral_libfuncs (umax_optab, "umax", '3');
5184 init_integral_libfuncs (neg_optab, "neg", '2');
5185 init_floating_libfuncs (neg_optab, "neg", '2');
5186 init_integral_libfuncs (negv_optab, "negv", '2');
5187 init_floating_libfuncs (negv_optab, "neg", '2');
5188 init_integral_libfuncs (one_cmpl_optab, "one_cmpl", '2');
5189 init_integral_libfuncs (ffs_optab, "ffs", '2');
5190 init_integral_libfuncs (clz_optab, "clz", '2');
5191 init_integral_libfuncs (ctz_optab, "ctz", '2');
5192 init_integral_libfuncs (popcount_optab, "popcount", '2');
5193 init_integral_libfuncs (parity_optab, "parity", '2');
5195 /* Comparison libcalls for integers MUST come in pairs,
5197 init_integral_libfuncs (cmp_optab, "cmp", '2');
5198 init_integral_libfuncs (ucmp_optab, "ucmp", '2');
5199 init_floating_libfuncs (cmp_optab, "cmp", '2');
5201 /* EQ etc are floating point only. */
5202 init_floating_libfuncs (eq_optab, "eq", '2');
5203 init_floating_libfuncs (ne_optab, "ne", '2');
5204 init_floating_libfuncs (gt_optab, "gt", '2');
5205 init_floating_libfuncs (ge_optab, "ge", '2');
5206 init_floating_libfuncs (lt_optab, "lt", '2');
5207 init_floating_libfuncs (le_optab, "le", '2');
5208 init_floating_libfuncs (unord_optab, "unord", '2');
5210 init_floating_libfuncs (powi_optab, "powi", '2');
5213 init_interclass_conv_libfuncs (sfloat_optab, "float",
5214 MODE_INT, MODE_FLOAT);
5215 init_interclass_conv_libfuncs (sfix_optab, "fix",
5216 MODE_FLOAT, MODE_INT);
5217 init_interclass_conv_libfuncs (ufix_optab, "fixuns",
5218 MODE_FLOAT, MODE_INT);
5220 /* sext_optab is also used for FLOAT_EXTEND. */
5221 init_intraclass_conv_libfuncs (sext_optab, "extend", MODE_FLOAT, true);
5222 init_intraclass_conv_libfuncs (trunc_optab, "trunc", MODE_FLOAT, false);
5224 /* Use cabs for double complex abs, since systems generally have cabs.
5225 Don't define any libcall for float complex, so that cabs will be used. */
5226 if (complex_double_type_node)
5227 abs_optab->handlers[TYPE_MODE (complex_double_type_node)].libfunc
5228 = init_one_libfunc ("cabs");
5230 /* The ffs function operates on `int'. */
5231 ffs_optab->handlers[(int) mode_for_size (INT_TYPE_SIZE, MODE_INT, 0)].libfunc
5232 = init_one_libfunc ("ffs");
5234 abort_libfunc = init_one_libfunc ("abort");
5235 memcpy_libfunc = init_one_libfunc ("memcpy");
5236 memmove_libfunc = init_one_libfunc ("memmove");
5237 memcmp_libfunc = init_one_libfunc ("memcmp");
5238 memset_libfunc = init_one_libfunc ("memset");
5239 setbits_libfunc = init_one_libfunc ("__setbits");
5241 #ifndef DONT_USE_BUILTIN_SETJMP
5242 setjmp_libfunc = init_one_libfunc ("__builtin_setjmp");
5243 longjmp_libfunc = init_one_libfunc ("__builtin_longjmp");
5245 setjmp_libfunc = init_one_libfunc ("setjmp");
5246 longjmp_libfunc = init_one_libfunc ("longjmp");
5248 unwind_sjlj_register_libfunc = init_one_libfunc ("_Unwind_SjLj_Register");
5249 unwind_sjlj_unregister_libfunc
5250 = init_one_libfunc ("_Unwind_SjLj_Unregister");
5252 /* For function entry/exit instrumentation. */
5253 profile_function_entry_libfunc
5254 = init_one_libfunc ("__cyg_profile_func_enter");
5255 profile_function_exit_libfunc
5256 = init_one_libfunc ("__cyg_profile_func_exit");
5258 gcov_flush_libfunc = init_one_libfunc ("__gcov_flush");
5260 if (HAVE_conditional_trap)
5261 trap_rtx = gen_rtx_fmt_ee (EQ, VOIDmode, NULL_RTX, NULL_RTX);
5263 /* Allow the target to add more libcalls or rename some, etc. */
5264 targetm.init_libfuncs ();
5269 /* Print information about the current contents of the optabs on
5273 debug_optab_libfuncs (void)
5279 /* Dump the arithmetic optabs. */
5280 for (i = 0; i != (int) OTI_MAX; i++)
5281 for (j = 0; j < NUM_MACHINE_MODES; ++j)
5284 struct optab_handlers *h;
5287 h = &o->handlers[j];
5290 gcc_assert (GET_CODE (h->libfunc) = SYMBOL_REF);
5291 fprintf (stderr, "%s\t%s:\t%s\n",
5292 GET_RTX_NAME (o->code),
5294 XSTR (h->libfunc, 0));
5298 /* Dump the conversion optabs. */
5299 for (i = 0; i < (int) COI_MAX; ++i)
5300 for (j = 0; j < NUM_MACHINE_MODES; ++j)
5301 for (k = 0; k < NUM_MACHINE_MODES; ++k)
5304 struct optab_handlers *h;
5306 o = &convert_optab_table[i];
5307 h = &o->handlers[j][k];
5310 gcc_assert (GET_CODE (h->libfunc) = SYMBOL_REF);
5311 fprintf (stderr, "%s\t%s\t%s:\t%s\n",
5312 GET_RTX_NAME (o->code),
5315 XSTR (h->libfunc, 0));
5323 /* Generate insns to trap with code TCODE if OP1 and OP2 satisfy condition
5324 CODE. Return 0 on failure. */
5327 gen_cond_trap (enum rtx_code code ATTRIBUTE_UNUSED, rtx op1,
5328 rtx op2 ATTRIBUTE_UNUSED, rtx tcode ATTRIBUTE_UNUSED)
5330 enum machine_mode mode = GET_MODE (op1);
5331 enum insn_code icode;
5334 if (!HAVE_conditional_trap)
5337 if (mode == VOIDmode)
5340 icode = cmp_optab->handlers[(int) mode].insn_code;
5341 if (icode == CODE_FOR_nothing)
5345 op1 = prepare_operand (icode, op1, 0, mode, mode, 0);
5346 op2 = prepare_operand (icode, op2, 1, mode, mode, 0);
5352 emit_insn (GEN_FCN (icode) (op1, op2));
5354 PUT_CODE (trap_rtx, code);
5355 gcc_assert (HAVE_conditional_trap);
5356 insn = gen_conditional_trap (trap_rtx, tcode);
5360 insn = get_insns ();
5367 /* Return rtx code for TCODE. Use UNSIGNEDP to select signed
5368 or unsigned operation code. */
5370 static enum rtx_code
5371 get_rtx_code (enum tree_code tcode, bool unsignedp)
5383 code = unsignedp ? LTU : LT;
5386 code = unsignedp ? LEU : LE;
5389 code = unsignedp ? GTU : GT;
5392 code = unsignedp ? GEU : GE;
5395 case UNORDERED_EXPR:
5426 /* Return comparison rtx for COND. Use UNSIGNEDP to select signed or
5427 unsigned operators. Do not generate compare instruction. */
5430 vector_compare_rtx (tree cond, bool unsignedp, enum insn_code icode)
5432 enum rtx_code rcode;
5434 rtx rtx_op0, rtx_op1;
5436 /* This is unlikely. While generating VEC_COND_EXPR, auto vectorizer
5437 ensures that condition is a relational operation. */
5438 gcc_assert (COMPARISON_CLASS_P (cond));
5440 rcode = get_rtx_code (TREE_CODE (cond), unsignedp);
5441 t_op0 = TREE_OPERAND (cond, 0);
5442 t_op1 = TREE_OPERAND (cond, 1);
5444 /* Expand operands. */
5445 rtx_op0 = expand_expr (t_op0, NULL_RTX, TYPE_MODE (TREE_TYPE (t_op0)), 1);
5446 rtx_op1 = expand_expr (t_op1, NULL_RTX, TYPE_MODE (TREE_TYPE (t_op1)), 1);
5448 if (!insn_data[icode].operand[4].predicate (rtx_op0, GET_MODE (rtx_op0))
5449 && GET_MODE (rtx_op0) != VOIDmode)
5450 rtx_op0 = force_reg (GET_MODE (rtx_op0), rtx_op0);
5452 if (!insn_data[icode].operand[5].predicate (rtx_op1, GET_MODE (rtx_op1))
5453 && GET_MODE (rtx_op1) != VOIDmode)
5454 rtx_op1 = force_reg (GET_MODE (rtx_op1), rtx_op1);
5456 return gen_rtx_fmt_ee (rcode, VOIDmode, rtx_op0, rtx_op1);
5459 /* Return insn code for VEC_COND_EXPR EXPR. */
5461 static inline enum insn_code
5462 get_vcond_icode (tree expr, enum machine_mode mode)
5464 enum insn_code icode = CODE_FOR_nothing;
5466 if (TYPE_UNSIGNED (TREE_TYPE (expr)))
5467 icode = vcondu_gen_code[mode];
5469 icode = vcond_gen_code[mode];
5473 /* Return TRUE iff, appropriate vector insns are available
5474 for vector cond expr expr in VMODE mode. */
5477 expand_vec_cond_expr_p (tree expr, enum machine_mode vmode)
5479 if (get_vcond_icode (expr, vmode) == CODE_FOR_nothing)
5484 /* Generate insns for VEC_COND_EXPR. */
5487 expand_vec_cond_expr (tree vec_cond_expr, rtx target)
5489 enum insn_code icode;
5490 rtx comparison, rtx_op1, rtx_op2, cc_op0, cc_op1;
5491 enum machine_mode mode = TYPE_MODE (TREE_TYPE (vec_cond_expr));
5492 bool unsignedp = TYPE_UNSIGNED (TREE_TYPE (vec_cond_expr));
5494 icode = get_vcond_icode (vec_cond_expr, mode);
5495 if (icode == CODE_FOR_nothing)
5498 if (!target || !insn_data[icode].operand[0].predicate (target, mode))
5499 target = gen_reg_rtx (mode);
5501 /* Get comparison rtx. First expand both cond expr operands. */
5502 comparison = vector_compare_rtx (TREE_OPERAND (vec_cond_expr, 0),
5504 cc_op0 = XEXP (comparison, 0);
5505 cc_op1 = XEXP (comparison, 1);
5506 /* Expand both operands and force them in reg, if required. */
5507 rtx_op1 = expand_expr (TREE_OPERAND (vec_cond_expr, 1),
5508 NULL_RTX, VOIDmode, 1);
5509 if (!insn_data[icode].operand[1].predicate (rtx_op1, mode)
5510 && mode != VOIDmode)
5511 rtx_op1 = force_reg (mode, rtx_op1);
5513 rtx_op2 = expand_expr (TREE_OPERAND (vec_cond_expr, 2),
5514 NULL_RTX, VOIDmode, 1);
5515 if (!insn_data[icode].operand[2].predicate (rtx_op2, mode)
5516 && mode != VOIDmode)
5517 rtx_op2 = force_reg (mode, rtx_op2);
5519 /* Emit instruction! */
5520 emit_insn (GEN_FCN (icode) (target, rtx_op1, rtx_op2,
5521 comparison, cc_op0, cc_op1));
5527 /* This is an internal subroutine of the other compare_and_swap expanders.
5528 MEM, OLD_VAL and NEW_VAL are as you'd expect for a compare-and-swap
5529 operation. TARGET is an optional place to store the value result of
5530 the operation. ICODE is the particular instruction to expand. Return
5531 the result of the operation. */
5534 expand_val_compare_and_swap_1 (rtx mem, rtx old_val, rtx new_val,
5535 rtx target, enum insn_code icode)
5537 enum machine_mode mode = GET_MODE (mem);
5540 if (!target || !insn_data[icode].operand[0].predicate (target, mode))
5541 target = gen_reg_rtx (mode);
5543 if (GET_MODE (old_val) != VOIDmode && GET_MODE (old_val) != mode)
5544 old_val = convert_modes (mode, GET_MODE (old_val), old_val, 1);
5545 if (!insn_data[icode].operand[2].predicate (old_val, mode))
5546 old_val = force_reg (mode, old_val);
5548 if (GET_MODE (new_val) != VOIDmode && GET_MODE (new_val) != mode)
5549 new_val = convert_modes (mode, GET_MODE (new_val), new_val, 1);
5550 if (!insn_data[icode].operand[3].predicate (new_val, mode))
5551 new_val = force_reg (mode, new_val);
5553 insn = GEN_FCN (icode) (target, mem, old_val, new_val);
5554 if (insn == NULL_RTX)
5561 /* Expand a compare-and-swap operation and return its value. */
5564 expand_val_compare_and_swap (rtx mem, rtx old_val, rtx new_val, rtx target)
5566 enum machine_mode mode = GET_MODE (mem);
5567 enum insn_code icode = sync_compare_and_swap[mode];
5569 if (icode == CODE_FOR_nothing)
5572 return expand_val_compare_and_swap_1 (mem, old_val, new_val, target, icode);
5575 /* Expand a compare-and-swap operation and store true into the result if
5576 the operation was successful and false otherwise. Return the result.
5577 Unlike other routines, TARGET is not optional. */
5580 expand_bool_compare_and_swap (rtx mem, rtx old_val, rtx new_val, rtx target)
5582 enum machine_mode mode = GET_MODE (mem);
5583 enum insn_code icode;
5584 rtx subtarget, label0, label1;
5586 /* If the target supports a compare-and-swap pattern that simultaneously
5587 sets some flag for success, then use it. Otherwise use the regular
5588 compare-and-swap and follow that immediately with a compare insn. */
5589 icode = sync_compare_and_swap_cc[mode];
5593 subtarget = expand_val_compare_and_swap_1 (mem, old_val, new_val,
5595 if (subtarget != NULL_RTX)
5599 case CODE_FOR_nothing:
5600 icode = sync_compare_and_swap[mode];
5601 if (icode == CODE_FOR_nothing)
5604 /* Ensure that if old_val == mem, that we're not comparing
5605 against an old value. */
5606 if (MEM_P (old_val))
5607 old_val = force_reg (mode, old_val);
5609 subtarget = expand_val_compare_and_swap_1 (mem, old_val, new_val,
5611 if (subtarget == NULL_RTX)
5614 emit_cmp_insn (subtarget, old_val, EQ, const0_rtx, mode, true);
5617 /* If the target has a sane STORE_FLAG_VALUE, then go ahead and use a
5618 setcc instruction from the beginning. We don't work too hard here,
5619 but it's nice to not be stupid about initial code gen either. */
5620 if (STORE_FLAG_VALUE == 1)
5622 icode = setcc_gen_code[EQ];
5623 if (icode != CODE_FOR_nothing)
5625 enum machine_mode cmode = insn_data[icode].operand[0].mode;
5629 if (!insn_data[icode].operand[0].predicate (target, cmode))
5630 subtarget = gen_reg_rtx (cmode);
5632 insn = GEN_FCN (icode) (subtarget);
5636 if (GET_MODE (target) != GET_MODE (subtarget))
5638 convert_move (target, subtarget, 1);
5646 /* Without an appropriate setcc instruction, use a set of branches to
5647 get 1 and 0 stored into target. Presumably if the target has a
5648 STORE_FLAG_VALUE that isn't 1, then this will get cleaned up by ifcvt. */
5650 label0 = gen_label_rtx ();
5651 label1 = gen_label_rtx ();
5653 emit_jump_insn (bcc_gen_fctn[EQ] (label0));
5654 emit_move_insn (target, const0_rtx);
5655 emit_jump_insn (gen_jump (label1));
5657 emit_label (label0);
5658 emit_move_insn (target, const1_rtx);
5659 emit_label (label1);
5664 /* This is a helper function for the other atomic operations. This function
5665 emits a loop that contains SEQ that iterates until a compare-and-swap
5666 operation at the end succeeds. MEM is the memory to be modified. SEQ is
5667 a set of instructions that takes a value from OLD_REG as an input and
5668 produces a value in NEW_REG as an output. Before SEQ, OLD_REG will be
5669 set to the current contents of MEM. After SEQ, a compare-and-swap will
5670 attempt to update MEM with NEW_REG. The function returns true when the
5671 loop was generated successfully. */
5674 expand_compare_and_swap_loop (rtx mem, rtx old_reg, rtx new_reg, rtx seq)
5676 enum machine_mode mode = GET_MODE (mem);
5677 enum insn_code icode;
5678 rtx label, cmp_reg, subtarget;
5680 /* The loop we want to generate looks like
5686 cmp_reg = compare-and-swap(mem, old_reg, new_reg)
5687 if (cmp_reg != old_reg)
5690 Note that we only do the plain load from memory once. Subsequent
5691 iterations use the value loaded by the compare-and-swap pattern. */
5693 label = gen_label_rtx ();
5694 cmp_reg = gen_reg_rtx (mode);
5696 emit_move_insn (cmp_reg, mem);
5698 emit_move_insn (old_reg, cmp_reg);
5702 /* If the target supports a compare-and-swap pattern that simultaneously
5703 sets some flag for success, then use it. Otherwise use the regular
5704 compare-and-swap and follow that immediately with a compare insn. */
5705 icode = sync_compare_and_swap_cc[mode];
5709 subtarget = expand_val_compare_and_swap_1 (mem, old_reg, new_reg,
5711 if (subtarget != NULL_RTX)
5713 gcc_assert (subtarget == cmp_reg);
5718 case CODE_FOR_nothing:
5719 icode = sync_compare_and_swap[mode];
5720 if (icode == CODE_FOR_nothing)
5723 subtarget = expand_val_compare_and_swap_1 (mem, old_reg, new_reg,
5725 if (subtarget == NULL_RTX)
5727 if (subtarget != cmp_reg)
5728 emit_move_insn (cmp_reg, subtarget);
5730 emit_cmp_insn (cmp_reg, old_reg, EQ, const0_rtx, mode, true);
5733 /* ??? Mark this jump predicted not taken? */
5734 emit_jump_insn (bcc_gen_fctn[NE] (label));
5739 /* This function generates the atomic operation MEM CODE= VAL. In this
5740 case, we do not care about any resulting value. Returns NULL if we
5741 cannot generate the operation. */
/* NOTE(review): this listing embeds the original file's line numbers and
   is missing many intermediate lines (the `rtx' return type, braces, the
   switch/case labels selecting the optab, and the return statements).
   Only comments are added here; every original token is untouched.  */
5744 expand_sync_operation (rtx mem, rtx val, enum rtx_code code)
/* MODE is taken from the memory operand; all arithmetic below is done
   in this mode.  */
5746 enum machine_mode mode = GET_MODE (mem);
5747 enum insn_code icode;
5750 /* Look to see if the target supports the operation directly. */
/* Presumably a switch on CODE picks one of the per-mode sync optab
   tables (PLUS, IOR, XOR, AND, NAND, MINUS) -- the case labels are
   elided from this listing; TODO confirm against the full source.  */
5754 icode = sync_add_optab[mode];
5757 icode = sync_ior_optab[mode];
5760 icode = sync_xor_optab[mode];
5763 icode = sync_and_optab[mode];
5766 icode = sync_nand_optab[mode];
/* MINUS fallback: if the target lacks an atomic subtract, try atomic
   add of the negated value instead (visible below: NEG then add).  */
5770 icode = sync_sub_optab[mode];
5771 if (icode == CODE_FOR_nothing)
5773 icode = sync_add_optab[mode];
5774 if (icode != CODE_FOR_nothing)
5776 val = expand_simple_unop (mode, NEG, val, NULL_RTX, 1);
5786 /* Generate the direct operation, if present. */
5787 if (icode != CODE_FOR_nothing)
/* Convert VAL to the memory mode if needed, then force it to satisfy
   the insn's operand predicate before emitting the pattern.  */
5789 if (GET_MODE (val) != VOIDmode && GET_MODE (val) != mode)
5790 val = convert_modes (mode, GET_MODE (val), val, 1);
5791 if (!insn_data[icode].operand[1].predicate (val, mode))
5792 val = force_reg (mode, val);
5794 insn = GEN_FCN (icode) (mem, val);
5802 /* Failing that, generate a compare-and-swap loop in which we perform the
5803 operation with normal arithmetic instructions. */
5804 if (sync_compare_and_swap[mode] != CODE_FOR_nothing)
5806 rtx t0 = gen_reg_rtx (mode), t1;
/* The NOT here is applied before the binary op (visible order below);
   presumably this is the NAND path -- TODO confirm, the surrounding
   conditional is elided from this listing.  */
5813 t1 = expand_simple_unop (mode, NOT, t1, NULL_RTX, true);
5816 t1 = expand_simple_binop (mode, code, t1, val, NULL_RTX,
5817 true, OPTAB_LIB_WIDEN);
/* The computed-new-value insns are collected into a sequence and handed
   to the compare-and-swap loop emitter.  */
5819 insn = get_insns ();
5822 if (t1 != NULL && expand_compare_and_swap_loop (mem, t0, t1, insn))
5829 /* This function generates the atomic operation MEM CODE= VAL. In this
5830 case, we do care about the resulting value: if AFTER is true then
5831 return the value MEM holds after the operation, if AFTER is false
5832 then return the value MEM holds before the operation. TARGET is an
5833 optional place for the result value to be stored. */
/* NOTE(review): decimated listing -- the return type line, braces,
   switch/case labels, and return statements are elided.  Comments only
   are added; original tokens untouched.  */
5836 expand_sync_fetch_operation (rtx mem, rtx val, enum rtx_code code,
5837 bool after, rtx target)
5839 enum machine_mode mode = GET_MODE (mem);
5840 enum insn_code old_code, new_code, icode;
5844 /* Look to see if the target supports the operation directly. */
/* For each rtx code two candidate patterns exist: one returning the
   old memory value and one returning the new value.  The case labels
   selecting a pair are elided from this listing.  */
5848 old_code = sync_old_add_optab[mode];
5849 new_code = sync_new_add_optab[mode];
5852 old_code = sync_old_ior_optab[mode];
5853 new_code = sync_new_ior_optab[mode];
5856 old_code = sync_old_xor_optab[mode];
5857 new_code = sync_new_xor_optab[mode];
5860 old_code = sync_old_and_optab[mode];
5861 new_code = sync_new_and_optab[mode];
5864 old_code = sync_old_nand_optab[mode];
5865 new_code = sync_new_nand_optab[mode];
/* MINUS fallback: when neither fetch-subtract form exists, try the
   fetch-add forms with VAL negated (NEG emitted below).  */
5869 old_code = sync_old_sub_optab[mode];
5870 new_code = sync_new_sub_optab[mode];
5871 if (old_code == CODE_FOR_nothing && new_code == CODE_FOR_nothing)
5873 old_code = sync_old_add_optab[mode];
5874 new_code = sync_new_add_optab[mode];
5875 if (old_code != CODE_FOR_nothing || new_code != CODE_FOR_nothing)
5877 val = expand_simple_unop (mode, NEG, val, NULL_RTX, 1);
5887 /* If the target does supports the proper new/old operation, great. But
5888 if we only support the opposite old/new operation, check to see if we
5889 can compensate. In the case in which the old value is supported, then
5890 we can always perform the operation again with normal arithmetic. In
5891 the case in which the new value is supported, then we can only handle
5892 this in the case the operation is reversible. */
/* The assignments choosing ICODE from OLD_CODE/NEW_CODE (driven by
   AFTER) are elided here; only the fallback tests remain visible.  */
5897 if (icode == CODE_FOR_nothing)
5900 if (icode != CODE_FOR_nothing)
/* Reversibility restriction: only PLUS, MINUS and XOR can be undone
   to recover the other (old/new) value.  */
5907 if (icode == CODE_FOR_nothing
5908 && (code == PLUS || code == MINUS || code == XOR))
5911 if (icode != CODE_FOR_nothing)
5916 /* If we found something supported, great. */
5917 if (icode != CODE_FOR_nothing)
/* Ensure TARGET and VAL satisfy the insn's operand predicates, with
   VAL converted to the memory mode first.  */
5919 if (!target || !insn_data[icode].operand[0].predicate (target, mode))
5920 target = gen_reg_rtx (mode);
5922 if (GET_MODE (val) != VOIDmode && GET_MODE (val) != mode)
5923 val = convert_modes (mode, GET_MODE (val), val, 1);
5924 if (!insn_data[icode].operand[2].predicate (val, mode))
5925 val = force_reg (mode, val);
5927 insn = GEN_FCN (icode) (target, mem, val);
5932 /* If we need to compensate for using an operation with the
5933 wrong return value, do so now. */
/* Compensation arithmetic: the surrounding conditionals are elided;
   visibly a MINUS branch exists, and a NOT is applied before the
   binary op (presumably the NAND case -- TODO confirm).  */
5940 else if (code == MINUS)
5945 target = expand_simple_unop (mode, NOT, target, NULL_RTX, true);
5946 target = expand_simple_binop (mode, code, target, val, NULL_RTX,
5947 true, OPTAB_LIB_WIDEN);
5954 /* Failing that, generate a compare-and-swap loop in which we perform the
5955 operation with normal arithmetic instructions. */
5956 if (sync_compare_and_swap[mode] != CODE_FOR_nothing)
5958 rtx t0 = gen_reg_rtx (mode), t1;
/* TARGET must be a register for the loop; allocate one otherwise.  */
5960 if (!target || !register_operand (target, mode))
5961 target = gen_reg_rtx (mode);
/* t0 holds the pre-operation value, t1 the post-operation value; the
   AFTER flag decides which move into TARGET survives (the conditional
   is elided from this listing).  */
5966 emit_move_insn (target, t0);
5970 t1 = expand_simple_unop (mode, NOT, t1, NULL_RTX, true);
5973 t1 = expand_simple_binop (mode, code, t1, val, NULL_RTX,
5974 true, OPTAB_LIB_WIDEN);
5976 emit_move_insn (target, t1);
5978 insn = get_insns ();
5981 if (t1 != NULL && expand_compare_and_swap_loop (mem, t0, t1, insn))
5988 /* This function expands a test-and-set operation. Ideally we atomically
5989 store VAL in MEM and return the previous value in MEM. Some targets
5990 may not support this operation and only support VAL with the constant 1;
5991 in this case while the return value will be 0/1, but the exact value
5992 stored in MEM is target defined. TARGET is an option place to stick
5993 the return value. */
/* NOTE(review): decimated listing -- return type, braces and return
   statements are elided.  Comments only are added below.  */
5996 expand_sync_lock_test_and_set (rtx mem, rtx val, rtx target)
5998 enum machine_mode mode = GET_MODE (mem);
5999 enum insn_code icode;
6002 /* If the target supports the test-and-set directly, great. */
6003 icode = sync_lock_test_and_set[mode];
6004 if (icode != CODE_FOR_nothing)
/* Legalize TARGET and VAL against the insn's operand predicates, with
   VAL converted to the memory mode first.  */
6006 if (!target || !insn_data[icode].operand[0].predicate (target, mode))
6007 target = gen_reg_rtx (mode);
6009 if (GET_MODE (val) != VOIDmode && GET_MODE (val) != mode)
6010 val = convert_modes (mode, GET_MODE (val), val, 1);
6011 if (!insn_data[icode].operand[2].predicate (val, mode))
6012 val = force_reg (mode, val);
6014 insn = GEN_FCN (icode) (target, mem, val);
6022 /* Otherwise, use a compare-and-swap loop for the exchange. */
6023 if (sync_compare_and_swap[mode] != CODE_FOR_nothing)
/* The CAS loop needs TARGET in a register; the loop body itself emits
   no extra arithmetic (NULL_RTX sequence), making this a pure swap.  */
6025 if (!target || !register_operand (target, mode))
6026 target = gen_reg_rtx (mode);
6027 if (GET_MODE (val) != VOIDmode && GET_MODE (val) != mode)
6028 val = convert_modes (mode, GET_MODE (val), val, 1);
6029 if (expand_compare_and_swap_loop (mem, target, val, NULL_RTX))
6036 #include "gt-optabs.h"