1 /* Expand the basic unary and binary arithmetic operations, for GNU compiler.
2 Copyright (C) 1987, 1988, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
3 1999, 2000, 2001, 2002, 2003, 2004, 2005 Free Software Foundation, Inc.
5 This file is part of GCC.
7 GCC is free software; you can redistribute it and/or modify it under
8 the terms of the GNU General Public License as published by the Free
9 Software Foundation; either version 2, or (at your option) any later
12 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
13 WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING. If not, write to the Free
19 Software Foundation, 59 Temple Place - Suite 330, Boston, MA
25 #include "coretypes.h"
29 /* Include insn-config.h before expr.h so that HAVE_conditional_move
30 is properly defined. */
31 #include "insn-config.h"
45 #include "basic-block.h"
48 /* Each optab contains info on how this target machine
49 can perform a particular operation
50 for all sizes and kinds of operands.
52 The operation to be performed is often specified
53 by passing one of these optabs as an argument.
55 See expr.h for documentation of these optabs. */
57 optab optab_table[OTI_MAX];
59 rtx libfunc_table[LTI_MAX];
61 /* Tables of patterns for converting one mode to another. */
62 convert_optab convert_optab_table[CTI_MAX];
64 /* Contains the optab used for each rtx code. */
65 optab code_to_optab[NUM_RTX_CODE + 1];
67 /* Indexed by the rtx-code for a conditional (eg. EQ, LT,...)
68 gives the gen_function to make a branch to test that condition. */
70 rtxfun bcc_gen_fctn[NUM_RTX_CODE];
72 /* Indexed by the rtx-code for a conditional (eg. EQ, LT,...)
73 gives the insn code to make a store-condition insn
74 to test that condition. */
76 enum insn_code setcc_gen_code[NUM_RTX_CODE];
78 #ifdef HAVE_conditional_move
79 /* Indexed by the machine mode, gives the insn code to make a conditional
80 move insn. This is not indexed by the rtx-code like bcc_gen_fctn and
81 setcc_gen_code to cut down on the number of named patterns. Consider a day
82 when a lot more rtx codes are conditional (eg: for the ARM). */
84 enum insn_code movcc_gen_code[NUM_MACHINE_MODES];
87 /* Indexed by the machine mode, gives the insn code for vector conditional
90 enum insn_code vcond_gen_code[NUM_MACHINE_MODES];
91 enum insn_code vcondu_gen_code[NUM_MACHINE_MODES];
93 /* The insn generating function can not take an rtx_code argument.
94 TRAP_RTX is used as an rtx argument. Its code is replaced with
95 the code to be used in the trap insn and all other fields are ignored. */
96 static GTY(()) rtx trap_rtx;
98 static int add_equal_note (rtx, rtx, enum rtx_code, rtx, rtx);
99 static rtx widen_operand (rtx, enum machine_mode, enum machine_mode, int,
101 static void prepare_cmp_insn (rtx *, rtx *, enum rtx_code *, rtx,
102 enum machine_mode *, int *,
103 enum can_compare_purpose);
104 static enum insn_code can_fix_p (enum machine_mode, enum machine_mode, int,
106 static enum insn_code can_float_p (enum machine_mode, enum machine_mode, int);
107 static optab new_optab (void);
108 static convert_optab new_convert_optab (void);
109 static inline optab init_optab (enum rtx_code);
110 static inline optab init_optabv (enum rtx_code);
111 static inline convert_optab init_convert_optab (enum rtx_code);
112 static void init_libfuncs (optab, int, int, const char *, int);
113 static void init_integral_libfuncs (optab, const char *, int);
114 static void init_floating_libfuncs (optab, const char *, int);
115 static void init_interclass_conv_libfuncs (convert_optab, const char *,
116 enum mode_class, enum mode_class);
117 static void init_intraclass_conv_libfuncs (convert_optab, const char *,
118 enum mode_class, bool);
119 static void emit_cmp_and_jump_insn_1 (rtx, rtx, enum machine_mode,
120 enum rtx_code, int, rtx);
121 static void prepare_float_lib_cmp (rtx *, rtx *, enum rtx_code *,
122 enum machine_mode *, int *);
123 static rtx widen_clz (enum machine_mode, rtx, rtx);
124 static rtx expand_parity (enum machine_mode, rtx, rtx);
125 static enum rtx_code get_rtx_code (enum tree_code, bool);
126 static rtx vector_compare_rtx (tree, bool, enum insn_code);
128 #ifndef HAVE_conditional_trap
129 #define HAVE_conditional_trap 0
132 /* Add a REG_EQUAL note to the last insn in INSNS. TARGET is being set to
133 the result of operation CODE applied to OP0 (and OP1 if it is a binary
136 If the last insn does not set TARGET, don't do anything, but return 1.
138 If a previous insn sets TARGET and TARGET is one of OP0 or OP1,
139 don't add the REG_EQUAL note but return 0. Our caller can then try
140 again, ensuring that TARGET is not one of the operands. */
143 add_equal_note (rtx insns, rtx target, enum rtx_code code, rtx op0, rtx op1)
145 rtx last_insn, insn, set;
148 gcc_assert (insns && INSN_P (insns) && NEXT_INSN (insns));
150 if (GET_RTX_CLASS (code) != RTX_COMM_ARITH
151 && GET_RTX_CLASS (code) != RTX_BIN_ARITH
152 && GET_RTX_CLASS (code) != RTX_COMM_COMPARE
153 && GET_RTX_CLASS (code) != RTX_COMPARE
154 && GET_RTX_CLASS (code) != RTX_UNARY)
157 if (GET_CODE (target) == ZERO_EXTRACT)
160 for (last_insn = insns;
161 NEXT_INSN (last_insn) != NULL_RTX;
162 last_insn = NEXT_INSN (last_insn))
165 set = single_set (last_insn);
169 if (! rtx_equal_p (SET_DEST (set), target)
170 /* For a STRICT_LOW_PART, the REG_NOTE applies to what is inside it. */
171 && (GET_CODE (SET_DEST (set)) != STRICT_LOW_PART
172 || ! rtx_equal_p (XEXP (SET_DEST (set), 0), target)))
175 /* If TARGET is in OP0 or OP1, check if anything in SEQ sets TARGET
176 besides the last insn. */
177 if (reg_overlap_mentioned_p (target, op0)
178 || (op1 && reg_overlap_mentioned_p (target, op1)))
180 insn = PREV_INSN (last_insn);
181 while (insn != NULL_RTX)
183 if (reg_set_p (target, insn))
186 insn = PREV_INSN (insn);
190 if (GET_RTX_CLASS (code) == RTX_UNARY)
191 note = gen_rtx_fmt_e (code, GET_MODE (target), copy_rtx (op0));
193 note = gen_rtx_fmt_ee (code, GET_MODE (target), copy_rtx (op0), copy_rtx (op1));
195 set_unique_reg_note (last_insn, REG_EQUAL, note);
200 /* Widen OP to MODE and return the rtx for the widened operand. UNSIGNEDP
201 says whether OP is signed or unsigned. NO_EXTEND is nonzero if we need
202 not actually do a sign-extend or zero-extend, but can leave the
203 higher-order bits of the result rtx undefined, for example, in the case
204 of logical operations, but not right shifts. */
207 widen_operand (rtx op, enum machine_mode mode, enum machine_mode oldmode,
208 int unsignedp, int no_extend)
212 /* If we don't have to extend and this is a constant, return it. */
213 if (no_extend && GET_MODE (op) == VOIDmode)
216 /* If we must extend do so. If OP is a SUBREG for a promoted object, also
217 extend since it will be more efficient to do so unless the signedness of
218 a promoted object differs from our extension. */
220 || (GET_CODE (op) == SUBREG && SUBREG_PROMOTED_VAR_P (op)
221 && SUBREG_PROMOTED_UNSIGNED_P (op) == unsignedp))
222 return convert_modes (mode, oldmode, op, unsignedp);
224 /* If MODE is no wider than a single word, we return a paradoxical
226 if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
227 return gen_rtx_SUBREG (mode, force_reg (GET_MODE (op), op), 0);
229 /* Otherwise, get an object of MODE, clobber it, and set the low-order
232 result = gen_reg_rtx (mode);
233 emit_insn (gen_rtx_CLOBBER (VOIDmode, result));
234 emit_move_insn (gen_lowpart (GET_MODE (op), result), op);
238 /* Return the optab used for computing the operation given by
239 the tree code, CODE. This function is not always usable (for
240 example, it cannot give complete results for multiplication
241 or division) but probably ought to be relied on more widely
242 throughout the expander. */
244 optab_for_tree_code (enum tree_code code, tree type)
256 return one_cmpl_optab;
265 return TYPE_UNSIGNED (type) ? umod_optab : smod_optab;
273 return TYPE_UNSIGNED (type) ? udiv_optab : sdiv_optab;
279 return TYPE_UNSIGNED (type) ? lshr_optab : ashr_optab;
288 return TYPE_UNSIGNED (type) ? umax_optab : smax_optab;
291 return TYPE_UNSIGNED (type) ? umin_optab : smin_optab;
293 case REALIGN_LOAD_EXPR:
294 return vec_realign_load_optab;
300 trapv = flag_trapv && INTEGRAL_TYPE_P (type) && !TYPE_UNSIGNED (type);
304 return trapv ? addv_optab : add_optab;
307 return trapv ? subv_optab : sub_optab;
310 return trapv ? smulv_optab : smul_optab;
313 return trapv ? negv_optab : neg_optab;
316 return trapv ? absv_optab : abs_optab;
324 /* Generate code to perform an operation specified by TERNARY_OPTAB
325 on operands OP0, OP1 and OP2, with result having machine-mode MODE.
327 UNSIGNEDP is for the case where we have to widen the operands
328 to perform the operation. It says to use zero-extension.
330 If TARGET is nonzero, the value
331 is generated there, if it is convenient to do so.
332 In all cases an rtx is returned for the locus of the value;
333 this may or may not be TARGET. */
336 expand_ternary_op (enum machine_mode mode, optab ternary_optab, rtx op0,
337 rtx op1, rtx op2, rtx target, int unsignedp)
339 int icode = (int) ternary_optab->handlers[(int) mode].insn_code;
340 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
341 enum machine_mode mode1 = insn_data[icode].operand[2].mode;
342 enum machine_mode mode2 = insn_data[icode].operand[3].mode;
345 rtx xop0 = op0, xop1 = op1, xop2 = op2;
347 gcc_assert (ternary_optab->handlers[(int) mode].insn_code
348 != CODE_FOR_nothing);
350 if (!target || !insn_data[icode].operand[0].predicate (target, mode))
351 temp = gen_reg_rtx (mode);
355 /* In case the insn wants input operands in modes different from
356 those of the actual operands, convert the operands. It would
357 seem that we don't need to convert CONST_INTs, but we do, so
358 that they're properly zero-extended, sign-extended or truncated
361 if (GET_MODE (op0) != mode0 && mode0 != VOIDmode)
362 xop0 = convert_modes (mode0,
363 GET_MODE (op0) != VOIDmode
368 if (GET_MODE (op1) != mode1 && mode1 != VOIDmode)
369 xop1 = convert_modes (mode1,
370 GET_MODE (op1) != VOIDmode
375 if (GET_MODE (op2) != mode2 && mode2 != VOIDmode)
376 xop2 = convert_modes (mode2,
377 GET_MODE (op2) != VOIDmode
382 /* Now, if insn's predicates don't allow our operands, put them into
385 if (!insn_data[icode].operand[1].predicate (xop0, mode0)
386 && mode0 != VOIDmode)
387 xop0 = copy_to_mode_reg (mode0, xop0);
389 if (!insn_data[icode].operand[2].predicate (xop1, mode1)
390 && mode1 != VOIDmode)
391 xop1 = copy_to_mode_reg (mode1, xop1);
393 if (!insn_data[icode].operand[3].predicate (xop2, mode2)
394 && mode2 != VOIDmode)
395 xop2 = copy_to_mode_reg (mode2, xop2);
397 pat = GEN_FCN (icode) (temp, xop0, xop1, xop2);
404 /* Like expand_binop, but return a constant rtx if the result can be
405 calculated at compile time. The arguments and return value are
406 otherwise the same as for expand_binop. */
409 simplify_expand_binop (enum machine_mode mode, optab binoptab,
410 rtx op0, rtx op1, rtx target, int unsignedp,
411 enum optab_methods methods)
413 if (CONSTANT_P (op0) && CONSTANT_P (op1))
414 return simplify_gen_binary (binoptab->code, mode, op0, op1);
416 return expand_binop (mode, binoptab, op0, op1, target, unsignedp, methods);
419 /* Like simplify_expand_binop, but always put the result in TARGET.
420 Return true if the expansion succeeded. */
423 force_expand_binop (enum machine_mode mode, optab binoptab,
424 rtx op0, rtx op1, rtx target, int unsignedp,
425 enum optab_methods methods)
427 rtx x = simplify_expand_binop (mode, binoptab, op0, op1,
428 target, unsignedp, methods);
432 emit_move_insn (target, x);
436 /* This subroutine of expand_doubleword_shift handles the cases in which
437 the effective shift value is >= BITS_PER_WORD. The arguments and return
438 value are the same as for the parent routine, except that SUPERWORD_OP1
439 is the shift count to use when shifting OUTOF_INPUT into INTO_TARGET.
440 INTO_TARGET may be null if the caller has decided to calculate it. */
443 expand_superword_shift (optab binoptab, rtx outof_input, rtx superword_op1,
444 rtx outof_target, rtx into_target,
445 int unsignedp, enum optab_methods methods)
447 if (into_target != 0)
448 if (!force_expand_binop (word_mode, binoptab, outof_input, superword_op1,
449 into_target, unsignedp, methods))
452 if (outof_target != 0)
454 /* For a signed right shift, we must fill OUTOF_TARGET with copies
455 of the sign bit, otherwise we must fill it with zeros. */
456 if (binoptab != ashr_optab)
457 emit_move_insn (outof_target, CONST0_RTX (word_mode));
459 if (!force_expand_binop (word_mode, binoptab,
460 outof_input, GEN_INT (BITS_PER_WORD - 1),
461 outof_target, unsignedp, methods))
467 /* This subroutine of expand_doubleword_shift handles the cases in which
468 the effective shift value is < BITS_PER_WORD. The arguments and return
469 value are the same as for the parent routine. */
472 expand_subword_shift (enum machine_mode op1_mode, optab binoptab,
473 rtx outof_input, rtx into_input, rtx op1,
474 rtx outof_target, rtx into_target,
475 int unsignedp, enum optab_methods methods,
476 unsigned HOST_WIDE_INT shift_mask)
478 optab reverse_unsigned_shift, unsigned_shift;
481 reverse_unsigned_shift = (binoptab == ashl_optab ? lshr_optab : ashl_optab);
482 unsigned_shift = (binoptab == ashl_optab ? ashl_optab : lshr_optab);
484 /* The low OP1 bits of INTO_TARGET come from the high bits of OUTOF_INPUT.
485 We therefore need to shift OUTOF_INPUT by (BITS_PER_WORD - OP1) bits in
486 the opposite direction to BINOPTAB. */
487 if (CONSTANT_P (op1) || shift_mask >= BITS_PER_WORD)
489 carries = outof_input;
490 tmp = immed_double_const (BITS_PER_WORD, 0, op1_mode);
491 tmp = simplify_expand_binop (op1_mode, sub_optab, tmp, op1,
496 /* We must avoid shifting by BITS_PER_WORD bits since that is either
497 the same as a zero shift (if shift_mask == BITS_PER_WORD - 1) or
498 has unknown behavior. Do a single shift first, then shift by the
499 remainder. It's OK to use ~OP1 as the remainder if shift counts
500 are truncated to the mode size. */
501 carries = expand_binop (word_mode, reverse_unsigned_shift,
502 outof_input, const1_rtx, 0, unsignedp, methods);
503 if (shift_mask == BITS_PER_WORD - 1)
505 tmp = immed_double_const (-1, -1, op1_mode);
506 tmp = simplify_expand_binop (op1_mode, xor_optab, op1, tmp,
511 tmp = immed_double_const (BITS_PER_WORD - 1, 0, op1_mode);
512 tmp = simplify_expand_binop (op1_mode, sub_optab, tmp, op1,
516 if (tmp == 0 || carries == 0)
518 carries = expand_binop (word_mode, reverse_unsigned_shift,
519 carries, tmp, 0, unsignedp, methods);
523 /* Shift INTO_INPUT logically by OP1. This is the last use of INTO_INPUT
524 so the result can go directly into INTO_TARGET if convenient. */
525 tmp = expand_binop (word_mode, unsigned_shift, into_input, op1,
526 into_target, unsignedp, methods);
530 /* Now OR in the bits carried over from OUTOF_INPUT. */
531 if (!force_expand_binop (word_mode, ior_optab, tmp, carries,
532 into_target, unsignedp, methods))
535 /* Use a standard word_mode shift for the out-of half. */
536 if (outof_target != 0)
537 if (!force_expand_binop (word_mode, binoptab, outof_input, op1,
538 outof_target, unsignedp, methods))
#ifdef HAVE_conditional_move
/* Try implementing expand_doubleword_shift using conditional moves.
   The shift is by < BITS_PER_WORD if (CMP_CODE CMP1 CMP2) is true,
   otherwise it is by >= BITS_PER_WORD.  SUBWORD_OP1 and SUPERWORD_OP1
   are the shift counts to use in the former and latter case.  All other
   arguments are the same as the parent routine.  */

static bool
expand_doubleword_shift_condmove (enum machine_mode op1_mode, optab binoptab,
				  enum rtx_code cmp_code, rtx cmp1, rtx cmp2,
				  rtx outof_input, rtx into_input,
				  rtx subword_op1, rtx superword_op1,
				  rtx outof_target, rtx into_target,
				  int unsignedp, enum optab_methods methods,
				  unsigned HOST_WIDE_INT shift_mask)
{
  rtx outof_superword, into_superword;

  /* Put the superword version of the output into OUTOF_SUPERWORD and
     INTO_SUPERWORD.  */
  outof_superword = outof_target != 0 ? gen_reg_rtx (word_mode) : 0;
  if (outof_target != 0 && subword_op1 == superword_op1)
    {
      /* The value INTO_TARGET >> SUBWORD_OP1, which we later store in
	 OUTOF_TARGET, is the same as the value of INTO_SUPERWORD.  */
      into_superword = outof_target;
      if (!expand_superword_shift (binoptab, outof_input, superword_op1,
				   outof_superword, 0, unsignedp, methods))
	return false;
    }
  else
    {
      into_superword = gen_reg_rtx (word_mode);
      if (!expand_superword_shift (binoptab, outof_input, superword_op1,
				   outof_superword, into_superword,
				   unsignedp, methods))
	return false;
    }

  /* Put the subword version directly in OUTOF_TARGET and INTO_TARGET.  */
  if (!expand_subword_shift (op1_mode, binoptab,
			     outof_input, into_input, subword_op1,
			     outof_target, into_target,
			     unsignedp, methods, shift_mask))
    return false;

  /* Select between them.  Do the INTO half first because INTO_SUPERWORD
     might be the current value of OUTOF_TARGET.  */
  if (!emit_conditional_move (into_target, cmp_code, cmp1, cmp2, op1_mode,
			      into_target, into_superword, word_mode, false))
    return false;

  if (outof_target != 0)
    if (!emit_conditional_move (outof_target, cmp_code, cmp1, cmp2, op1_mode,
				outof_target, outof_superword,
				word_mode, false))
      return false;

  return true;
}
#endif
607 /* Expand a doubleword shift (ashl, ashr or lshr) using word-mode shifts.
608 OUTOF_INPUT and INTO_INPUT are the two word-sized halves of the first
609 input operand; the shift moves bits in the direction OUTOF_INPUT->
610 INTO_TARGET. OUTOF_TARGET and INTO_TARGET are the equivalent words
611 of the target. OP1 is the shift count and OP1_MODE is its mode.
612 If OP1 is constant, it will have been truncated as appropriate
613 and is known to be nonzero.
615 If SHIFT_MASK is zero, the result of word shifts is undefined when the
616 shift count is outside the range [0, BITS_PER_WORD). This routine must
617 avoid generating such shifts for OP1s in the range [0, BITS_PER_WORD * 2).
619 If SHIFT_MASK is nonzero, all word-mode shift counts are effectively
620 masked by it and shifts in the range [BITS_PER_WORD, SHIFT_MASK) will
621 fill with zeros or sign bits as appropriate.
623 If SHIFT_MASK is BITS_PER_WORD - 1, this routine will synthesize
624 a doubleword shift whose equivalent mask is BITS_PER_WORD * 2 - 1.
625 Doing this preserves semantics required by SHIFT_COUNT_TRUNCATED.
626 In all other cases, shifts by values outside [0, BITS_PER_UNIT * 2)
629 BINOPTAB, UNSIGNEDP and METHODS are as for expand_binop. This function
630 may not use INTO_INPUT after modifying INTO_TARGET, and similarly for
631 OUTOF_INPUT and OUTOF_TARGET. OUTOF_TARGET can be null if the parent
632 function wants to calculate it itself.
634 Return true if the shift could be successfully synthesized. */
637 expand_doubleword_shift (enum machine_mode op1_mode, optab binoptab,
638 rtx outof_input, rtx into_input, rtx op1,
639 rtx outof_target, rtx into_target,
640 int unsignedp, enum optab_methods methods,
641 unsigned HOST_WIDE_INT shift_mask)
643 rtx superword_op1, tmp, cmp1, cmp2;
644 rtx subword_label, done_label;
645 enum rtx_code cmp_code;
647 /* See if word-mode shifts by BITS_PER_WORD...BITS_PER_WORD * 2 - 1 will
648 fill the result with sign or zero bits as appropriate. If so, the value
649 of OUTOF_TARGET will always be (SHIFT OUTOF_INPUT OP1). Recursively call
650 this routine to calculate INTO_TARGET (which depends on both OUTOF_INPUT
651 and INTO_INPUT), then emit code to set up OUTOF_TARGET.
653 This isn't worthwhile for constant shifts since the optimizers will
654 cope better with in-range shift counts. */
655 if (shift_mask >= BITS_PER_WORD
657 && !CONSTANT_P (op1))
659 if (!expand_doubleword_shift (op1_mode, binoptab,
660 outof_input, into_input, op1,
662 unsignedp, methods, shift_mask))
664 if (!force_expand_binop (word_mode, binoptab, outof_input, op1,
665 outof_target, unsignedp, methods))
670 /* Set CMP_CODE, CMP1 and CMP2 so that the rtx (CMP_CODE CMP1 CMP2)
671 is true when the effective shift value is less than BITS_PER_WORD.
672 Set SUPERWORD_OP1 to the shift count that should be used to shift
673 OUTOF_INPUT into INTO_TARGET when the condition is false. */
674 tmp = immed_double_const (BITS_PER_WORD, 0, op1_mode);
675 if (!CONSTANT_P (op1) && shift_mask == BITS_PER_WORD - 1)
677 /* Set CMP1 to OP1 & BITS_PER_WORD. The result is zero iff OP1
678 is a subword shift count. */
679 cmp1 = simplify_expand_binop (op1_mode, and_optab, op1, tmp,
681 cmp2 = CONST0_RTX (op1_mode);
687 /* Set CMP1 to OP1 - BITS_PER_WORD. */
688 cmp1 = simplify_expand_binop (op1_mode, sub_optab, op1, tmp,
690 cmp2 = CONST0_RTX (op1_mode);
692 superword_op1 = cmp1;
697 /* If we can compute the condition at compile time, pick the
698 appropriate subroutine. */
699 tmp = simplify_relational_operation (cmp_code, SImode, op1_mode, cmp1, cmp2);
700 if (tmp != 0 && GET_CODE (tmp) == CONST_INT)
702 if (tmp == const0_rtx)
703 return expand_superword_shift (binoptab, outof_input, superword_op1,
704 outof_target, into_target,
707 return expand_subword_shift (op1_mode, binoptab,
708 outof_input, into_input, op1,
709 outof_target, into_target,
710 unsignedp, methods, shift_mask);
713 #ifdef HAVE_conditional_move
714 /* Try using conditional moves to generate straight-line code. */
716 rtx start = get_last_insn ();
717 if (expand_doubleword_shift_condmove (op1_mode, binoptab,
718 cmp_code, cmp1, cmp2,
719 outof_input, into_input,
721 outof_target, into_target,
722 unsignedp, methods, shift_mask))
724 delete_insns_since (start);
728 /* As a last resort, use branches to select the correct alternative. */
729 subword_label = gen_label_rtx ();
730 done_label = gen_label_rtx ();
732 do_compare_rtx_and_jump (cmp1, cmp2, cmp_code, false, op1_mode,
733 0, 0, subword_label);
735 if (!expand_superword_shift (binoptab, outof_input, superword_op1,
736 outof_target, into_target,
740 emit_jump_insn (gen_jump (done_label));
742 emit_label (subword_label);
744 if (!expand_subword_shift (op1_mode, binoptab,
745 outof_input, into_input, op1,
746 outof_target, into_target,
747 unsignedp, methods, shift_mask))
750 emit_label (done_label);
754 /* Subroutine of expand_binop. Perform a double word multiplication of
755 operands OP0 and OP1 both of mode MODE, which is exactly twice as wide
756 as the target's word_mode. This function return NULL_RTX if anything
757 goes wrong, in which case it may have already emitted instructions
758 which need to be deleted.
760 If we want to multiply two two-word values and have normal and widening
761 multiplies of single-word values, we can do this with three smaller
762 multiplications. Note that we do not make a REG_NO_CONFLICT block here
763 because we are not operating on one word at a time.
765 The multiplication proceeds as follows:
766 _______________________
767 [__op0_high_|__op0_low__]
768 _______________________
769 * [__op1_high_|__op1_low__]
770 _______________________________________________
771 _______________________
772 (1) [__op0_low__*__op1_low__]
773 _______________________
774 (2a) [__op0_low__*__op1_high_]
775 _______________________
776 (2b) [__op0_high_*__op1_low__]
777 _______________________
778 (3) [__op0_high_*__op1_high_]
781 This gives a 4-word result. Since we are only interested in the
782 lower 2 words, partial result (3) and the upper words of (2a) and
783 (2b) don't need to be calculated. Hence (2a) and (2b) can be
784 calculated using non-widening multiplication.
786 (1), however, needs to be calculated with an unsigned widening
787 multiplication. If this operation is not directly supported we
788 try using a signed widening multiplication and adjust the result.
789 This adjustment works as follows:
791 If both operands are positive then no adjustment is needed.
793 If the operands have different signs, for example op0_low < 0 and
794 op1_low >= 0, the instruction treats the most significant bit of
795 op0_low as a sign bit instead of a bit with significance
796 2**(BITS_PER_WORD-1), i.e. the instruction multiplies op1_low
797 with 2**BITS_PER_WORD - op0_low, and two's complements the
798 result. Conclusion: We need to add op1_low * 2**BITS_PER_WORD to
801 Similarly, if both operands are negative, we need to add
802 (op0_low + op1_low) * 2**BITS_PER_WORD.
804 We use a trick to adjust quickly. We logically shift op0_low right
805 (op1_low) BITS_PER_WORD-1 steps to get 0 or 1, and add this to
806 op0_high (op1_high) before it is used to calculate 2b (2a). If no
807 logical shift exists, we do an arithmetic right shift and subtract
811 expand_doubleword_mult (enum machine_mode mode, rtx op0, rtx op1, rtx target,
812 bool umulp, enum optab_methods methods)
814 int low = (WORDS_BIG_ENDIAN ? 1 : 0);
815 int high = (WORDS_BIG_ENDIAN ? 0 : 1);
816 rtx wordm1 = umulp ? NULL_RTX : GEN_INT (BITS_PER_WORD - 1);
817 rtx product, adjust, product_high, temp;
819 rtx op0_high = operand_subword_force (op0, high, mode);
820 rtx op0_low = operand_subword_force (op0, low, mode);
821 rtx op1_high = operand_subword_force (op1, high, mode);
822 rtx op1_low = operand_subword_force (op1, low, mode);
824 /* If we're using an unsigned multiply to directly compute the product
825 of the low-order words of the operands and perform any required
826 adjustments of the operands, we begin by trying two more multiplications
827 and then computing the appropriate sum.
829 We have checked above that the required addition is provided.
830 Full-word addition will normally always succeed, especially if
831 it is provided at all, so we don't worry about its failure. The
832 multiplication may well fail, however, so we do handle that. */
836 /* ??? This could be done with emit_store_flag where available. */
837 temp = expand_binop (word_mode, lshr_optab, op0_low, wordm1,
838 NULL_RTX, 1, methods);
840 op0_high = expand_binop (word_mode, add_optab, op0_high, temp,
841 NULL_RTX, 0, OPTAB_DIRECT);
844 temp = expand_binop (word_mode, ashr_optab, op0_low, wordm1,
845 NULL_RTX, 0, methods);
848 op0_high = expand_binop (word_mode, sub_optab, op0_high, temp,
849 NULL_RTX, 0, OPTAB_DIRECT);
856 adjust = expand_binop (word_mode, smul_optab, op0_high, op1_low,
857 NULL_RTX, 0, OPTAB_DIRECT);
861 /* OP0_HIGH should now be dead. */
865 /* ??? This could be done with emit_store_flag where available. */
866 temp = expand_binop (word_mode, lshr_optab, op1_low, wordm1,
867 NULL_RTX, 1, methods);
869 op1_high = expand_binop (word_mode, add_optab, op1_high, temp,
870 NULL_RTX, 0, OPTAB_DIRECT);
873 temp = expand_binop (word_mode, ashr_optab, op1_low, wordm1,
874 NULL_RTX, 0, methods);
877 op1_high = expand_binop (word_mode, sub_optab, op1_high, temp,
878 NULL_RTX, 0, OPTAB_DIRECT);
885 temp = expand_binop (word_mode, smul_optab, op1_high, op0_low,
886 NULL_RTX, 0, OPTAB_DIRECT);
890 /* OP1_HIGH should now be dead. */
892 adjust = expand_binop (word_mode, add_optab, adjust, temp,
893 adjust, 0, OPTAB_DIRECT);
895 if (target && !REG_P (target))
899 product = expand_binop (mode, umul_widen_optab, op0_low, op1_low,
900 target, 1, OPTAB_DIRECT);
902 product = expand_binop (mode, smul_widen_optab, op0_low, op1_low,
903 target, 1, OPTAB_DIRECT);
908 product_high = operand_subword (product, high, 1, mode);
909 adjust = expand_binop (word_mode, add_optab, product_high, adjust,
910 REG_P (product_high) ? product_high : adjust,
912 emit_move_insn (product_high, adjust);
916 /* Wrapper around expand_binop which takes an rtx code to specify
917 the operation to perform, not an optab pointer. All other
918 arguments are the same. */
920 expand_simple_binop (enum machine_mode mode, enum rtx_code code, rtx op0,
921 rtx op1, rtx target, int unsignedp,
922 enum optab_methods methods)
924 optab binop = code_to_optab[(int) code];
927 return expand_binop (mode, binop, op0, op1, target, unsignedp, methods);
930 /* Generate code to perform an operation specified by BINOPTAB
931 on operands OP0 and OP1, with result having machine-mode MODE.
933 UNSIGNEDP is for the case where we have to widen the operands
934 to perform the operation. It says to use zero-extension.
936 If TARGET is nonzero, the value
937 is generated there, if it is convenient to do so.
938 In all cases an rtx is returned for the locus of the value;
939 this may or may not be TARGET. */
942 expand_binop (enum machine_mode mode, optab binoptab, rtx op0, rtx op1,
943 rtx target, int unsignedp, enum optab_methods methods)
945 enum optab_methods next_methods
946 = (methods == OPTAB_LIB || methods == OPTAB_LIB_WIDEN
947 ? OPTAB_WIDEN : methods);
948 enum mode_class class;
949 enum machine_mode wider_mode;
951 int commutative_op = 0;
952 int shift_op = (binoptab->code == ASHIFT
953 || binoptab->code == ASHIFTRT
954 || binoptab->code == LSHIFTRT
955 || binoptab->code == ROTATE
956 || binoptab->code == ROTATERT);
957 rtx entry_last = get_last_insn ();
960 class = GET_MODE_CLASS (mode);
964 /* Load duplicate non-volatile operands once. */
965 if (rtx_equal_p (op0, op1) && ! volatile_refs_p (op0))
967 op0 = force_not_mem (op0);
972 op0 = force_not_mem (op0);
973 op1 = force_not_mem (op1);
977 /* If subtracting an integer constant, convert this into an addition of
978 the negated constant. */
980 if (binoptab == sub_optab && GET_CODE (op1) == CONST_INT)
982 op1 = negate_rtx (mode, op1);
983 binoptab = add_optab;
986 /* If we are inside an appropriately-short loop and we are optimizing,
987 force expensive constants into a register. */
988 if (CONSTANT_P (op0) && optimize
989 && rtx_cost (op0, binoptab->code) > COSTS_N_INSNS (1))
991 if (GET_MODE (op0) != VOIDmode)
992 op0 = convert_modes (mode, VOIDmode, op0, unsignedp);
993 op0 = force_reg (mode, op0);
996 if (CONSTANT_P (op1) && optimize
997 && ! shift_op && rtx_cost (op1, binoptab->code) > COSTS_N_INSNS (1))
999 if (GET_MODE (op1) != VOIDmode)
1000 op1 = convert_modes (mode, VOIDmode, op1, unsignedp);
1001 op1 = force_reg (mode, op1);
1004 /* Record where to delete back to if we backtrack. */
1005 last = get_last_insn ();
1007 /* If operation is commutative,
1008 try to make the first operand a register.
1009 Even better, try to make it the same as the target.
1010 Also try to make the last operand a constant. */
1011 if (GET_RTX_CLASS (binoptab->code) == RTX_COMM_ARITH
1012 || binoptab == smul_widen_optab
1013 || binoptab == umul_widen_optab
1014 || binoptab == smul_highpart_optab
1015 || binoptab == umul_highpart_optab)
1019 if (((target == 0 || REG_P (target))
1023 : rtx_equal_p (op1, target))
1024 || GET_CODE (op0) == CONST_INT)
1032 /* If we can do it with a three-operand insn, do so. */
1034 if (methods != OPTAB_MUST_WIDEN
1035 && binoptab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
1037 int icode = (int) binoptab->handlers[(int) mode].insn_code;
1038 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
1039 enum machine_mode mode1 = insn_data[icode].operand[2].mode;
1041 rtx xop0 = op0, xop1 = op1;
1046 temp = gen_reg_rtx (mode);
1048 /* If it is a commutative operator and the modes would match
1049 if we would swap the operands, we can save the conversions. */
1052 if (GET_MODE (op0) != mode0 && GET_MODE (op1) != mode1
1053 && GET_MODE (op0) == mode1 && GET_MODE (op1) == mode0)
1057 tmp = op0; op0 = op1; op1 = tmp;
1058 tmp = xop0; xop0 = xop1; xop1 = tmp;
1062 /* In case the insn wants input operands in modes different from
1063 those of the actual operands, convert the operands. It would
1064 seem that we don't need to convert CONST_INTs, but we do, so
1065 that they're properly zero-extended, sign-extended or truncated
1068 if (GET_MODE (op0) != mode0 && mode0 != VOIDmode)
1069 xop0 = convert_modes (mode0,
1070 GET_MODE (op0) != VOIDmode
1075 if (GET_MODE (op1) != mode1 && mode1 != VOIDmode)
1076 xop1 = convert_modes (mode1,
1077 GET_MODE (op1) != VOIDmode
1082 /* Now, if insn's predicates don't allow our operands, put them into
1085 if (!insn_data[icode].operand[1].predicate (xop0, mode0)
1086 && mode0 != VOIDmode)
1087 xop0 = copy_to_mode_reg (mode0, xop0);
1089 if (!insn_data[icode].operand[2].predicate (xop1, mode1)
1090 && mode1 != VOIDmode)
1091 xop1 = copy_to_mode_reg (mode1, xop1);
1093 if (!insn_data[icode].operand[0].predicate (temp, mode))
1094 temp = gen_reg_rtx (mode);
1096 pat = GEN_FCN (icode) (temp, xop0, xop1);
1099 /* If PAT is composed of more than one insn, try to add an appropriate
1100 REG_EQUAL note to it. If we can't because TEMP conflicts with an
1101 operand, call ourselves again, this time without a target. */
1102 if (INSN_P (pat) && NEXT_INSN (pat) != NULL_RTX
1103 && ! add_equal_note (pat, temp, binoptab->code, xop0, xop1))
1105 delete_insns_since (last);
1106 return expand_binop (mode, binoptab, op0, op1, NULL_RTX,
1107 unsignedp, methods);
1114 delete_insns_since (last);
1117 /* If this is a multiply, see if we can do a widening operation that
1118 takes operands of this mode and makes a wider mode. */
1120 if (binoptab == smul_optab && GET_MODE_WIDER_MODE (mode) != VOIDmode
1121 && (((unsignedp ? umul_widen_optab : smul_widen_optab)
1122 ->handlers[(int) GET_MODE_WIDER_MODE (mode)].insn_code)
1123 != CODE_FOR_nothing))
1125 temp = expand_binop (GET_MODE_WIDER_MODE (mode),
1126 unsignedp ? umul_widen_optab : smul_widen_optab,
1127 op0, op1, NULL_RTX, unsignedp, OPTAB_DIRECT);
1131 if (GET_MODE_CLASS (mode) == MODE_INT)
1132 return gen_lowpart (mode, temp);
1134 return convert_to_mode (mode, temp, unsignedp);
1138 /* Look for a wider mode of the same class for which we think we
1139 can open-code the operation. Check for a widening multiply at the
1140 wider mode as well. */
1142 if ((class == MODE_INT || class == MODE_FLOAT || class == MODE_COMPLEX_FLOAT)
1143 && methods != OPTAB_DIRECT && methods != OPTAB_LIB)
1144 for (wider_mode = GET_MODE_WIDER_MODE (mode); wider_mode != VOIDmode;
1145 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
1147 if (binoptab->handlers[(int) wider_mode].insn_code != CODE_FOR_nothing
1148 || (binoptab == smul_optab
1149 && GET_MODE_WIDER_MODE (wider_mode) != VOIDmode
1150 && (((unsignedp ? umul_widen_optab : smul_widen_optab)
1151 ->handlers[(int) GET_MODE_WIDER_MODE (wider_mode)].insn_code)
1152 != CODE_FOR_nothing)))
1154 rtx xop0 = op0, xop1 = op1;
1157 /* For certain integer operations, we need not actually extend
1158 the narrow operands, as long as we will truncate
1159 the results to the same narrowness. */
1161 if ((binoptab == ior_optab || binoptab == and_optab
1162 || binoptab == xor_optab
1163 || binoptab == add_optab || binoptab == sub_optab
1164 || binoptab == smul_optab || binoptab == ashl_optab)
1165 && class == MODE_INT)
1168 xop0 = widen_operand (xop0, wider_mode, mode, unsignedp, no_extend);
1170 /* The second operand of a shift must always be extended. */
1171 xop1 = widen_operand (xop1, wider_mode, mode, unsignedp,
1172 no_extend && binoptab != ashl_optab);
1174 temp = expand_binop (wider_mode, binoptab, xop0, xop1, NULL_RTX,
1175 unsignedp, OPTAB_DIRECT);
1178 if (class != MODE_INT)
1181 target = gen_reg_rtx (mode);
1182 convert_move (target, temp, 0);
1186 return gen_lowpart (mode, temp);
1189 delete_insns_since (last);
1193 /* These can be done a word at a time. */
1194 if ((binoptab == and_optab || binoptab == ior_optab || binoptab == xor_optab)
1195 && class == MODE_INT
1196 && GET_MODE_SIZE (mode) > UNITS_PER_WORD
1197 && binoptab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing)
1203 /* If TARGET is the same as one of the operands, the REG_EQUAL note
1204 won't be accurate, so use a new target. */
1205 if (target == 0 || target == op0 || target == op1)
1206 target = gen_reg_rtx (mode);
1210 /* Do the actual arithmetic. */
1211 for (i = 0; i < GET_MODE_BITSIZE (mode) / BITS_PER_WORD; i++)
1213 rtx target_piece = operand_subword (target, i, 1, mode);
1214 rtx x = expand_binop (word_mode, binoptab,
1215 operand_subword_force (op0, i, mode),
1216 operand_subword_force (op1, i, mode),
1217 target_piece, unsignedp, next_methods);
1222 if (target_piece != x)
1223 emit_move_insn (target_piece, x);
1226 insns = get_insns ();
1229 if (i == GET_MODE_BITSIZE (mode) / BITS_PER_WORD)
1231 if (binoptab->code != UNKNOWN)
1233 = gen_rtx_fmt_ee (binoptab->code, mode,
1234 copy_rtx (op0), copy_rtx (op1));
1238 emit_no_conflict_block (insns, target, op0, op1, equiv_value);
1243 /* Synthesize double word shifts from single word shifts. */
1244 if ((binoptab == lshr_optab || binoptab == ashl_optab
1245 || binoptab == ashr_optab)
1246 && class == MODE_INT
1247 && (GET_CODE (op1) == CONST_INT || !optimize_size)
1248 && GET_MODE_SIZE (mode) == 2 * UNITS_PER_WORD
1249 && binoptab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing
1250 && ashl_optab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing
1251 && lshr_optab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing)
1253 unsigned HOST_WIDE_INT shift_mask, double_shift_mask;
1254 enum machine_mode op1_mode;
1256 double_shift_mask = targetm.shift_truncation_mask (mode);
1257 shift_mask = targetm.shift_truncation_mask (word_mode);
1258 op1_mode = GET_MODE (op1) != VOIDmode ? GET_MODE (op1) : word_mode;
1260 /* Apply the truncation to constant shifts. */
1261 if (double_shift_mask > 0 && GET_CODE (op1) == CONST_INT)
1262 op1 = GEN_INT (INTVAL (op1) & double_shift_mask);
1264 if (op1 == CONST0_RTX (op1_mode))
1267 /* Make sure that this is a combination that expand_doubleword_shift
1268 can handle. See the comments there for details. */
1269 if (double_shift_mask == 0
1270 || (shift_mask == BITS_PER_WORD - 1
1271 && double_shift_mask == BITS_PER_WORD * 2 - 1))
1273 rtx insns, equiv_value;
1274 rtx into_target, outof_target;
1275 rtx into_input, outof_input;
1276 int left_shift, outof_word;
1278 /* If TARGET is the same as one of the operands, the REG_EQUAL note
1279 won't be accurate, so use a new target. */
1280 if (target == 0 || target == op0 || target == op1)
1281 target = gen_reg_rtx (mode);
1285 /* OUTOF_* is the word we are shifting bits away from, and
1286 INTO_* is the word that we are shifting bits towards, thus
1287 they differ depending on the direction of the shift and
1288 WORDS_BIG_ENDIAN. */
1290 left_shift = binoptab == ashl_optab;
1291 outof_word = left_shift ^ ! WORDS_BIG_ENDIAN;
1293 outof_target = operand_subword (target, outof_word, 1, mode);
1294 into_target = operand_subword (target, 1 - outof_word, 1, mode);
1296 outof_input = operand_subword_force (op0, outof_word, mode);
1297 into_input = operand_subword_force (op0, 1 - outof_word, mode);
1299 if (expand_doubleword_shift (op1_mode, binoptab,
1300 outof_input, into_input, op1,
1301 outof_target, into_target,
1302 unsignedp, methods, shift_mask))
1304 insns = get_insns ();
1307 equiv_value = gen_rtx_fmt_ee (binoptab->code, mode, op0, op1);
1308 emit_no_conflict_block (insns, target, op0, op1, equiv_value);
1315 /* Synthesize double word rotates from single word shifts. */
1316 if ((binoptab == rotl_optab || binoptab == rotr_optab)
1317 && class == MODE_INT
1318 && GET_CODE (op1) == CONST_INT
1319 && GET_MODE_SIZE (mode) == 2 * UNITS_PER_WORD
1320 && ashl_optab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing
1321 && lshr_optab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing)
1323 rtx insns, equiv_value;
1324 rtx into_target, outof_target;
1325 rtx into_input, outof_input;
1327 int shift_count, left_shift, outof_word;
1329 /* If TARGET is the same as one of the operands, the REG_EQUAL note
1330 won't be accurate, so use a new target. Do this also if target is not
1331 a REG, first because having a register instead may open optimization
1332 opportunities, and second because if target and op0 happen to be MEMs
1333 designating the same location, we would risk clobbering it too early
1334 in the code sequence we generate below. */
1335 if (target == 0 || target == op0 || target == op1 || ! REG_P (target))
1336 target = gen_reg_rtx (mode);
1340 shift_count = INTVAL (op1);
1342 /* OUTOF_* is the word we are shifting bits away from, and
1343 INTO_* is the word that we are shifting bits towards, thus
1344 they differ depending on the direction of the shift and
1345 WORDS_BIG_ENDIAN. */
1347 left_shift = (binoptab == rotl_optab);
1348 outof_word = left_shift ^ ! WORDS_BIG_ENDIAN;
1350 outof_target = operand_subword (target, outof_word, 1, mode);
1351 into_target = operand_subword (target, 1 - outof_word, 1, mode);
1353 outof_input = operand_subword_force (op0, outof_word, mode);
1354 into_input = operand_subword_force (op0, 1 - outof_word, mode);
1356 if (shift_count == BITS_PER_WORD)
1358 /* This is just a word swap. */
1359 emit_move_insn (outof_target, into_input);
1360 emit_move_insn (into_target, outof_input);
1365 rtx into_temp1, into_temp2, outof_temp1, outof_temp2;
1366 rtx first_shift_count, second_shift_count;
1367 optab reverse_unsigned_shift, unsigned_shift;
1369 reverse_unsigned_shift = (left_shift ^ (shift_count < BITS_PER_WORD)
1370 ? lshr_optab : ashl_optab);
1372 unsigned_shift = (left_shift ^ (shift_count < BITS_PER_WORD)
1373 ? ashl_optab : lshr_optab);
1375 if (shift_count > BITS_PER_WORD)
1377 first_shift_count = GEN_INT (shift_count - BITS_PER_WORD);
1378 second_shift_count = GEN_INT (2 * BITS_PER_WORD - shift_count);
1382 first_shift_count = GEN_INT (BITS_PER_WORD - shift_count);
1383 second_shift_count = GEN_INT (shift_count);
1386 into_temp1 = expand_binop (word_mode, unsigned_shift,
1387 outof_input, first_shift_count,
1388 NULL_RTX, unsignedp, next_methods);
1389 into_temp2 = expand_binop (word_mode, reverse_unsigned_shift,
1390 into_input, second_shift_count,
1391 NULL_RTX, unsignedp, next_methods);
1393 if (into_temp1 != 0 && into_temp2 != 0)
1394 inter = expand_binop (word_mode, ior_optab, into_temp1, into_temp2,
1395 into_target, unsignedp, next_methods);
1399 if (inter != 0 && inter != into_target)
1400 emit_move_insn (into_target, inter);
1402 outof_temp1 = expand_binop (word_mode, unsigned_shift,
1403 into_input, first_shift_count,
1404 NULL_RTX, unsignedp, next_methods);
1405 outof_temp2 = expand_binop (word_mode, reverse_unsigned_shift,
1406 outof_input, second_shift_count,
1407 NULL_RTX, unsignedp, next_methods);
1409 if (inter != 0 && outof_temp1 != 0 && outof_temp2 != 0)
1410 inter = expand_binop (word_mode, ior_optab,
1411 outof_temp1, outof_temp2,
1412 outof_target, unsignedp, next_methods);
1414 if (inter != 0 && inter != outof_target)
1415 emit_move_insn (outof_target, inter);
1418 insns = get_insns ();
1423 if (binoptab->code != UNKNOWN)
1424 equiv_value = gen_rtx_fmt_ee (binoptab->code, mode, op0, op1);
1428 /* We can't make this a no conflict block if this is a word swap,
1429 because the word swap case fails if the input and output values
1430 are in the same register. */
1431 if (shift_count != BITS_PER_WORD)
1432 emit_no_conflict_block (insns, target, op0, op1, equiv_value);
1441 /* These can be done a word at a time by propagating carries. */
1442 if ((binoptab == add_optab || binoptab == sub_optab)
1443 && class == MODE_INT
1444 && GET_MODE_SIZE (mode) >= 2 * UNITS_PER_WORD
1445 && binoptab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing)
1448 optab otheroptab = binoptab == add_optab ? sub_optab : add_optab;
1449 const unsigned int nwords = GET_MODE_BITSIZE (mode) / BITS_PER_WORD;
1450 rtx carry_in = NULL_RTX, carry_out = NULL_RTX;
1451 rtx xop0, xop1, xtarget;
1453 /* We can handle either a 1 or -1 value for the carry. If STORE_FLAG
1454 value is one of those, use it. Otherwise, use 1 since it is the
1455 one easiest to get. */
1456 #if STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1
1457 int normalizep = STORE_FLAG_VALUE;
1462 /* Prepare the operands. */
1463 xop0 = force_reg (mode, op0);
1464 xop1 = force_reg (mode, op1);
1466 xtarget = gen_reg_rtx (mode);
1468 if (target == 0 || !REG_P (target))
1471 /* Indicate for flow that the entire target reg is being set. */
1473 emit_insn (gen_rtx_CLOBBER (VOIDmode, xtarget));
1475 /* Do the actual arithmetic. */
1476 for (i = 0; i < nwords; i++)
1478 int index = (WORDS_BIG_ENDIAN ? nwords - i - 1 : i);
1479 rtx target_piece = operand_subword (xtarget, index, 1, mode);
1480 rtx op0_piece = operand_subword_force (xop0, index, mode);
1481 rtx op1_piece = operand_subword_force (xop1, index, mode);
1484 /* Main add/subtract of the input operands. */
1485 x = expand_binop (word_mode, binoptab,
1486 op0_piece, op1_piece,
1487 target_piece, unsignedp, next_methods);
1493 /* Store carry from main add/subtract. */
1494 carry_out = gen_reg_rtx (word_mode);
1495 carry_out = emit_store_flag_force (carry_out,
1496 (binoptab == add_optab
1499 word_mode, 1, normalizep);
1506 /* Add/subtract previous carry to main result. */
1507 newx = expand_binop (word_mode,
1508 normalizep == 1 ? binoptab : otheroptab,
1510 NULL_RTX, 1, next_methods);
1514 /* Get out carry from adding/subtracting carry in. */
1515 rtx carry_tmp = gen_reg_rtx (word_mode);
1516 carry_tmp = emit_store_flag_force (carry_tmp,
1517 (binoptab == add_optab
1520 word_mode, 1, normalizep);
1522 /* Logical-ior the two poss. carry together. */
1523 carry_out = expand_binop (word_mode, ior_optab,
1524 carry_out, carry_tmp,
1525 carry_out, 0, next_methods);
1529 emit_move_insn (target_piece, newx);
1533 if (x != target_piece)
1534 emit_move_insn (target_piece, x);
1537 carry_in = carry_out;
1540 if (i == GET_MODE_BITSIZE (mode) / (unsigned) BITS_PER_WORD)
1542 if (mov_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing
1543 || ! rtx_equal_p (target, xtarget))
1545 rtx temp = emit_move_insn (target, xtarget);
1547 set_unique_reg_note (temp,
1549 gen_rtx_fmt_ee (binoptab->code, mode,
1560 delete_insns_since (last);
1563 /* Attempt to synthesize double word multiplies using a sequence of word
1564 mode multiplications. We first attempt to generate a sequence using a
1565 more efficient unsigned widening multiply, and if that fails we then
1566 try using a signed widening multiply. */
1568 if (binoptab == smul_optab
1569 && class == MODE_INT
1570 && GET_MODE_SIZE (mode) == 2 * UNITS_PER_WORD
1571 && smul_optab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing
1572 && add_optab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing)
1574 rtx product = NULL_RTX;
1576 if (umul_widen_optab->handlers[(int) mode].insn_code
1577 != CODE_FOR_nothing)
1579 product = expand_doubleword_mult (mode, op0, op1, target,
1582 delete_insns_since (last);
1585 if (product == NULL_RTX
1586 && smul_widen_optab->handlers[(int) mode].insn_code
1587 != CODE_FOR_nothing)
1589 product = expand_doubleword_mult (mode, op0, op1, target,
1592 delete_insns_since (last);
1595 if (product != NULL_RTX)
1597 if (mov_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
1599 temp = emit_move_insn (target ? target : product, product);
1600 set_unique_reg_note (temp,
1602 gen_rtx_fmt_ee (MULT, mode,
1610 /* It can't be open-coded in this mode.
1611 Use a library call if one is available and caller says that's ok. */
1613 if (binoptab->handlers[(int) mode].libfunc
1614 && (methods == OPTAB_LIB || methods == OPTAB_LIB_WIDEN))
1618 enum machine_mode op1_mode = mode;
1625 op1_mode = word_mode;
1626 /* Specify unsigned here,
1627 since negative shift counts are meaningless. */
1628 op1x = convert_to_mode (word_mode, op1, 1);
1631 if (GET_MODE (op0) != VOIDmode
1632 && GET_MODE (op0) != mode)
1633 op0 = convert_to_mode (mode, op0, unsignedp);
1635 /* Pass 1 for NO_QUEUE so we don't lose any increments
1636 if the libcall is cse'd or moved. */
1637 value = emit_library_call_value (binoptab->handlers[(int) mode].libfunc,
1638 NULL_RTX, LCT_CONST, mode, 2,
1639 op0, mode, op1x, op1_mode);
1641 insns = get_insns ();
1644 target = gen_reg_rtx (mode);
1645 emit_libcall_block (insns, target, value,
1646 gen_rtx_fmt_ee (binoptab->code, mode, op0, op1));
1651 delete_insns_since (last);
1653 /* It can't be done in this mode. Can we do it in a wider mode? */
1655 if (! (methods == OPTAB_WIDEN || methods == OPTAB_LIB_WIDEN
1656 || methods == OPTAB_MUST_WIDEN))
1658 /* Caller says, don't even try. */
1659 delete_insns_since (entry_last);
1663 /* Compute the value of METHODS to pass to recursive calls.
1664 Don't allow widening to be tried recursively. */
1666 methods = (methods == OPTAB_LIB_WIDEN ? OPTAB_LIB : OPTAB_DIRECT);
1668 /* Look for a wider mode of the same class for which it appears we can do
1671 if (class == MODE_INT || class == MODE_FLOAT || class == MODE_COMPLEX_FLOAT)
1673 for (wider_mode = GET_MODE_WIDER_MODE (mode); wider_mode != VOIDmode;
1674 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
1676 if ((binoptab->handlers[(int) wider_mode].insn_code
1677 != CODE_FOR_nothing)
1678 || (methods == OPTAB_LIB
1679 && binoptab->handlers[(int) wider_mode].libfunc))
1681 rtx xop0 = op0, xop1 = op1;
1684 /* For certain integer operations, we need not actually extend
1685 the narrow operands, as long as we will truncate
1686 the results to the same narrowness. */
1688 if ((binoptab == ior_optab || binoptab == and_optab
1689 || binoptab == xor_optab
1690 || binoptab == add_optab || binoptab == sub_optab
1691 || binoptab == smul_optab || binoptab == ashl_optab)
1692 && class == MODE_INT)
1695 xop0 = widen_operand (xop0, wider_mode, mode,
1696 unsignedp, no_extend);
1698 /* The second operand of a shift must always be extended. */
1699 xop1 = widen_operand (xop1, wider_mode, mode, unsignedp,
1700 no_extend && binoptab != ashl_optab);
1702 temp = expand_binop (wider_mode, binoptab, xop0, xop1, NULL_RTX,
1703 unsignedp, methods);
1706 if (class != MODE_INT)
1709 target = gen_reg_rtx (mode);
1710 convert_move (target, temp, 0);
1714 return gen_lowpart (mode, temp);
1717 delete_insns_since (last);
1722 delete_insns_since (entry_last);
1726 /* Expand a binary operator which has both signed and unsigned forms.
1727 UOPTAB is the optab for unsigned operations, and SOPTAB is for
1730 If we widen unsigned operands, we may use a signed wider operation instead
1731 of an unsigned wider operation, since the result would be the same. */
1734 sign_expand_binop (enum machine_mode mode, optab uoptab, optab soptab,
1735 rtx op0, rtx op1, rtx target, int unsignedp,
1736 enum optab_methods methods)
/* Pick the optab matching the caller's signedness for all "direct"
   (same-width) attempts.  */
1739 optab direct_optab = unsignedp ? uoptab : soptab;
1740 struct optab wide_soptab;
/* Strategy: try progressively more expensive expansions, stopping as
   soon as one succeeds or METHODS says to go no further.
   NOTE(review): this listing is elided; the early "return temp;"
   statements presumably sit on the missing lines after each
   "if (temp || methods == ...)" test — confirm against full source.  */
1742 /* Do it without widening, if possible. */
1743 temp = expand_binop (mode, direct_optab, op0, op1, target,
1744 unsignedp, OPTAB_DIRECT);
1745 if (temp || methods == OPTAB_DIRECT)
/* Build a copy of SOPTAB with its handler and libfunc for MODE erased,
   so the widening attempt below cannot trivially "succeed" by using a
   same-width signed insn (which was already tried above).  */
1748 /* Try widening to a signed int. Make a fake signed optab that
1749 hides any signed insn for direct use. */
1750 wide_soptab = *soptab;
1751 wide_soptab.handlers[(int) mode].insn_code = CODE_FOR_nothing;
1752 wide_soptab.handlers[(int) mode].libfunc = 0;
1754 temp = expand_binop (mode, &wide_soptab, op0, op1, target,
1755 unsignedp, OPTAB_WIDEN);
1757 /* For unsigned operands, try widening to an unsigned int. */
1758 if (temp == 0 && unsignedp)
1759 temp = expand_binop (mode, uoptab, op0, op1, target,
1760 unsignedp, OPTAB_WIDEN);
1761 if (temp || methods == OPTAB_WIDEN)
1764 /* Use the right width lib call if that exists. */
1765 temp = expand_binop (mode, direct_optab, op0, op1, target, unsignedp, OPTAB_LIB);
1766 if (temp || methods == OPTAB_LIB)
1769 /* Must widen and use a lib call, use either signed or unsigned. */
1770 temp = expand_binop (mode, &wide_soptab, op0, op1, target,
1771 unsignedp, methods);
/* Final fallback: widen via the unsigned optab with whatever METHODS
   remain (a library call at this point).  */
1775 return expand_binop (mode, uoptab, op0, op1, target,
1776 unsignedp, methods);
1780 /* Generate code to perform an operation specified by UNOPPTAB
1781 on operand OP0, with two results to TARG0 and TARG1.
1782 We assume that the order of the operands for the instruction
1783 is TARG0, TARG1, OP0.
1785 Either TARG0 or TARG1 may be zero, but what that means is that
1786 the result is not actually wanted. We will generate it into
1787 a dummy pseudo-reg and discard it. They may not both be zero.
1789 Returns 1 if this operation can be performed; 0 if not. */
1792 expand_twoval_unop (optab unoptab, rtx op0, rtx targ0, rtx targ1,
/* MODE is taken from whichever target the caller actually wants.  */
1795 enum machine_mode mode = GET_MODE (targ0 ? targ0 : targ1);
1796 enum mode_class class;
1797 enum machine_mode wider_mode;
/* Remember the insn stream position on entry so a total failure can
   discard everything we emitted.  */
1798 rtx entry_last = get_last_insn ();
1801 class = GET_MODE_CLASS (mode);
1804 op0 = force_not_mem (op0);
/* A missing target is materialized as a scratch pseudo; the result in
   it is simply discarded.  */
1807 targ0 = gen_reg_rtx (mode);
1809 targ1 = gen_reg_rtx (mode);
1811 /* Record where to go back to if we fail. */
1812 last = get_last_insn ();
/* First choice: a direct insn for this mode.  Operand 2 of the pattern
   is the input; operands 0 and 1 are the two outputs.  */
1814 if (unoptab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
1816 int icode = (int) unoptab->handlers[(int) mode].insn_code;
1817 enum machine_mode mode0 = insn_data[icode].operand[2].mode;
1821 if (GET_MODE (xop0) != VOIDmode
1822 && GET_MODE (xop0) != mode0)
1823 xop0 = convert_to_mode (mode0, xop0, unsignedp);
1825 /* Now, if insn doesn't accept these operands, put them into pseudos. */
1826 if (!insn_data[icode].operand[2].predicate (xop0, mode0))
1827 xop0 = copy_to_mode_reg (mode0, xop0);
1829 /* We could handle this, but we should always be called with a pseudo
1830 for our targets and all insns should take them as outputs. */
1831 gcc_assert (insn_data[icode].operand[0].predicate (targ0, mode));
1832 gcc_assert (insn_data[icode].operand[1].predicate (targ1, mode));
1834 pat = GEN_FCN (icode) (targ0, targ1, xop0);
/* Pattern generation failed (or was rejected on an elided line):
   back out the partial sequence and fall through to widening.  */
1841 delete_insns_since (last);
1844 /* It can't be done in this mode. Can we do it in a wider mode? */
1846 if (class == MODE_INT || class == MODE_FLOAT || class == MODE_COMPLEX_FLOAT)
1848 for (wider_mode = GET_MODE_WIDER_MODE (mode); wider_mode != VOIDmode;
1849 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
1851 if (unoptab->handlers[(int) wider_mode].insn_code
1852 != CODE_FOR_nothing)
/* Recurse at the wider mode into scratch pseudos, then narrow the
   two results back down into the caller's targets.  */
1854 rtx t0 = gen_reg_rtx (wider_mode);
1855 rtx t1 = gen_reg_rtx (wider_mode);
1856 rtx cop0 = convert_modes (wider_mode, mode, op0, unsignedp);
1858 if (expand_twoval_unop (unoptab, cop0, t0, t1, unsignedp))
1860 convert_move (targ0, t0, unsignedp);
1861 convert_move (targ1, t1, unsignedp);
1865 delete_insns_since (last);
/* All attempts failed: erase everything emitted since entry.  The
   "return 0" presumably follows on an elided line.  */
1870 delete_insns_since (entry_last);
1874 /* Generate code to perform an operation specified by BINOPTAB
1875 on operands OP0 and OP1, with two results to TARG1 and TARG2.
1876 We assume that the order of the operands for the instruction
1877 is TARG0, OP0, OP1, TARG1, which would fit a pattern like
1878 [(set TARG0 (operate OP0 OP1)) (set TARG1 (operate ...))].
1880 Either TARG0 or TARG1 may be zero, but what that means is that
1881 the result is not actually wanted. We will generate it into
1882 a dummy pseudo-reg and discard it. They may not both be zero.
1884 Returns 1 if this operation can be performed; 0 if not. */
1887 expand_twoval_binop (optab binoptab, rtx op0, rtx op1, rtx targ0, rtx targ1,
/* MODE comes from whichever result the caller actually wants.  */
1890 enum machine_mode mode = GET_MODE (targ0 ? targ0 : targ1);
1891 enum mode_class class;
1892 enum machine_mode wider_mode;
/* Entry position, so a total failure can discard all emitted insns.  */
1893 rtx entry_last = get_last_insn ();
1896 class = GET_MODE_CLASS (mode);
1900 op0 = force_not_mem (op0);
1901 op1 = force_not_mem (op1);
1904 /* If we are inside an appropriately-short loop and we are optimizing,
1905 force expensive constants into a register. */
1906 if (CONSTANT_P (op0) && optimize
1907 && rtx_cost (op0, binoptab->code) > COSTS_N_INSNS (1))
1908 op0 = force_reg (mode, op0);
1910 if (CONSTANT_P (op1) && optimize
1911 && rtx_cost (op1, binoptab->code) > COSTS_N_INSNS (1))
1912 op1 = force_reg (mode, op1);
/* Unwanted results still need somewhere to go: use scratch pseudos.  */
1915 targ0 = gen_reg_rtx (mode);
1917 targ1 = gen_reg_rtx (mode);
1919 /* Record where to go back to if we fail. */
1920 last = get_last_insn ();
/* First choice: a direct insn.  Pattern operand order is
   TARG0, OP0, OP1, TARG1 (see the comment above this function).  */
1922 if (binoptab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
1924 int icode = (int) binoptab->handlers[(int) mode].insn_code;
1925 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
1926 enum machine_mode mode1 = insn_data[icode].operand[2].mode;
1928 rtx xop0 = op0, xop1 = op1;
1930 /* In case the insn wants input operands in modes different from
1931 those of the actual operands, convert the operands. It would
1932 seem that we don't need to convert CONST_INTs, but we do, so
1933 that they're properly zero-extended, sign-extended or truncated
1936 if (GET_MODE (op0) != mode0 && mode0 != VOIDmode)
1937 xop0 = convert_modes (mode0,
1938 GET_MODE (op0) != VOIDmode
1943 if (GET_MODE (op1) != mode1 && mode1 != VOIDmode)
1944 xop1 = convert_modes (mode1,
1945 GET_MODE (op1) != VOIDmode
1950 /* Now, if insn doesn't accept these operands, put them into pseudos. */
1951 if (!insn_data[icode].operand[1].predicate (xop0, mode0))
1952 xop0 = copy_to_mode_reg (mode0, xop0)
1954 if (!insn_data[icode].operand[2].predicate (xop1, mode1))
1955 xop1 = copy_to_mode_reg (mode1, xop1);
1957 /* We could handle this, but we should always be called with a pseudo
1958 for our targets and all insns should take them as outputs. */
1959 gcc_assert (insn_data[icode].operand[0].predicate (targ0, mode));
1960 gcc_assert (insn_data[icode].operand[3].predicate (targ1, mode));
1962 pat = GEN_FCN (icode) (targ0, xop0, xop1, targ1);
/* Pattern generation failed: back out and try a wider mode.  */
1969 delete_insns_since (last);
1972 /* It can't be done in this mode. Can we do it in a wider mode? */
1974 if (class == MODE_INT || class == MODE_FLOAT || class == MODE_COMPLEX_FLOAT)
1976 for (wider_mode = GET_MODE_WIDER_MODE (mode); wider_mode != VOIDmode;
1977 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
1979 if (binoptab->handlers[(int) wider_mode].insn_code
1980 != CODE_FOR_nothing)
/* Widen both operands, recurse into wider scratch pseudos, then
   narrow both results back into the caller's targets.  */
1982 rtx t0 = gen_reg_rtx (wider_mode);
1983 rtx t1 = gen_reg_rtx (wider_mode);
1984 rtx cop0 = convert_modes (wider_mode, mode, op0, unsignedp);
1985 rtx cop1 = convert_modes (wider_mode, mode, op1, unsignedp);
1987 if (expand_twoval_binop (binoptab, cop0, cop1,
1990 convert_move (targ0, t0, unsignedp);
1991 convert_move (targ1, t1, unsignedp);
1995 delete_insns_since (last);
/* All attempts failed: erase everything since entry (the "return 0"
   presumably follows on an elided line).  */
2000 delete_insns_since (entry_last);
2004 /* Expand the two-valued library call indicated by BINOPTAB, but
2005 preserve only one of the values. If TARG0 is non-NULL, the first
2006 value is placed into TARG0; otherwise the second value is placed
2007 into TARG1. Exactly one of TARG0 and TARG1 must be non-NULL. The
2008 value stored into TARG0 or TARG1 is equivalent to (CODE OP0 OP1).
2009 This routine assumes that the value returned by the library call is
2010 as if the return value was of an integral mode twice as wide as the
2011 mode of OP0. Returns 1 if the call was successful. */
2014 expand_twoval_binop_libfunc (optab binoptab, rtx op0, rtx op1,
2015 rtx targ0, rtx targ1, enum rtx_code code)
2017 enum machine_mode mode;
2018 enum machine_mode libval_mode;
2022 /* Exactly one of TARG0 or TARG1 should be non-NULL. */
2023 gcc_assert (!targ0 != !targ1);
2025 mode = GET_MODE (op0);
/* No library function registered for this mode: fail (the "return 0"
   presumably follows on an elided line).  */
2026 if (!binoptab->handlers[(int) mode].libfunc)
2029 /* The value returned by the library function will have twice as
2030 many bits as the nominal MODE. */
2031 libval_mode = smallest_mode_for_size (2 * GET_MODE_BITSIZE (mode),
2034 libval = emit_library_call_value (binoptab->handlers[(int) mode].libfunc,
2035 NULL_RTX, LCT_CONST,
2039 /* Get the part of VAL containing the value that we want. */
/* Low half (offset 0) for TARG0, high half (offset GET_MODE_SIZE)
   for TARG1.  */
2040 libval = simplify_gen_subreg (mode, libval, libval_mode,
2041 targ0 ? 0 : GET_MODE_SIZE (mode));
2042 insns = get_insns ();
2044 /* Move the into the desired location. */
/* Wrap the call in a libcall block with a REG_EQUAL note of
   (CODE OP0 OP1) so CSE can eliminate duplicate calls.  */
2045 emit_libcall_block (insns, targ0 ? targ0 : targ1, libval,
2046 gen_rtx_fmt_ee (code, mode, op0, op1));
2052 /* Wrapper around expand_unop which takes an rtx code to specify
2053 the operation to perform, not an optab pointer. All other
2054 arguments are the same. */
2056 expand_simple_unop (enum machine_mode mode, enum rtx_code code, rtx op0,
2057 rtx target, int unsignedp)
/* Map the rtx CODE to its optab and delegate to expand_unop.  */
2059 optab unop = code_to_optab[(int) code];
2062 return expand_unop (mode, unop, op0, target, unsignedp);
2068 (clz:wide (zero_extend:wide x)) - ((width wide) - (width narrow)). */
2070 widen_clz (enum machine_mode mode, rtx op0, rtx target)
2072 enum mode_class class = GET_MODE_CLASS (mode);
2073 if (class == MODE_INT || class == MODE_FLOAT || class == MODE_COMPLEX_FLOAT)
2075 enum machine_mode wider_mode;
/* Walk up the widening chain looking for any mode with a clz insn.  */
2076 for (wider_mode = GET_MODE_WIDER_MODE (mode); wider_mode != VOIDmode;
2077 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
2079 if (clz_optab->handlers[(int) wider_mode].insn_code
2080 != CODE_FOR_nothing)
2082 rtx xop0, temp, last;
2084 last = get_last_insn ();
2087 target = gen_reg_rtx (mode);
/* Zero-extend the operand (unsignedp == true): the extra leading
   zeros are then compensated by the subtraction below, per the
   identity quoted in the comment above this function.  */
2088 xop0 = widen_operand (op0, wider_mode, mode, true, false);
2089 temp = expand_unop (wider_mode, clz_optab, xop0, NULL_RTX, true);
2091 temp = expand_binop (wider_mode, sub_optab, temp,
2092 GEN_INT (GET_MODE_BITSIZE (wider_mode)
2093 - GET_MODE_BITSIZE (mode)),
2094 target, true, OPTAB_DIRECT);
/* Widening attempt failed: discard the partial sequence and keep
   looking at still-wider modes.  */
2096 delete_insns_since (last);
2105 /* Try calculating (parity x) as (and (popcount x) 1), where
2106 popcount can also be done in a wider mode. */
2108 expand_parity (enum machine_mode mode, rtx op0, rtx target)
2110 enum mode_class class = GET_MODE_CLASS (mode);
2111 if (class == MODE_INT || class == MODE_FLOAT || class == MODE_COMPLEX_FLOAT)
2113 enum machine_mode wider_mode;
/* Unlike widen_clz, the search starts at MODE itself: a same-width
   popcount insn is also usable for (popcount x) & 1.  */
2114 for (wider_mode = mode; wider_mode != VOIDmode;
2115 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
2117 if (popcount_optab->handlers[(int) wider_mode].insn_code
2118 != CODE_FOR_nothing)
2120 rtx xop0, temp, last;
2122 last = get_last_insn ();
2125 target = gen_reg_rtx (mode);
/* Zero-extend: extra zero bits do not change the popcount, hence
   not the parity either.  */
2126 xop0 = widen_operand (op0, wider_mode, mode, true, false);
2127 temp = expand_unop (wider_mode, popcount_optab, xop0, NULL_RTX,
/* parity = low bit of the population count.  */
2130 temp = expand_binop (wider_mode, and_optab, temp, const1_rtx,
2131 target, true, OPTAB_DIRECT);
/* Attempt failed: discard the partial sequence and try wider.  */
2133 delete_insns_since (last);
2142 /* Extract the OMODE lowpart from VAL, which has IMODE. Under certain
2143 conditions, VAL may already be a SUBREG against which we cannot generate
2144 a further SUBREG. In this case, we expect forcing the value into a
2145 register will work around the situation. */
2148 lowpart_subreg_maybe_copy (enum machine_mode omode, rtx val,
2149 enum machine_mode imode)
2152 ret = lowpart_subreg (omode, val, imode);
/* lowpart_subreg failed (VAL was presumably already a SUBREG that
   cannot be nested — the elided line here likely tests ret == NULL):
   force VAL into a fresh register and retry, which must succeed.  */
2155 val = force_reg (imode, val);
2156 ret = lowpart_subreg (omode, val, imode);
2157 gcc_assert (ret != NULL);
2162 /* Expand a floating point absolute value or negation operation via a
2163 logical operation on the sign bit. */
2166 expand_absneg_bit (enum rtx_code code, enum machine_mode mode,
2167 rtx op0, rtx target)
2169 const struct real_format *fmt;
2170 int bitpos, word, nwords, i;
2171 enum machine_mode imode;
2172 HOST_WIDE_INT hi, lo;
2175 /* The format has to have a simple sign bit. */
2176 fmt = REAL_MODE_FORMAT (mode);
/* Position of the sign bit within the read/write image of the mode;
   an elided line presumably bails out when the format has none.  */
2180 bitpos = fmt->signbit_rw;
2184 /* Don't create negative zeros if the format doesn't support them. */
2185 if (code == NEG && !fmt->has_signed_zero)
/* Single-word case: operate on the whole value in the equivalent
   integer mode.  */
2188 if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
2190 imode = int_mode_for_mode (mode);
2191 if (imode == BLKmode)
/* Multi-word case: locate the word holding the sign bit.  */
2200 if (FLOAT_WORDS_BIG_ENDIAN)
2201 word = (GET_MODE_BITSIZE (mode) - bitpos) / BITS_PER_WORD;
2203 word = bitpos / BITS_PER_WORD;
2204 bitpos = bitpos % BITS_PER_WORD;
2205 nwords = (GET_MODE_BITSIZE (mode) + BITS_PER_WORD - 1) / BITS_PER_WORD;
/* Build the sign-bit mask as a (lo, hi) double-word constant.  */
2208 if (bitpos < HOST_BITS_PER_WIDE_INT)
2211 lo = (HOST_WIDE_INT) 1 << bitpos;
2215 hi = (HOST_WIDE_INT) 1 << (bitpos - HOST_BITS_PER_WIDE_INT);
/* TARGET must not alias OP0: the word loop below would otherwise
   clobber input words before reading them.  */
2221 if (target == 0 || target == op0)
2222 target = gen_reg_rtx (mode);
/* Word-by-word: ABS clears the sign bit with AND ~mask (the elided
   lines presumably complement the mask for ABS), NEG flips it with
   XOR mask; non-sign words are copied unchanged.  */
2228 for (i = 0; i < nwords; ++i)
2230 rtx targ_piece = operand_subword (target, i, 1, mode);
2231 rtx op0_piece = operand_subword_force (op0, i, mode);
2235 temp = expand_binop (imode, code == ABS ? and_optab : xor_optab,
2237 immed_double_const (lo, hi, imode),
2238 targ_piece, 1, OPTAB_LIB_WIDEN);
2239 if (temp != targ_piece)
2240 emit_move_insn (targ_piece, temp);
2243 emit_move_insn (targ_piece, op0_piece);
2246 insns = get_insns ();
/* Emit as a no-conflict block carrying (CODE OP0) as the REG_EQUAL
   equivalence for the combined result.  */
2249 temp = gen_rtx_fmt_e (code, mode, copy_rtx (op0));
2250 emit_no_conflict_block (insns, target, op0, NULL_RTX, temp);
/* Single-word path: one logical op on the integer-mode image, then
   view the result back in the float mode.  */
2254 temp = expand_binop (imode, code == ABS ? and_optab : xor_optab,
2255 gen_lowpart (imode, op0),
2256 immed_double_const (lo, hi, imode),
2257 gen_lowpart (imode, target), 1, OPTAB_LIB_WIDEN);
2258 target = lowpart_subreg_maybe_copy (mode, temp, imode);
2260 set_unique_reg_note (get_last_insn (), REG_EQUAL,
2261 gen_rtx_fmt_e (code, mode, copy_rtx (op0)));
/* NOTE(review): elided listing -- gaps in the embedded original line
   numbers mean statements are missing from view; code text preserved
   verbatim, comments only added.  */
2267 /* Generate code to perform an operation specified by UNOPTAB
2268 on operand OP0, with result having machine-mode MODE.
2270 UNSIGNEDP is for the case where we have to widen the operands
2271 to perform the operation. It says to use zero-extension.
2273 If TARGET is nonzero, the value
2274 is generated there, if it is convenient to do so.
2275 In all cases an rtx is returned for the locus of the value;
2276 this may or may not be TARGET. */
2279 expand_unop (enum machine_mode mode, optab unoptab, rtx op0, rtx target,
2282 enum mode_class class;
2283 enum machine_mode wider_mode;
/* LAST marks the insn stream position so failed strategies can be
   rolled back with delete_insns_since.  */
2285 rtx last = get_last_insn ();
2288 class = GET_MODE_CLASS (mode);
2291 op0 = force_not_mem (op0);
/* Strategy 1: a direct insn pattern for this optab in this mode.  */
2293 if (unoptab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
2295 int icode = (int) unoptab->handlers[(int) mode].insn_code;
2296 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
2302 temp = gen_reg_rtx (mode);
2304 if (GET_MODE (xop0) != VOIDmode
2305 && GET_MODE (xop0) != mode0)
2306 xop0 = convert_to_mode (mode0, xop0, unsignedp);
2308 /* Now, if insn doesn't accept our operand, put it into a pseudo. */
2310 if (!insn_data[icode].operand[1].predicate (xop0, mode0))
2311 xop0 = copy_to_mode_reg (mode0, xop0);
2313 if (!insn_data[icode].operand[0].predicate (temp, mode))
2314 temp = gen_reg_rtx (mode);
2316 pat = GEN_FCN (icode) (temp, xop0);
/* If a REG_EQUAL note can't be attached to a multi-insn expansion,
   retry from scratch without a preferred target.  */
2319 if (INSN_P (pat) && NEXT_INSN (pat) != NULL_RTX
2320 && ! add_equal_note (pat, temp, unoptab->code, xop0, NULL_RTX))
2322 delete_insns_since (last);
2323 return expand_unop (mode, unoptab, op0, NULL_RTX, unsignedp);
2331 delete_insns_since (last);
2334 /* It can't be done in this mode. Can we open-code it in a wider mode? */
2336 /* Widening clz needs special treatment. */
2337 if (unoptab == clz_optab)
2339 temp = widen_clz (mode, op0, target);
/* Strategy 2: open-code in a wider mode, then truncate back.  */
2346 if (class == MODE_INT || class == MODE_FLOAT || class == MODE_COMPLEX_FLOAT)
2347 for (wider_mode = GET_MODE_WIDER_MODE (mode); wider_mode != VOIDmode;
2348 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
2350 if (unoptab->handlers[(int) wider_mode].insn_code != CODE_FOR_nothing)
2354 /* For certain operations, we need not actually extend
2355 the narrow operand, as long as we will truncate the
2356 results to the same narrowness. */
2358 xop0 = widen_operand (xop0, wider_mode, mode, unsignedp,
2359 (unoptab == neg_optab
2360 || unoptab == one_cmpl_optab)
2361 && class == MODE_INT);
2363 temp = expand_unop (wider_mode, unoptab, xop0, NULL_RTX,
2368 if (class != MODE_INT)
2371 target = gen_reg_rtx (mode);
2372 convert_move (target, temp, 0);
2376 return gen_lowpart (mode, temp);
2379 delete_insns_since (last);
2383 /* These can be done a word at a time. */
2384 if (unoptab == one_cmpl_optab
2385 && class == MODE_INT
2386 && GET_MODE_SIZE (mode) > UNITS_PER_WORD
2387 && unoptab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing)
2392 if (target == 0 || target == op0)
2393 target = gen_reg_rtx (mode);
2397 /* Do the actual arithmetic. */
2398 for (i = 0; i < GET_MODE_BITSIZE (mode) / BITS_PER_WORD; i++)
2400 rtx target_piece = operand_subword (target, i, 1, mode);
2401 rtx x = expand_unop (word_mode, unoptab,
2402 operand_subword_force (op0, i, mode),
2403 target_piece, unsignedp);
2405 if (target_piece != x)
2406 emit_move_insn (target_piece, x);
2409 insns = get_insns ();
2412 emit_no_conflict_block (insns, target, op0, NULL_RTX,
2413 gen_rtx_fmt_e (unoptab->code, mode,
/* Strategy 3: NEG-specific fallbacks -- sign-bit flip for floats,
   subtraction from zero when signed zeros need not be honored.  */
2418 if (unoptab->code == NEG)
2420 /* Try negating floating point values by flipping the sign bit. */
2421 if (class == MODE_FLOAT)
2423 temp = expand_absneg_bit (NEG, mode, op0, target);
2428 /* If there is no negation pattern, and we have no negative zero,
2429 try subtracting from zero. */
2430 if (!HONOR_SIGNED_ZEROS (mode))
2432 temp = expand_binop (mode, (unoptab == negv_optab
2433 ? subv_optab : sub_optab),
2434 CONST0_RTX (mode), op0, target,
2435 unsignedp, OPTAB_DIRECT);
2441 /* Try calculating parity (x) as popcount (x) % 2. */
2442 if (unoptab == parity_optab)
2444 temp = expand_parity (mode, op0, target);
2450 /* Now try a library call in this mode. */
2451 if (unoptab->handlers[(int) mode].libfunc)
2455 enum machine_mode outmode = mode;
2457 /* All of these functions return small values. Thus we choose to
2458 have them return something that isn't a double-word. */
2459 if (unoptab == ffs_optab || unoptab == clz_optab || unoptab == ctz_optab
2460 || unoptab == popcount_optab || unoptab == parity_optab)
2462 = GET_MODE (hard_libcall_value (TYPE_MODE (integer_type_node)));
2466 /* Pass 1 for NO_QUEUE so we don't lose any increments
2467 if the libcall is cse'd or moved. */
2468 value = emit_library_call_value (unoptab->handlers[(int) mode].libfunc,
2469 NULL_RTX, LCT_CONST, outmode,
2471 insns = get_insns ();
2474 target = gen_reg_rtx (outmode);
2475 emit_libcall_block (insns, target, value,
2476 gen_rtx_fmt_e (unoptab->code, mode, op0));
2481 /* It can't be done in this mode. Can we do it in a wider mode? */
/* Strategy 4: like strategy 2, but also accepting wider-mode library
   functions, with a clz bit-width correction after widening.  */
2483 if (class == MODE_INT || class == MODE_FLOAT || class == MODE_COMPLEX_FLOAT)
2485 for (wider_mode = GET_MODE_WIDER_MODE (mode); wider_mode != VOIDmode;
2486 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
2488 if ((unoptab->handlers[(int) wider_mode].insn_code
2489 != CODE_FOR_nothing)
2490 || unoptab->handlers[(int) wider_mode].libfunc)
2494 /* For certain operations, we need not actually extend
2495 the narrow operand, as long as we will truncate the
2496 results to the same narrowness. */
2498 xop0 = widen_operand (xop0, wider_mode, mode, unsignedp,
2499 (unoptab == neg_optab
2500 || unoptab == one_cmpl_optab)
2501 && class == MODE_INT);
2503 temp = expand_unop (wider_mode, unoptab, xop0, NULL_RTX,
2506 /* If we are generating clz using wider mode, adjust the
2508 if (unoptab == clz_optab && temp != 0)
2509 temp = expand_binop (wider_mode, sub_optab, temp,
2510 GEN_INT (GET_MODE_BITSIZE (wider_mode)
2511 - GET_MODE_BITSIZE (mode)),
2512 target, true, OPTAB_DIRECT);
2516 if (class != MODE_INT)
2519 target = gen_reg_rtx (mode);
2520 convert_move (target, temp, 0);
2524 return gen_lowpart (mode, temp);
2527 delete_insns_since (last);
2532 /* One final attempt at implementing negation via subtraction,
2533 this time allowing widening of the operand. */
2534 if (unoptab->code == NEG && !HONOR_SIGNED_ZEROS (mode))
2537 temp = expand_binop (mode,
2538 unoptab == negv_optab ? subv_optab : sub_optab,
2539 CONST0_RTX (mode), op0,
2540 target, unsignedp, OPTAB_LIB_WIDEN);
/* NOTE(review): elided listing -- gaps in the embedded original line
   numbers mean statements are missing from view; code text preserved
   verbatim, comments only added.  */
2548 /* Emit code to compute the absolute value of OP0, with result to
2549 TARGET if convenient. (TARGET may be 0.) The return value says
2550 where the result actually is to be found.
2552 MODE is the mode of the operand; the mode of the result is
2553 different but can be deduced from MODE.
2558 expand_abs_nojump (enum machine_mode mode, rtx op0, rtx target,
2559 int result_unsignedp)
/* The guard forcing result_unsignedp to 1 (presumably when -ftrapv is
   off) is elided -- confirm against the full source.  */
2564 result_unsignedp = 1;
2566 /* First try to do it with a special abs instruction. */
2567 temp = expand_unop (mode, result_unsignedp ? abs_optab : absv_optab,
2572 /* For floating point modes, try clearing the sign bit. */
2573 if (GET_MODE_CLASS (mode) == MODE_FLOAT)
2575 temp = expand_absneg_bit (ABS, mode, op0, target);
2580 /* If we have a MAX insn, we can do this as MAX (x, -x). */
2581 if (smax_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing
2582 && !HONOR_SIGNED_ZEROS (mode))
2584 rtx last = get_last_insn ();
2586 temp = expand_unop (mode, neg_optab, op0, NULL_RTX, 0);
2588 temp = expand_binop (mode, smax_optab, op0, temp, target, 0,
/* Roll back the failed MAX attempt before trying the next strategy.  */
2594 delete_insns_since (last);
2597 /* If this machine has expensive jumps, we can do integer absolute
2598 value of X as (((signed) x >> (W-1)) ^ x) - ((signed) x >> (W-1)),
2599 where W is the width of MODE. */
2601 if (GET_MODE_CLASS (mode) == MODE_INT && BRANCH_COST >= 2)
2603 rtx extended = expand_shift (RSHIFT_EXPR, mode, op0,
2604 size_int (GET_MODE_BITSIZE (mode) - 1),
2607 temp = expand_binop (mode, xor_optab, extended, op0, target, 0,
2610 temp = expand_binop (mode, result_unsignedp ? sub_optab : subv_optab,
2611 temp, extended, target, 0, OPTAB_LIB_WIDEN);
/* NOTE(review): elided listing -- gaps in the embedded original line
   numbers mean statements are missing from view; code text preserved
   verbatim, comments only added.
   Compute |OP0| like expand_abs_nojump, but fall back to an explicit
   compare-against-zero plus conditional negate when the jumpless
   strategies fail.  SAFE nonzero means TARGET may equal OP0.  */
2621 expand_abs (enum machine_mode mode, rtx op0, rtx target,
2622 int result_unsignedp, int safe)
2627 result_unsignedp = 1;
2629 temp = expand_abs_nojump (mode, op0, target, result_unsignedp);
2633 /* If that does not win, use conditional jump and negate. */
2635 /* It is safe to use the target if it is the same
2636 as the source if this is also a pseudo register */
2637 if (op0 == target && REG_P (op0)
2638 && REGNO (op0) >= FIRST_PSEUDO_REGISTER
/* op1 is reused here as the join label, not an operand.  */
2641 op1 = gen_label_rtx ();
2642 if (target == 0 || ! safe
2643 || GET_MODE (target) != mode
2644 || (MEM_P (target) && MEM_VOLATILE_P (target))
2646 && REGNO (target) < FIRST_PSEUDO_REGISTER))
2647 target = gen_reg_rtx (mode);
2649 emit_move_insn (target, op0);
2652 /* If this mode is an integer too wide to compare properly,
2653 compare word by word. Rely on CSE to optimize constant cases. */
2654 if (GET_MODE_CLASS (mode) == MODE_INT
2655 && ! can_compare_p (GE, mode, ccp_jump))
2656 do_jump_by_parts_greater_rtx (mode, 0, target, const0_rtx,
2659 do_compare_rtx_and_jump (target, CONST0_RTX (mode), GE, 0, mode,
2660 NULL_RTX, NULL_RTX, op1);
/* Negative path: negate in place, then fall through to the label.  */
2662 op0 = expand_unop (mode, result_unsignedp ? neg_optab : negv_optab,
2665 emit_move_insn (target, op0);
/* NOTE(review): elided listing -- gaps in the embedded original line
   numbers mean statements are missing from view; code text preserved
   verbatim, comments only added.  */
2671 /* A subroutine of expand_copysign, perform the copysign operation using the
2672 abs and neg primitives advertised to exist on the target. The assumption
2673 is that we have a split register file, and leaving op0 in fp registers,
2674 and not playing with subregs so much, will help the register allocator. */
2677 expand_copysign_absneg (enum machine_mode mode, rtx op0, rtx op1, rtx target,
2678 int bitpos, bool op0_is_abs)
2680 enum machine_mode imode;
2681 HOST_WIDE_INT hi, lo;
/* First force op0 to |op0| unless the caller proved it already is.  */
2690 op0 = expand_unop (mode, abs_optab, op0, target, 0);
2697 if (target == NULL_RTX)
2698 target = copy_to_reg (op0);
2700 emit_move_insn (target, op0);
/* Extract the sign of op1: view it (or the word holding the sign bit)
   as an integer and mask the sign bit.  */
2703 if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
2705 imode = int_mode_for_mode (mode);
2706 if (imode == BLKmode)
2708 op1 = gen_lowpart (imode, op1);
2713 if (FLOAT_WORDS_BIG_ENDIAN)
2714 word = (GET_MODE_BITSIZE (mode) - bitpos) / BITS_PER_WORD;
2716 word = bitpos / BITS_PER_WORD;
2717 bitpos = bitpos % BITS_PER_WORD;
2718 op1 = operand_subword_force (op1, word, mode);
2721 if (bitpos < HOST_BITS_PER_WIDE_INT)
2724 lo = (HOST_WIDE_INT) 1 << bitpos;
2728 hi = (HOST_WIDE_INT) 1 << (bitpos - HOST_BITS_PER_WIDE_INT);
2732 op1 = expand_binop (imode, and_optab, op1,
2733 immed_double_const (lo, hi, imode),
2734 NULL_RTX, 1, OPTAB_LIB_WIDEN);
/* Branch past the negation when op1's sign bit is clear; otherwise
   negate the (absolute) value in TARGET.  */
2736 label = gen_label_rtx ();
2737 emit_cmp_and_jump_insns (op1, const0_rtx, EQ, NULL_RTX, imode, 1, label);
2739 if (GET_CODE (op0) == CONST_DOUBLE)
2740 op0 = simplify_unary_operation (NEG, mode, op0, mode);
2742 op0 = expand_unop (mode, neg_optab, op0, target, 0);
2744 emit_move_insn (target, op0);
/* NOTE(review): elided listing -- gaps in the embedded original line
   numbers mean statements are missing from view; code text preserved
   verbatim, comments only added.  */
2752 /* A subroutine of expand_copysign, perform the entire copysign operation
2753 with integer bitmasks. BITPOS is the position of the sign bit; OP0_IS_ABS
2754 is true if op0 is known to have its sign bit clear. */
2757 expand_copysign_bit (enum machine_mode mode, rtx op0, rtx op1, rtx target,
2758 int bitpos, bool op0_is_abs)
2760 enum machine_mode imode;
2761 HOST_WIDE_INT hi, lo;
2762 int word, nwords, i;
/* Choose the integer mode used for the bit twiddling; multi-word modes
   are processed one word at a time below.  */
2765 if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
2767 imode = int_mode_for_mode (mode);
2768 if (imode == BLKmode)
2777 if (FLOAT_WORDS_BIG_ENDIAN)
2778 word = (GET_MODE_BITSIZE (mode) - bitpos) / BITS_PER_WORD;
2780 word = bitpos / BITS_PER_WORD;
2781 bitpos = bitpos % BITS_PER_WORD;
2782 nwords = (GET_MODE_BITSIZE (mode) + BITS_PER_WORD - 1) / BITS_PER_WORD;
/* <lo,hi> is the sign-bit mask.  */
2785 if (bitpos < HOST_BITS_PER_WIDE_INT)
2788 lo = (HOST_WIDE_INT) 1 << bitpos;
2792 hi = (HOST_WIDE_INT) 1 << (bitpos - HOST_BITS_PER_WIDE_INT);
2796 if (target == 0 || target == op0 || target == op1)
2797 target = gen_reg_rtx (mode);
/* Word-at-a-time: result word = (op0 word & ~mask) | (op1 word & mask)
   on the sign-carrying word; other words are copied from op0.  */
2803 for (i = 0; i < nwords; ++i)
2805 rtx targ_piece = operand_subword (target, i, 1, mode);
2806 rtx op0_piece = operand_subword_force (op0, i, mode);
2811 op0_piece = expand_binop (imode, and_optab, op0_piece,
2812 immed_double_const (~lo, ~hi, imode),
2813 NULL_RTX, 1, OPTAB_LIB_WIDEN);
2815 op1 = expand_binop (imode, and_optab,
2816 operand_subword_force (op1, i, mode),
2817 immed_double_const (lo, hi, imode),
2818 NULL_RTX, 1, OPTAB_LIB_WIDEN);
2820 temp = expand_binop (imode, ior_optab, op0_piece, op1,
2821 targ_piece, 1, OPTAB_LIB_WIDEN);
2822 if (temp != targ_piece)
2823 emit_move_insn (targ_piece, temp);
2826 emit_move_insn (targ_piece, op0_piece);
2829 insns = get_insns ();
2832 emit_no_conflict_block (insns, target, op0, op1, NULL_RTX);
/* Single-word path: same mask algebra on the lowpart views.  The AND
   with ~mask is presumably skipped when op0_is_abs (guard elided).  */
2836 op1 = expand_binop (imode, and_optab, gen_lowpart (imode, op1),
2837 immed_double_const (lo, hi, imode),
2838 NULL_RTX, 1, OPTAB_LIB_WIDEN);
2840 op0 = gen_lowpart (imode, op0);
2842 op0 = expand_binop (imode, and_optab, op0,
2843 immed_double_const (~lo, ~hi, imode),
2844 NULL_RTX, 1, OPTAB_LIB_WIDEN);
2846 temp = expand_binop (imode, ior_optab, op0, op1,
2847 gen_lowpart (imode, target), 1, OPTAB_LIB_WIDEN);
2848 target = lowpart_subreg_maybe_copy (mode, temp, imode);
/* NOTE(review): elided listing -- gaps in the embedded original line
   numbers mean statements are missing from view; code text preserved
   verbatim, comments only added.  */
2854 /* Expand the C99 copysign operation. OP0 and OP1 must be the same
2855 scalar floating point mode. Return NULL if we do not know how to
2856 expand the operation inline. */
2859 expand_copysign (rtx op0, rtx op1, rtx target)
2861 enum machine_mode mode = GET_MODE (op0);
2862 const struct real_format *fmt;
2866 gcc_assert (SCALAR_FLOAT_MODE_P (mode));
2867 gcc_assert (GET_MODE (op1) == mode);
2869 /* First try to do it with a special instruction. */
2870 temp = expand_binop (mode, copysign_optab, op0, op1,
2871 target, 0, OPTAB_DIRECT);
/* Without a signed zero there is no sign bit to copy; give up.  */
2875 fmt = REAL_MODE_FORMAT (mode);
2876 if (fmt == NULL || !fmt->has_signed_zero)
/* A constant op0 can be made non-negative at compile time, letting the
   absneg path skip the runtime abs.  */
2880 if (GET_CODE (op0) == CONST_DOUBLE)
2882 if (real_isneg (CONST_DOUBLE_REAL_VALUE (op0)))
2883 op0 = simplify_unary_operation (ABS, mode, op0, mode);
/* Prefer the abs/neg strategy when the format exposes a read-only sign
   bit and the target has both neg and abs patterns.  */
2887 if (fmt->signbit_ro >= 0
2888 && (GET_CODE (op0) == CONST_DOUBLE
2889 || (neg_optab->handlers[mode].insn_code != CODE_FOR_nothing
2890 && abs_optab->handlers[mode].insn_code != CODE_FOR_nothing)))
2892 temp = expand_copysign_absneg (mode, op0, op1, target,
2893 fmt->signbit_ro, op0_is_abs);
/* Fall back to integer bit masking; impossible if the sign bit is not
   writable as an integer (signbit_rw < 0).  */
2898 if (fmt->signbit_rw < 0)
2900 return expand_copysign_bit (mode, op0, op1, target,
2901 fmt->signbit_rw, op0_is_abs);
/* NOTE(review): elided listing -- gaps in the embedded original line
   numbers mean statements are missing from view; code text preserved
   verbatim, comments only added.  */
2904 /* Generate an instruction whose insn-code is INSN_CODE,
2905 with two operands: an output TARGET and an input OP0.
2906 TARGET *must* be nonzero, and the output is always stored there.
2907 CODE is an rtx code such that (CODE OP0) is an rtx that describes
2908 the value that is stored into TARGET. */
2911 emit_unop_insn (int icode, rtx target, rtx op0, enum rtx_code code)
2914 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
2919 /* Sign and zero extension from memory is often done specially on
2920 RISC machines, so forcing into a register here can pessimize
2922 if (flag_force_mem && code != SIGN_EXTEND && code != ZERO_EXTEND)
2923 op0 = force_not_mem (op0);
2925 /* Now, if insn does not accept our operands, put them into pseudos. */
2927 if (!insn_data[icode].operand[1].predicate (op0, mode0))
2928 op0 = copy_to_mode_reg (mode0, op0);
2930 if (!insn_data[icode].operand[0].predicate (temp, GET_MODE (temp))
2931 || (flag_force_mem && MEM_P (temp)))
2932 temp = gen_reg_rtx (GET_MODE (temp));
2934 pat = GEN_FCN (icode) (temp, op0);
/* For multi-insn expansions, record (CODE OP0) as the value computed
   via a REG_EQUAL note, unless CODE is UNKNOWN.  */
2936 if (INSN_P (pat) && NEXT_INSN (pat) != NULL_RTX && code != UNKNOWN)
2937 add_equal_note (pat, temp, code, op0, NULL_RTX);
/* If TEMP had to be a fresh pseudo, copy it back into TARGET (the
   guard comparing temp to target is elided -- confirm).  */
2942 emit_move_insn (target, temp);
/* NOTE(review): elided listing -- gaps in the embedded original line
   numbers mean statements are missing from view; code text preserved
   verbatim, comments only added.  */
2945 /* Emit code to perform a series of operations on a multi-word quantity, one
2948 Such a block is preceded by a CLOBBER of the output, consists of multiple
2949 insns, each setting one word of the output, and followed by a SET copying
2950 the output to itself.
2952 Each of the insns setting words of the output receives a REG_NO_CONFLICT
2953 note indicating that it doesn't conflict with the (also multi-word)
2954 inputs. The entire block is surrounded by REG_LIBCALL and REG_RETVAL
2957 INSNS is a block of code generated to perform the operation, not including
2958 the CLOBBER and final copy. All insns that compute intermediate values
2959 are first emitted, followed by the block as described above.
2961 TARGET, OP0, and OP1 are the output and inputs of the operations,
2962 respectively. OP1 may be zero for a unary operation.
2964 EQUIV, if nonzero, is an expression to be placed into a REG_EQUAL note
2967 If TARGET is not a register, INSNS is simply emitted with no special
2968 processing. Likewise if anything in INSNS is not an INSN or if
2969 there is a libcall block inside INSNS.
2971 The final insn emitted is returned. */
2974 emit_no_conflict_block (rtx insns, rtx target, rtx op0, rtx op1, rtx equiv)
2976 rtx prev, next, first, last, insn;
/* Bail-out cases: non-register target, reload in progress, or INSNS
   containing anything other than plain insns / nested libcalls.  */
2978 if (!REG_P (target) || reload_in_progress)
2979 return emit_insn (insns);
2981 for (insn = insns; insn; insn = NEXT_INSN (insn))
2982 if (!NONJUMP_INSN_P (insn)
2983 || find_reg_note (insn, REG_LIBCALL, NULL_RTX))
2984 return emit_insn (insns);
2986 /* First emit all insns that do not store into words of the output and remove
2987 these from the list. */
2988 for (insn = insns; insn; insn = next)
2993 next = NEXT_INSN (insn);
2995 /* Some ports (cris) create a libcall regions at their own. We must
2996 avoid any potential nesting of LIBCALLs. */
2997 if ((note = find_reg_note (insn, REG_LIBCALL, NULL)) != NULL)
2998 remove_note (insn, note);
2999 if ((note = find_reg_note (insn, REG_RETVAL, NULL)) != NULL)
3000 remove_note (insn, note);
/* Find the SET pattern of this insn, looking inside PARALLELs.  */
3002 if (GET_CODE (PATTERN (insn)) == SET || GET_CODE (PATTERN (insn)) == USE
3003 || GET_CODE (PATTERN (insn)) == CLOBBER)
3004 set = PATTERN (insn);
3005 else if (GET_CODE (PATTERN (insn)) == PARALLEL)
3007 for (i = 0; i < XVECLEN (PATTERN (insn), 0); i++)
3008 if (GET_CODE (XVECEXP (PATTERN (insn), 0, i)) == SET)
3010 set = XVECEXP (PATTERN (insn), 0, i);
/* Insns not writing TARGET are unlinked from the list and emitted
   ahead of the protected block.  */
3017 if (! reg_overlap_mentioned_p (target, SET_DEST (set)))
3019 if (PREV_INSN (insn))
3020 NEXT_INSN (PREV_INSN (insn)) = next;
3025 PREV_INSN (next) = PREV_INSN (insn);
3031 prev = get_last_insn ();
3033 /* Now write the CLOBBER of the output, followed by the setting of each
3034 of the words, followed by the final copy. */
3035 if (target != op0 && target != op1)
3036 emit_insn (gen_rtx_CLOBBER (VOIDmode, target));
3038 for (insn = insns; insn; insn = next)
3040 next = NEXT_INSN (insn);
/* Attach REG_NO_CONFLICT notes for each register input.  */
3043 if (op1 && REG_P (op1))
3044 REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_NO_CONFLICT, op1,
3047 if (op0 && REG_P (op0))
3048 REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_NO_CONFLICT, op0,
3052 if (mov_optab->handlers[(int) GET_MODE (target)].insn_code
3053 != CODE_FOR_nothing)
3055 last = emit_move_insn (target, target);
3057 set_unique_reg_note (last, REG_EQUAL, equiv);
3061 last = get_last_insn ();
3063 /* Remove any existing REG_EQUAL note from "last", or else it will
3064 be mistaken for a note referring to the full contents of the
3065 alleged libcall value when found together with the REG_RETVAL
3066 note added below. An existing note can come from an insn
3067 expansion at "last". */
3068 remove_note (last, find_reg_note (last, REG_EQUAL, NULL_RTX));
3072 first = get_insns ();
3074 first = NEXT_INSN (prev);
3076 /* Encapsulate the block so it gets manipulated as a unit. */
3077 REG_NOTES (first) = gen_rtx_INSN_LIST (REG_LIBCALL, last,
3079 REG_NOTES (last) = gen_rtx_INSN_LIST (REG_RETVAL, first, REG_NOTES (last));
/* NOTE(review): elided listing -- gaps in the embedded original line
   numbers mean statements are missing from view; code text preserved
   verbatim, comments only added.  */
3084 /* Emit code to make a call to a constant function or a library call.
3086 INSNS is a list containing all insns emitted in the call.
3087 These insns leave the result in RESULT. Our block is to copy RESULT
3088 to TARGET, which is logically equivalent to EQUIV.
3090 We first emit any insns that set a pseudo on the assumption that these are
3091 loading constants into registers; doing so allows them to be safely cse'ed
3092 between blocks. Then we emit all the other insns in the block, followed by
3093 an insn to move RESULT to TARGET. This last insn will have a REQ_EQUAL
3094 note with an operand of EQUIV.
3096 Moving assignments to pseudos outside of the block is done to improve
3097 the generated code, but is not required to generate correct code,
3098 hence being unable to move an assignment is not grounds for not making
3099 a libcall block. There are two reasons why it is safe to leave these
3100 insns inside the block: First, we know that these pseudos cannot be
3101 used in generated RTL outside the block since they are created for
3102 temporary purposes within the block. Second, CSE will not record the
3103 values of anything set inside a libcall block, so we know they must
3104 be dead at the end of the block.
3106 Except for the first group of insns (the ones setting pseudos), the
3107 block is delimited by REG_RETVAL and REG_LIBCALL notes. */
3110 emit_libcall_block (rtx insns, rtx target, rtx result, rtx equiv)
3112 rtx final_dest = target;
3113 rtx prev, next, first, last, insn;
3115 /* If this is a reg with REG_USERVAR_P set, then it could possibly turn
3116 into a MEM later. Protect the libcall block from this change. */
3117 if (! REG_P (target) || REG_USERVAR_P (target))
3118 target = gen_reg_rtx (GET_MODE (target));
3120 /* If we're using non-call exceptions, a libcall corresponding to an
3121 operation that may trap may also trap. */
3122 if (flag_non_call_exceptions && may_trap_p (equiv))
3124 for (insn = insns; insn; insn = NEXT_INSN (insn))
3127 rtx note = find_reg_note (insn, REG_EH_REGION, NULL_RTX);
/* Strip "cannot throw" EH markers (region <= 0) so the trap can
   propagate.  */
3129 if (note != 0 && INTVAL (XEXP (note, 0)) <= 0)
3130 remove_note (insn, note);
3134 /* look for any CALL_INSNs in this sequence, and attach a REG_EH_REGION
3135 reg note to indicate that this call cannot throw or execute a nonlocal
3136 goto (unless there is already a REG_EH_REGION note, in which case
3138 for (insn = insns; insn; insn = NEXT_INSN (insn))
3141 rtx note = find_reg_note (insn, REG_EH_REGION, NULL_RTX);
3144 XEXP (note, 0) = constm1_rtx;
3146 REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_EH_REGION, constm1_rtx,
3150 /* First emit all insns that set pseudos. Remove them from the list as
3151 we go. Avoid insns that set pseudos which were referenced in previous
3152 insns. These can be generated by move_by_pieces, for example,
3153 to update an address. Similarly, avoid insns that reference things
3154 set in previous insns. */
3156 for (insn = insns; insn; insn = next)
3158 rtx set = single_set (insn);
3161 /* Some ports (cris) create a libcall regions at their own. We must
3162 avoid any potential nesting of LIBCALLs. */
3163 if ((note = find_reg_note (insn, REG_LIBCALL, NULL)) != NULL)
3164 remove_note (insn, note);
3165 if ((note = find_reg_note (insn, REG_RETVAL, NULL)) != NULL)
3166 remove_note (insn, note);
3168 next = NEXT_INSN (insn);
/* Hoistable insn: sets a pseudo and neither uses nor clobbers
   anything touched elsewhere in the sequence; unlink it so it is
   emitted before the libcall block.  */
3170 if (set != 0 && REG_P (SET_DEST (set))
3171 && REGNO (SET_DEST (set)) >= FIRST_PSEUDO_REGISTER
3173 || ((! INSN_P(insns)
3174 || ! reg_mentioned_p (SET_DEST (set), PATTERN (insns)))
3175 && ! reg_used_between_p (SET_DEST (set), insns, insn)
3176 && ! modified_in_p (SET_SRC (set), insns)
3177 && ! modified_between_p (SET_SRC (set), insns, insn))))
3179 if (PREV_INSN (insn))
3180 NEXT_INSN (PREV_INSN (insn)) = next;
3185 PREV_INSN (next) = PREV_INSN (insn);
3190 /* Some ports use a loop to copy large arguments onto the stack.
3191 Don't move anything outside such a loop. */
3196 prev = get_last_insn ();
3198 /* Write the remaining insns followed by the final copy. */
3200 for (insn = insns; insn; insn = next)
3202 next = NEXT_INSN (insn);
3207 last = emit_move_insn (target, result);
3208 if (mov_optab->handlers[(int) GET_MODE (target)].insn_code
3209 != CODE_FOR_nothing)
3210 set_unique_reg_note (last, REG_EQUAL, copy_rtx (equiv));
3213 /* Remove any existing REG_EQUAL note from "last", or else it will
3214 be mistaken for a note referring to the full contents of the
3215 libcall value when found together with the REG_RETVAL note added
3216 below. An existing note can come from an insn expansion at
3218 remove_note (last, find_reg_note (last, REG_EQUAL, NULL_RTX));
3221 if (final_dest != target)
3222 emit_move_insn (final_dest, target);
3225 first = get_insns ();
3227 first = NEXT_INSN (prev);
3229 /* Encapsulate the block so it gets manipulated as a unit. */
3230 if (!flag_non_call_exceptions || !may_trap_p (equiv))
3232 /* We can't attach the REG_LIBCALL and REG_RETVAL notes
3233 when the encapsulated region would not be in one basic block,
3234 i.e. when there is a control_flow_insn_p insn between FIRST and LAST.
3236 bool attach_libcall_retval_notes = true;
3237 next = NEXT_INSN (last);
3238 for (insn = first; insn != next; insn = NEXT_INSN (insn))
3239 if (control_flow_insn_p (insn))
3241 attach_libcall_retval_notes = false;
3245 if (attach_libcall_retval_notes)
3247 REG_NOTES (first) = gen_rtx_INSN_LIST (REG_LIBCALL, last,
3249 REG_NOTES (last) = gen_rtx_INSN_LIST (REG_RETVAL, first,
/* NOTE(review): elided listing -- gaps in the embedded original line
   numbers mean statements (including the `do` opening this widening
   loop) are missing from view; code text preserved verbatim, comments
   only added.  */
3255 /* Nonzero if we can perform a comparison of mode MODE straightforwardly.
3256 PURPOSE describes how this comparison will be used. CODE is the rtx
3257 comparison code we will be using.
3259 ??? Actually, CODE is slightly weaker than that. A target is still
3260 required to implement all of the normal bcc operations, but not
3261 required to implement all (or any) of the unordered bcc operations. */
3264 can_compare_p (enum rtx_code code, enum machine_mode mode,
3265 enum can_compare_purpose purpose)
/* Walk from MODE through successively wider modes, accepting the first
   mode the target can compare in for the requested PURPOSE.  */
3269 if (cmp_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
3271 if (purpose == ccp_jump)
3272 return bcc_gen_fctn[(int) code] != NULL;
3273 else if (purpose == ccp_store_flag)
3274 return setcc_gen_code[(int) code] != CODE_FOR_nothing;
3276 /* There's only one cmov entry point, and it's allowed to fail. */
/* Combined compare-and-branch / compare-and-store patterns also count.  */
3279 if (purpose == ccp_jump
3280 && cbranch_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
3282 if (purpose == ccp_cmov
3283 && cmov_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
3285 if (purpose == ccp_store_flag
3286 && cstore_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
3288 mode = GET_MODE_WIDER_MODE (mode);
3290 while (mode != VOIDmode);
/* NOTE(review): elided listing -- gaps in the embedded original line
   numbers mean statements are missing from view; code text preserved
   verbatim, comments only added.  */
3295 /* This function is called when we are going to emit a compare instruction that
3296 compares the values found in *PX and *PY, using the rtl operator COMPARISON.
3298 *PMODE is the mode of the inputs (in case they are const_int).
3299 *PUNSIGNEDP nonzero says that the operands are unsigned;
3300 this matters if they need to be widened.
3302 If they have mode BLKmode, then SIZE specifies the size of both operands.
3304 This function performs all the setup necessary so that the caller only has
3305 to emit a single comparison insn. This setup can involve doing a BLKmode
3306 comparison or emitting a library call to perform the comparison if no insn
3307 is available to handle it.
3308 The values which are passed in through pointers can be modified; the caller
3309 should perform the comparison on the modified values. */
3312 prepare_cmp_insn (rtx *px, rtx *py, enum rtx_code *pcomparison, rtx size,
3313 enum machine_mode *pmode, int *punsignedp,
3314 enum can_compare_purpose purpose)
3316 enum machine_mode mode = *pmode;
3317 rtx x = *px, y = *py;
3318 int unsignedp = *punsignedp;
3319 enum mode_class class;
3321 class = GET_MODE_CLASS (mode);
3323 /* They could both be VOIDmode if both args are immediate constants,
3324 but we should fold that at an earlier stage.
3325 With no special code here, this will call abort,
3326 reminding the programmer to implement such folding. */
3328 if (mode != BLKmode && flag_force_mem)
3330 /* Load duplicate non-volatile operands once. */
3331 if (rtx_equal_p (x, y) && ! volatile_refs_p (x))
3333 x = force_not_mem (x);
/* y aliases x in the duplicate case (assignment elided); otherwise
   both operands are pulled out of memory independently.  */
3338 x = force_not_mem (x);
3339 y = force_not_mem (y);
3343 /* If we are inside an appropriately-short loop and we are optimizing,
3344 force expensive constants into a register. */
3345 if (CONSTANT_P (x) && optimize
3346 && rtx_cost (x, COMPARE) > COSTS_N_INSNS (1))
3347 x = force_reg (mode, x);
3349 if (CONSTANT_P (y) && optimize
3350 && rtx_cost (y, COMPARE) > COSTS_N_INSNS (1))
3351 y = force_reg (mode, y);
3354 /* Abort if we have a non-canonical comparison. The RTL documentation
3355 states that canonical comparisons are required only for targets which
3357 gcc_assert (!CONSTANT_P (x) || CONSTANT_P (y));
3360 /* Don't let both operands fail to indicate the mode. */
3361 if (GET_MODE (x) == VOIDmode && GET_MODE (y) == VOIDmode)
3362 x = force_reg (mode, x);
3364 /* Handle all BLKmode compares. */
3366 if (mode == BLKmode)
3368 enum machine_mode cmp_mode, result_mode;
3369 enum insn_code cmp_code;
/* opalign = min alignment of the two blocks, in bytes.  */
3374 = GEN_INT (MIN (MEM_ALIGN (x), MEM_ALIGN (y)) / BITS_PER_UNIT);
3378 /* Try to use a memory block compare insn - either cmpstr
3379 or cmpmem will do. */
3380 for (cmp_mode = GET_CLASS_NARROWEST_MODE (MODE_INT);
3381 cmp_mode != VOIDmode;
3382 cmp_mode = GET_MODE_WIDER_MODE (cmp_mode))
3384 cmp_code = cmpmem_optab[cmp_mode];
3385 if (cmp_code == CODE_FOR_nothing)
3386 cmp_code = cmpstr_optab[cmp_mode];
3387 if (cmp_code == CODE_FOR_nothing)
3390 /* Must make sure the size fits the insn's mode. */
3391 if ((GET_CODE (size) == CONST_INT
3392 && INTVAL (size) >= (1 << GET_MODE_BITSIZE (cmp_mode)))
3393 || (GET_MODE_BITSIZE (GET_MODE (size))
3394 > GET_MODE_BITSIZE (cmp_mode)))
3397 result_mode = insn_data[cmp_code].operand[0].mode;
3398 result = gen_reg_rtx (result_mode);
3399 size = convert_to_mode (cmp_mode, size, 1);
3400 emit_insn (GEN_FCN (cmp_code) (result, x, y, size, opalign));
/* On success *px/*py are rewritten so the caller compares the insn's
   result against zero (assignments elided).  */
3404 *pmode = result_mode;
3408 /* Otherwise call a library function, memcmp. */
3409 libfunc = memcmp_libfunc;
3410 length_type = sizetype;
3411 result_mode = TYPE_MODE (integer_type_node);
3412 cmp_mode = TYPE_MODE (length_type);
3413 size = convert_to_mode (TYPE_MODE (length_type), size,
3414 TYPE_UNSIGNED (length_type));
3416 result = emit_library_call_value (libfunc, 0, LCT_PURE_MAKE_BLOCK,
3423 *pmode = result_mode;
3427 /* Don't allow operands to the compare to trap, as that can put the
3428 compare and branch in different basic blocks. */
3429 if (flag_non_call_exceptions)
3432 x = force_reg (mode, x);
3434 y = force_reg (mode, y);
3439 if (can_compare_p (*pcomparison, mode, purpose))
3442 /* Handle a lib call just for the mode we are using. */
3444 if (cmp_optab->handlers[(int) mode].libfunc && class != MODE_FLOAT)
3446 rtx libfunc = cmp_optab->handlers[(int) mode].libfunc;
3449 /* If we want unsigned, and this mode has a distinct unsigned
3450 comparison routine, use that. */
3451 if (unsignedp && ucmp_optab->handlers[(int) mode].libfunc)
3452 libfunc = ucmp_optab->handlers[(int) mode].libfunc;
3454 result = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST_MAKE_BLOCK,
3455 word_mode, 2, x, mode, y, mode);
3459 if (TARGET_LIB_INT_CMP_BIASED)
3460 /* Integer comparison returns a result that must be compared
3461 against 1, so that even if we do an unsigned compare
3462 afterward, there is still a value that can represent the
3463 result "less than". */
/* Final fallback must be a float mode; delegate to the soft-float
   comparison helper.  */
3473 gcc_assert (class == MODE_FLOAT);
3474 prepare_float_lib_cmp (px, py, pcomparison, pmode, punsignedp);
/* NOTE(review): elided listing -- gaps in the embedded original line
   numbers mean statements are missing from view; code text preserved
   verbatim, comments only added.  */
3477 /* Before emitting an insn with code ICODE, make sure that X, which is going
3478 to be used for operand OPNUM of the insn, is converted from mode MODE to
3479 WIDER_MODE (UNSIGNEDP determines whether it is an unsigned conversion), and
3480 that it is accepted by the operand predicate. Return the new value. */
3483 prepare_operand (int icode, rtx x, int opnum, enum machine_mode mode,
3484 enum machine_mode wider_mode, int unsignedp)
3486 if (mode != wider_mode)
3487 x = convert_modes (wider_mode, mode, x, unsignedp);
/* Copy into a pseudo of the insn's declared operand mode if the
   predicate rejects X as-is.  */
3489 if (!insn_data[icode].operand[opnum].predicate
3490 (x, insn_data[icode].operand[opnum].mode))
3494 x = copy_to_mode_reg (insn_data[icode].operand[opnum].mode, x);
3500 /* Subroutine of emit_cmp_and_jump_insns; this function is called when we know
3501 we can do the comparison.
3502 The arguments are the same as for emit_cmp_and_jump_insns; but LABEL may
3503 be NULL_RTX which indicates that only a comparison is to be generated. */
3506 emit_cmp_and_jump_insn_1 (rtx x, rtx y, enum machine_mode mode,
3507 enum rtx_code comparison, int unsignedp, rtx label)
3509 rtx test = gen_rtx_fmt_ee (comparison, mode, x, y);
3510 enum mode_class class = GET_MODE_CLASS (mode);
3511 enum machine_mode wider_mode = mode;
3513 /* Try combined insns first. */
3516 enum insn_code icode;
3517 PUT_MODE (test, wider_mode);
/* A cbranch pattern compares and branches in a single insn; prefer it
   whenever the target provides one for this (possibly widened) mode.  */
3521 icode = cbranch_optab->handlers[(int) wider_mode].insn_code;
3523 if (icode != CODE_FOR_nothing
3524 && insn_data[icode].operand[0].predicate (test, wider_mode))
3526 x = prepare_operand (icode, x, 1, mode, wider_mode, unsignedp);
3527 y = prepare_operand (icode, y, 2, mode, wider_mode, unsignedp);
3528 emit_jump_insn (GEN_FCN (icode) (test, x, y, label));
3533 /* Handle some compares against zero. */
3534 icode = (int) tst_optab->handlers[(int) wider_mode].insn_code;
3535 if (y == CONST0_RTX (mode) && icode != CODE_FOR_nothing)
3537 x = prepare_operand (icode, x, 0, mode, wider_mode, unsignedp);
3538 emit_insn (GEN_FCN (icode) (x));
3540 emit_jump_insn (bcc_gen_fctn[(int) comparison] (label));
3544 /* Handle compares for which there is a directly suitable insn. */
3546 icode = (int) cmp_optab->handlers[(int) wider_mode].insn_code;
3547 if (icode != CODE_FOR_nothing)
3549 x = prepare_operand (icode, x, 0, mode, wider_mode, unsignedp);
3550 y = prepare_operand (icode, y, 1, mode, wider_mode, unsignedp);
3551 emit_insn (GEN_FCN (icode) (x, y));
3553 emit_jump_insn (bcc_gen_fctn[(int) comparison] (label));
/* Nothing worked at this width.  Only integer and (complex) float
   classes may retry in a wider mode; other classes stop here.  */
3557 if (class != MODE_INT && class != MODE_FLOAT
3558 && class != MODE_COMPLEX_FLOAT)
3561 wider_mode = GET_MODE_WIDER_MODE (wider_mode);
3563 while (wider_mode != VOIDmode);
3568 /* Generate code to compare X with Y so that the condition codes are
3569 set and to jump to LABEL if the condition is true. If X is a
3570 constant and Y is not a constant, then the comparison is swapped to
3571 ensure that the comparison RTL has the canonical form.
3573 UNSIGNEDP nonzero says that X and Y are unsigned; this matters if they
3574 need to be widened by emit_cmp_insn. UNSIGNEDP is also used to select
3575 the proper branch condition code.
3577 If X and Y have mode BLKmode, then SIZE specifies the size of both X and Y.
3579 MODE is the mode of the inputs (in case they are const_int).
3581 COMPARISON is the rtl operator to compare with (EQ, NE, GT, etc.). It will
3582 be passed unchanged to emit_cmp_insn, then potentially converted into an
3583 unsigned variant based on UNSIGNEDP to select a proper jump instruction. */
3586 emit_cmp_and_jump_insns (rtx x, rtx y, enum rtx_code comparison, rtx size,
3587 enum machine_mode mode, int unsignedp, rtx label)
3589 rtx op0 = x, op1 = y;
3591 /* Swap operands and condition to ensure canonical RTL. */
3592 if (swap_commutative_operands_p (x, y))
3594 /* If we're not emitting a branch, this means some caller
3599 comparison = swap_condition (comparison);
3603 /* If OP0 is still a constant, then both X and Y must be constants. Force
3604 X into a register to avoid aborting in emit_cmp_insn due to non-canonical
3606 if (CONSTANT_P (op0))
3607 op0 = force_reg (mode, op0);
/* Select the unsigned flavor of the condition when the operands are
   unsigned, so the right branch instruction is chosen later.  */
3611 comparison = unsigned_condition (comparison);
/* prepare_cmp_insn may widen the operands/mode; then emit the actual
   compare-and-branch via the subroutine above.  */
3613 prepare_cmp_insn (&op0, &op1, &comparison, size, &mode, &unsignedp,
3615 emit_cmp_and_jump_insn_1 (op0, op1, mode, comparison, unsignedp, label);
3618 /* Like emit_cmp_and_jump_insns, but generate only the comparison. */
3621 emit_cmp_insn (rtx x, rtx y, enum rtx_code comparison, rtx size,
3622 enum machine_mode mode, int unsignedp)
/* Passing a null LABEL asks emit_cmp_and_jump_insns to emit only the
   comparison, with no branch.  */
3624 emit_cmp_and_jump_insns (x, y, comparison, size, mode, unsignedp, 0);
3627 /* Emit a library call comparison between floating point X and Y.
3628 COMPARISON is the rtl operator to compare with (EQ, NE, GT, etc.). */
3631 prepare_float_lib_cmp (rtx *px, rtx *py, enum rtx_code *pcomparison,
3632 enum machine_mode *pmode, int *punsignedp)
3634 enum rtx_code comparison = *pcomparison;
3635 enum rtx_code swapped = swap_condition (comparison);
3636 enum rtx_code reversed = reverse_condition_maybe_unordered (comparison);
3639 enum machine_mode orig_mode = GET_MODE (x);
3640 enum machine_mode mode;
3641 rtx value, target, insns, equiv;
3643 bool reversed_p = false;
/* Search, from the operands' mode upward, for a libfunc implementing the
   comparison directly, with operands swapped, or with the condition
   reversed (the latter only if the libfunc returns a boolean).  */
3645 for (mode = orig_mode; mode != VOIDmode; mode = GET_MODE_WIDER_MODE (mode))
3647 if ((libfunc = code_to_optab[comparison]->handlers[mode].libfunc))
3650 if ((libfunc = code_to_optab[swapped]->handlers[mode].libfunc))
3653 tmp = x; x = y; y = tmp;
3654 comparison = swapped;
3658 if ((libfunc = code_to_optab[reversed]->handlers[mode].libfunc)
3659 && FLOAT_LIB_COMPARE_RETURNS_BOOL (mode, reversed))
3661 comparison = reversed;
3667 gcc_assert (mode != VOIDmode);
/* Widen both operands if the libfunc found was for a wider mode.  */
3669 if (mode != orig_mode)
3671 x = convert_to_mode (mode, x, 0);
3672 y = convert_to_mode (mode, y, 0);
3675 /* Attach a REG_EQUAL note describing the semantics of the libcall to
3676 the RTL. This allows the RTL optimizers to delete the libcall if the
3677 condition can be determined at compile-time. */
3678 if (comparison == UNORDERED)
/* UNORDERED is true iff either operand is a NaN, i.e. x != x || y != y.  */
3680 rtx temp = simplify_gen_relational (NE, word_mode, mode, x, x);
3681 equiv = simplify_gen_relational (NE, word_mode, mode, y, y);
3682 equiv = simplify_gen_ternary (IF_THEN_ELSE, word_mode, word_mode,
3683 temp, const_true_rtx, equiv);
3687 equiv = simplify_gen_relational (comparison, word_mode, mode, x, y);
/* Non-boolean libfuncs return a three-way value; encode the appropriate
   true/false constants for each condition code.  */
3688 if (! FLOAT_LIB_COMPARE_RETURNS_BOOL (mode, comparison))
3690 rtx true_rtx, false_rtx;
3695 true_rtx = const0_rtx;
3696 false_rtx = const_true_rtx;
3700 true_rtx = const_true_rtx;
3701 false_rtx = const0_rtx;
3705 true_rtx = const1_rtx;
3706 false_rtx = const0_rtx;
3710 true_rtx = const0_rtx;
3711 false_rtx = constm1_rtx;
3715 true_rtx = constm1_rtx;
3716 false_rtx = const0_rtx;
3720 true_rtx = const0_rtx;
3721 false_rtx = const1_rtx;
3727 equiv = simplify_gen_ternary (IF_THEN_ELSE, word_mode, word_mode,
3728 equiv, true_rtx, false_rtx);
/* Emit the libcall and wrap it in a libcall block carrying the
   REG_EQUAL equivalence built above.  */
3733 value = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
3734 word_mode, 2, x, mode, y, mode);
3735 insns = get_insns ();
3738 target = gen_reg_rtx (word_mode);
3739 emit_libcall_block (insns, target, value, equiv);
/* Boolean-returning libfuncs are then tested against zero; pick NE, or
   EQ when we used the reversed condition.  */
3741 if (comparison == UNORDERED
3742 || FLOAT_LIB_COMPARE_RETURNS_BOOL (mode, comparison)
3743 comparison = reversed_p ? EQ : NE;
3748 *pcomparison = comparison;
3752 /* Generate code to indirectly jump to a location given in the rtx LOC. */
3755 emit_indirect_jump (rtx loc)
/* If the indirect_jump pattern's predicate rejects LOC, force it into a
   Pmode register first.  */
3757 if (!insn_data[(int) CODE_FOR_indirect_jump].operand[0].predicate
3759 loc = copy_to_mode_reg (Pmode, loc);
3761 emit_jump_insn (gen_indirect_jump (loc));
3765 #ifdef HAVE_conditional_move
3767 /* Emit a conditional move instruction if the machine supports one for that
3768 condition and machine mode.
3770 OP0 and OP1 are the operands that should be compared using CODE. CMODE is
3771 the mode to use should they be constants. If it is VOIDmode, they cannot
3774 OP2 should be stored in TARGET if the comparison is true, otherwise OP3
3775 should be stored there. MODE is the mode to use should they be constants.
3776 If it is VOIDmode, they cannot both be constants.
3778 The result is either TARGET (perhaps modified) or NULL_RTX if the operation
3779 is not supported. */
3782 emit_conditional_move (rtx target, enum rtx_code code, rtx op0, rtx op1,
3783 enum machine_mode cmode, rtx op2, rtx op3,
3784 enum machine_mode mode, int unsignedp)
3786 rtx tem, subtarget, comparison, insn;
3787 enum insn_code icode;
3788 enum rtx_code reversed;
3790 /* If one operand is constant, make it the second one. Only do this
3791 if the other operand is not constant as well. */
3793 if (swap_commutative_operands_p (op0, op1))
3798 code = swap_condition (code);
3801 /* get_condition will prefer to generate LT and GT even if the old
3802 comparison was against zero, so undo that canonicalization here since
3803 comparisons against zero are cheaper. */
3804 if (code == LT && op1 == const1_rtx)
3805 code = LE, op1 = const0_rtx;
3806 else if (code == GT && op1 == constm1_rtx)
3807 code = GE, op1 = const0_rtx;
3809 if (cmode == VOIDmode)
3810 cmode = GET_MODE (op0);
/* If swapping the move operands OP2/OP3 is profitable and the condition
   can be reversed, do both together so the semantics are unchanged.  */
3812 if (swap_commutative_operands_p (op2, op3)
3813 && ((reversed = reversed_comparison_code_parts (code, op0, op1, NULL))
3822 if (mode == VOIDmode)
3823 mode = GET_MODE (op2);
/* Bail out if the target has no conditional-move pattern for MODE.  */
3825 icode = movcc_gen_code[mode];
3827 if (icode == CODE_FOR_nothing)
3832 op2 = force_not_mem (op2);
3833 op3 = force_not_mem (op3);
3837 target = gen_reg_rtx (mode);
3841 /* If the insn doesn't accept these operands, put them in pseudos. */
3843 if (!insn_data[icode].operand[0].predicate
3844 (subtarget, insn_data[icode].operand[0].mode))
3845 subtarget = gen_reg_rtx (insn_data[icode].operand[0].mode);
3847 if (!insn_data[icode].operand[2].predicate
3848 (op2, insn_data[icode].operand[2].mode))
3849 op2 = copy_to_mode_reg (insn_data[icode].operand[2].mode, op2);
3851 if (!insn_data[icode].operand[3].predicate
3852 (op3, insn_data[icode].operand[3].mode))
3853 op3 = copy_to_mode_reg (insn_data[icode].operand[3].mode, op3);
3855 /* Everything should now be in the suitable form, so emit the compare insn
3856 and then the conditional move. */
3859 = compare_from_rtx (op0, op1, code, unsignedp, cmode, NULL_RTX);
3861 /* ??? Watch for const0_rtx (nop) and const_true_rtx (unconditional)? */
3862 /* We can get const0_rtx or const_true_rtx in some circumstances. Just
3863 return NULL and let the caller figure out how best to deal with this
3865 if (GET_CODE (comparison) != code)
3868 insn = GEN_FCN (icode) (subtarget, comparison, op2, op3);
3870 /* If that failed, then give up. */
/* Copy the result back if we computed into a substitute register.  */
3876 if (subtarget != target)
3877 convert_move (target, subtarget, 0);
3882 /* Return nonzero if a conditional move of mode MODE is supported.
3884 This function is for combine so it can tell whether an insn that looks
3885 like a conditional move is actually supported by the hardware. If we
3886 guess wrong we lose a bit on optimization, but that's it. */
3887 /* ??? sparc64 supports conditionally moving integers values based on fp
3888 comparisons, and vice versa. How do we handle them? */
3891 can_conditionally_move_p (enum machine_mode mode)
/* Supported exactly when a movcc pattern exists for MODE.  */
3893 if (movcc_gen_code[mode] != CODE_FOR_nothing)
3899 #endif /* HAVE_conditional_move */
3901 /* Emit a conditional addition instruction if the machine supports one for that
3902 condition and machine mode.
3904 OP0 and OP1 are the operands that should be compared using CODE. CMODE is
3905 the mode to use should they be constants. If it is VOIDmode, they cannot
3908 OP2 should be stored in TARGET if the comparison is true, otherwise OP2+OP3
3909 should be stored there. MODE is the mode to use should they be constants.
3910 If it is VOIDmode, they cannot both be constants.
3912 The result is either TARGET (perhaps modified) or NULL_RTX if the operation
3913 is not supported. */
3916 emit_conditional_add (rtx target, enum rtx_code code, rtx op0, rtx op1,
3917 enum machine_mode cmode, rtx op2, rtx op3,
3918 enum machine_mode mode, int unsignedp)
3920 rtx tem, subtarget, comparison, insn;
3921 enum insn_code icode;
3922 enum rtx_code reversed;
3924 /* If one operand is constant, make it the second one. Only do this
3925 if the other operand is not constant as well. */
3927 if (swap_commutative_operands_p (op0, op1))
3932 code = swap_condition (code);
3935 /* get_condition will prefer to generate LT and GT even if the old
3936 comparison was against zero, so undo that canonicalization here since
3937 comparisons against zero are cheaper. */
3938 if (code == LT && op1 == const1_rtx)
3939 code = LE, op1 = const0_rtx;
3940 else if (code == GT && op1 == constm1_rtx)
3941 code = GE, op1 = const0_rtx;
3943 if (cmode == VOIDmode)
3944 cmode = GET_MODE (op0);
/* Mirror emit_conditional_move: swap OP2/OP3 only together with a valid
   reversal of the condition code.  */
3946 if (swap_commutative_operands_p (op2, op3)
3947 && ((reversed = reversed_comparison_code_parts (code, op0, op1, NULL))
3956 if (mode == VOIDmode)
3957 mode = GET_MODE (op2);
/* The conditional-add pattern comes from addcc_optab, not movcc.  */
3959 icode = addcc_optab->handlers[(int) mode].insn_code;
3961 if (icode == CODE_FOR_nothing)
3966 op2 = force_not_mem (op2);
3967 op3 = force_not_mem (op3);
3971 target = gen_reg_rtx (mode);
3973 /* If the insn doesn't accept these operands, put them in pseudos. */
3975 if (!insn_data[icode].operand[0].predicate
3976 (target, insn_data[icode].operand[0].mode))
3977 subtarget = gen_reg_rtx (insn_data[icode].operand[0].mode);
3981 if (!insn_data[icode].operand[2].predicate
3982 (op2, insn_data[icode].operand[2].mode))
3983 op2 = copy_to_mode_reg (insn_data[icode].operand[2].mode, op2);
3985 if (!insn_data[icode].operand[3].predicate
3986 (op3, insn_data[icode].operand[3].mode))
3987 op3 = copy_to_mode_reg (insn_data[icode].operand[3].mode, op3);
3989 /* Everything should now be in the suitable form, so emit the compare insn
3990 and then the conditional move. */
3993 = compare_from_rtx (op0, op1, code, unsignedp, cmode, NULL_RTX);
3995 /* ??? Watch for const0_rtx (nop) and const_true_rtx (unconditional)? */
3996 /* We can get const0_rtx or const_true_rtx in some circumstances. Just
3997 return NULL and let the caller figure out how best to deal with this
3999 if (GET_CODE (comparison) != code)
4002 insn = GEN_FCN (icode) (subtarget, comparison, op2, op3);
4004 /* If that failed, then give up. */
/* Copy the result back if we computed into a substitute register.  */
4010 if (subtarget != target)
4011 convert_move (target, subtarget, 0);
4016 /* These functions attempt to generate an insn body, rather than
4017 emitting the insn, but if the gen function already emits them, we
4018 make no attempt to turn them back into naked patterns. */
4020 /* Generate and return an insn body to add Y to X. */
4023 gen_add2_insn (rtx x, rtx y)
4025 int icode = (int) add_optab->handlers[(int) GET_MODE (x)].insn_code;
/* All three operands must satisfy the add pattern's predicates; X is
   used as both destination and first source.  */
4027 gcc_assert (insn_data[icode].operand[0].predicate
4028 (x, insn_data[icode].operand[0].mode));
4029 gcc_assert (insn_data[icode].operand[1].predicate
4030 (x, insn_data[icode].operand[1].mode));
4031 gcc_assert (insn_data[icode].operand[2].predicate
4032 (y, insn_data[icode].operand[2].mode));
4034 return GEN_FCN (icode) (x, x, y);
4037 /* Generate and return an insn body to add r1 and c,
4038 storing the result in r0. */
4040 gen_add3_insn (rtx r0, rtx r1, rtx c)
4042 int icode = (int) add_optab->handlers[(int) GET_MODE (r0)].insn_code;
/* Unlike gen_add2_insn, predicate failures here are not fatal; the
   caller gets a null result instead (failure value elided in listing).  */
4044 if (icode == CODE_FOR_nothing
4045 || !(insn_data[icode].operand[0].predicate
4046 (r0, insn_data[icode].operand[0].mode))
4047 || !(insn_data[icode].operand[1].predicate
4048 (r1, insn_data[icode].operand[1].mode))
4049 || !(insn_data[icode].operand[2].predicate
4050 (c, insn_data[icode].operand[2].mode)))
4053 return GEN_FCN (icode) (r0, r1, c);
/* Report whether the target has a two-address add insn whose operand
   predicates accept X (as both destination and source) and Y.  */
4057 have_add2_insn (rtx x, rtx y)
4061 gcc_assert (GET_MODE (x) != VOIDmode);
4063 icode = (int) add_optab->handlers[(int) GET_MODE (x)].insn_code;
4065 if (icode == CODE_FOR_nothing)
4068 if (!(insn_data[icode].operand[0].predicate
4069 (x, insn_data[icode].operand[0].mode))
4070 || !(insn_data[icode].operand[1].predicate
4071 (x, insn_data[icode].operand[1].mode))
4072 || !(insn_data[icode].operand[2].predicate
4073 (y, insn_data[icode].operand[2].mode)))
4079 /* Generate and return an insn body to subtract Y from X. */
4082 gen_sub2_insn (rtx x, rtx y)
4084 int icode = (int) sub_optab->handlers[(int) GET_MODE (x)].insn_code;
/* Same contract as gen_add2_insn, using the subtract optab.  */
4086 gcc_assert (insn_data[icode].operand[0].predicate
4087 (x, insn_data[icode].operand[0].mode));
4088 gcc_assert (insn_data[icode].operand[1].predicate
4089 (x, insn_data[icode].operand[1].mode));
4090 gcc_assert (insn_data[icode].operand[2].predicate
4091 (y, insn_data[icode].operand[2].mode));
4093 return GEN_FCN (icode) (x, x, y);
4096 /* Generate and return an insn body to subtract r1 and c,
4097 storing the result in r0. */
4099 gen_sub3_insn (rtx r0, rtx r1, rtx c)
4101 int icode = (int) sub_optab->handlers[(int) GET_MODE (r0)].insn_code;
/* Same contract as gen_add3_insn, using the subtract optab.  */
4103 if (icode == CODE_FOR_nothing
4104 || !(insn_data[icode].operand[0].predicate
4105 (r0, insn_data[icode].operand[0].mode))
4106 || !(insn_data[icode].operand[1].predicate
4107 (r1, insn_data[icode].operand[1].mode))
4108 || !(insn_data[icode].operand[2].predicate
4109 (c, insn_data[icode].operand[2].mode)))
4112 return GEN_FCN (icode) (r0, r1, c);
/* Report whether the target has a two-address subtract insn whose operand
   predicates accept X (as both destination and source) and Y.  */
4116 have_sub2_insn (rtx x, rtx y)
4120 gcc_assert (GET_MODE (x) != VOIDmode);
4122 icode = (int) sub_optab->handlers[(int) GET_MODE (x)].insn_code;
4124 if (icode == CODE_FOR_nothing)
4127 if (!(insn_data[icode].operand[0].predicate
4128 (x, insn_data[icode].operand[0].mode))
4129 || !(insn_data[icode].operand[1].predicate
4130 (x, insn_data[icode].operand[1].mode))
4131 || !(insn_data[icode].operand[2].predicate
4132 (y, insn_data[icode].operand[2].mode)))
4138 /* Generate the body of an instruction to copy Y into X.
4139 It may be a list of insns, if one insn isn't enough. */
4142 gen_move_insn (rtx x, rtx y)
/* NOTE(review): the sequence-capturing calls around this move are not
   visible in this listing; presumably the emitted insns are collected
   and returned as a sequence -- confirm against the full source.  */
4147 emit_move_insn_1 (x, y);
4153 /* Return the insn code used to extend FROM_MODE to TO_MODE.
4154 UNSIGNEDP specifies zero-extension instead of sign-extension. If
4155 no such operation exists, CODE_FOR_nothing will be returned. */
4158 can_extend_p (enum machine_mode to_mode, enum machine_mode from_mode,
/* Targets with a ptr_extend pattern can use it here (condition elided).  */
4162 #ifdef HAVE_ptr_extend
4164 return CODE_FOR_ptr_extend;
/* Pick the zero- or sign-extension conversion optab as requested.  */
4167 tab = unsignedp ? zext_optab : sext_optab;
4168 return tab->handlers[to_mode][from_mode].insn_code;
4171 /* Generate the body of an insn to extend Y (with mode MFROM)
4172 into X (with mode MTO). Do zero-extension if UNSIGNEDP is nonzero. */
4175 gen_extend_insn (rtx x, rtx y, enum machine_mode mto,
4176 enum machine_mode mfrom, int unsignedp)
/* Caller is responsible for ensuring the extension exists; GEN_FCN on
   CODE_FOR_nothing would be invalid.  */
4178 enum insn_code icode = can_extend_p (mto, mfrom, unsignedp);
4179 return GEN_FCN (icode) (x, y);
4182 /* can_fix_p and can_float_p say whether the target machine
4183 can directly convert a given fixed point type to
4184 a given floating point type, or vice versa.
4185 The returned value is the CODE_FOR_... value to use,
4186 or CODE_FOR_nothing if these modes cannot be directly converted.
4188 *TRUNCP_PTR is set to 1 if it is necessary to output
4189 an explicit FTRUNC insn before the fix insn; otherwise 0. */
4191 static enum insn_code
4192 can_fix_p (enum machine_mode fixmode, enum machine_mode fltmode,
4193 int unsignedp, int *truncp_ptr)
4196 enum insn_code icode;
/* First preference: a fix-and-truncate pattern, which needs no
   separate FTRUNC.  */
4198 tab = unsignedp ? ufixtrunc_optab : sfixtrunc_optab;
4199 icode = tab->handlers[fixmode][fltmode].insn_code;
4200 if (icode != CODE_FOR_nothing)
4206 /* FIXME: This requires a port to define both FIX and FTRUNC pattern
4207 for this to work. We need to rework the fix* and ftrunc* patterns
4208 and documentation. */
/* Second preference: a plain fix pattern combined with an explicit
   FTRUNC (reported through *TRUNCP_PTR).  */
4209 tab = unsignedp ? ufix_optab : sfix_optab;
4210 icode = tab->handlers[fixmode][fltmode].insn_code;
4211 if (icode != CODE_FOR_nothing
4212 && ftrunc_optab->handlers[fltmode].insn_code != CODE_FOR_nothing)
4219 return CODE_FOR_nothing;
4222 static enum insn_code
4223 can_float_p (enum machine_mode fltmode, enum machine_mode fixmode,
/* Return the insn code for converting FIXMODE to FLTMODE (unsigned if
   UNSIGNEDP), or CODE_FOR_nothing if the target has no such pattern.  */
4228 tab = unsignedp ? ufloat_optab : sfloat_optab;
4229 return tab->handlers[fltmode][fixmode].insn_code;
4232 /* Generate code to convert FROM to floating point
4233 and store in TO. FROM must be fixed point and not VOIDmode.
4234 UNSIGNEDP nonzero means regard FROM as unsigned.
4235 Normally this is done by correcting the final value
4236 if it is negative. */
4239 expand_float (rtx to, rtx from, int unsignedp)
4241 enum insn_code icode;
4243 enum machine_mode fmode, imode;
4245 /* Crash now, because we won't be able to decide which mode to use. */
4246 gcc_assert (GET_MODE (from) != VOIDmode);
4248 /* Look for an insn to do the conversion. Do it in the specified
4249 modes if possible; otherwise convert either input, output or both to
4250 wider mode. If the integer mode is wider than the mode of FROM,
4251 we can do the conversion signed even if the input is unsigned. */
4253 for (fmode = GET_MODE (to); fmode != VOIDmode;
4254 fmode = GET_MODE_WIDER_MODE (fmode))
4255 for (imode = GET_MODE (from); imode != VOIDmode;
4256 imode = GET_MODE_WIDER_MODE (imode))
4258 int doing_unsigned = unsignedp;
/* Skip float modes whose significand cannot hold all bits of FROM;
   converting through them would lose precision.  */
4260 if (fmode != GET_MODE (to)
4261 && significand_size (fmode) < GET_MODE_BITSIZE (GET_MODE (from)))
4264 icode = can_float_p (fmode, imode, unsignedp);
4265 if (icode == CODE_FOR_nothing && imode != GET_MODE (from) && unsignedp)
4266 icode = can_float_p (fmode, imode, 0), doing_unsigned = 0;
4268 if (icode != CODE_FOR_nothing)
4270 if (imode != GET_MODE (from))
4271 from = convert_to_mode (imode, from, unsignedp);
4273 if (fmode != GET_MODE (to))
4274 target = gen_reg_rtx (fmode);
4276 emit_unop_insn (icode, target, from,
4277 doing_unsigned ? UNSIGNED_FLOAT : FLOAT);
4280 convert_move (to, target, 0);
4285 /* Unsigned integer, and no way to convert directly.
4286 Convert as signed, then conditionally adjust the result. */
4289 rtx label = gen_label_rtx ();
4291 REAL_VALUE_TYPE offset;
4294 from = force_not_mem (from);
4296 /* Look for a usable floating mode FMODE wider than the source and at
4297 least as wide as the target. Using FMODE will avoid rounding woes
4298 with unsigned values greater than the signed maximum value. */
4300 for (fmode = GET_MODE (to); fmode != VOIDmode;
4301 fmode = GET_MODE_WIDER_MODE (fmode))
4302 if (GET_MODE_BITSIZE (GET_MODE (from)) < GET_MODE_BITSIZE (fmode)
4303 && can_float_p (fmode, GET_MODE (from), 0) != CODE_FOR_nothing)
4306 if (fmode == VOIDmode)
4308 /* There is no such mode. Pretend the target is wide enough. */
4309 fmode = GET_MODE (to);
4311 /* Avoid double-rounding when TO is narrower than FROM. */
4312 if ((significand_size (fmode) + 1)
4313 < GET_MODE_BITSIZE (GET_MODE (from)))
4316 rtx neglabel = gen_label_rtx ();
4318 /* Don't use TARGET if it isn't a register, is a hard register,
4319 or is the wrong mode. */
4321 || REGNO (target) < FIRST_PSEUDO_REGISTER
4322 || GET_MODE (target) != fmode)
4323 target = gen_reg_rtx (fmode);
4325 imode = GET_MODE (from);
4326 do_pending_stack_adjust ();
4328 /* Test whether the sign bit is set. */
4329 emit_cmp_and_jump_insns (from, const0_rtx, LT, NULL_RTX, imode,
4332 /* The sign bit is not set. Convert as signed. */
4333 expand_float (target, from, 0);
4334 emit_jump_insn (gen_jump (label));
4337 /* The sign bit is set.
4338 Convert to a usable (positive signed) value by shifting right
4339 one bit, while remembering if a nonzero bit was shifted
4340 out; i.e., compute (from & 1) | (from >> 1). */
4342 emit_label (neglabel);
4343 temp = expand_binop (imode, and_optab, from, const1_rtx,
4344 NULL_RTX, 1, OPTAB_LIB_WIDEN);
4345 temp1 = expand_shift (RSHIFT_EXPR, imode, from, integer_one_node,
4347 temp = expand_binop (imode, ior_optab, temp, temp1, temp, 1,
4349 expand_float (target, temp, 0);
4351 /* Multiply by 2 to undo the shift above. */
4352 temp = expand_binop (fmode, add_optab, target, target,
4353 target, 0, OPTAB_LIB_WIDEN);
4355 emit_move_insn (target, temp);
4357 do_pending_stack_adjust ();
4363 /* If we are about to do some arithmetic to correct for an
4364 unsigned operand, do it in a pseudo-register. */
4366 if (GET_MODE (to) != fmode
4367 || !REG_P (to) || REGNO (to) < FIRST_PSEUDO_REGISTER)
4368 target = gen_reg_rtx (fmode);
4370 /* Convert as signed integer to floating. */
4371 expand_float (target, from, 0);
4373 /* If FROM is negative (and therefore TO is negative),
4374 correct its value by 2**bitwidth. */
4376 do_pending_stack_adjust ();
4377 emit_cmp_and_jump_insns (from, const0_rtx, GE, NULL_RTX, GET_MODE (from),
/* FROM was negative when treated as signed: add 2**bitwidth to recover
   the correct unsigned value.  */
4381 real_2expN (&offset, GET_MODE_BITSIZE (GET_MODE (from)));
4382 temp = expand_binop (fmode, add_optab, target,
4383 CONST_DOUBLE_FROM_REAL_VALUE (offset, fmode),
4384 target, 0, OPTAB_LIB_WIDEN);
4386 emit_move_insn (target, temp);
4388 do_pending_stack_adjust ();
4393 /* No hardware instruction available; call a library routine. */
4398 convert_optab tab = unsignedp ? ufloat_optab : sfloat_optab;
/* Library routines only exist for SImode and wider sources.  */
4400 if (GET_MODE_SIZE (GET_MODE (from)) < GET_MODE_SIZE (SImode))
4401 from = convert_to_mode (SImode, from, unsignedp);
4404 from = force_not_mem (from);
4406 libfunc = tab->handlers[GET_MODE (to)][GET_MODE (from)].libfunc;
4407 gcc_assert (libfunc);
4411 value = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
4412 GET_MODE (to), 1, from,
4414 insns = get_insns ();
4417 emit_libcall_block (insns, target, value,
4418 gen_rtx_FLOAT (GET_MODE (to), from));
4423 /* Copy result to requested destination
4424 if we have been computing in a temp location. */
4428 if (GET_MODE (target) == GET_MODE (to))
4429 emit_move_insn (to, target);
4431 convert_move (to, target, 0);
4435 /* Generate code to convert FROM to fixed point and store in TO. FROM
4436 must be floating point. */
4439 expand_fix (rtx to, rtx from, int unsignedp)
4441 enum insn_code icode;
4443 enum machine_mode fmode, imode;
4446 /* We first try to find a pair of modes, one real and one integer, at
4447 least as wide as FROM and TO, respectively, in which we can open-code
4448 this conversion. If the integer mode is wider than the mode of TO,
4449 we can do the conversion either signed or unsigned. */
4451 for (fmode = GET_MODE (from); fmode != VOIDmode;
4452 fmode = GET_MODE_WIDER_MODE (fmode))
4453 for (imode = GET_MODE (to); imode != VOIDmode;
4454 imode = GET_MODE_WIDER_MODE (imode))
4456 int doing_unsigned = unsignedp;
4458 icode = can_fix_p (imode, fmode, unsignedp, &must_trunc);
4459 if (icode == CODE_FOR_nothing && imode != GET_MODE (to) && unsignedp)
4460 icode = can_fix_p (imode, fmode, 0, &must_trunc), doing_unsigned = 0;
4462 if (icode != CODE_FOR_nothing)
4464 if (fmode != GET_MODE (from))
4465 from = convert_to_mode (fmode, from, 0);
/* can_fix_p reported that an explicit truncation toward zero is
   needed before the fix insn.  */
4469 rtx temp = gen_reg_rtx (GET_MODE (from));
4470 from = expand_unop (GET_MODE (from), ftrunc_optab, from,
4474 if (imode != GET_MODE (to))
4475 target = gen_reg_rtx (imode);
4477 emit_unop_insn (icode, target, from,
4478 doing_unsigned ? UNSIGNED_FIX : FIX);
4480 convert_move (to, target, unsignedp);
4485 /* For an unsigned conversion, there is one more way to do it.
4486 If we have a signed conversion, we generate code that compares
4487 the real value to the largest representable positive number. If it
4488 is smaller, the conversion is done normally. Otherwise, subtract
4489 one plus the highest signed number, convert, and add it back.
4491 We only need to check all real modes, since we know we didn't find
4492 anything with a wider integer mode.
4494 This code used to extend FP value into mode wider than the destination.
4495 This is not needed. Consider, for instance conversion from SFmode
4498 The hot path through the code is dealing with inputs smaller than 2^63
4499 and doing just the conversion, so there is no bits to lose.
4501 In the other path we know the value is positive in the range 2^63..2^64-1
4502 inclusive. (as for other input overflow happens and result is undefined)
4503 So we know that the most important bit set in mantissa corresponds to
4504 2^63. The subtraction of 2^63 should not generate any rounding as it
4505 simply clears out that bit. The rest is trivial. */
4507 if (unsignedp && GET_MODE_BITSIZE (GET_MODE (to)) <= HOST_BITS_PER_WIDE_INT)
4508 for (fmode = GET_MODE (from); fmode != VOIDmode;
4509 fmode = GET_MODE_WIDER_MODE (fmode))
4510 if (CODE_FOR_nothing != can_fix_p (GET_MODE (to), fmode, 0,
4514 REAL_VALUE_TYPE offset;
4515 rtx limit, lab1, lab2, insn;
/* LIMIT is 2**(bitsize-1), the smallest value that does not fit in
   the signed conversion.  */
4517 bitsize = GET_MODE_BITSIZE (GET_MODE (to));
4518 real_2expN (&offset, bitsize - 1);
4519 limit = CONST_DOUBLE_FROM_REAL_VALUE (offset, fmode);
4520 lab1 = gen_label_rtx ();
4521 lab2 = gen_label_rtx ();
4524 from = force_not_mem (from);
4526 if (fmode != GET_MODE (from))
4527 from = convert_to_mode (fmode, from, 0);
4529 /* See if we need to do the subtraction. */
4530 do_pending_stack_adjust ();
4531 emit_cmp_and_jump_insns (from, limit, GE, NULL_RTX, GET_MODE (from),
4534 /* If not, do the signed "fix" and branch around fixup code. */
4535 expand_fix (to, from, 0);
4536 emit_jump_insn (gen_jump (lab2));
4539 /* Otherwise, subtract 2**(N-1), convert to signed number,
4540 then add 2**(N-1). Do the addition using XOR since this
4541 will often generate better code. */
4543 target = expand_binop (GET_MODE (from), sub_optab, from, limit,
4544 NULL_RTX, 0, OPTAB_LIB_WIDEN);
4545 expand_fix (to, target, 0);
4546 target = expand_binop (GET_MODE (to), xor_optab, to,
4548 ((HOST_WIDE_INT) 1 << (bitsize - 1),
4550 to, 1, OPTAB_LIB_WIDEN);
4553 emit_move_insn (to, target);
/* If TO can be moved directly, emit a no-op move carrying a
   REG_EQUAL note that records the UNSIGNED_FIX semantics.  */
4557 if (mov_optab->handlers[(int) GET_MODE (to)].insn_code
4558 != CODE_FOR_nothing)
4560 /* Make a place for a REG_NOTE and add it. */
4561 insn = emit_move_insn (to, to);
4562 set_unique_reg_note (insn,
4564 gen_rtx_fmt_e (UNSIGNED_FIX,
4572 /* We can't do it with an insn, so use a library call. But first ensure
4573 that the mode of TO is at least as wide as SImode, since those are the
4574 only library calls we know about. */
4576 if (GET_MODE_SIZE (GET_MODE (to)) < GET_MODE_SIZE (SImode))
/* Convert into an SImode temporary first, then recurse.  */
4578 target = gen_reg_rtx (SImode);
4580 expand_fix (target, from, unsignedp);
4588 convert_optab tab = unsignedp ? ufix_optab : sfix_optab;
4589 libfunc = tab->handlers[GET_MODE (to)][GET_MODE (from)].libfunc;
4590 gcc_assert (libfunc);
4593 from = force_not_mem (from);
4597 value = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
4598 GET_MODE (to), 1, from,
4600 insns = get_insns ();
4603 emit_libcall_block (insns, target, value,
4604 gen_rtx_fmt_e (unsignedp ? UNSIGNED_FIX : FIX,
4605 GET_MODE (to), from));
/* Copy the result to TO if we computed in a temporary.  */
4610 if (GET_MODE (to) == GET_MODE (target))
4611 emit_move_insn (to, target);
4613 convert_move (to, target, 0);
4617 /* Report whether we have an instruction to perform the operation
4618 specified by CODE on operands of mode MODE. */
4620 have_insn_for (enum rtx_code code, enum machine_mode mode)
/* True when CODE maps to an optab and that optab has an insn for MODE.  */
4622 return (code_to_optab[(int) code] != 0
4623 && (code_to_optab[(int) code]->handlers[(int) mode].insn_code
4624 != CODE_FOR_nothing));
4627 /* Create a blank optab. */
/* Allocate in GC space and mark every mode as unsupported until
   target-specific initialization fills the table in.  */
4632 optab op = ggc_alloc (sizeof (struct optab));
4633 for (i = 0; i < NUM_MACHINE_MODES; i++)
4635 op->handlers[i].insn_code = CODE_FOR_nothing;
4636 op->handlers[i].libfunc = 0;
4642 static convert_optab
4643 new_convert_optab (void)
/* Like new_optab, but the handler table is indexed by a (to-mode,
   from-mode) pair.  */
4646 convert_optab op = ggc_alloc (sizeof (struct convert_optab));
4647 for (i = 0; i < NUM_MACHINE_MODES; i++)
4648 for (j = 0; j < NUM_MACHINE_MODES; j++)
4650 op->handlers[i][j].insn_code = CODE_FOR_nothing;
4651 op->handlers[i][j].libfunc = 0;
4656 /* Same, but fill in its code as CODE, and write it into the
4657 code_to_optab table. */
4659 init_optab (enum rtx_code code)
4661 optab op = new_optab ();
/* Register the new optab so have_insn_for () can find it by rtx code.  */
4663 code_to_optab[(int) code] = op;
4667 /* Same, but fill in its code as CODE, and do _not_ write it into
4668 the code_to_optab table. */
4670 init_optabv (enum rtx_code code)
4672 optab op = new_optab ();
4677 /* Conversion optabs never go in the code_to_optab table. */
4678 static inline convert_optab
4679 init_convert_optab (enum rtx_code code)
4681 convert_optab op = new_convert_optab ();
4686 /* Initialize the libfunc fields of an entire group of entries in some
4687 optab. Each entry is set equal to a string consisting of a leading
4688 pair of underscores followed by a generic operation name followed by
4689 a mode name (downshifted to lowercase) followed by a single character
4690 representing the number of operands for the given operation (which is
4691 usually one of the characters '2', '3', or '4').
4693 OPTABLE is the table in which libfunc fields are to be initialized.
4694 FIRST_MODE is the first machine mode index in the given optab to
4696 LAST_MODE is the last machine mode index in the given optab to
4698 OPNAME is the generic (string) name of the operation.
4699 SUFFIX is the character which specifies the number of operands for
4700 the given generic operation.
4704 init_libfuncs (optab optable, int first_mode, int last_mode,
4705 const char *opname, int suffix)
4708 unsigned opname_len = strlen (opname);
4710 for (mode = first_mode; (int) mode <= (int) last_mode;
4711 mode = (enum machine_mode) ((int) mode + 1))
4713 const char *mname = GET_MODE_NAME (mode);
4714 unsigned mname_len = strlen (mname);
/* "__" + opname + lowercased mode name + suffix char + NUL.  */
4715 char *libfunc_name = alloca (2 + opname_len + mname_len + 1 + 1);
4722 for (q = opname; *q; )
4724 for (q = mname; *q; q++)
4725 *p++ = TOLOWER (*q);
/* Intern the assembled name and record it for this mode.  */
4729 optable->handlers[(int) mode].libfunc
4730 = init_one_libfunc (ggc_alloc_string (libfunc_name, p - libfunc_name));
4734 /* Initialize the libfunc fields of an entire group of entries in some
4735 optab which correspond to all integer mode operations. The parameters
4736 have the same meaning as similarly named ones for the `init_libfuncs'
4737 routine. (See above). */
4740 init_integral_libfuncs (optab optable, const char *opname, int suffix)
4742 int maxsize = 2*BITS_PER_WORD;
4743 if (maxsize < LONG_LONG_TYPE_SIZE)
4744 maxsize = LONG_LONG_TYPE_SIZE;
4745 init_libfuncs (optable, word_mode,
4746 mode_for_size (maxsize, MODE_INT, 0),
4750 /* Initialize the libfunc fields of an entire group of entries in some
4751 optab which correspond to all real mode operations. The parameters
4752 have the same meaning as similarly named ones for the `init_libfuncs'
4753 routine. (See above). */
4756 init_floating_libfuncs (optab optable, const char *opname, int suffix)
4758 init_libfuncs (optable, MIN_MODE_FLOAT, MAX_MODE_FLOAT, opname, suffix);
4761 /* Initialize the libfunc fields of an entire group of entries of an
4762 inter-mode-class conversion optab. The string formation rules are
4763 similar to the ones for init_libfuncs, above, but instead of having
4764 a mode name and an operand count these functions have two mode names
4765 and no operand count. */
4767 init_interclass_conv_libfuncs (convert_optab tab, const char *opname,
4768 enum mode_class from_class,
4769 enum mode_class to_class)
4771 enum machine_mode first_from_mode = GET_CLASS_NARROWEST_MODE (from_class);
4772 enum machine_mode first_to_mode = GET_CLASS_NARROWEST_MODE (to_class);
4773 size_t opname_len = strlen (opname);
4774 size_t max_mname_len = 0;
4776 enum machine_mode fmode, tmode;
4777 const char *fname, *tname;
4779 char *libfunc_name, *suffix;
4782 for (fmode = first_from_mode;
4784 fmode = GET_MODE_WIDER_MODE (fmode))
4785 max_mname_len = MAX (max_mname_len, strlen (GET_MODE_NAME (fmode)));
4787 for (tmode = first_to_mode;
4789 tmode = GET_MODE_WIDER_MODE (tmode))
4790 max_mname_len = MAX (max_mname_len, strlen (GET_MODE_NAME (tmode)));
4792 libfunc_name = alloca (2 + opname_len + 2*max_mname_len + 1 + 1);
4793 libfunc_name[0] = '_';
4794 libfunc_name[1] = '_';
4795 memcpy (&libfunc_name[2], opname, opname_len);
4796 suffix = libfunc_name + opname_len + 2;
4798 for (fmode = first_from_mode; fmode != VOIDmode;
4799 fmode = GET_MODE_WIDER_MODE (fmode))
4800 for (tmode = first_to_mode; tmode != VOIDmode;
4801 tmode = GET_MODE_WIDER_MODE (tmode))
4803 fname = GET_MODE_NAME (fmode);
4804 tname = GET_MODE_NAME (tmode);
4807 for (q = fname; *q; p++, q++)
4809 for (q = tname; *q; p++, q++)
4814 tab->handlers[tmode][fmode].libfunc
4815 = init_one_libfunc (ggc_alloc_string (libfunc_name,
4820 /* Initialize the libfunc fields of an entire group of entries of an
4821 intra-mode-class conversion optab. The string formation rules are
4822 similar to the ones for init_libfunc, above. WIDENING says whether
4823 the optab goes from narrow to wide modes or vice versa. These functions
4824 have two mode names _and_ an operand count. */
4826 init_intraclass_conv_libfuncs (convert_optab tab, const char *opname,
4827 enum mode_class class, bool widening)
4829 enum machine_mode first_mode = GET_CLASS_NARROWEST_MODE (class);
4830 size_t opname_len = strlen (opname);
4831 size_t max_mname_len = 0;
4833 enum machine_mode nmode, wmode;
4834 const char *nname, *wname;
4836 char *libfunc_name, *suffix;
4839 for (nmode = first_mode; nmode != VOIDmode;
4840 nmode = GET_MODE_WIDER_MODE (nmode))
4841 max_mname_len = MAX (max_mname_len, strlen (GET_MODE_NAME (nmode)));
4843 libfunc_name = alloca (2 + opname_len + 2*max_mname_len + 1 + 1);
4844 libfunc_name[0] = '_';
4845 libfunc_name[1] = '_';
4846 memcpy (&libfunc_name[2], opname, opname_len);
4847 suffix = libfunc_name + opname_len + 2;
4849 for (nmode = first_mode; nmode != VOIDmode;
4850 nmode = GET_MODE_WIDER_MODE (nmode))
4851 for (wmode = GET_MODE_WIDER_MODE (nmode); wmode != VOIDmode;
4852 wmode = GET_MODE_WIDER_MODE (wmode))
4854 nname = GET_MODE_NAME (nmode);
4855 wname = GET_MODE_NAME (wmode);
4858 for (q = widening ? nname : wname; *q; p++, q++)
4860 for (q = widening ? wname : nname; *q; p++, q++)
4866 tab->handlers[widening ? wmode : nmode]
4867 [widening ? nmode : wmode].libfunc
4868 = init_one_libfunc (ggc_alloc_string (libfunc_name,
4875 init_one_libfunc (const char *name)
4879 /* Create a FUNCTION_DECL that can be passed to
4880 targetm.encode_section_info. */
4881 /* ??? We don't have any type information except for this is
4882 a function. Pretend this is "int foo()". */
4883 tree decl = build_decl (FUNCTION_DECL, get_identifier (name),
4884 build_function_type (integer_type_node, NULL_TREE));
4885 DECL_ARTIFICIAL (decl) = 1;
4886 DECL_EXTERNAL (decl) = 1;
4887 TREE_PUBLIC (decl) = 1;
4889 symbol = XEXP (DECL_RTL (decl), 0);
4891 /* Zap the nonsensical SYMBOL_REF_DECL for this. What we're left with
4892 are the flags assigned by targetm.encode_section_info. */
4893 SYMBOL_REF_DECL (symbol) = 0;
4898 /* Call this to reset the function entry for one optab (OPTABLE) in mode
4899 MODE to NAME, which should be either 0 or a string constant. */
4901 set_optab_libfunc (optab optable, enum machine_mode mode, const char *name)
4904 optable->handlers[mode].libfunc = init_one_libfunc (name);
4906 optable->handlers[mode].libfunc = 0;
4909 /* Call this to reset the function entry for one conversion optab
4910 (OPTABLE) from mode FMODE to mode TMODE to NAME, which should be
4911 either 0 or a string constant. */
4913 set_conv_libfunc (convert_optab optable, enum machine_mode tmode,
4914 enum machine_mode fmode, const char *name)
4917 optable->handlers[tmode][fmode].libfunc = init_one_libfunc (name);
4919 optable->handlers[tmode][fmode].libfunc = 0;
4922 /* Call this once to initialize the contents of the optabs
4923 appropriately for the current target machine. */
/* NOTE(review): the extraction has dropped the `void init_optabs (void)'
   header line and numerous body lines (declarations of `i', brace lines,
   #else/#endif lines) from this function -- confirm against the full
   source before building.  */
4930 /* Start by initializing all tables to contain CODE_FOR_nothing. */
4932 for (i = 0; i < NUM_RTX_CODE; i++)
4933 setcc_gen_code[i] = CODE_FOR_nothing;
4935 #ifdef HAVE_conditional_move
4936 for (i = 0; i < NUM_MACHINE_MODES; i++)
4937 movcc_gen_code[i] = CODE_FOR_nothing;
4940 for (i = 0; i < NUM_MACHINE_MODES; i++)
4942 vcond_gen_code[i] = CODE_FOR_nothing;
4943 vcondu_gen_code[i] = CODE_FOR_nothing;
/* Create one optab per operation; the rtx code (or UNKNOWN) keys the
   code_to_optab reverse mapping via init_optab.  */
4946 add_optab = init_optab (PLUS);
4947 addv_optab = init_optabv (PLUS);
4948 sub_optab = init_optab (MINUS);
4949 subv_optab = init_optabv (MINUS);
4950 smul_optab = init_optab (MULT);
4951 smulv_optab = init_optabv (MULT);
4952 smul_highpart_optab = init_optab (UNKNOWN);
4953 umul_highpart_optab = init_optab (UNKNOWN);
4954 smul_widen_optab = init_optab (UNKNOWN);
4955 umul_widen_optab = init_optab (UNKNOWN);
4956 sdiv_optab = init_optab (DIV);
4957 sdivv_optab = init_optabv (DIV);
4958 sdivmod_optab = init_optab (UNKNOWN);
4959 udiv_optab = init_optab (UDIV);
4960 udivmod_optab = init_optab (UNKNOWN);
4961 smod_optab = init_optab (MOD);
4962 umod_optab = init_optab (UMOD);
4963 fmod_optab = init_optab (UNKNOWN);
4964 drem_optab = init_optab (UNKNOWN);
4965 ftrunc_optab = init_optab (UNKNOWN);
4966 and_optab = init_optab (AND);
4967 ior_optab = init_optab (IOR);
4968 xor_optab = init_optab (XOR);
4969 ashl_optab = init_optab (ASHIFT);
4970 ashr_optab = init_optab (ASHIFTRT);
4971 lshr_optab = init_optab (LSHIFTRT);
4972 rotl_optab = init_optab (ROTATE);
4973 rotr_optab = init_optab (ROTATERT);
4974 smin_optab = init_optab (SMIN);
4975 smax_optab = init_optab (SMAX);
4976 umin_optab = init_optab (UMIN);
4977 umax_optab = init_optab (UMAX);
4978 pow_optab = init_optab (UNKNOWN);
4979 atan2_optab = init_optab (UNKNOWN);
4981 /* These three have codes assigned exclusively for the sake of
4983 mov_optab = init_optab (SET);
4984 movstrict_optab = init_optab (STRICT_LOW_PART);
4985 cmp_optab = init_optab (COMPARE);
4987 ucmp_optab = init_optab (UNKNOWN);
4988 tst_optab = init_optab (UNKNOWN);
4990 eq_optab = init_optab (EQ);
4991 ne_optab = init_optab (NE);
4992 gt_optab = init_optab (GT);
4993 ge_optab = init_optab (GE);
4994 lt_optab = init_optab (LT);
4995 le_optab = init_optab (LE);
4996 unord_optab = init_optab (UNORDERED);
4998 neg_optab = init_optab (NEG);
4999 negv_optab = init_optabv (NEG);
5000 abs_optab = init_optab (ABS);
5001 absv_optab = init_optabv (ABS);
5002 addcc_optab = init_optab (UNKNOWN);
5003 one_cmpl_optab = init_optab (NOT);
5004 ffs_optab = init_optab (FFS);
5005 clz_optab = init_optab (CLZ);
5006 ctz_optab = init_optab (CTZ);
5007 popcount_optab = init_optab (POPCOUNT);
5008 parity_optab = init_optab (PARITY);
5009 sqrt_optab = init_optab (SQRT);
5010 floor_optab = init_optab (UNKNOWN);
5011 lfloor_optab = init_optab (UNKNOWN);
5012 ceil_optab = init_optab (UNKNOWN);
5013 lceil_optab = init_optab (UNKNOWN);
5014 round_optab = init_optab (UNKNOWN);
5015 btrunc_optab = init_optab (UNKNOWN);
5016 nearbyint_optab = init_optab (UNKNOWN);
5017 rint_optab = init_optab (UNKNOWN);
5018 lrint_optab = init_optab (UNKNOWN);
5019 sincos_optab = init_optab (UNKNOWN);
5020 sin_optab = init_optab (UNKNOWN);
5021 asin_optab = init_optab (UNKNOWN);
5022 cos_optab = init_optab (UNKNOWN);
5023 acos_optab = init_optab (UNKNOWN);
5024 exp_optab = init_optab (UNKNOWN);
5025 exp10_optab = init_optab (UNKNOWN);
5026 exp2_optab = init_optab (UNKNOWN);
5027 expm1_optab = init_optab (UNKNOWN);
5028 ldexp_optab = init_optab (UNKNOWN);
5029 logb_optab = init_optab (UNKNOWN);
5030 ilogb_optab = init_optab (UNKNOWN);
5031 log_optab = init_optab (UNKNOWN);
5032 log10_optab = init_optab (UNKNOWN);
5033 log2_optab = init_optab (UNKNOWN);
5034 log1p_optab = init_optab (UNKNOWN);
5035 tan_optab = init_optab (UNKNOWN);
5036 atan_optab = init_optab (UNKNOWN);
5037 copysign_optab = init_optab (UNKNOWN);
5039 strlen_optab = init_optab (UNKNOWN);
5040 cbranch_optab = init_optab (UNKNOWN);
5041 cmov_optab = init_optab (UNKNOWN);
5042 cstore_optab = init_optab (UNKNOWN);
5043 push_optab = init_optab (UNKNOWN);
5045 vec_extract_optab = init_optab (UNKNOWN);
5046 vec_set_optab = init_optab (UNKNOWN);
5047 vec_init_optab = init_optab (UNKNOWN);
5048 vec_realign_load_optab = init_optab (UNKNOWN);
5049 movmisalign_optab = init_optab (UNKNOWN);
5051 powi_optab = init_optab (UNKNOWN);
/* Conversion optabs: two-mode tables for extensions, truncations and
   fixed/float conversions.  */
5054 sext_optab = init_convert_optab (SIGN_EXTEND);
5055 zext_optab = init_convert_optab (ZERO_EXTEND);
5056 trunc_optab = init_convert_optab (TRUNCATE);
5057 sfix_optab = init_convert_optab (FIX);
5058 ufix_optab = init_convert_optab (UNSIGNED_FIX);
5059 sfixtrunc_optab = init_convert_optab (UNKNOWN);
5060 ufixtrunc_optab = init_convert_optab (UNKNOWN);
5061 sfloat_optab = init_convert_optab (FLOAT);
5062 ufloat_optab = init_convert_optab (UNSIGNED_FLOAT);
/* Per-mode insn-code arrays (block moves, string ops, __sync_*
   atomics, reloads) all start out unsupported.  */
5064 for (i = 0; i < NUM_MACHINE_MODES; i++)
5066 movmem_optab[i] = CODE_FOR_nothing;
5067 clrmem_optab[i] = CODE_FOR_nothing;
5068 cmpstr_optab[i] = CODE_FOR_nothing;
5069 cmpmem_optab[i] = CODE_FOR_nothing;
5071 sync_add_optab[i] = CODE_FOR_nothing;
5072 sync_sub_optab[i] = CODE_FOR_nothing;
5073 sync_ior_optab[i] = CODE_FOR_nothing;
5074 sync_and_optab[i] = CODE_FOR_nothing;
5075 sync_xor_optab[i] = CODE_FOR_nothing;
5076 sync_nand_optab[i] = CODE_FOR_nothing;
5077 sync_old_add_optab[i] = CODE_FOR_nothing;
5078 sync_old_sub_optab[i] = CODE_FOR_nothing;
5079 sync_old_ior_optab[i] = CODE_FOR_nothing;
5080 sync_old_and_optab[i] = CODE_FOR_nothing;
5081 sync_old_xor_optab[i] = CODE_FOR_nothing;
5082 sync_old_nand_optab[i] = CODE_FOR_nothing;
5083 sync_new_add_optab[i] = CODE_FOR_nothing;
5084 sync_new_sub_optab[i] = CODE_FOR_nothing;
5085 sync_new_ior_optab[i] = CODE_FOR_nothing;
5086 sync_new_and_optab[i] = CODE_FOR_nothing;
5087 sync_new_xor_optab[i] = CODE_FOR_nothing;
5088 sync_new_nand_optab[i] = CODE_FOR_nothing;
5089 sync_compare_and_swap[i] = CODE_FOR_nothing;
5090 sync_compare_and_swap_cc[i] = CODE_FOR_nothing;
5091 sync_lock_test_and_set[i] = CODE_FOR_nothing;
5092 sync_lock_release[i] = CODE_FOR_nothing;
5094 #ifdef HAVE_SECONDARY_RELOADS
5095 reload_in_optab[i] = reload_out_optab[i] = CODE_FOR_nothing;
5099 /* Fill in the optabs with the insns we support. */
5102 /* Initialize the optabs with the names of the library functions. */
5103 init_integral_libfuncs (add_optab, "add", '3');
5104 init_floating_libfuncs (add_optab, "add", '3');
5105 init_integral_libfuncs (addv_optab, "addv", '3');
5106 init_floating_libfuncs (addv_optab, "add", '3');
5107 init_integral_libfuncs (sub_optab, "sub", '3');
5108 init_floating_libfuncs (sub_optab, "sub", '3');
5109 init_integral_libfuncs (subv_optab, "subv", '3');
5110 init_floating_libfuncs (subv_optab, "sub", '3');
5111 init_integral_libfuncs (smul_optab, "mul", '3');
5112 init_floating_libfuncs (smul_optab, "mul", '3');
5113 init_integral_libfuncs (smulv_optab, "mulv", '3');
5114 init_floating_libfuncs (smulv_optab, "mul", '3');
5115 init_integral_libfuncs (sdiv_optab, "div", '3');
5116 init_floating_libfuncs (sdiv_optab, "div", '3');
5117 init_integral_libfuncs (sdivv_optab, "divv", '3');
5118 init_integral_libfuncs (udiv_optab, "udiv", '3');
5119 init_integral_libfuncs (sdivmod_optab, "divmod", '4');
5120 init_integral_libfuncs (udivmod_optab, "udivmod", '4');
5121 init_integral_libfuncs (smod_optab, "mod", '3');
5122 init_integral_libfuncs (umod_optab, "umod", '3');
5123 init_floating_libfuncs (ftrunc_optab, "ftrunc", '2');
5124 init_integral_libfuncs (and_optab, "and", '3');
5125 init_integral_libfuncs (ior_optab, "ior", '3');
5126 init_integral_libfuncs (xor_optab, "xor", '3');
5127 init_integral_libfuncs (ashl_optab, "ashl", '3');
5128 init_integral_libfuncs (ashr_optab, "ashr", '3');
5129 init_integral_libfuncs (lshr_optab, "lshr", '3');
5130 init_integral_libfuncs (smin_optab, "min", '3');
5131 init_floating_libfuncs (smin_optab, "min", '3');
5132 init_integral_libfuncs (smax_optab, "max", '3');
5133 init_floating_libfuncs (smax_optab, "max", '3');
5134 init_integral_libfuncs (umin_optab, "umin", '3');
5135 init_integral_libfuncs (umax_optab, "umax", '3');
5136 init_integral_libfuncs (neg_optab, "neg", '2');
5137 init_floating_libfuncs (neg_optab, "neg", '2');
5138 init_integral_libfuncs (negv_optab, "negv", '2');
5139 init_floating_libfuncs (negv_optab, "neg", '2');
5140 init_integral_libfuncs (one_cmpl_optab, "one_cmpl", '2');
5141 init_integral_libfuncs (ffs_optab, "ffs", '2');
5142 init_integral_libfuncs (clz_optab, "clz", '2');
5143 init_integral_libfuncs (ctz_optab, "ctz", '2');
5144 init_integral_libfuncs (popcount_optab, "popcount", '2');
5145 init_integral_libfuncs (parity_optab, "parity", '2');
5147 /* Comparison libcalls for integers MUST come in pairs,
5149 init_integral_libfuncs (cmp_optab, "cmp", '2');
5150 init_integral_libfuncs (ucmp_optab, "ucmp", '2');
5151 init_floating_libfuncs (cmp_optab, "cmp", '2');
5153 /* EQ etc are floating point only. */
5154 init_floating_libfuncs (eq_optab, "eq", '2');
5155 init_floating_libfuncs (ne_optab, "ne", '2');
5156 init_floating_libfuncs (gt_optab, "gt", '2');
5157 init_floating_libfuncs (ge_optab, "ge", '2');
5158 init_floating_libfuncs (lt_optab, "lt", '2');
5159 init_floating_libfuncs (le_optab, "le", '2');
5160 init_floating_libfuncs (unord_optab, "unord", '2');
5162 init_floating_libfuncs (powi_optab, "powi", '2');
/* Conversion libfuncs: int<->float across classes, extend/trunc
   within the float class.  */
5165 init_interclass_conv_libfuncs (sfloat_optab, "float",
5166 MODE_INT, MODE_FLOAT);
5167 init_interclass_conv_libfuncs (sfix_optab, "fix",
5168 MODE_FLOAT, MODE_INT);
5169 init_interclass_conv_libfuncs (ufix_optab, "fixuns",
5170 MODE_FLOAT, MODE_INT);
5172 /* sext_optab is also used for FLOAT_EXTEND. */
5173 init_intraclass_conv_libfuncs (sext_optab, "extend", MODE_FLOAT, true);
5174 init_intraclass_conv_libfuncs (trunc_optab, "trunc", MODE_FLOAT, false);
5176 /* Use cabs for double complex abs, since systems generally have cabs.
5177 Don't define any libcall for float complex, so that cabs will be used. */
5178 if (complex_double_type_node)
5179 abs_optab->handlers[TYPE_MODE (complex_double_type_node)].libfunc
5180 = init_one_libfunc ("cabs");
5182 /* The ffs function operates on `int'. */
5183 ffs_optab->handlers[(int) mode_for_size (INT_TYPE_SIZE, MODE_INT, 0)].libfunc
5184 = init_one_libfunc ("ffs");
/* Miscellaneous runtime-support library functions.  */
5186 abort_libfunc = init_one_libfunc ("abort");
5187 memcpy_libfunc = init_one_libfunc ("memcpy");
5188 memmove_libfunc = init_one_libfunc ("memmove");
5189 memcmp_libfunc = init_one_libfunc ("memcmp");
5190 memset_libfunc = init_one_libfunc ("memset");
5191 setbits_libfunc = init_one_libfunc ("__setbits");
5193 unwind_resume_libfunc = init_one_libfunc (USING_SJLJ_EXCEPTIONS
5194 ? "_Unwind_SjLj_Resume"
5195 : "_Unwind_Resume");
5196 #ifndef DONT_USE_BUILTIN_SETJMP
5197 setjmp_libfunc = init_one_libfunc ("__builtin_setjmp");
5198 longjmp_libfunc = init_one_libfunc ("__builtin_longjmp");
5200 setjmp_libfunc = init_one_libfunc ("setjmp");
5201 longjmp_libfunc = init_one_libfunc ("longjmp");
5203 unwind_sjlj_register_libfunc = init_one_libfunc ("_Unwind_SjLj_Register");
5204 unwind_sjlj_unregister_libfunc
5205 = init_one_libfunc ("_Unwind_SjLj_Unregister");
5207 /* For function entry/exit instrumentation. */
5208 profile_function_entry_libfunc
5209 = init_one_libfunc ("__cyg_profile_func_enter");
5210 profile_function_exit_libfunc
5211 = init_one_libfunc ("__cyg_profile_func_exit");
5213 gcov_flush_libfunc = init_one_libfunc ("__gcov_flush");
/* Shared placeholder rtx for conditional traps; gen_cond_trap rewrites
   its code in place.  */
5215 if (HAVE_conditional_trap)
5216 trap_rtx = gen_rtx_fmt_ee (EQ, VOIDmode, NULL_RTX, NULL_RTX);
5218 /* Allow the target to add more libcalls or rename some, etc. */
5219 targetm.init_libfuncs ();
5224 /* Print information about the current contents of the optabs on
5228 debug_optab_libfuncs (void)
5234 /* Dump the arithmetic optabs. */
5235 for (i = 0; i != (int) OTI_MAX; i++)
5236 for (j = 0; j < NUM_MACHINE_MODES; ++j)
5239 struct optab_handlers *h;
5242 h = &o->handlers[j];
5245 gcc_assert (GET_CODE (h->libfunc) = SYMBOL_REF);
5246 fprintf (stderr, "%s\t%s:\t%s\n",
5247 GET_RTX_NAME (o->code),
5249 XSTR (h->libfunc, 0));
5253 /* Dump the conversion optabs. */
5254 for (i = 0; i < (int) CTI_MAX; ++i)
5255 for (j = 0; j < NUM_MACHINE_MODES; ++j)
5256 for (k = 0; k < NUM_MACHINE_MODES; ++k)
5259 struct optab_handlers *h;
5261 o = &convert_optab_table[i];
5262 h = &o->handlers[j][k];
5265 gcc_assert (GET_CODE (h->libfunc) = SYMBOL_REF);
5266 fprintf (stderr, "%s\t%s\t%s:\t%s\n",
5267 GET_RTX_NAME (o->code),
5270 XSTR (h->libfunc, 0));
5278 /* Generate insns to trap with code TCODE if OP1 and OP2 satisfy condition
5279 CODE. Return 0 on failure. */
/* NOTE(review): extraction dropped this function's return-type line,
   the `rtx insn;' declaration, several early `return 0;' statements and
   the start_sequence/end_sequence pair -- verify against full source.  */
5282 gen_cond_trap (enum rtx_code code ATTRIBUTE_UNUSED, rtx op1,
5283 rtx op2 ATTRIBUTE_UNUSED, rtx tcode ATTRIBUTE_UNUSED)
5285 enum machine_mode mode = GET_MODE (op1);
5286 enum insn_code icode;
/* Bail out unless the target has a conditional-trap pattern, a real
   comparison mode, and a compare insn for that mode.  */
5289 if (!HAVE_conditional_trap)
5292 if (mode == VOIDmode)
5295 icode = cmp_optab->handlers[(int) mode].insn_code;
5296 if (icode == CODE_FOR_nothing)
5300 op1 = prepare_operand (icode, op1, 0, mode, mode, 0);
5301 op2 = prepare_operand (icode, op2, 1, mode, mode, 0);
/* Emit the compare, then the trap conditioned on shared trap_rtx whose
   code is rewritten to CODE in place.  */
5307 emit_insn (GEN_FCN (icode) (op1, op2));
5309 PUT_CODE (trap_rtx, code);
5310 gcc_assert (HAVE_conditional_trap);
5311 insn = gen_conditional_trap (trap_rtx, tcode);
5315 insn = get_insns ();
5322 /* Return rtx code for TCODE. Use UNSIGNEDP to select signed
5323 or unsigned operation code. */
5325 static enum rtx_code
5326 get_rtx_code (enum tree_code tcode, bool unsignedp)
/* NOTE(review): the switch header and most case labels (EQ_EXPR,
   NE_EXPR, the UN* cases, the default) are missing from this extract;
   only the signed/unsigned relational arms survive.  */
5338 code = unsignedp ? LTU : LT;
5341 code = unsignedp ? LEU : LE;
5344 code = unsignedp ? GTU : GT;
5347 code = unsignedp ? GEU : GE;
5350 case UNORDERED_EXPR:
5381 /* Return comparison rtx for COND. Use UNSIGNEDP to select signed or
5382 unsigned operators. Do not generate compare instruction. */
5385 vector_compare_rtx (tree cond, bool unsignedp, enum insn_code icode)
5387 enum rtx_code rcode;
/* t_op0/t_op1 declarations elided by extraction.  */
5389 rtx rtx_op0, rtx_op1;
5391 /* This is unlikely. While generating VEC_COND_EXPR, auto vectorizer
5392 ensures that condition is a relational operation. */
5393 gcc_assert (COMPARISON_CLASS_P (cond));
5395 rcode = get_rtx_code (TREE_CODE (cond), unsignedp);
5396 t_op0 = TREE_OPERAND (cond, 0);
5397 t_op1 = TREE_OPERAND (cond, 1);
5399 /* Expand operands. */
5400 rtx_op0 = expand_expr (t_op0, NULL_RTX, TYPE_MODE (TREE_TYPE (t_op0)), 1);
5401 rtx_op1 = expand_expr (t_op1, NULL_RTX, TYPE_MODE (TREE_TYPE (t_op1)), 1);
/* Force operands into registers when the vcond pattern's operand 4/5
   predicates reject them (VOIDmode consts are left alone).  */
5403 if (!insn_data[icode].operand[4].predicate (rtx_op0, GET_MODE (rtx_op0))
5404 && GET_MODE (rtx_op0) != VOIDmode)
5405 rtx_op0 = force_reg (GET_MODE (rtx_op0), rtx_op0);
5407 if (!insn_data[icode].operand[5].predicate (rtx_op1, GET_MODE (rtx_op1))
5408 && GET_MODE (rtx_op1) != VOIDmode)
5409 rtx_op1 = force_reg (GET_MODE (rtx_op1), rtx_op1);
/* Build the bare comparison rtx; no compare insn is emitted here.  */
5411 return gen_rtx_fmt_ee (rcode, VOIDmode, rtx_op0, rtx_op1);
5414 /* Return insn code for VEC_COND_EXPR EXPR. */
5416 static inline enum insn_code
5417 get_vcond_icode (tree expr, enum machine_mode mode)
5419 enum insn_code icode = CODE_FOR_nothing;
/* Pick the unsigned or signed vcond pattern from the per-mode tables;
   the `else' and `return icode;' lines were dropped by extraction.  */
5421 if (TYPE_UNSIGNED (TREE_TYPE (expr)))
5422 icode = vcondu_gen_code[mode];
5424 icode = vcond_gen_code[mode];
5428 /* Return TRUE iff, appropriate vector insns are available
5429 for vector cond expr expr in VMODE mode. */
5432 expand_vec_cond_expr_p (tree expr, enum machine_mode vmode)
/* Predicate wrapper over get_vcond_icode; the `return false;' /
   `return true;' lines are missing from this extract.  */
5434 if (get_vcond_icode (expr, vmode) == CODE_FOR_nothing)
5439 /* Generate insns for VEC_COND_EXPR. */
5442 expand_vec_cond_expr (tree vec_cond_expr, rtx target)
5444 enum insn_code icode;
5445 rtx comparison, rtx_op1, rtx_op2, cc_op0, cc_op1;
5446 enum machine_mode mode = TYPE_MODE (TREE_TYPE (vec_cond_expr));
5447 bool unsignedp = TYPE_UNSIGNED (TREE_TYPE (vec_cond_expr));
5449 icode = get_vcond_icode (vec_cond_expr, mode);
5450 if (icode == CODE_FOR_nothing)
/* (return 0 on failure -- line elided)  */
5454 target = gen_reg_rtx (mode);
5456 /* Get comparison rtx. First expand both cond expr operands. */
5457 comparison = vector_compare_rtx (TREE_OPERAND (vec_cond_expr, 0),
5459 cc_op0 = XEXP (comparison, 0);
5460 cc_op1 = XEXP (comparison, 1);
5461 /* Expand both operands and force them in reg, if required. */
5462 rtx_op1 = expand_expr (TREE_OPERAND (vec_cond_expr, 1),
5463 NULL_RTX, VOIDmode, 1);
5464 if (!insn_data[icode].operand[1].predicate (rtx_op1, mode)
5465 && mode != VOIDmode)
5466 rtx_op1 = force_reg (mode, rtx_op1);
5468 rtx_op2 = expand_expr (TREE_OPERAND (vec_cond_expr, 2),
5469 NULL_RTX, VOIDmode, 1);
5470 if (!insn_data[icode].operand[2].predicate (rtx_op2, mode)
5471 && mode != VOIDmode)
5472 rtx_op2 = force_reg (mode, rtx_op2);
5474 /* Emit instruction! */
5475 emit_insn (GEN_FCN (icode) (target, rtx_op1, rtx_op2,
5476 comparison, cc_op0, cc_op1));
5482 /* This is an internal subroutine of the other compare_and_swap expanders.
5483 MEM, OLD_VAL and NEW_VAL are as you'd expect for a compare-and-swap
5484 operation. TARGET is an optional place to store the value result of
5485 the operation. ICODE is the particular instruction to expand. Return
5486 the result of the operation. */
5489 expand_val_compare_and_swap_1 (rtx mem, rtx old_val, rtx new_val,
5490 rtx target, enum insn_code icode)
5492 enum machine_mode mode = GET_MODE (mem);
/* `rtx insn;' declaration elided by extraction.  */
5495 if (!target || !insn_data[icode].operand[0].predicate (target, mode))
5496 target = gen_reg_rtx (mode);
/* Widen/convert the expected and replacement values to MEM's mode and
   satisfy the pattern's operand predicates.  */
5498 if (GET_MODE (old_val) != VOIDmode && GET_MODE (old_val) != mode)
5499 old_val = convert_modes (mode, GET_MODE (old_val), old_val, 1);
5500 if (!insn_data[icode].operand[2].predicate (old_val, mode))
5501 old_val = force_reg (mode, old_val);
5503 if (GET_MODE (new_val) != VOIDmode && GET_MODE (new_val) != mode)
5504 new_val = convert_modes (mode, GET_MODE (new_val), new_val, 1);
5505 if (!insn_data[icode].operand[3].predicate (new_val, mode))
5506 new_val = force_reg (mode, new_val);
5508 insn = GEN_FCN (icode) (target, mem, old_val, new_val);
5509 if (insn == NULL_RTX)
/* (NULL return and emit_insn/return target tail elided.)  */
5516 /* Expand a compare-and-swap operation and return its value. */
5519 expand_val_compare_and_swap (rtx mem, rtx old_val, rtx new_val, rtx target)
5521 enum machine_mode mode = GET_MODE (mem);
5522 enum insn_code icode = sync_compare_and_swap[mode];
5524 if (icode == CODE_FOR_nothing)
5527 return expand_val_compare_and_swap_1 (mem, old_val, new_val, target, icode);
5530 /* Expand a compare-and-swap operation and store true into the result if
5531 the operation was successful and false otherwise. Return the result.
5532 Unlike other routines, TARGET is not optional. */
5535 expand_bool_compare_and_swap (rtx mem, rtx old_val, rtx new_val, rtx target)
5537 enum machine_mode mode = GET_MODE (mem);
5538 enum insn_code icode;
5539 rtx subtarget, label0, label1;
5541 /* If the target supports a compare-and-swap pattern that simultaneously
5542 sets some flag for success, then use it. Otherwise use the regular
5543 compare-and-swap and follow that immediately with a compare insn. */
5544 icode = sync_compare_and_swap_cc[mode];
/* NOTE(review): the `switch (icode)' header and the _cc case's label
   appear to be elided here; L2982-L2983 are the _cc branch body.  */
5548 subtarget = expand_val_compare_and_swap_1 (mem, old_val, new_val,
5550 if (subtarget != NULL_RTX)
5554 case CODE_FOR_nothing:
5555 icode = sync_compare_and_swap[mode];
5556 if (icode == CODE_FOR_nothing)
5559 /* Ensure that if old_val == mem, that we're not comparing
5560 against an old value. */
5561 if (GET_CODE (old_val) == MEM)
5562 old_val = force_reg (mode, old_val);
5564 subtarget = expand_val_compare_and_swap_1 (mem, old_val, new_val,
5566 if (subtarget == NULL_RTX)
/* Compare the returned value against the expected one to synthesize
   the success flag.  */
5569 emit_cmp_insn (subtarget, old_val, EQ, const0_rtx, mode, true);
5572 /* If the target has a sane STORE_FLAG_VALUE, then go ahead and use a
5573 setcc instruction from the beginning. We don't work too hard here,
5574 but it's nice to not be stupid about initial code gen either. */
5575 if (STORE_FLAG_VALUE == 1)
5577 icode = setcc_gen_code[EQ];
5578 if (icode != CODE_FOR_nothing)
5580 enum machine_mode cmode = insn_data[icode].operand[0].mode;
5584 if (!insn_data[icode].operand[0].predicate (target, cmode))
5585 subtarget = gen_reg_rtx (cmode);
5587 insn = GEN_FCN (icode) (subtarget)
5591 if (GET_MODE (target) != GET_MODE (subtarget))
5593 convert_move (target, subtarget, 1);
5601 /* Without an appropriate setcc instruction, use a set of branches to
5602 get 1 and 0 stored into target. Presumably if the target has a
5603 STORE_FLAG_VALUE that isn't 1, then this will get cleaned up by ifcvt. */
5605 label0 = gen_label_rtx ();
5606 label1 = gen_label_rtx ();
5608 emit_jump_insn (bcc_gen_fctn[EQ] (label0));
5609 emit_move_insn (target, const0_rtx);
5610 emit_jump_insn (gen_jump (label1));
5611 emit_label (label0);
5612 emit_move_insn (target, const1_rtx);
5613 emit_label (label1);
5618 /* This is a helper function for the other atomic operations. This function
5619 emits a loop that contains SEQ that iterates until a compare-and-swap
5620 operation at the end succeeds. MEM is the memory to be modified. SEQ is
5621 a set of instructions that takes a value from OLD_REG as an input and
5622 produces a value in NEW_REG as an output. Before SEQ, OLD_REG will be
5623 set to the current contents of MEM. After SEQ, a compare-and-swap will
5624 attempt to update MEM with NEW_REG. The function returns true when the
5625 loop was generated successfully. */
5628 expand_compare_and_swap_loop (rtx mem, rtx old_reg, rtx new_reg, rtx seq)
5630 enum machine_mode mode = GET_MODE (mem);
5631 enum insn_code icode;
5632 rtx label, subtarget;
5634 /* The loop we want to generate looks like
5639 old_reg = compare-and-swap(mem, old_reg, new_reg)
5640 if (old_reg != new_reg)
5643 Note that we only do the plain load from memory once. Subsequent
5644 iterations use the value loaded by the compare-and-swap pattern. */
5646 label = gen_label_rtx ();
5648 emit_move_insn (old_reg, mem);
/* (emit_label and emit_insn (seq) lines elided by extraction.)  */
5653 /* If the target supports a compare-and-swap pattern that simultaneously
5654 sets some flag for success, then use it. Otherwise use the regular
5655 compare-and-swap and follow that immediately with a compare insn. */
5656 icode = sync_compare_and_swap_cc[mode];
/* NOTE(review): the `switch (icode)' header is elided; the next two
   statements are the _cc case's body.  */
5660 subtarget = expand_val_compare_and_swap_1 (mem, old_reg, new_reg,
5662 if (subtarget != NULL_RTX)
5666 case CODE_FOR_nothing:
5667 icode = sync_compare_and_swap[mode];
5668 if (icode == CODE_FOR_nothing)
5671 subtarget = expand_val_compare_and_swap_1 (mem, old_reg, new_reg,
5673 if (subtarget == NULL_RTX)
5676 emit_cmp_insn (subtarget, old_reg, EQ, const0_rtx, mode, true);
5679 /* ??? Mark this jump predicted not taken? */
5680 emit_jump_insn (bcc_gen_fctn[NE] (label));
5685 /* This function generates the atomic operation MEM CODE= VAL. In this
5686 case, we do not care about any resulting value. Returns NULL if we
5687 cannot generate the operation. */
5690 expand_sync_operation (rtx mem, rtx val, enum rtx_code code)
5692 enum machine_mode mode = GET_MODE (mem);
5693 enum insn_code icode;
/* `rtx insn;' declaration elided by extraction.  */
5696 /* Look to see if the target supports the operation directly. */
/* NOTE(review): the `switch (code)' header and the case labels (PLUS,
   IOR, XOR, AND, NOT, MINUS) are elided; each assignment below belongs
   to one of those cases.  */
5700 icode = sync_add_optab[mode];
5703 icode = sync_ior_optab[mode];
5706 icode = sync_xor_optab[mode];
5709 icode = sync_and_optab[mode];
5712 icode = sync_nand_optab[mode];
/* MINUS: fall back to adding the negation when there is no sync_sub.  */
5716 icode = sync_sub_optab[mode];
5717 if (icode == CODE_FOR_nothing)
5719 icode = sync_add_optab[mode];
5720 if (icode != CODE_FOR_nothing)
5722 val = expand_simple_unop (mode, NEG, val, NULL_RTX, 1);
5732 /* Generate the direct operation, if present. */
5733 if (icode != CODE_FOR_nothing)
5735 if (GET_MODE (val) != VOIDmode && GET_MODE (val) != mode)
5736 val = convert_modes (mode, GET_MODE (val), val, 1);
5737 if (!insn_data[icode].operand[1].predicate (val, mode))
5738 val = force_reg (mode, val);
5740 insn = GEN_FCN (icode) (mem, val);
5748 /* Failing that, generate a compare-and-swap loop in which we perform the
5749 operation with normal arithmetic instructions. */
5750 if (sync_compare_and_swap[mode] != CODE_FOR_nothing)
5752 rtx t0 = gen_reg_rtx (mode), t1;
/* NAND is modelled as NOT followed by AND with VAL.  */
5759 t1 = expand_simple_unop (mode, NOT, t1, NULL_RTX, true);
5762 t1 = expand_simple_binop (mode, code, t1, val, NULL_RTX,
5763 true, OPTAB_LIB_WIDEN);
5765 insn = get_insns ();
5768 if (t1 != NULL && expand_compare_and_swap_loop (mem, t0, t1, insn))
5775 /* This function generates the atomic operation MEM CODE= VAL. In this
5776 case, we do care about the resulting value: if AFTER is true then
5777 return the value MEM holds after the operation, if AFTER is false
5778 then return the value MEM holds before the operation. TARGET is an
5779 optional place for the result value to be stored. */
5782 expand_sync_fetch_operation (rtx mem, rtx val, enum rtx_code code,
5783 bool after, rtx target)
5785 enum machine_mode mode = GET_MODE (mem);
5786 enum insn_code old_code, new_code, icode;
/* `bool compensate;' and `rtx insn;' declarations elided.  */
5790 /* Look to see if the target supports the operation directly. */
/* NOTE(review): the `switch (code)' header and case labels are elided;
   each old/new pair below belongs to one arithmetic case.  */
5794 old_code = sync_old_add_optab[mode];
5795 new_code = sync_new_add_optab[mode];
5798 old_code = sync_old_ior_optab[mode];
5799 new_code = sync_new_ior_optab[mode];
5802 old_code = sync_old_xor_optab[mode];
5803 new_code = sync_new_xor_optab[mode];
5806 old_code = sync_old_and_optab[mode];
5807 new_code = sync_new_and_optab[mode];
5810 old_code = sync_old_nand_optab[mode];
5811 new_code = sync_new_nand_optab[mode];
/* MINUS: fall back to fetch-add of the negation when neither sub
   pattern exists.  */
5815 old_code = sync_old_sub_optab[mode];
5816 new_code = sync_new_sub_optab[mode];
5817 if (old_code == CODE_FOR_nothing && new_code == CODE_FOR_nothing)
5819 old_code = sync_old_add_optab[mode];
5820 new_code = sync_new_add_optab[mode];
5821 if (old_code != CODE_FOR_nothing || new_code != CODE_FOR_nothing)
5823 val = expand_simple_unop (mode, NEG, val, NULL_RTX, 1);
5833 /* If the target does supports the proper new/old operation, great. But
5834 if we only support the opposite old/new operation, check to see if we
5835 can compensate. In the case in which the old value is supported, then
5836 we can always perform the operation again with normal arithmetic. In
5837 the case in which the new value is supported, then we can only handle
5838 this in the case the operation is reversible. */
/* (icode selection for AFTER true/false partly elided here.)  */
5843 if (icode == CODE_FOR_nothing)
5846 if (icode != CODE_FOR_nothing)
5853 if (icode == CODE_FOR_nothing
5854 && (code == PLUS || code == MINUS || code == XOR))
5857 if (icode != CODE_FOR_nothing)
5862 /* If we found something supported, great. */
5863 if (icode != CODE_FOR_nothing)
5865 if (!target || !insn_data[icode].operand[0].predicate (target, mode))
5866 target = gen_reg_rtx (mode);
5868 if (GET_MODE (val) != VOIDmode && GET_MODE (val) != mode)
5869 val = convert_modes (mode, GET_MODE (val), val, 1);
5870 if (!insn_data[icode].operand[2].predicate (val, mode))
5871 val = force_reg (mode, val);
5873 insn = GEN_FCN (icode) (target, mem, val);
5878 /* If we need to compensate for using an operation with the
5879 wrong return value, do so now. */
/* (compensation branches for PLUS/NAND partly elided.)  */
5886 else if (code == MINUS)
5891 target = expand_simple_unop (mode, NOT, target, NULL_RTX, true);
5892 target = expand_simple_binop (mode, code, target, val, NULL_RTX,
5893 true, OPTAB_LIB_WIDEN);
5900 /* Failing that, generate a compare-and-swap loop in which we perform the
5901 operation with normal arithmetic instructions. */
5902 if (sync_compare_and_swap[mode] != CODE_FOR_nothing)
5904 rtx t0 = gen_reg_rtx (mode), t1;
5906 if (!target || !register_operand (target, mode))
5907 target = gen_reg_rtx (mode);
/* Capture the "before" value when AFTER is false.  */
5912 emit_move_insn (target, t0);
5916 t1 = expand_simple_unop (mode, NOT, t1, NULL_RTX, true);
5919 t1 = expand_simple_binop (mode, code, t1, val, NULL_RTX,
5920 true, OPTAB_LIB_WIDEN);
/* Capture the "after" value when AFTER is true.  */
5922 emit_move_insn (target, t1);
5924 insn = get_insns ();
5927 if (t1 != NULL && expand_compare_and_swap_loop (mem, t0, t1, insn))
5934 /* This function expands a test-and-set operation. Ideally we atomically
5935 store VAL in MEM and return the previous value in MEM. Some targets
5936 may not support this operation and only support VAL with the constant 1;
5937 in this case the return value will be 0/1, but the exact value
5938 stored in MEM is target defined. TARGET is an optional place to stick
5939 the return value. */
/* NOTE(review): this chunk is an elided excerpt -- the leading numbers on
   each line are original source line numbers, and the gaps between them
   (braces, return statements, declarations) are missing from this view.  */
5942 expand_sync_lock_test_and_set (rtx mem, rtx val, rtx target)
5944 enum machine_mode mode = GET_MODE (mem);
5945 enum insn_code icode;
5948 /* If the target supports the test-and-set directly, great. */
5949 icode = sync_lock_test_and_set[mode];
5950 if (icode != CODE_FOR_nothing)
/* Make sure TARGET and VAL satisfy the insn's operand predicates and
   modes before emitting it.  */
5952 if (!target || !insn_data[icode].operand[0].predicate (target, mode))
5953 target = gen_reg_rtx (mode);
5955 if (GET_MODE (val) != VOIDmode && GET_MODE (val) != mode)
5956 val = convert_modes (mode, GET_MODE (val), val, 1);
5957 if (!insn_data[icode].operand[2].predicate (val, mode))
5958 val = force_reg (mode, val);
5960 insn = GEN_FCN (icode) (target, mem, val);
5968 /* Otherwise, use a compare-and-swap loop for the exchange. */
5969 if (sync_compare_and_swap[mode] != CODE_FOR_nothing)
5971 if (!target || !register_operand (target, mode))
5972 target = gen_reg_rtx (mode);
5973 if (GET_MODE (val) != VOIDmode && GET_MODE (val) != mode)
5974 val = convert_modes (mode, GET_MODE (val), val, 1);
/* NULL_RTX for the sequence argument: the new value VAL does not
   depend on the old value, so no per-iteration computation is needed.  */
5975 if (expand_compare_and_swap_loop (mem, target, val, NULL_RTX))
5982 #include "gt-optabs.h"