1 /* Expand the basic unary and binary arithmetic operations, for GNU compiler.
2 Copyright (C) 1987, 1988, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
3 1999, 2000, 2001, 2002, 2003 Free Software Foundation, Inc.
5 This file is part of GCC.
7 GCC is free software; you can redistribute it and/or modify it under
8 the terms of the GNU General Public License as published by the Free
9 Software Foundation; either version 2, or (at your option) any later
12 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
13 WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING. If not, write to the Free
19 Software Foundation, 59 Temple Place - Suite 330, Boston, MA
25 #include "coretypes.h"
29 /* Include insn-config.h before expr.h so that HAVE_conditional_move
30 is properly defined. */
31 #include "insn-config.h"
45 #include "basic-block.h"
48 /* Each optab contains info on how this target machine
49 can perform a particular operation
50 for all sizes and kinds of operands.
52 The operation to be performed is often specified
53 by passing one of these optabs as an argument.
55 See expr.h for documentation of these optabs. */
57 optab optab_table[OTI_MAX];
/* Library function (libcall) rtxes, indexed by libfunc index. */
59 rtx libfunc_table[LTI_MAX];
61 /* Tables of patterns for extending one integer mode to another. */
/* NOTE(review): the trailing [2] index presumably selects signed (0) vs
   unsigned (1) extension -- confirm against the users of extendtab, which
   are not visible in this excerpt. */
62 enum insn_code extendtab[MAX_MACHINE_MODE][MAX_MACHINE_MODE][2];
64 /* Tables of patterns for converting between fixed and floating point. */
65 enum insn_code fixtab[NUM_MACHINE_MODES][NUM_MACHINE_MODES][2];
66 enum insn_code fixtrunctab[NUM_MACHINE_MODES][NUM_MACHINE_MODES][2];
67 enum insn_code floattab[NUM_MACHINE_MODES][NUM_MACHINE_MODES][2];
69 /* Contains the optab used for each rtx code. */
70 optab code_to_optab[NUM_RTX_CODE + 1];
72 /* Indexed by the rtx-code for a conditional (eg. EQ, LT,...)
73 gives the gen_function to make a branch to test that condition. */
75 rtxfun bcc_gen_fctn[NUM_RTX_CODE];
77 /* Indexed by the rtx-code for a conditional (eg. EQ, LT,...)
78 gives the insn code to make a store-condition insn
79 to test that condition. */
81 enum insn_code setcc_gen_code[NUM_RTX_CODE];
83 #ifdef HAVE_conditional_move
84 /* Indexed by the machine mode, gives the insn code to make a conditional
85 move insn. This is not indexed by the rtx-code like bcc_gen_fctn and
86 setcc_gen_code to cut down on the number of named patterns. Consider a day
87 when a lot more rtx codes are conditional (eg: for the ARM). */
89 enum insn_code movcc_gen_code[NUM_MACHINE_MODES];
/* NOTE(review): the matching #endif for HAVE_conditional_move (original
   line ~90) is not present in this excerpt of the file. */
97 static int add_equal_note (rtx, rtx, enum rtx_code, rtx, rtx);
98 static rtx widen_operand (rtx, enum machine_mode, enum machine_mode, int,
100 static int expand_cmplxdiv_straight (rtx, rtx, rtx, rtx, rtx, rtx,
101 enum machine_mode, int,
102 enum optab_methods, enum mode_class,
104 static int expand_cmplxdiv_wide (rtx, rtx, rtx, rtx, rtx, rtx,
105 enum machine_mode, int, enum optab_methods,
106 enum mode_class, optab);
107 static void prepare_cmp_insn (rtx *, rtx *, enum rtx_code *, rtx,
108 enum machine_mode *, int *,
109 enum can_compare_purpose);
110 static enum insn_code can_fix_p (enum machine_mode, enum machine_mode, int,
112 static enum insn_code can_float_p (enum machine_mode, enum machine_mode, int);
113 static rtx ftruncify (rtx);
114 static optab new_optab (void);
115 static inline optab init_optab (enum rtx_code);
116 static inline optab init_optabv (enum rtx_code);
117 static void init_libfuncs (optab, int, int, const char *, int);
118 static void init_integral_libfuncs (optab, const char *, int);
119 static void init_floating_libfuncs (optab, const char *, int);
120 static void emit_cmp_and_jump_insn_1 (rtx, rtx, enum machine_mode,
121 enum rtx_code, int, rtx);
122 static void prepare_float_lib_cmp (rtx *, rtx *, enum rtx_code *,
123 enum machine_mode *, int *);
124 static rtx expand_vector_binop (enum machine_mode, optab, rtx, rtx, rtx, int,
126 static rtx expand_vector_unop (enum machine_mode, optab, rtx, rtx, int);
127 static rtx widen_clz (enum machine_mode, rtx, rtx);
128 static rtx expand_parity (enum machine_mode, rtx, rtx);
130 #ifndef HAVE_conditional_trap
131 #define HAVE_conditional_trap 0
132 #define gen_conditional_trap(a,b) (abort (), NULL_RTX)
135 /* Add a REG_EQUAL note to the last insn in INSNS. TARGET is being set to
136 the result of operation CODE applied to OP0 (and OP1 if it is a binary
139 If the last insn does not set TARGET, don't do anything, but return 1.
141 If a previous insn sets TARGET and TARGET is one of OP0 or OP1,
142 don't add the REG_EQUAL note but return 0. Our caller can then try
143 again, ensuring that TARGET is not one of the operands. */
/* NOTE(review): this listing omits interior source lines (the embedded
   line numbers skip), so some statements of this function are not shown. */
146 add_equal_note (rtx insns, rtx target, enum rtx_code code, rtx op0, rtx op1)
148 rtx last_insn, insn, set;
153 || NEXT_INSN (insns) == NULL_RTX)
/* Only rtx classes '1' (unary), '2'/'c' (binary), and '<' (comparison)
   can be expressed as a REG_EQUAL note; give up on anything else. */
156 if (GET_RTX_CLASS (code) != '1' && GET_RTX_CLASS (code) != '2'
157 && GET_RTX_CLASS (code) != 'c' && GET_RTX_CLASS (code) != '<')
160 if (GET_CODE (target) == ZERO_EXTRACT)
/* Walk forward to the final insn of the emitted sequence. */
163 for (last_insn = insns;
164 NEXT_INSN (last_insn) != NULL_RTX;
165 last_insn = NEXT_INSN (last_insn))
168 set = single_set (last_insn);
172 if (! rtx_equal_p (SET_DEST (set), target)
173 /* For a STRICT_LOW_PART, the REG_NOTE applies to what is inside it. */
174 && (GET_CODE (SET_DEST (set)) != STRICT_LOW_PART
175 || ! rtx_equal_p (XEXP (SET_DEST (set), 0), target)))
178 /* If TARGET is in OP0 or OP1, check if anything in SEQ sets TARGET
179 besides the last insn. */
180 if (reg_overlap_mentioned_p (target, op0)
181 || (op1 && reg_overlap_mentioned_p (target, op1)))
/* Scan backwards over the earlier insns of the sequence. */
183 insn = PREV_INSN (last_insn);
184 while (insn != NULL_RTX)
186 if (reg_set_p (target, insn))
189 insn = PREV_INSN (insn);
/* Build the equivalence expression: unary codes take one operand,
   all other accepted classes take two. */
193 if (GET_RTX_CLASS (code) == '1')
194 note = gen_rtx_fmt_e (code, GET_MODE (target), copy_rtx (op0));
196 note = gen_rtx_fmt_ee (code, GET_MODE (target), copy_rtx (op0), copy_rtx (op1));
198 set_unique_reg_note (last_insn, REG_EQUAL, note);
203 /* Widen OP to MODE and return the rtx for the widened operand. UNSIGNEDP
204 says whether OP is signed or unsigned. NO_EXTEND is nonzero if we need
205 not actually do a sign-extend or zero-extend, but can leave the
206 higher-order bits of the result rtx undefined, for example, in the case
207 of logical operations, but not right shifts. */
/* NOTE(review): interior source lines are missing from this listing. */
210 widen_operand (rtx op, enum machine_mode mode, enum machine_mode oldmode,
211 int unsignedp, int no_extend)
215 /* If we don't have to extend and this is a constant, return it. */
216 if (no_extend && GET_MODE (op) == VOIDmode)
219 /* If we must extend do so. If OP is a SUBREG for a promoted object, also
220 extend since it will be more efficient to do so unless the signedness of
221 a promoted object differs from our extension. */
223 || (GET_CODE (op) == SUBREG && SUBREG_PROMOTED_VAR_P (op)
224 && SUBREG_PROMOTED_UNSIGNED_P (op) == unsignedp))
225 return convert_modes (mode, oldmode, op, unsignedp);
227 /* If MODE is no wider than a single word, we return a paradoxical
229 if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
230 return gen_rtx_SUBREG (mode, force_reg (GET_MODE (op), op), 0);
232 /* Otherwise, get an object of MODE, clobber it, and set the low-order
235 result = gen_reg_rtx (mode);
/* CLOBBER tells the flow pass the whole register is (re)defined here,
   even though only the low part is written by the following move. */
236 emit_insn (gen_rtx_CLOBBER (VOIDmode, result))
237 emit_move_insn (gen_lowpart (GET_MODE (op), result), op);
241 /* Generate code to perform a straightforward complex divide. */
/* Divides (a+ib) by (c+id) using the textbook formula
   ((a+ib)(c-id)) / (c*c + d*d).  REAL0/IMAG0 are the dividend parts,
   REAL1/IMAG1 the divisor parts, REALR/IMAGR receive the result.
   NOTE(review): interior source lines are missing from this listing. */
244 expand_cmplxdiv_straight (rtx real0, rtx real1, rtx imag0, rtx imag1,
245 rtx realr, rtx imagr, enum machine_mode submode,
246 int unsignedp, enum optab_methods methods,
247 enum mode_class class, optab binoptab)
253 optab this_add_optab = add_optab;
254 optab this_sub_optab = sub_optab;
255 optab this_neg_optab = neg_optab;
256 optab this_mul_optab = smul_optab;
/* For trapping (overflow-checking) signed division, use the trapping
   variants of the component operations as well. */
258 if (binoptab == sdivv_optab)
260 this_add_optab = addv_optab;
261 this_sub_optab = subv_optab;
262 this_neg_optab = negv_optab;
263 this_mul_optab = smulv_optab;
266 /* Don't fetch these from memory more than once. */
267 real0 = force_reg (submode, real0);
268 real1 = force_reg (submode, real1);
271 imag0 = force_reg (submode, imag0);
273 imag1 = force_reg (submode, imag1);
275 /* Divisor: c*c + d*d. */
276 temp1 = expand_binop (submode, this_mul_optab, real1, real1,
277 NULL_RTX, unsignedp, methods);
279 temp2 = expand_binop (submode, this_mul_optab, imag1, imag1,
280 NULL_RTX, unsignedp, methods);
282 if (temp1 == 0 || temp2 == 0)
285 divisor = expand_binop (submode, this_add_optab, temp1, temp2,
286 NULL_RTX, unsignedp, methods);
292 /* Mathematically, ((a)(c-id))/divisor. */
293 /* Computationally, (a+i0) / (c+id) = (ac/(cc+dd)) + i(-ad/(cc+dd)). */
295 /* Calculate the dividend. */
296 real_t = expand_binop (submode, this_mul_optab, real0, real1,
297 NULL_RTX, unsignedp, methods);
299 imag_t = expand_binop (submode, this_mul_optab, real0, imag1,
300 NULL_RTX, unsignedp, methods);
302 if (real_t == 0 || imag_t == 0)
305 imag_t = expand_unop (submode, this_neg_optab, imag_t,
306 NULL_RTX, unsignedp);
310 /* Mathematically, ((a+ib)(c-id))/divider. */
311 /* Calculate the dividend. */
312 temp1 = expand_binop (submode, this_mul_optab, real0, real1,
313 NULL_RTX, unsignedp, methods);
315 temp2 = expand_binop (submode, this_mul_optab, imag0, imag1,
316 NULL_RTX, unsignedp, methods);
318 if (temp1 == 0 || temp2 == 0)
/* Real part of numerator: a*c + b*d. */
321 real_t = expand_binop (submode, this_add_optab, temp1, temp2,
322 NULL_RTX, unsignedp, methods);
324 temp1 = expand_binop (submode, this_mul_optab, imag0, real1,
325 NULL_RTX, unsignedp, methods);
327 temp2 = expand_binop (submode, this_mul_optab, real0, imag1,
328 NULL_RTX, unsignedp, methods);
330 if (temp1 == 0 || temp2 == 0)
/* Imaginary part of numerator: b*c - a*d. */
333 imag_t = expand_binop (submode, this_sub_optab, temp1, temp2,
334 NULL_RTX, unsignedp, methods);
336 if (real_t == 0 || imag_t == 0)
/* Divide each numerator part by the common divisor: a real binop for
   complex-float modes, an integer TRUNC_DIV otherwise. */
340 if (class == MODE_COMPLEX_FLOAT)
341 res = expand_binop (submode, binoptab, real_t, divisor,
342 realr, unsignedp, methods);
344 res = expand_divmod (0, TRUNC_DIV_EXPR, submode,
345 real_t, divisor, realr, unsignedp);
351 emit_move_insn (realr, res);
353 if (class == MODE_COMPLEX_FLOAT)
354 res = expand_binop (submode, binoptab, imag_t, divisor,
355 imagr, unsignedp, methods);
357 res = expand_divmod (0, TRUNC_DIV_EXPR, submode,
358 imag_t, divisor, imagr, unsignedp);
364 emit_move_insn (imagr, res);
369 /* Generate code to perform a wide-input-range-acceptable complex divide. */
/* Smith's algorithm: scale by the ratio of the smaller to the larger
   divisor component to avoid overflow/underflow in c*c + d*d.  Emits a
   runtime |c| vs |d| comparison (lab1/lab2) selecting one of two
   symmetric code paths.
   NOTE(review): interior source lines are missing from this listing. */
372 expand_cmplxdiv_wide (rtx real0, rtx real1, rtx imag0, rtx imag1, rtx realr,
373 rtx imagr, enum machine_mode submode, int unsignedp,
374 enum optab_methods methods, enum mode_class class,
379 rtx temp1, temp2, lab1, lab2;
380 enum machine_mode mode;
382 optab this_add_optab = add_optab;
383 optab this_sub_optab = sub_optab;
384 optab this_neg_optab = neg_optab;
385 optab this_mul_optab = smul_optab;
/* For trapping (overflow-checking) signed division, use the trapping
   variants of the component operations as well. */
387 if (binoptab == sdivv_optab)
389 this_add_optab = addv_optab;
390 this_sub_optab = subv_optab;
391 this_neg_optab = negv_optab;
392 this_mul_optab = smulv_optab;
395 /* Don't fetch these from memory more than once. */
396 real0 = force_reg (submode, real0);
397 real1 = force_reg (submode, real1);
400 imag0 = force_reg (submode, imag0);
402 imag1 = force_reg (submode, imag1);
404 /* XXX What's an "unsigned" complex number? */
/* Compare |c| (real part) against |d| (imaginary part) of the divisor. */
412 temp1 = expand_abs (submode, real1, NULL_RTX, unsignedp, 1);
413 temp2 = expand_abs (submode, imag1, NULL_RTX, unsignedp, 1);
416 if (temp1 == 0 || temp2 == 0)
419 mode = GET_MODE (temp1);
420 lab1 = gen_label_rtx ();
421 emit_cmp_and_jump_insns (temp1, temp2, LT, NULL_RTX,
422 mode, unsignedp, lab1);
424 /* |c| >= |d|; use ratio d/c to scale dividend and divisor. */
426 if (class == MODE_COMPLEX_FLOAT)
427 ratio = expand_binop (submode, binoptab, imag1, real1,
428 NULL_RTX, unsignedp, methods);
430 ratio = expand_divmod (0, TRUNC_DIV_EXPR, submode,
431 imag1, real1, NULL_RTX, unsignedp);
436 /* Calculate divisor. */
/* divisor = d * (d/c) + c. */
438 temp1 = expand_binop (submode, this_mul_optab, imag1, ratio,
439 NULL_RTX, unsignedp, methods);
444 divisor = expand_binop (submode, this_add_optab, temp1, real1,
445 NULL_RTX, unsignedp, methods);
450 /* Calculate dividend. */
456 /* Compute a / (c+id) as a / (c+d(d/c)) + i (-a(d/c)) / (c+d(d/c)). */
458 imag_t = expand_binop (submode, this_mul_optab, real0, ratio,
459 NULL_RTX, unsignedp, methods);
464 imag_t = expand_unop (submode, this_neg_optab, imag_t,
465 NULL_RTX, unsignedp);
467 if (real_t == 0 || imag_t == 0)
472 /* Compute (a+ib)/(c+id) as
473 (a+b(d/c))/(c+d(d/c) + i(b-a(d/c))/(c+d(d/c)). */
475 temp1 = expand_binop (submode, this_mul_optab, imag0, ratio,
476 NULL_RTX, unsignedp, methods);
481 real_t = expand_binop (submode, this_add_optab, temp1, real0,
482 NULL_RTX, unsignedp, methods);
484 temp1 = expand_binop (submode, this_mul_optab, real0, ratio,
485 NULL_RTX, unsignedp, methods);
490 imag_t = expand_binop (submode, this_sub_optab, imag0, temp1,
491 NULL_RTX, unsignedp, methods);
493 if (real_t == 0 || imag_t == 0)
497 if (class == MODE_COMPLEX_FLOAT)
498 res = expand_binop (submode, binoptab, real_t, divisor,
499 realr, unsignedp, methods);
501 res = expand_divmod (0, TRUNC_DIV_EXPR, submode,
502 real_t, divisor, realr, unsignedp);
508 emit_move_insn (realr, res);
510 if (class == MODE_COMPLEX_FLOAT)
511 res = expand_binop (submode, binoptab, imag_t, divisor,
512 imagr, unsignedp, methods);
514 res = expand_divmod (0, TRUNC_DIV_EXPR, submode,
515 imag_t, divisor, imagr, unsignedp);
521 emit_move_insn (imagr, res);
/* End of the |c| >= |d| arm; skip over the symmetric arm below. */
523 lab2 = gen_label_rtx ();
524 emit_jump_insn (gen_jump (lab2));
529 /* |d| > |c|; use ratio c/d to scale dividend and divisor. */
531 if (class == MODE_COMPLEX_FLOAT)
532 ratio = expand_binop (submode, binoptab, real1, imag1,
533 NULL_RTX, unsignedp, methods);
535 ratio = expand_divmod (0, TRUNC_DIV_EXPR, submode,
536 real1, imag1, NULL_RTX, unsignedp);
541 /* Calculate divisor. */
/* divisor = c * (c/d) + d. */
543 temp1 = expand_binop (submode, this_mul_optab, real1, ratio,
544 NULL_RTX, unsignedp, methods);
549 divisor = expand_binop (submode, this_add_optab, temp1, imag1,
550 NULL_RTX, unsignedp, methods);
555 /* Calculate dividend. */
559 /* Compute a / (c+id) as a(c/d) / (c(c/d)+d) + i (-a) / (c(c/d)+d). */
561 real_t = expand_binop (submode, this_mul_optab, real0, ratio,
562 NULL_RTX, unsignedp, methods);
564 imag_t = expand_unop (submode, this_neg_optab, real0,
565 NULL_RTX, unsignedp);
567 if (real_t == 0 || imag_t == 0)
572 /* Compute (a+ib)/(c+id) as
573 (a(c/d)+b)/(c(c/d)+d) + i (b(c/d)-a)/(c(c/d)+d). */
575 temp1 = expand_binop (submode, this_mul_optab, real0, ratio,
576 NULL_RTX, unsignedp, methods);
581 real_t = expand_binop (submode, this_add_optab, temp1, imag0,
582 NULL_RTX, unsignedp, methods);
584 temp1 = expand_binop (submode, this_mul_optab, imag0, ratio,
585 NULL_RTX, unsignedp, methods);
590 imag_t = expand_binop (submode, this_sub_optab, temp1, real0,
591 NULL_RTX, unsignedp, methods);
593 if (real_t == 0 || imag_t == 0)
597 if (class == MODE_COMPLEX_FLOAT)
598 res = expand_binop (submode, binoptab, real_t, divisor,
599 realr, unsignedp, methods);
601 res = expand_divmod (0, TRUNC_DIV_EXPR, submode,
602 real_t, divisor, realr, unsignedp);
608 emit_move_insn (realr, res);
610 if (class == MODE_COMPLEX_FLOAT)
611 res = expand_binop (submode, binoptab, imag_t, divisor,
612 imagr, unsignedp, methods);
614 res = expand_divmod (0, TRUNC_DIV_EXPR, submode,
615 imag_t, divisor, imagr, unsignedp);
621 emit_move_insn (imagr, res);
628 /* Wrapper around expand_binop which takes an rtx code to specify
629 the operation to perform, not an optab pointer. All other
630 arguments are the same. */
/* NOTE(review): interior source lines are missing from this listing
   (e.g. the original's check that the code maps to a valid optab). */
632 expand_simple_binop (enum machine_mode mode, enum rtx_code code, rtx op0,
633 rtx op1, rtx target, int unsignedp,
634 enum optab_methods methods)
/* Map the rtx code to its optab via the global translation table. */
636 optab binop = code_to_optab[(int) code];
640 return expand_binop (mode, binop, op0, op1, target, unsignedp, methods);
643 /* Generate code to perform an operation specified by BINOPTAB
644 on operands OP0 and OP1, with result having machine-mode MODE.
646 UNSIGNEDP is for the case where we have to widen the operands
647 to perform the operation. It says to use zero-extension.
649 If TARGET is nonzero, the value
650 is generated there, if it is convenient to do so.
651 In all cases an rtx is returned for the locus of the value;
652 this may or may not be TARGET. */
655 expand_binop (enum machine_mode mode, optab binoptab, rtx op0, rtx op1,
656 rtx target, int unsignedp, enum optab_methods methods)
658 enum optab_methods next_methods
659 = (methods == OPTAB_LIB || methods == OPTAB_LIB_WIDEN
660 ? OPTAB_WIDEN : methods);
661 enum mode_class class;
662 enum machine_mode wider_mode;
664 int commutative_op = 0;
665 int shift_op = (binoptab->code == ASHIFT
666 || binoptab->code == ASHIFTRT
667 || binoptab->code == LSHIFTRT
668 || binoptab->code == ROTATE
669 || binoptab->code == ROTATERT);
670 rtx entry_last = get_last_insn ();
673 class = GET_MODE_CLASS (mode);
675 op0 = protect_from_queue (op0, 0);
676 op1 = protect_from_queue (op1, 0);
678 target = protect_from_queue (target, 1);
682 /* Load duplicate non-volatile operands once. */
683 if (rtx_equal_p (op0, op1) && ! volatile_refs_p (op0))
685 op0 = force_not_mem (op0);
690 op0 = force_not_mem (op0);
691 op1 = force_not_mem (op1);
695 /* If subtracting an integer constant, convert this into an addition of
696 the negated constant. */
698 if (binoptab == sub_optab && GET_CODE (op1) == CONST_INT)
700 op1 = negate_rtx (mode, op1);
701 binoptab = add_optab;
704 /* If we are inside an appropriately-short loop and one operand is an
705 expensive constant, force it into a register. */
706 if (CONSTANT_P (op0) && preserve_subexpressions_p ()
707 && rtx_cost (op0, binoptab->code) > COSTS_N_INSNS (1))
708 op0 = force_reg (mode, op0);
710 if (CONSTANT_P (op1) && preserve_subexpressions_p ()
711 && ! shift_op && rtx_cost (op1, binoptab->code) > COSTS_N_INSNS (1))
712 op1 = force_reg (mode, op1);
714 /* Record where to delete back to if we backtrack. */
715 last = get_last_insn ();
717 /* If operation is commutative,
718 try to make the first operand a register.
719 Even better, try to make it the same as the target.
720 Also try to make the last operand a constant. */
721 if (GET_RTX_CLASS (binoptab->code) == 'c'
722 || binoptab == smul_widen_optab
723 || binoptab == umul_widen_optab
724 || binoptab == smul_highpart_optab
725 || binoptab == umul_highpart_optab)
729 if (((target == 0 || GET_CODE (target) == REG)
730 ? ((GET_CODE (op1) == REG
731 && GET_CODE (op0) != REG)
733 : rtx_equal_p (op1, target))
734 || GET_CODE (op0) == CONST_INT)
742 /* If we can do it with a three-operand insn, do so. */
744 if (methods != OPTAB_MUST_WIDEN
745 && binoptab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
747 int icode = (int) binoptab->handlers[(int) mode].insn_code;
748 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
749 enum machine_mode mode1 = insn_data[icode].operand[2].mode;
751 rtx xop0 = op0, xop1 = op1;
756 temp = gen_reg_rtx (mode);
758 /* If it is a commutative operator and the modes would match
759 if we would swap the operands, we can save the conversions. */
762 if (GET_MODE (op0) != mode0 && GET_MODE (op1) != mode1
763 && GET_MODE (op0) == mode1 && GET_MODE (op1) == mode0)
767 tmp = op0; op0 = op1; op1 = tmp;
768 tmp = xop0; xop0 = xop1; xop1 = tmp;
772 /* In case the insn wants input operands in modes different from
773 those of the actual operands, convert the operands. It would
774 seem that we don't need to convert CONST_INTs, but we do, so
775 that they're properly zero-extended, sign-extended or truncated
778 if (GET_MODE (op0) != mode0 && mode0 != VOIDmode)
779 xop0 = convert_modes (mode0,
780 GET_MODE (op0) != VOIDmode
785 if (GET_MODE (op1) != mode1 && mode1 != VOIDmode)
786 xop1 = convert_modes (mode1,
787 GET_MODE (op1) != VOIDmode
792 /* Now, if insn's predicates don't allow our operands, put them into
795 if (! (*insn_data[icode].operand[1].predicate) (xop0, mode0)
796 && mode0 != VOIDmode)
797 xop0 = copy_to_mode_reg (mode0, xop0);
799 if (! (*insn_data[icode].operand[2].predicate) (xop1, mode1)
800 && mode1 != VOIDmode)
801 xop1 = copy_to_mode_reg (mode1, xop1);
803 if (! (*insn_data[icode].operand[0].predicate) (temp, mode))
804 temp = gen_reg_rtx (mode);
806 pat = GEN_FCN (icode) (temp, xop0, xop1);
809 /* If PAT is composed of more than one insn, try to add an appropriate
810 REG_EQUAL note to it. If we can't because TEMP conflicts with an
811 operand, call ourselves again, this time without a target. */
812 if (INSN_P (pat) && NEXT_INSN (pat) != NULL_RTX
813 && ! add_equal_note (pat, temp, binoptab->code, xop0, xop1))
815 delete_insns_since (last);
816 return expand_binop (mode, binoptab, op0, op1, NULL_RTX,
824 delete_insns_since (last);
827 /* If this is a multiply, see if we can do a widening operation that
828 takes operands of this mode and makes a wider mode. */
830 if (binoptab == smul_optab && GET_MODE_WIDER_MODE (mode) != VOIDmode
831 && (((unsignedp ? umul_widen_optab : smul_widen_optab)
832 ->handlers[(int) GET_MODE_WIDER_MODE (mode)].insn_code)
833 != CODE_FOR_nothing))
835 temp = expand_binop (GET_MODE_WIDER_MODE (mode),
836 unsignedp ? umul_widen_optab : smul_widen_optab,
837 op0, op1, NULL_RTX, unsignedp, OPTAB_DIRECT);
841 if (GET_MODE_CLASS (mode) == MODE_INT)
842 return gen_lowpart (mode, temp);
844 return convert_to_mode (mode, temp, unsignedp);
848 /* Look for a wider mode of the same class for which we think we
849 can open-code the operation. Check for a widening multiply at the
850 wider mode as well. */
852 if ((class == MODE_INT || class == MODE_FLOAT || class == MODE_COMPLEX_FLOAT)
853 && methods != OPTAB_DIRECT && methods != OPTAB_LIB)
854 for (wider_mode = GET_MODE_WIDER_MODE (mode); wider_mode != VOIDmode;
855 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
857 if (binoptab->handlers[(int) wider_mode].insn_code != CODE_FOR_nothing
858 || (binoptab == smul_optab
859 && GET_MODE_WIDER_MODE (wider_mode) != VOIDmode
860 && (((unsignedp ? umul_widen_optab : smul_widen_optab)
861 ->handlers[(int) GET_MODE_WIDER_MODE (wider_mode)].insn_code)
862 != CODE_FOR_nothing)))
864 rtx xop0 = op0, xop1 = op1;
867 /* For certain integer operations, we need not actually extend
868 the narrow operands, as long as we will truncate
869 the results to the same narrowness. */
871 if ((binoptab == ior_optab || binoptab == and_optab
872 || binoptab == xor_optab
873 || binoptab == add_optab || binoptab == sub_optab
874 || binoptab == smul_optab || binoptab == ashl_optab)
875 && class == MODE_INT)
878 xop0 = widen_operand (xop0, wider_mode, mode, unsignedp, no_extend);
880 /* The second operand of a shift must always be extended. */
881 xop1 = widen_operand (xop1, wider_mode, mode, unsignedp,
882 no_extend && binoptab != ashl_optab);
884 temp = expand_binop (wider_mode, binoptab, xop0, xop1, NULL_RTX,
885 unsignedp, OPTAB_DIRECT);
888 if (class != MODE_INT)
891 target = gen_reg_rtx (mode);
892 convert_move (target, temp, 0);
896 return gen_lowpart (mode, temp);
899 delete_insns_since (last);
903 /* These can be done a word at a time. */
904 if ((binoptab == and_optab || binoptab == ior_optab || binoptab == xor_optab)
906 && GET_MODE_SIZE (mode) > UNITS_PER_WORD
907 && binoptab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing)
913 /* If TARGET is the same as one of the operands, the REG_EQUAL note
914 won't be accurate, so use a new target. */
915 if (target == 0 || target == op0 || target == op1)
916 target = gen_reg_rtx (mode);
920 /* Do the actual arithmetic. */
921 for (i = 0; i < GET_MODE_BITSIZE (mode) / BITS_PER_WORD; i++)
923 rtx target_piece = operand_subword (target, i, 1, mode);
924 rtx x = expand_binop (word_mode, binoptab,
925 operand_subword_force (op0, i, mode),
926 operand_subword_force (op1, i, mode),
927 target_piece, unsignedp, next_methods);
932 if (target_piece != x)
933 emit_move_insn (target_piece, x);
936 insns = get_insns ();
939 if (i == GET_MODE_BITSIZE (mode) / BITS_PER_WORD)
941 if (binoptab->code != UNKNOWN)
943 = gen_rtx_fmt_ee (binoptab->code, mode,
944 copy_rtx (op0), copy_rtx (op1));
948 emit_no_conflict_block (insns, target, op0, op1, equiv_value);
953 /* Synthesize double word shifts from single word shifts. */
954 if ((binoptab == lshr_optab || binoptab == ashl_optab
955 || binoptab == ashr_optab)
957 && GET_CODE (op1) == CONST_INT
958 && GET_MODE_SIZE (mode) == 2 * UNITS_PER_WORD
959 && binoptab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing
960 && ashl_optab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing
961 && lshr_optab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing)
963 rtx insns, inter, equiv_value;
964 rtx into_target, outof_target;
965 rtx into_input, outof_input;
966 int shift_count, left_shift, outof_word;
968 /* If TARGET is the same as one of the operands, the REG_EQUAL note
969 won't be accurate, so use a new target. */
970 if (target == 0 || target == op0 || target == op1)
971 target = gen_reg_rtx (mode);
975 shift_count = INTVAL (op1);
977 /* OUTOF_* is the word we are shifting bits away from, and
978 INTO_* is the word that we are shifting bits towards, thus
979 they differ depending on the direction of the shift and
982 left_shift = binoptab == ashl_optab;
983 outof_word = left_shift ^ ! WORDS_BIG_ENDIAN;
985 outof_target = operand_subword (target, outof_word, 1, mode);
986 into_target = operand_subword (target, 1 - outof_word, 1, mode);
988 outof_input = operand_subword_force (op0, outof_word, mode);
989 into_input = operand_subword_force (op0, 1 - outof_word, mode);
991 if (shift_count >= BITS_PER_WORD)
993 inter = expand_binop (word_mode, binoptab,
995 GEN_INT (shift_count - BITS_PER_WORD),
996 into_target, unsignedp, next_methods);
998 if (inter != 0 && inter != into_target)
999 emit_move_insn (into_target, inter);
1001 /* For a signed right shift, we must fill the word we are shifting
1002 out of with copies of the sign bit. Otherwise it is zeroed. */
1003 if (inter != 0 && binoptab != ashr_optab)
1004 inter = CONST0_RTX (word_mode);
1005 else if (inter != 0)
1006 inter = expand_binop (word_mode, binoptab,
1008 GEN_INT (BITS_PER_WORD - 1),
1009 outof_target, unsignedp, next_methods);
1011 if (inter != 0 && inter != outof_target)
1012 emit_move_insn (outof_target, inter);
1017 optab reverse_unsigned_shift, unsigned_shift;
1019 /* For a shift of less then BITS_PER_WORD, to compute the carry,
1020 we must do a logical shift in the opposite direction of the
1023 reverse_unsigned_shift = (left_shift ? lshr_optab : ashl_optab);
1025 /* For a shift of less than BITS_PER_WORD, to compute the word
1026 shifted towards, we need to unsigned shift the orig value of
1029 unsigned_shift = (left_shift ? ashl_optab : lshr_optab);
1031 carries = expand_binop (word_mode, reverse_unsigned_shift,
1033 GEN_INT (BITS_PER_WORD - shift_count),
1034 0, unsignedp, next_methods);
1039 inter = expand_binop (word_mode, unsigned_shift, into_input,
1040 op1, 0, unsignedp, next_methods);
1043 inter = expand_binop (word_mode, ior_optab, carries, inter,
1044 into_target, unsignedp, next_methods);
1046 if (inter != 0 && inter != into_target)
1047 emit_move_insn (into_target, inter);
1050 inter = expand_binop (word_mode, binoptab, outof_input,
1051 op1, outof_target, unsignedp, next_methods);
1053 if (inter != 0 && inter != outof_target)
1054 emit_move_insn (outof_target, inter);
1057 insns = get_insns ();
1062 if (binoptab->code != UNKNOWN)
1063 equiv_value = gen_rtx_fmt_ee (binoptab->code, mode, op0, op1);
1067 emit_no_conflict_block (insns, target, op0, op1, equiv_value);
1072 /* Synthesize double word rotates from single word shifts. */
1073 if ((binoptab == rotl_optab || binoptab == rotr_optab)
1074 && class == MODE_INT
1075 && GET_CODE (op1) == CONST_INT
1076 && GET_MODE_SIZE (mode) == 2 * UNITS_PER_WORD
1077 && ashl_optab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing
1078 && lshr_optab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing)
1080 rtx insns, equiv_value;
1081 rtx into_target, outof_target;
1082 rtx into_input, outof_input;
1084 int shift_count, left_shift, outof_word;
1086 /* If TARGET is the same as one of the operands, the REG_EQUAL note
1087 won't be accurate, so use a new target. */
1088 if (target == 0 || target == op0 || target == op1)
1089 target = gen_reg_rtx (mode);
1093 shift_count = INTVAL (op1);
1095 /* OUTOF_* is the word we are shifting bits away from, and
1096 INTO_* is the word that we are shifting bits towards, thus
1097 they differ depending on the direction of the shift and
1098 WORDS_BIG_ENDIAN. */
1100 left_shift = (binoptab == rotl_optab);
1101 outof_word = left_shift ^ ! WORDS_BIG_ENDIAN;
1103 outof_target = operand_subword (target, outof_word, 1, mode);
1104 into_target = operand_subword (target, 1 - outof_word, 1, mode);
1106 outof_input = operand_subword_force (op0, outof_word, mode);
1107 into_input = operand_subword_force (op0, 1 - outof_word, mode);
1109 if (shift_count == BITS_PER_WORD)
1111 /* This is just a word swap. */
1112 emit_move_insn (outof_target, into_input);
1113 emit_move_insn (into_target, outof_input);
1118 rtx into_temp1, into_temp2, outof_temp1, outof_temp2;
1119 rtx first_shift_count, second_shift_count;
1120 optab reverse_unsigned_shift, unsigned_shift;
1122 reverse_unsigned_shift = (left_shift ^ (shift_count < BITS_PER_WORD)
1123 ? lshr_optab : ashl_optab);
1125 unsigned_shift = (left_shift ^ (shift_count < BITS_PER_WORD)
1126 ? ashl_optab : lshr_optab);
1128 if (shift_count > BITS_PER_WORD)
1130 first_shift_count = GEN_INT (shift_count - BITS_PER_WORD);
1131 second_shift_count = GEN_INT (2 * BITS_PER_WORD - shift_count);
1135 first_shift_count = GEN_INT (BITS_PER_WORD - shift_count);
1136 second_shift_count = GEN_INT (shift_count);
1139 into_temp1 = expand_binop (word_mode, unsigned_shift,
1140 outof_input, first_shift_count,
1141 NULL_RTX, unsignedp, next_methods);
1142 into_temp2 = expand_binop (word_mode, reverse_unsigned_shift,
1143 into_input, second_shift_count,
1144 NULL_RTX, unsignedp, next_methods);
1146 if (into_temp1 != 0 && into_temp2 != 0)
1147 inter = expand_binop (word_mode, ior_optab, into_temp1, into_temp2,
1148 into_target, unsignedp, next_methods);
1152 if (inter != 0 && inter != into_target)
1153 emit_move_insn (into_target, inter);
1155 outof_temp1 = expand_binop (word_mode, unsigned_shift,
1156 into_input, first_shift_count,
1157 NULL_RTX, unsignedp, next_methods);
1158 outof_temp2 = expand_binop (word_mode, reverse_unsigned_shift,
1159 outof_input, second_shift_count,
1160 NULL_RTX, unsignedp, next_methods);
1162 if (inter != 0 && outof_temp1 != 0 && outof_temp2 != 0)
1163 inter = expand_binop (word_mode, ior_optab,
1164 outof_temp1, outof_temp2,
1165 outof_target, unsignedp, next_methods);
1167 if (inter != 0 && inter != outof_target)
1168 emit_move_insn (outof_target, inter);
1171 insns = get_insns ();
1176 if (binoptab->code != UNKNOWN)
1177 equiv_value = gen_rtx_fmt_ee (binoptab->code, mode, op0, op1);
1181 /* We can't make this a no conflict block if this is a word swap,
1182 because the word swap case fails if the input and output values
1183 are in the same register. */
1184 if (shift_count != BITS_PER_WORD)
1185 emit_no_conflict_block (insns, target, op0, op1, equiv_value);
1194 /* These can be done a word at a time by propagating carries. */
1195 if ((binoptab == add_optab || binoptab == sub_optab)
1196 && class == MODE_INT
1197 && GET_MODE_SIZE (mode) >= 2 * UNITS_PER_WORD
1198 && binoptab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing)
1201 optab otheroptab = binoptab == add_optab ? sub_optab : add_optab;
1202 const unsigned int nwords = GET_MODE_BITSIZE (mode) / BITS_PER_WORD;
1203 rtx carry_in = NULL_RTX, carry_out = NULL_RTX;
1204 rtx xop0, xop1, xtarget;
1206 /* We can handle either a 1 or -1 value for the carry. If STORE_FLAG
1207 value is one of those, use it. Otherwise, use 1 since it is the
1208 one easiest to get. */
1209 #if STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1
1210 int normalizep = STORE_FLAG_VALUE;
1215 /* Prepare the operands. */
1216 xop0 = force_reg (mode, op0);
1217 xop1 = force_reg (mode, op1);
1219 xtarget = gen_reg_rtx (mode);
1221 if (target == 0 || GET_CODE (target) != REG)
1224 /* Indicate for flow that the entire target reg is being set. */
1225 if (GET_CODE (target) == REG)
1226 emit_insn (gen_rtx_CLOBBER (VOIDmode, xtarget));
1228 /* Do the actual arithmetic. */
1229 for (i = 0; i < nwords; i++)
1231 int index = (WORDS_BIG_ENDIAN ? nwords - i - 1 : i);
1232 rtx target_piece = operand_subword (xtarget, index, 1, mode);
1233 rtx op0_piece = operand_subword_force (xop0, index, mode);
1234 rtx op1_piece = operand_subword_force (xop1, index, mode);
1237 /* Main add/subtract of the input operands. */
1238 x = expand_binop (word_mode, binoptab,
1239 op0_piece, op1_piece,
1240 target_piece, unsignedp, next_methods);
1246 /* Store carry from main add/subtract. */
1247 carry_out = gen_reg_rtx (word_mode);
1248 carry_out = emit_store_flag_force (carry_out,
1249 (binoptab == add_optab
1252 word_mode, 1, normalizep);
1259 /* Add/subtract previous carry to main result. */
1260 newx = expand_binop (word_mode,
1261 normalizep == 1 ? binoptab : otheroptab,
1263 NULL_RTX, 1, next_methods);
1267 /* Get out carry from adding/subtracting carry in. */
1268 rtx carry_tmp = gen_reg_rtx (word_mode);
1269 carry_tmp = emit_store_flag_force (carry_tmp,
1270 (binoptab == add_optab
1273 word_mode, 1, normalizep);
1275 /* Logical-ior the two poss. carry together. */
1276 carry_out = expand_binop (word_mode, ior_optab,
1277 carry_out, carry_tmp,
1278 carry_out, 0, next_methods);
1282 emit_move_insn (target_piece, newx);
1285 carry_in = carry_out;
1288 if (i == GET_MODE_BITSIZE (mode) / (unsigned) BITS_PER_WORD)
1290 if (mov_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing
1291 || ! rtx_equal_p (target, xtarget))
1293 rtx temp = emit_move_insn (target, xtarget);
1295 set_unique_reg_note (temp,
1297 gen_rtx_fmt_ee (binoptab->code, mode,
1308 delete_insns_since (last);
1311 /* If we want to multiply two two-word values and have normal and widening
1312 multiplies of single-word values, we can do this with three smaller
1313 multiplications. Note that we do not make a REG_NO_CONFLICT block here
1314 because we are not operating on one word at a time.
1316 The multiplication proceeds as follows:
1317 _______________________
1318 [__op0_high_|__op0_low__]
1319 _______________________
1320 * [__op1_high_|__op1_low__]
1321 _______________________________________________
1322 _______________________
1323 (1) [__op0_low__*__op1_low__]
1324 _______________________
1325 (2a) [__op0_low__*__op1_high_]
1326 _______________________
1327 (2b) [__op0_high_*__op1_low__]
1328 _______________________
1329 (3) [__op0_high_*__op1_high_]
1332 This gives a 4-word result. Since we are only interested in the
1333 lower 2 words, partial result (3) and the upper words of (2a) and
1334 (2b) don't need to be calculated. Hence (2a) and (2b) can be
1335 calculated using non-widening multiplication.
1337 (1), however, needs to be calculated with an unsigned widening
1338 multiplication. If this operation is not directly supported we
1339 try using a signed widening multiplication and adjust the result.
1340 This adjustment works as follows:
1342 If both operands are positive then no adjustment is needed.
1344 If the operands have different signs, for example op0_low < 0 and
1345 op1_low >= 0, the instruction treats the most significant bit of
1346 op0_low as a sign bit instead of a bit with significance
1347 2**(BITS_PER_WORD-1), i.e. the instruction multiplies op1_low
1348 with 2**BITS_PER_WORD - op0_low, and two's complements the
1349 result. Conclusion: We need to add op1_low * 2**BITS_PER_WORD to
1352 Similarly, if both operands are negative, we need to add
1353 (op0_low + op1_low) * 2**BITS_PER_WORD.
1355 We use a trick to adjust quickly. We logically shift op0_low right
1356 (op1_low) BITS_PER_WORD-1 steps to get 0 or 1, and add this to
1357 op0_high (op1_high) before it is used to calculate 2b (2a). If no
1358 logical shift exists, we do an arithmetic right shift and subtract
1361 if (binoptab == smul_optab
1362 && class == MODE_INT
1363 && GET_MODE_SIZE (mode) == 2 * UNITS_PER_WORD
1364 && smul_optab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing
1365 && add_optab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing
1366 && ((umul_widen_optab->handlers[(int) mode].insn_code
1367 != CODE_FOR_nothing)
1368 || (smul_widen_optab->handlers[(int) mode].insn_code
1369 != CODE_FOR_nothing)))
1371 int low = (WORDS_BIG_ENDIAN ? 1 : 0);
1372 int high = (WORDS_BIG_ENDIAN ? 0 : 1);
1373 rtx op0_high = operand_subword_force (op0, high, mode);
1374 rtx op0_low = operand_subword_force (op0, low, mode);
1375 rtx op1_high = operand_subword_force (op1, high, mode);
1376 rtx op1_low = operand_subword_force (op1, low, mode);
1378 rtx op0_xhigh = NULL_RTX;
1379 rtx op1_xhigh = NULL_RTX;
1381 /* If the target is the same as one of the inputs, don't use it. This
1382 prevents problems with the REG_EQUAL note. */
1383 if (target == op0 || target == op1
1384 || (target != 0 && GET_CODE (target) != REG))
1387 /* Multiply the two lower words to get a double-word product.
1388 If unsigned widening multiplication is available, use that;
1389 otherwise use the signed form and compensate. */
1391 if (umul_widen_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
1393 product = expand_binop (mode, umul_widen_optab, op0_low, op1_low,
1394 target, 1, OPTAB_DIRECT);
1396 /* If we didn't succeed, delete everything we did so far. */
1398 delete_insns_since (last);
1400 op0_xhigh = op0_high, op1_xhigh = op1_high;
1404 && smul_widen_optab->handlers[(int) mode].insn_code
1405 != CODE_FOR_nothing)
1407 rtx wordm1 = GEN_INT (BITS_PER_WORD - 1);
1408 product = expand_binop (mode, smul_widen_optab, op0_low, op1_low,
1409 target, 1, OPTAB_DIRECT);
1410 op0_xhigh = expand_binop (word_mode, lshr_optab, op0_low, wordm1,
1411 NULL_RTX, 1, next_methods);
1413 op0_xhigh = expand_binop (word_mode, add_optab, op0_high,
1414 op0_xhigh, op0_xhigh, 0, next_methods);
1417 op0_xhigh = expand_binop (word_mode, ashr_optab, op0_low, wordm1,
1418 NULL_RTX, 0, next_methods);
1420 op0_xhigh = expand_binop (word_mode, sub_optab, op0_high,
1421 op0_xhigh, op0_xhigh, 0,
1425 op1_xhigh = expand_binop (word_mode, lshr_optab, op1_low, wordm1,
1426 NULL_RTX, 1, next_methods);
1428 op1_xhigh = expand_binop (word_mode, add_optab, op1_high,
1429 op1_xhigh, op1_xhigh, 0, next_methods);
1432 op1_xhigh = expand_binop (word_mode, ashr_optab, op1_low, wordm1,
1433 NULL_RTX, 0, next_methods);
1435 op1_xhigh = expand_binop (word_mode, sub_optab, op1_high,
1436 op1_xhigh, op1_xhigh, 0,
1441 /* If we have been able to directly compute the product of the
1442 low-order words of the operands and perform any required adjustments
1443 of the operands, we proceed by trying two more multiplications
1444 and then computing the appropriate sum.
1446 We have checked above that the required addition is provided.
1447 Full-word addition will normally always succeed, especially if
1448 it is provided at all, so we don't worry about its failure. The
1449 multiplication may well fail, however, so we do handle that. */
1451 if (product && op0_xhigh && op1_xhigh)
1453 rtx product_high = operand_subword (product, high, 1, mode);
1454 rtx temp = expand_binop (word_mode, binoptab, op0_low, op1_xhigh,
1455 NULL_RTX, 0, OPTAB_DIRECT);
1457 if (!REG_P (product_high))
1458 product_high = force_reg (word_mode, product_high);
1461 temp = expand_binop (word_mode, add_optab, temp, product_high,
1462 product_high, 0, next_methods);
1464 if (temp != 0 && temp != product_high)
1465 emit_move_insn (product_high, temp);
1468 temp = expand_binop (word_mode, binoptab, op1_low, op0_xhigh,
1469 NULL_RTX, 0, OPTAB_DIRECT);
1472 temp = expand_binop (word_mode, add_optab, temp,
1473 product_high, product_high,
1476 if (temp != 0 && temp != product_high)
1477 emit_move_insn (product_high, temp);
1479 emit_move_insn (operand_subword (product, high, 1, mode), product_high);
1483 if (mov_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
1485 temp = emit_move_insn (product, product);
1486 set_unique_reg_note (temp,
1488 gen_rtx_fmt_ee (MULT, mode,
1497 /* If we get here, we couldn't do it for some reason even though we
1498 originally thought we could. Delete anything we've emitted in
1501 delete_insns_since (last);
1504 /* Open-code the vector operations if we have no hardware support
1506 if (class == MODE_VECTOR_INT || class == MODE_VECTOR_FLOAT)
1507 return expand_vector_binop (mode, binoptab, op0, op1, target,
1508 unsignedp, methods);
1510 /* We need to open-code the complex type operations: '+, -, * and /' */
1512 /* At this point we allow operations between two similar complex
1513 numbers, and also if one of the operands is not a complex number
1514 but rather of MODE_FLOAT or MODE_INT. However, the caller
1515 must make sure that the MODE of the non-complex operand matches
1516 the SUBMODE of the complex operand. */
1518 if (class == MODE_COMPLEX_FLOAT || class == MODE_COMPLEX_INT)
1520 rtx real0 = 0, imag0 = 0;
1521 rtx real1 = 0, imag1 = 0;
1522 rtx realr, imagr, res;
1527 /* Find the correct mode for the real and imaginary parts. */
1528 enum machine_mode submode = GET_MODE_INNER(mode);
1530 if (submode == BLKmode)
1534 target = gen_reg_rtx (mode);
1538 realr = gen_realpart (submode, target);
1539 imagr = gen_imagpart (submode, target);
1541 if (GET_MODE (op0) == mode)
1543 real0 = gen_realpart (submode, op0);
1544 imag0 = gen_imagpart (submode, op0);
1549 if (GET_MODE (op1) == mode)
1551 real1 = gen_realpart (submode, op1);
1552 imag1 = gen_imagpart (submode, op1);
1557 if (real0 == 0 || real1 == 0 || ! (imag0 != 0 || imag1 != 0))
1560 switch (binoptab->code)
1563 /* (a+ib) + (c+id) = (a+c) + i(b+d) */
1565 /* (a+ib) - (c+id) = (a-c) + i(b-d) */
1566 res = expand_binop (submode, binoptab, real0, real1,
1567 realr, unsignedp, methods);
1571 else if (res != realr)
1572 emit_move_insn (realr, res);
1574 if (imag0 != 0 && imag1 != 0)
1575 res = expand_binop (submode, binoptab, imag0, imag1,
1576 imagr, unsignedp, methods);
1577 else if (imag0 != 0)
1579 else if (binoptab->code == MINUS)
1580 res = expand_unop (submode,
1581 binoptab == subv_optab ? negv_optab : neg_optab,
1582 imag1, imagr, unsignedp);
1588 else if (res != imagr)
1589 emit_move_insn (imagr, res);
1595 /* (a+ib) * (c+id) = (ac-bd) + i(ad+cb) */
1597 if (imag0 != 0 && imag1 != 0)
1601 /* Don't fetch these from memory more than once. */
1602 real0 = force_reg (submode, real0);
1603 real1 = force_reg (submode, real1);
1604 imag0 = force_reg (submode, imag0);
1605 imag1 = force_reg (submode, imag1);
1607 temp1 = expand_binop (submode, binoptab, real0, real1, NULL_RTX,
1608 unsignedp, methods);
1610 temp2 = expand_binop (submode, binoptab, imag0, imag1, NULL_RTX,
1611 unsignedp, methods);
1613 if (temp1 == 0 || temp2 == 0)
1618 binoptab == smulv_optab ? subv_optab : sub_optab,
1619 temp1, temp2, realr, unsignedp, methods));
1623 else if (res != realr)
1624 emit_move_insn (realr, res);
1626 temp1 = expand_binop (submode, binoptab, real0, imag1,
1627 NULL_RTX, unsignedp, methods);
1629 /* Avoid expanding redundant multiplication for the common
1630 case of squaring a complex number. */
1631 if (rtx_equal_p (real0, real1) && rtx_equal_p (imag0, imag1))
1634 temp2 = expand_binop (submode, binoptab, real1, imag0,
1635 NULL_RTX, unsignedp, methods);
1637 if (temp1 == 0 || temp2 == 0)
1642 binoptab == smulv_optab ? addv_optab : add_optab,
1643 temp1, temp2, imagr, unsignedp, methods));
1647 else if (res != imagr)
1648 emit_move_insn (imagr, res);
1654 /* Don't fetch these from memory more than once. */
1655 real0 = force_reg (submode, real0);
1656 real1 = force_reg (submode, real1);
1658 res = expand_binop (submode, binoptab, real0, real1,
1659 realr, unsignedp, methods);
1662 else if (res != realr)
1663 emit_move_insn (realr, res);
1666 res = expand_binop (submode, binoptab,
1667 real1, imag0, imagr, unsignedp, methods);
1669 res = expand_binop (submode, binoptab,
1670 real0, imag1, imagr, unsignedp, methods);
1674 else if (res != imagr)
1675 emit_move_insn (imagr, res);
1682 /* (a+ib) / (c+id) = ((ac+bd)/(cc+dd)) + i((bc-ad)/(cc+dd)) */
1686 /* (a+ib) / (c+i0) = (a/c) + i(b/c) */
1688 /* Don't fetch these from memory more than once. */
1689 real1 = force_reg (submode, real1);
1691 /* Simply divide the real and imaginary parts by `c' */
1692 if (class == MODE_COMPLEX_FLOAT)
1693 res = expand_binop (submode, binoptab, real0, real1,
1694 realr, unsignedp, methods);
1696 res = expand_divmod (0, TRUNC_DIV_EXPR, submode,
1697 real0, real1, realr, unsignedp);
1701 else if (res != realr)
1702 emit_move_insn (realr, res);
1704 if (class == MODE_COMPLEX_FLOAT)
1705 res = expand_binop (submode, binoptab, imag0, real1,
1706 imagr, unsignedp, methods);
1708 res = expand_divmod (0, TRUNC_DIV_EXPR, submode,
1709 imag0, real1, imagr, unsignedp);
1713 else if (res != imagr)
1714 emit_move_insn (imagr, res);
1720 switch (flag_complex_divide_method)
1723 ok = expand_cmplxdiv_straight (real0, real1, imag0, imag1,
1724 realr, imagr, submode,
1730 ok = expand_cmplxdiv_wide (real0, real1, imag0, imag1,
1731 realr, imagr, submode,
1751 if (binoptab->code != UNKNOWN)
1753 = gen_rtx_fmt_ee (binoptab->code, mode,
1754 copy_rtx (op0), copy_rtx (op1));
1758 emit_no_conflict_block (seq, target, op0, op1, equiv_value);
1764 /* It can't be open-coded in this mode.
1765 Use a library call if one is available and caller says that's ok. */
1767 if (binoptab->handlers[(int) mode].libfunc
1768 && (methods == OPTAB_LIB || methods == OPTAB_LIB_WIDEN))
1772 enum machine_mode op1_mode = mode;
1779 op1_mode = word_mode;
1780 /* Specify unsigned here,
1781 since negative shift counts are meaningless. */
1782 op1x = convert_to_mode (word_mode, op1, 1);
1785 if (GET_MODE (op0) != VOIDmode
1786 && GET_MODE (op0) != mode)
1787 op0 = convert_to_mode (mode, op0, unsignedp);
1789 /* Pass 1 for NO_QUEUE so we don't lose any increments
1790 if the libcall is cse'd or moved. */
1791 value = emit_library_call_value (binoptab->handlers[(int) mode].libfunc,
1792 NULL_RTX, LCT_CONST, mode, 2,
1793 op0, mode, op1x, op1_mode);
1795 insns = get_insns ();
1798 target = gen_reg_rtx (mode);
1799 emit_libcall_block (insns, target, value,
1800 gen_rtx_fmt_ee (binoptab->code, mode, op0, op1));
1805 delete_insns_since (last);
1807 /* It can't be done in this mode. Can we do it in a wider mode? */
1809 if (! (methods == OPTAB_WIDEN || methods == OPTAB_LIB_WIDEN
1810 || methods == OPTAB_MUST_WIDEN))
1812 /* Caller says, don't even try. */
1813 delete_insns_since (entry_last);
1817 /* Compute the value of METHODS to pass to recursive calls.
1818 Don't allow widening to be tried recursively. */
1820 methods = (methods == OPTAB_LIB_WIDEN ? OPTAB_LIB : OPTAB_DIRECT);
1822 /* Look for a wider mode of the same class for which it appears we can do
1825 if (class == MODE_INT || class == MODE_FLOAT || class == MODE_COMPLEX_FLOAT)
1827 for (wider_mode = GET_MODE_WIDER_MODE (mode); wider_mode != VOIDmode;
1828 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
1830 if ((binoptab->handlers[(int) wider_mode].insn_code
1831 != CODE_FOR_nothing)
1832 || (methods == OPTAB_LIB
1833 && binoptab->handlers[(int) wider_mode].libfunc))
1835 rtx xop0 = op0, xop1 = op1;
1838 /* For certain integer operations, we need not actually extend
1839 the narrow operands, as long as we will truncate
1840 the results to the same narrowness. */
1842 if ((binoptab == ior_optab || binoptab == and_optab
1843 || binoptab == xor_optab
1844 || binoptab == add_optab || binoptab == sub_optab
1845 || binoptab == smul_optab || binoptab == ashl_optab)
1846 && class == MODE_INT)
1849 xop0 = widen_operand (xop0, wider_mode, mode,
1850 unsignedp, no_extend);
1852 /* The second operand of a shift must always be extended. */
1853 xop1 = widen_operand (xop1, wider_mode, mode, unsignedp,
1854 no_extend && binoptab != ashl_optab);
1856 temp = expand_binop (wider_mode, binoptab, xop0, xop1, NULL_RTX,
1857 unsignedp, methods);
1860 if (class != MODE_INT)
1863 target = gen_reg_rtx (mode);
1864 convert_move (target, temp, 0);
1868 return gen_lowpart (mode, temp);
1871 delete_insns_since (last);
1876 delete_insns_since (entry_last);
1880 /* Like expand_binop, but for open-coding vectors binops. */
/* NOTE(review): this extract elides some original source lines (the
   embedded line numbers are non-contiguous), so bodies of several
   conditionals below are not shown.  Code left byte-identical.  */
1883 expand_vector_binop (enum machine_mode mode, optab binoptab, rtx op0,
1884 rtx op1, rtx target, int unsignedp,
1885 enum optab_methods methods)
1887 enum machine_mode submode, tmode;
1888 int size, elts, subsize, subbitsize, i;
1889 rtx t, a, b, res, seq;
1890 enum mode_class class;
1892 class = GET_MODE_CLASS (mode);
1894 size = GET_MODE_SIZE (mode);
1895 submode = GET_MODE_INNER (mode);
1897 /* Search for the widest vector mode with the same inner mode that is
1898 still narrower than MODE and that allows to open-code this operator.
1899 Note, if we find such a mode and the handler later decides it can't
1900 do the expansion, we'll be called recursively with the narrower mode. */
1901 for (tmode = GET_CLASS_NARROWEST_MODE (class);
1902 GET_MODE_SIZE (tmode) < GET_MODE_SIZE (mode);
1903 tmode = GET_MODE_WIDER_MODE (tmode))
1905 if (GET_MODE_INNER (tmode) == GET_MODE_INNER (mode)
1906 && binoptab->handlers[(int) tmode].insn_code != CODE_FOR_nothing)
/* Dispatch on the rtx code of the optab; the case labels and some
   actions are elided in this extract.  */
1910 switch (binoptab->code)
1915 tmode = int_mode_for_mode (mode);
1916 if (tmode != BLKmode)
/* Fall back to one element at a time: ELTS pieces of SUBSIZE bytes.  */
1922 subsize = GET_MODE_SIZE (submode);
1923 subbitsize = GET_MODE_BITSIZE (submode);
1924 elts = size / subsize;
1926 /* If METHODS is OPTAB_DIRECT, we don't insist on the exact mode,
1927 but that we operate on more than one element at a time. */
1928 if (subsize == GET_MODE_UNIT_SIZE (mode) && methods == OPTAB_DIRECT)
1933 /* Errors can leave us with a const0_rtx as operand. */
1934 if (GET_MODE (op0) != mode)
1935 op0 = copy_to_mode_reg (mode, op0);
1936 if (GET_MODE (op1) != mode)
1937 op1 = copy_to_mode_reg (mode, op1);
1940 target = gen_reg_rtx (mode);
1942 for (i = 0; i < elts; ++i)
1944 /* If this is part of a register, and not the first item in the
1945 word, we can't store using a SUBREG - that would clobber
1947 And storing with a SUBREG is only possible for the least
1948 significant part, hence we can't do it for big endian
1949 (unless we want to permute the evaluation order. */
1950 if (GET_CODE (target) == REG
1951 && (BYTES_BIG_ENDIAN
1952 ? subsize < UNITS_PER_WORD
1953 : ((i * subsize) % UNITS_PER_WORD) != 0))
/* Constants can be subreg'd directly; non-constants go through
   extract_bit_field to pull element I out of the operand.  */
1956 t = simplify_gen_subreg (submode, target, mode, i * subsize);
1957 if (CONSTANT_P (op0))
1958 a = simplify_gen_subreg (submode, op0, mode, i * subsize);
1960 a = extract_bit_field (op0, subbitsize, i * subbitsize, unsignedp,
1961 NULL_RTX, submode, submode, size);
1962 if (CONSTANT_P (op1))
1963 b = simplify_gen_subreg (submode, op1, mode, i * subsize);
1965 b = extract_bit_field (op1, subbitsize, i * subbitsize, unsignedp,
1966 NULL_RTX, submode, submode, size);
/* Division is special-cased: float vectors use the optab, integer
   vectors go through expand_divmod.  */
1968 if (binoptab->code == DIV)
1970 if (class == MODE_VECTOR_FLOAT)
1971 res = expand_binop (submode, binoptab, a, b, t,
1972 unsignedp, methods);
1974 res = expand_divmod (0, TRUNC_DIV_EXPR, submode,
1975 a, b, t, unsignedp);
1978 res = expand_binop (submode, binoptab, a, b, t,
1979 unsignedp, methods);
1985 emit_move_insn (t, res);
1987 store_bit_field (target, subbitsize, i * subbitsize, submode, res,
2003 /* Like expand_unop but for open-coding vector unops. */
/* NOTE(review): sampled extract — some original lines are elided here,
   so several conditionals appear without their bodies.  Code left
   byte-identical; comments only.  */
2006 expand_vector_unop (enum machine_mode mode, optab unoptab, rtx op0,
2007 rtx target, int unsignedp)
2009 enum machine_mode submode, tmode;
2010 int size, elts, subsize, subbitsize, i;
2013 size = GET_MODE_SIZE (mode);
2014 submode = GET_MODE_INNER (mode);
2016 /* Search for the widest vector mode with the same inner mode that is
2017 still narrower than MODE and that allows to open-code this operator.
2018 Note, if we find such a mode and the handler later decides it can't
2019 do the expansion, we'll be called recursively with the narrower mode. */
2020 for (tmode = GET_CLASS_NARROWEST_MODE (GET_MODE_CLASS (mode));
2021 GET_MODE_SIZE (tmode) < GET_MODE_SIZE (mode);
2022 tmode = GET_MODE_WIDER_MODE (tmode))
2024 if (GET_MODE_INNER (tmode) == GET_MODE_INNER (mode)
2025 && unoptab->handlers[(int) tmode].insn_code != CODE_FOR_nothing)
2028 /* If there is no negate operation, try doing a subtract from zero. */
2029 if (unoptab == neg_optab && GET_MODE_CLASS (submode) == MODE_INT
2030 /* Avoid infinite recursion when an
2031 error has left us with the wrong mode. */
2032 && GET_MODE (op0) == mode)
2035 temp = expand_binop (mode, sub_optab, CONST0_RTX (mode), op0,
2036 target, unsignedp, OPTAB_DIRECT);
/* One's complement may be doable on an integer mode of the same size;
   the action taken when int_mode_for_mode succeeds is elided here.  */
2041 if (unoptab == one_cmpl_optab)
2043 tmode = int_mode_for_mode (mode);
2044 if (tmode != BLKmode)
/* Fall back to element-by-element expansion.  */
2048 subsize = GET_MODE_SIZE (submode);
2049 subbitsize = GET_MODE_BITSIZE (submode);
2050 elts = size / subsize;
2052 /* Errors can leave us with a const0_rtx as operand. */
2053 if (GET_MODE (op0) != mode)
2054 op0 = copy_to_mode_reg (mode, op0);
2057 target = gen_reg_rtx (mode);
2061 for (i = 0; i < elts; ++i)
2063 /* If this is part of a register, and not the first item in the
2064 word, we can't store using a SUBREG - that would clobber
2066 And storing with a SUBREG is only possible for the least
2067 significant part, hence we can't do it for big endian
2068 (unless we want to permute the evaluation order. */
2069 if (GET_CODE (target) == REG
2070 && (BYTES_BIG_ENDIAN
2071 ? subsize < UNITS_PER_WORD
2072 : ((i * subsize) % UNITS_PER_WORD) != 0))
2075 t = simplify_gen_subreg (submode, target, mode, i * subsize);
2076 if (CONSTANT_P (op0))
2077 a = simplify_gen_subreg (submode, op0, mode, i * subsize);
2079 a = extract_bit_field (op0, subbitsize, i * subbitsize, unsignedp,
2080 t, submode, submode, size);
2082 res = expand_unop (submode, unoptab, a, t, unsignedp);
2085 emit_move_insn (t, res);
2087 store_bit_field (target, subbitsize, i * subbitsize, submode, res,
2098 /* Expand a binary operator which has both signed and unsigned forms.
2099 UOPTAB is the optab for unsigned operations, and SOPTAB is for
2102 If we widen unsigned operands, we may use a signed wider operation instead
2103 of an unsigned wider operation, since the result would be the same. */
/* NOTE(review): sampled extract — the early-return lines after several
   of the expand_binop attempts are elided.  Code left byte-identical.
   Strategy visible below: try direct, then widen (signed fake optab,
   then unsigned), then libcall, then widen+libcall.  */
2106 sign_expand_binop (enum machine_mode mode, optab uoptab, optab soptab,
2107 rtx op0, rtx op1, rtx target, int unsignedp,
2108 enum optab_methods methods)
2111 optab direct_optab = unsignedp ? uoptab : soptab;
2112 struct optab wide_soptab;
2114 /* Do it without widening, if possible. */
2115 temp = expand_binop (mode, direct_optab, op0, op1, target,
2116 unsignedp, OPTAB_DIRECT);
2117 if (temp || methods == OPTAB_DIRECT)
2120 /* Try widening to a signed int. Make a fake signed optab that
2121 hides any signed insn for direct use. */
/* The copy is mutated locally: clearing this mode's insn_code/libfunc
   forces the widening path rather than the direct one.  */
2122 wide_soptab = *soptab;
2123 wide_soptab.handlers[(int) mode].insn_code = CODE_FOR_nothing;
2124 wide_soptab.handlers[(int) mode].libfunc = 0;
2126 temp = expand_binop (mode, &wide_soptab, op0, op1, target,
2127 unsignedp, OPTAB_WIDEN);
2129 /* For unsigned operands, try widening to an unsigned int. */
2130 if (temp == 0 && unsignedp)
2131 temp = expand_binop (mode, uoptab, op0, op1, target,
2132 unsignedp, OPTAB_WIDEN);
2133 if (temp || methods == OPTAB_WIDEN)
2136 /* Use the right width lib call if that exists. */
2137 temp = expand_binop (mode, direct_optab, op0, op1, target, unsignedp, OPTAB_LIB);
2138 if (temp || methods == OPTAB_LIB)
2141 /* Must widen and use a lib call, use either signed or unsigned. */
2142 temp = expand_binop (mode, &wide_soptab, op0, op1, target,
2143 unsignedp, methods);
2147 return expand_binop (mode, uoptab, op0, op1, target,
2148 unsignedp, methods);
2152 /* Generate code to perform an operation specified by BINOPTAB
2153 on operands OP0 and OP1, with two results to TARG1 and TARG2.
2154 We assume that the order of the operands for the instruction
2155 is TARG0, OP0, OP1, TARG1, which would fit a pattern like
2156 [(set TARG0 (operate OP0 OP1)) (set TARG1 (operate ...))].
2158 Either TARG0 or TARG1 may be zero, but what that means is that
2159 the result is not actually wanted. We will generate it into
2160 a dummy pseudo-reg and discard it. They may not both be zero.
2162 Returns 1 if this operation can be performed; 0 if not. */
/* NOTE(review): sampled extract — lines are elided throughout (e.g. the
   branches that allocate dummy targets, emit the pattern, and the
   success returns are not shown).  Code left byte-identical.  */
2165 expand_twoval_binop (optab binoptab, rtx op0, rtx op1, rtx targ0, rtx targ1,
2168 enum machine_mode mode = GET_MODE (targ0 ? targ0 : targ1);
2169 enum mode_class class;
2170 enum machine_mode wider_mode;
2171 rtx entry_last = get_last_insn ();
2174 class = GET_MODE_CLASS (mode);
2176 op0 = protect_from_queue (op0, 0);
2177 op1 = protect_from_queue (op1, 0);
2181 op0 = force_not_mem (op0);
2182 op1 = force_not_mem (op1);
2185 /* If we are inside an appropriately-short loop and one operand is an
2186 expensive constant, force it into a register. */
2187 if (CONSTANT_P (op0) && preserve_subexpressions_p ()
2188 && rtx_cost (op0, binoptab->code) > COSTS_N_INSNS (1))
2189 op0 = force_reg (mode, op0);
2191 if (CONSTANT_P (op1) && preserve_subexpressions_p ()
2192 && rtx_cost (op1, binoptab->code) > COSTS_N_INSNS (1))
2193 op1 = force_reg (mode, op1);
/* An unwanted (zero) target gets a throwaway pseudo.  */
2196 targ0 = protect_from_queue (targ0, 1);
2198 targ0 = gen_reg_rtx (mode);
2200 targ1 = protect_from_queue (targ1, 1);
2202 targ1 = gen_reg_rtx (mode);
2204 /* Record where to go back to if we fail. */
2205 last = get_last_insn ();
2207 if (binoptab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
2209 int icode = (int) binoptab->handlers[(int) mode].insn_code;
2210 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
2211 enum machine_mode mode1 = insn_data[icode].operand[2].mode;
2213 rtx xop0 = op0, xop1 = op1;
2215 /* In case the insn wants input operands in modes different from
2216 those of the actual operands, convert the operands. It would
2217 seem that we don't need to convert CONST_INTs, but we do, so
2218 that they're properly zero-extended, sign-extended or truncated
2221 if (GET_MODE (op0) != mode0 && mode0 != VOIDmode)
2222 xop0 = convert_modes (mode0,
2223 GET_MODE (op0) != VOIDmode
2228 if (GET_MODE (op1) != mode1 && mode1 != VOIDmode)
2229 xop1 = convert_modes (mode1,
2230 GET_MODE (op1) != VOIDmode
2235 /* Now, if insn doesn't accept these operands, put them into pseudos. */
2236 if (! (*insn_data[icode].operand[1].predicate) (xop0, mode0))
2237 xop0 = copy_to_mode_reg (mode0, xop0);
2239 if (! (*insn_data[icode].operand[2].predicate) (xop1, mode1))
2240 xop1 = copy_to_mode_reg (mode1, xop1);
2242 /* We could handle this, but we should always be called with a pseudo
2243 for our targets and all insns should take them as outputs. */
2244 if (! (*insn_data[icode].operand[0].predicate) (targ0, mode)
2245 || ! (*insn_data[icode].operand[3].predicate) (targ1, mode))
2248 pat = GEN_FCN (icode) (targ0, xop0, xop1, targ1);
2255 delete_insns_since (last);
2258 /* It can't be done in this mode. Can we do it in a wider mode? */
2260 if (class == MODE_INT || class == MODE_FLOAT || class == MODE_COMPLEX_FLOAT)
2262 for (wider_mode = GET_MODE_WIDER_MODE (mode); wider_mode != VOIDmode;
2263 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
2265 if (binoptab->handlers[(int) wider_mode].insn_code
2266 != CODE_FOR_nothing)
/* Recurse in the wider mode, then narrow both results back.  */
2268 rtx t0 = gen_reg_rtx (wider_mode);
2269 rtx t1 = gen_reg_rtx (wider_mode);
2270 rtx cop0 = convert_modes (wider_mode, mode, op0, unsignedp);
2271 rtx cop1 = convert_modes (wider_mode, mode, op1, unsignedp);
2273 if (expand_twoval_binop (binoptab, cop0, cop1,
2276 convert_move (targ0, t0, unsignedp);
2277 convert_move (targ1, t1, unsignedp);
2281 delete_insns_since (last);
/* Total failure: discard everything emitted since entry.  */
2286 delete_insns_since (entry_last);
2290 /* Wrapper around expand_unop which takes an rtx code to specify
2291 the operation to perform, not an optab pointer. All other
2292 arguments are the same. */
/* NOTE(review): sampled extract — intervening lines (presumably a check
   that the optab lookup succeeded) are elided.  Code left byte-identical.  */
2294 expand_simple_unop (enum machine_mode mode, enum rtx_code code, rtx op0,
2295 rtx target, int unsignedp)
2297 optab unop = code_to_optab[(int) code];
2301 return expand_unop (mode, unop, op0, target, unsignedp);
2307 (clz:wide (zero_extend:wide x)) - ((width wide) - (width narrow)). */
/* NOTE(review): sampled extract — the success-return and final-failure
   return lines are elided.  Code left byte-identical.  Try clz in each
   successively wider mode, zero-extending the operand and subtracting
   the extra leading-zero count contributed by the widening.  */
2309 widen_clz (enum machine_mode mode, rtx op0, rtx target)
2311 enum mode_class class = GET_MODE_CLASS (mode);
2312 if (class == MODE_INT || class == MODE_FLOAT || class == MODE_COMPLEX_FLOAT)
2314 enum machine_mode wider_mode;
2315 for (wider_mode = GET_MODE_WIDER_MODE (mode); wider_mode != VOIDmode;
2316 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
2318 if (clz_optab->handlers[(int) wider_mode].insn_code
2319 != CODE_FOR_nothing)
2321 rtx xop0, temp, last;
2323 last = get_last_insn ();
2326 target = gen_reg_rtx (mode);
/* Zero-extend (unsignedp=true) so the high bits are known zero.  */
2327 xop0 = widen_operand (op0, wider_mode, mode, true, false);
2328 temp = expand_unop (wider_mode, clz_optab, xop0, NULL_RTX, true);
2330 temp = expand_binop (wider_mode, sub_optab, temp,
2331 GEN_INT (GET_MODE_BITSIZE (wider_mode)
2332 - GET_MODE_BITSIZE (mode)),
2333 target, true, OPTAB_DIRECT);
2335 delete_insns_since (last);
2344 /* Try calculating (parity x) as (and (popcount x) 1), where
2345 popcount can also be done in a wider mode. */
/* NOTE(review): sampled extract — the success-return and final-failure
   return lines are elided.  Code left byte-identical.  Unlike widen_clz,
   the search starts at MODE itself, since popcount-then-mask is correct
   in the original width too.  */
2347 expand_parity (enum machine_mode mode, rtx op0, rtx target)
2349 enum mode_class class = GET_MODE_CLASS (mode);
2350 if (class == MODE_INT || class == MODE_FLOAT || class == MODE_COMPLEX_FLOAT)
2352 enum machine_mode wider_mode;
2353 for (wider_mode = mode; wider_mode != VOIDmode;
2354 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
2356 if (popcount_optab->handlers[(int) wider_mode].insn_code
2357 != CODE_FOR_nothing)
2359 rtx xop0, temp, last;
2361 last = get_last_insn ();
2364 target = gen_reg_rtx (mode);
2365 xop0 = widen_operand (op0, wider_mode, mode, true, false);
2366 temp = expand_unop (wider_mode, popcount_optab, xop0, NULL_RTX,
/* Mask to the low bit: parity = popcount & 1.  */
2369 temp = expand_binop (wider_mode, and_optab, temp, GEN_INT (1),
2370 target, true, OPTAB_DIRECT);
2372 delete_insns_since (last);
2381 /* Generate code to perform an operation specified by UNOPTAB
2382 on operand OP0, with result having machine-mode MODE.
2384 UNSIGNEDP is for the case where we have to widen the operands
2385 to perform the operation. It says to use zero-extension.
2387 If TARGET is nonzero, the value
2388 is generated there, if it is convenient to do so.
2389 In all cases an rtx is returned for the locus of the value;
2390 this may or may not be TARGET. */
/* NOTE(review): this is an elided listing -- the embedded line numbers jump,
   so braces and some statements between consecutive lines are not shown.  */
2393 expand_unop (enum machine_mode mode, optab unoptab, rtx op0, rtx target,
2396 enum mode_class class;
2397 enum machine_mode wider_mode;
/* Remember where we started so each failed strategy can delete its insns.  */
2399 rtx last = get_last_insn ();
2402 class = GET_MODE_CLASS (mode);
2404 op0 = protect_from_queue (op0, 0);
2408 op0 = force_not_mem (op0);
2412 target = protect_from_queue (target, 1);
/* Strategy 1: a direct insn pattern registered for this exact mode.  */
2414 if (unoptab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
2416 int icode = (int) unoptab->handlers[(int) mode].insn_code;
2417 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
2423 temp = gen_reg_rtx (mode);
2425 if (GET_MODE (xop0) != VOIDmode
2426 && GET_MODE (xop0) != mode0)
2427 xop0 = convert_to_mode (mode0, xop0, unsignedp);
2429 /* Now, if insn doesn't accept our operand, put it into a pseudo. */
2431 if (! (*insn_data[icode].operand[1].predicate) (xop0, mode0))
2432 xop0 = copy_to_mode_reg (mode0, xop0);
2434 if (! (*insn_data[icode].operand[0].predicate) (temp, mode))
2435 temp = gen_reg_rtx (mode);
2437 pat = GEN_FCN (icode) (temp, xop0);
2440 if (INSN_P (pat) && NEXT_INSN (pat) != NULL_RTX
2441 && ! add_equal_note (pat, temp, unoptab->code, xop0, NULL_RTX))
/* Could not attach the REG_EQUAL note to a multi-insn expansion:
   throw the insns away and retry without a suggested target.  */
2443 delete_insns_since (last);
2444 return expand_unop (mode, unoptab, op0, NULL_RTX, unsignedp);
2452 delete_insns_since (last);
2455 /* It can't be done in this mode. Can we open-code it in a wider mode? */
2457 /* Widening clz needs special treatment. */
2458 if (unoptab == clz_optab)
2460 temp = widen_clz (mode, op0, target);
/* Strategy 2: open-code in a wider mode, then truncate the result.  */
2467 if (class == MODE_INT || class == MODE_FLOAT || class == MODE_COMPLEX_FLOAT)
2468 for (wider_mode = GET_MODE_WIDER_MODE (mode); wider_mode != VOIDmode;
2469 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
2471 if (unoptab->handlers[(int) wider_mode].insn_code != CODE_FOR_nothing)
2475 /* For certain operations, we need not actually extend
2476 the narrow operand, as long as we will truncate the
2477 results to the same narrowness. */
2479 xop0 = widen_operand (xop0, wider_mode, mode, unsignedp,
2480 (unoptab == neg_optab
2481 || unoptab == one_cmpl_optab)
2482 && class == MODE_INT);
2484 temp = expand_unop (wider_mode, unoptab, xop0, NULL_RTX,
2489 if (class != MODE_INT)
2492 target = gen_reg_rtx (mode);
2493 convert_move (target, temp, 0);
2497 return gen_lowpart (mode, temp);
2500 delete_insns_since (last);
2504 /* These can be done a word at a time. */
2505 if (unoptab == one_cmpl_optab
2506 && class == MODE_INT
2507 && GET_MODE_SIZE (mode) > UNITS_PER_WORD
2508 && unoptab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing)
2513 if (target == 0 || target == op0)
2514 target = gen_reg_rtx (mode);
2518 /* Do the actual arithmetic. */
2519 for (i = 0; i < GET_MODE_BITSIZE (mode) / BITS_PER_WORD; i++)
2521 rtx target_piece = operand_subword (target, i, 1, mode);
2522 rtx x = expand_unop (word_mode, unoptab,
2523 operand_subword_force (op0, i, mode),
2524 target_piece, unsignedp);
2526 if (target_piece != x)
2527 emit_move_insn (target_piece, x);
2530 insns = get_insns ();
2533 emit_no_conflict_block (insns, target, op0, NULL_RTX,
2534 gen_rtx_fmt_e (unoptab->code, mode,
2539 /* Open-code the complex negation operation. */
2540 else if (unoptab->code == NEG
2541 && (class == MODE_COMPLEX_FLOAT || class == MODE_COMPLEX_INT))
2547 /* Find the correct mode for the real and imaginary parts. */
2548 enum machine_mode submode = GET_MODE_INNER (mode);
2550 if (submode == BLKmode)
2554 target = gen_reg_rtx (mode);
/* Negate the imaginary and real parts into TARGET piecewise.  */
2558 target_piece = gen_imagpart (submode, target);
2559 x = expand_unop (submode, unoptab,
2560 gen_imagpart (submode, op0),
2561 target_piece, unsignedp);
2562 if (target_piece != x)
2563 emit_move_insn (target_piece, x);
2565 target_piece = gen_realpart (submode, target);
2566 x = expand_unop (submode, unoptab,
2567 gen_realpart (submode, op0),
2568 target_piece, unsignedp);
2569 if (target_piece != x)
2570 emit_move_insn (target_piece, x);
2575 emit_no_conflict_block (seq, target, op0, 0,
2576 gen_rtx_fmt_e (unoptab->code, mode,
2581 /* Try negating floating point values by flipping the sign bit. */
2582 if (unoptab->code == NEG && class == MODE_FLOAT
2583 && GET_MODE_BITSIZE (mode) <= 2 * HOST_BITS_PER_WIDE_INT)
2585 const struct real_format *fmt = REAL_MODE_FORMAT (mode);
2586 enum machine_mode imode = int_mode_for_mode (mode);
2587 int bitpos = (fmt != 0) ? fmt->signbit : -1;
/* The sign-bit trick requires a format with signed zeros.  */
2589 if (imode != BLKmode && bitpos >= 0 && fmt->has_signed_zero)
2591 HOST_WIDE_INT hi, lo;
2592 rtx last = get_last_insn ();
2594 /* Handle targets with different FP word orders. */
2595 if (FLOAT_WORDS_BIG_ENDIAN != WORDS_BIG_ENDIAN)
2597 int nwords = GET_MODE_BITSIZE (mode) / BITS_PER_WORD;
2598 int word = nwords - (bitpos / BITS_PER_WORD) - 1;
2599 bitpos = word * BITS_PER_WORD + bitpos % BITS_PER_WORD;
2602 if (bitpos < HOST_BITS_PER_WIDE_INT)
2605 lo = (HOST_WIDE_INT) 1 << bitpos;
2609 hi = (HOST_WIDE_INT) 1 << (bitpos - HOST_BITS_PER_WIDE_INT);
/* XOR with a mask that holds just the sign bit flips the sign.  */
2612 temp = expand_binop (imode, xor_optab,
2613 gen_lowpart (imode, op0),
2614 immed_double_const (lo, hi, imode),
2615 NULL_RTX, 1, OPTAB_LIB_WIDEN);
2617 return gen_lowpart (mode, temp);
2618 delete_insns_since (last);
2622 /* Try calculating parity (x) as popcount (x) % 2. */
2623 if (unoptab == parity_optab)
2625 temp = expand_parity (mode, op0, target);
2631 /* Now try a library call in this mode. */
2632 if (unoptab->handlers[(int) mode].libfunc)
2636 enum machine_mode outmode = mode;
2638 /* All of these functions return small values. Thus we choose to
2639 have them return something that isn't a double-word. */
2640 if (unoptab == ffs_optab || unoptab == clz_optab || unoptab == ctz_optab
2641 || unoptab == popcount_optab || unoptab == parity_optab)
2643 = GET_MODE (hard_libcall_value (TYPE_MODE (integer_type_node)));
2647 /* Pass 1 for NO_QUEUE so we don't lose any increments
2648 if the libcall is cse'd or moved. */
2649 value = emit_library_call_value (unoptab->handlers[(int) mode].libfunc,
2650 NULL_RTX, LCT_CONST, outmode,
2652 insns = get_insns ();
2655 target = gen_reg_rtx (outmode);
2656 emit_libcall_block (insns, target, value,
2657 gen_rtx_fmt_e (unoptab->code, mode, op0));
2662 if (class == MODE_VECTOR_FLOAT || class == MODE_VECTOR_INT)
2663 return expand_vector_unop (mode, unoptab, op0, target, unsignedp);
2665 /* It can't be done in this mode. Can we do it in a wider mode? */
/* Same widening loop as above, but now also accepting wider modes that
   only provide a library function.  */
2667 if (class == MODE_INT || class == MODE_FLOAT || class == MODE_COMPLEX_FLOAT)
2669 for (wider_mode = GET_MODE_WIDER_MODE (mode); wider_mode != VOIDmode;
2670 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
2672 if ((unoptab->handlers[(int) wider_mode].insn_code
2673 != CODE_FOR_nothing)
2674 || unoptab->handlers[(int) wider_mode].libfunc)
2678 /* For certain operations, we need not actually extend
2679 the narrow operand, as long as we will truncate the
2680 results to the same narrowness. */
2682 xop0 = widen_operand (xop0, wider_mode, mode, unsignedp,
2683 (unoptab == neg_optab
2684 || unoptab == one_cmpl_optab)
2685 && class == MODE_INT);
2687 temp = expand_unop (wider_mode, unoptab, xop0, NULL_RTX,
2690 /* If we are generating clz using wider mode, adjust the
2692 if (unoptab == clz_optab && temp != 0)
2693 temp = expand_binop (wider_mode, sub_optab, temp,
2694 GEN_INT (GET_MODE_BITSIZE (wider_mode)
2695 - GET_MODE_BITSIZE (mode)),
2696 target, true, OPTAB_DIRECT);
2700 if (class != MODE_INT)
2703 target = gen_reg_rtx (mode);
2704 convert_move (target, temp, 0);
2708 return gen_lowpart (mode, temp);
2711 delete_insns_since (last);
2716 /* If there is no negate operation, try doing a subtract from zero.
2717 The US Software GOFAST library needs this. */
2718 if (unoptab->code == NEG)
2721 temp = expand_binop (mode,
2722 unoptab == negv_optab ? subv_optab : sub_optab,
2723 CONST0_RTX (mode), op0,
2724 target, unsignedp, OPTAB_LIB_WIDEN);
2732 /* Emit code to compute the absolute value of OP0, with result to
2733 TARGET if convenient. (TARGET may be 0.) The return value says
2734 where the result actually is to be found.
2736 MODE is the mode of the operand; the mode of the result is
2737 different but can be deduced from MODE.
/* NOTE(review): elided listing -- braces and some statements between the
   embedded line numbers are not shown.  Tries several branchless
   expansions in turn; each failed attempt is deleted before the next.  */
2742 expand_abs_nojump (enum machine_mode mode, rtx op0, rtx target,
2743 int result_unsignedp)
2748 result_unsignedp = 1;
2750 /* First try to do it with a special abs instruction. */
2751 temp = expand_unop (mode, result_unsignedp ? abs_optab : absv_optab,
2756 /* For floating point modes, try clearing the sign bit. */
2757 if (GET_MODE_CLASS (mode) == MODE_FLOAT
2758 && GET_MODE_BITSIZE (mode) <= 2 * HOST_BITS_PER_WIDE_INT)
2760 const struct real_format *fmt = REAL_MODE_FORMAT (mode);
2761 enum machine_mode imode = int_mode_for_mode (mode);
2762 int bitpos = (fmt != 0) ? fmt->signbit : -1;
2764 if (imode != BLKmode && bitpos >= 0)
2766 HOST_WIDE_INT hi, lo;
2767 rtx last = get_last_insn ();
2769 /* Handle targets with different FP word orders. */
2770 if (FLOAT_WORDS_BIG_ENDIAN != WORDS_BIG_ENDIAN)
2772 int nwords = GET_MODE_BITSIZE (mode) / BITS_PER_WORD;
2773 int word = nwords - (bitpos / BITS_PER_WORD) - 1;
2774 bitpos = word * BITS_PER_WORD + bitpos % BITS_PER_WORD;
2777 if (bitpos < HOST_BITS_PER_WIDE_INT)
2780 lo = (HOST_WIDE_INT) 1 << bitpos;
2784 hi = (HOST_WIDE_INT) 1 << (bitpos - HOST_BITS_PER_WIDE_INT);
/* AND with the complement of the sign-bit mask clears the sign.  */
2787 temp = expand_binop (imode, and_optab,
2788 gen_lowpart (imode, op0),
2789 immed_double_const (~lo, ~hi, imode),
2790 NULL_RTX, 1, OPTAB_LIB_WIDEN);
2792 return gen_lowpart (mode, temp);
2793 delete_insns_since (last);
2797 /* If we have a MAX insn, we can do this as MAX (x, -x). */
2798 if (smax_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
2800 rtx last = get_last_insn ();
2802 temp = expand_unop (mode, neg_optab, op0, NULL_RTX, 0);
2804 temp = expand_binop (mode, smax_optab, op0, temp, target, 0,
2810 delete_insns_since (last);
2813 /* If this machine has expensive jumps, we can do integer absolute
2814 value of X as (((signed) x >> (W-1)) ^ x) - ((signed) x >> (W-1)),
2815 where W is the width of MODE. */
2817 if (GET_MODE_CLASS (mode) == MODE_INT && BRANCH_COST >= 2)
/* EXTENDED is all-ones when OP0 is negative, zero otherwise
   (arithmetic shift by W-1).  */
2819 rtx extended = expand_shift (RSHIFT_EXPR, mode, op0,
2820 size_int (GET_MODE_BITSIZE (mode) - 1),
2823 temp = expand_binop (mode, xor_optab, extended, op0, target, 0,
2826 temp = expand_binop (mode, result_unsignedp ? sub_optab : subv_optab,
2827 temp, extended, target, 0, OPTAB_LIB_WIDEN);
/* Compute the absolute value of OP0, first trying the branchless
   expansions of expand_abs_nojump, then falling back to an explicit
   compare-and-branch around a negate.  When SAFE is zero (or TARGET is
   otherwise unsuitable) a fresh pseudo is used as the accumulator.
   NOTE(review): elided listing -- braces and some statements between the
   embedded line numbers are not shown.  */
2837 expand_abs (enum machine_mode mode, rtx op0, rtx target,
2838 int result_unsignedp, int safe)
2843 result_unsignedp = 1;
2845 temp = expand_abs_nojump (mode, op0, target, result_unsignedp);
2849 /* If that does not win, use conditional jump and negate. */
2851 /* It is safe to use the target if it is the same
2852 as the source if this is also a pseudo register */
2853 if (op0 == target && GET_CODE (op0) == REG
2854 && REGNO (op0) >= FIRST_PSEUDO_REGISTER)
/* OP1 here is the label that skips the negation (GE branch below).  */
2857 op1 = gen_label_rtx ();
2858 if (target == 0 || ! safe
2859 || GET_MODE (target) != mode
2860 || (GET_CODE (target) == MEM && MEM_VOLATILE_P (target))
2861 || (GET_CODE (target) == REG
2862 && REGNO (target) < FIRST_PSEUDO_REGISTER))
2863 target = gen_reg_rtx (mode);
2865 emit_move_insn (target, op0);
2868 /* If this mode is an integer too wide to compare properly,
2869 compare word by word. Rely on CSE to optimize constant cases. */
2870 if (GET_MODE_CLASS (mode) == MODE_INT
2871 && ! can_compare_p (GE, mode, ccp_jump))
2872 do_jump_by_parts_greater_rtx (mode, 0, target, const0_rtx,
2875 do_compare_rtx_and_jump (target, CONST0_RTX (mode), GE, 0, mode,
2876 NULL_RTX, NULL_RTX, op1);
2878 op0 = expand_unop (mode, result_unsignedp ? neg_optab : negv_optab,
2881 emit_move_insn (target, op0);
2887 /* Emit code to compute the absolute value of OP0, with result to
2888 TARGET if convenient. (TARGET may be 0.) The return value says
2889 where the result actually is to be found.
2891 MODE is the mode of the operand; the mode of the result is
2892 different but can be deduced from MODE.
2894 UNSIGNEDP is relevant for complex integer modes. */
/* NOTE(review): elided listing -- braces and some statements between the
   embedded line numbers are not shown.  The result lives in SUBMODE
   (the component mode of the complex MODE).  */
2897 expand_complex_abs (enum machine_mode mode, rtx op0, rtx target,
2900 enum mode_class class = GET_MODE_CLASS (mode);
2901 enum machine_mode wider_mode;
2903 rtx entry_last = get_last_insn ();
2906 optab this_abs_optab;
2908 /* Find the correct mode for the real and imaginary parts. */
2909 enum machine_mode submode = GET_MODE_INNER (mode);
2911 if (submode == BLKmode)
2914 op0 = protect_from_queue (op0, 0);
2918 op0 = force_not_mem (op0);
2921 last = get_last_insn ();
2924 target = protect_from_queue (target, 1);
/* Use the trapping abs variant for signed integer modes under -ftrapv.  */
2926 this_abs_optab = ! unsignedp && flag_trapv
2927 && (GET_MODE_CLASS(mode) == MODE_INT)
2928 ? absv_optab : abs_optab;
/* Strategy 1: a direct insn pattern for this mode.  */
2930 if (this_abs_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
2932 int icode = (int) this_abs_optab->handlers[(int) mode].insn_code;
2933 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
2939 temp = gen_reg_rtx (submode);
2941 if (GET_MODE (xop0) != VOIDmode
2942 && GET_MODE (xop0) != mode0)
2943 xop0 = convert_to_mode (mode0, xop0, unsignedp);
2945 /* Now, if insn doesn't accept our operand, put it into a pseudo. */
2947 if (! (*insn_data[icode].operand[1].predicate) (xop0, mode0))
2948 xop0 = copy_to_mode_reg (mode0, xop0);
2950 if (! (*insn_data[icode].operand[0].predicate) (temp, submode))
2951 temp = gen_reg_rtx (submode);
2953 pat = GEN_FCN (icode) (temp, xop0);
2956 if (INSN_P (pat) && NEXT_INSN (pat) != NULL_RTX
2957 && ! add_equal_note (pat, temp, this_abs_optab->code, xop0,
/* Could not attach the REG_EQUAL note: discard and retry untargeted.  */
2960 delete_insns_since (last);
2961 return expand_unop (mode, this_abs_optab, op0, NULL_RTX,
2970 delete_insns_since (last);
2973 /* It can't be done in this mode. Can we open-code it in a wider mode? */
2975 for (wider_mode = GET_MODE_WIDER_MODE (mode); wider_mode != VOIDmode;
2976 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
2978 if (this_abs_optab->handlers[(int) wider_mode].insn_code
2979 != CODE_FOR_nothing)
2983 xop0 = convert_modes (wider_mode, mode, xop0, unsignedp);
2984 temp = expand_complex_abs (wider_mode, xop0, NULL_RTX, unsignedp);
2988 if (class != MODE_COMPLEX_INT)
2991 target = gen_reg_rtx (submode);
2992 convert_move (target, temp, 0);
2996 return gen_lowpart (submode, temp);
2999 delete_insns_since (last);
3003 /* Open-code the complex absolute-value operation
3004 if we can open-code sqrt. Otherwise it's not worth while. */
3005 if (sqrt_optab->handlers[(int) submode].insn_code != CODE_FOR_nothing
3008 rtx real, imag, total;
3010 real = gen_realpart (submode, op0);
3011 imag = gen_imagpart (submode, op0);
3013 /* Square both parts. */
3014 real = expand_mult (submode, real, real, NULL_RTX, 0);
3015 imag = expand_mult (submode, imag, imag, NULL_RTX, 0);
3017 /* Sum the parts. */
3018 total = expand_binop (submode, add_optab, real, imag, NULL_RTX,
3019 0, OPTAB_LIB_WIDEN);
3021 /* Get sqrt in TARGET. Set TARGET to where the result is. */
3022 target = expand_unop (submode, sqrt_optab, total, target, 0);
3024 delete_insns_since (last);
3029 /* Now try a library call in this mode. */
3030 if (this_abs_optab->handlers[(int) mode].libfunc)
3037 /* Pass 1 for NO_QUEUE so we don't lose any increments
3038 if the libcall is cse'd or moved. */
/* NOTE(review): the guard above tests THIS_ABS_OPTAB's libfunc, but the
   call below fetches ABS_OPTAB's libfunc -- under -ftrapv these differ.
   Looks inconsistent; verify against upstream history before changing.  */
3039 value = emit_library_call_value (abs_optab->handlers[(int) mode].libfunc,
3040 NULL_RTX, LCT_CONST, submode, 1, op0, mode);
3041 insns = get_insns ();
3044 target = gen_reg_rtx (submode);
3045 emit_libcall_block (insns, target, value,
3046 gen_rtx_fmt_e (this_abs_optab->code, mode, op0));
3051 /* It can't be done in this mode. Can we do it in a wider mode? */
3053 for (wider_mode = GET_MODE_WIDER_MODE (mode); wider_mode != VOIDmode;
3054 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
3056 if ((this_abs_optab->handlers[(int) wider_mode].insn_code
3057 != CODE_FOR_nothing)
3058 || this_abs_optab->handlers[(int) wider_mode].libfunc)
3062 xop0 = convert_modes (wider_mode, mode, xop0, unsignedp);
3064 temp = expand_complex_abs (wider_mode, xop0, NULL_RTX, unsignedp);
3068 if (class != MODE_COMPLEX_INT)
3071 target = gen_reg_rtx (submode);
3072 convert_move (target, temp, 0);
3076 return gen_lowpart (submode, temp);
3079 delete_insns_since (last);
/* All strategies failed: remove everything emitted since entry.  */
3083 delete_insns_since (entry_last);
3087 /* Generate an instruction whose insn-code is INSN_CODE,
3088 with two operands: an output TARGET and an input OP0.
3089 TARGET *must* be nonzero, and the output is always stored there.
3090 CODE is an rtx code such that (CODE OP0) is an rtx that describes
3091 the value that is stored into TARGET. */
/* NOTE(review): elided listing -- braces and some statements between the
   embedded line numbers are not shown.  */
3094 emit_unop_insn (int icode, rtx target, rtx op0, enum rtx_code code)
3097 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
3100 temp = target = protect_from_queue (target, 1);
3102 op0 = protect_from_queue (op0, 0);
3104 /* Sign and zero extension from memory is often done specially on
3105 RISC machines, so forcing into a register here can pessimize
3107 if (flag_force_mem && code != SIGN_EXTEND && code != ZERO_EXTEND)
3108 op0 = force_not_mem (op0);
3110 /* Now, if insn does not accept our operands, put them into pseudos. */
3112 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
3113 op0 = copy_to_mode_reg (mode0, op0);
3115 if (! (*insn_data[icode].operand[0].predicate) (temp, GET_MODE (temp))
3116 || (flag_force_mem && GET_CODE (temp) == MEM))
3117 temp = gen_reg_rtx (GET_MODE (temp));
3119 pat = GEN_FCN (icode) (temp, op0);
/* Attach a REG_EQUAL note on multi-insn expansions, unless the caller
   passed CODE == UNKNOWN.  */
3121 if (INSN_P (pat) && NEXT_INSN (pat) != NULL_RTX && code != UNKNOWN)
3122 add_equal_note (pat, temp, code, op0, NULL_RTX);
/* If a scratch TEMP was substituted above, copy the result into the
   caller's TARGET.  */
3127 emit_move_insn (target, temp);
3130 /* Emit code to perform a series of operations on a multi-word quantity, one
3133 Such a block is preceded by a CLOBBER of the output, consists of multiple
3134 insns, each setting one word of the output, and followed by a SET copying
3135 the output to itself.
3137 Each of the insns setting words of the output receives a REG_NO_CONFLICT
3138 note indicating that it doesn't conflict with the (also multi-word)
3139 inputs. The entire block is surrounded by REG_LIBCALL and REG_RETVAL
3142 INSNS is a block of code generated to perform the operation, not including
3143 the CLOBBER and final copy. All insns that compute intermediate values
3144 are first emitted, followed by the block as described above.
3146 TARGET, OP0, and OP1 are the output and inputs of the operations,
3147 respectively. OP1 may be zero for a unary operation.
3149 EQUIV, if nonzero, is an expression to be placed into a REG_EQUAL note
3152 If TARGET is not a register, INSNS is simply emitted with no special
3153 processing. Likewise if anything in INSNS is not an INSN or if
3154 there is a libcall block inside INSNS.
3156 The final insn emitted is returned. */
/* NOTE(review): elided listing -- braces and some statements between the
   embedded line numbers are not shown.  */
3159 emit_no_conflict_block (rtx insns, rtx target, rtx op0, rtx op1, rtx equiv)
3161 rtx prev, next, first, last, insn;
/* Bail out to a plain emit when the no-conflict protocol can't apply.  */
3163 if (GET_CODE (target) != REG || reload_in_progress)
3164 return emit_insn (insns);
3166 for (insn = insns; insn; insn = NEXT_INSN (insn))
3167 if (GET_CODE (insn) != INSN
3168 || find_reg_note (insn, REG_LIBCALL, NULL_RTX))
3169 return emit_insn (insns);
3171 /* First emit all insns that do not store into words of the output and remove
3172 these from the list. */
3173 for (insn = insns; insn; insn = next)
3178 next = NEXT_INSN (insn);
3180 /* Some ports (cris) create a libcall regions at their own. We must
3181 avoid any potential nesting of LIBCALLs. */
3182 if ((note = find_reg_note (insn, REG_LIBCALL, NULL)) != NULL)
3183 remove_note (insn, note);
3184 if ((note = find_reg_note (insn, REG_RETVAL, NULL)) != NULL)
3185 remove_note (insn, note);
3187 if (GET_CODE (PATTERN (insn)) == SET || GET_CODE (PATTERN (insn)) == USE
3188 || GET_CODE (PATTERN (insn)) == CLOBBER)
3189 set = PATTERN (insn);
3190 else if (GET_CODE (PATTERN (insn)) == PARALLEL)
/* For PARALLELs, find the first SET inside the vector.  */
3192 for (i = 0; i < XVECLEN (PATTERN (insn), 0); i++)
3193 if (GET_CODE (XVECEXP (PATTERN (insn), 0, i)) == SET)
3195 set = XVECEXP (PATTERN (insn), 0, i);
/* Unlink insns that don't write into TARGET; they are emitted ahead
   of the block (manual doubly-linked-list splice below).  */
3203 if (! reg_overlap_mentioned_p (target, SET_DEST (set)))
3205 if (PREV_INSN (insn))
3206 NEXT_INSN (PREV_INSN (insn)) = next;
3211 PREV_INSN (next) = PREV_INSN (insn);
3217 prev = get_last_insn ();
3219 /* Now write the CLOBBER of the output, followed by the setting of each
3220 of the words, followed by the final copy. */
3221 if (target != op0 && target != op1)
3222 emit_insn (gen_rtx_CLOBBER (VOIDmode, target));
3224 for (insn = insns; insn; insn = next)
3226 next = NEXT_INSN (insn);
/* Mark each word-setting insn as not conflicting with the inputs.  */
3229 if (op1 && GET_CODE (op1) == REG)
3230 REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_NO_CONFLICT, op1,
3233 if (op0 && GET_CODE (op0) == REG)
3234 REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_NO_CONFLICT, op0,
3238 if (mov_optab->handlers[(int) GET_MODE (target)].insn_code
3239 != CODE_FOR_nothing)
3241 last = emit_move_insn (target, target);
3243 set_unique_reg_note (last, REG_EQUAL, equiv);
3247 last = get_last_insn ();
3249 /* Remove any existing REG_EQUAL note from "last", or else it will
3250 be mistaken for a note referring to the full contents of the
3251 alleged libcall value when found together with the REG_RETVAL
3252 note added below. An existing note can come from an insn
3253 expansion at "last". */
3254 remove_note (last, find_reg_note (last, REG_EQUAL, NULL_RTX));
3258 first = get_insns ();
3260 first = NEXT_INSN (prev);
3262 /* Encapsulate the block so it gets manipulated as a unit. */
3263 REG_NOTES (first) = gen_rtx_INSN_LIST (REG_LIBCALL, last,
3265 REG_NOTES (last) = gen_rtx_INSN_LIST (REG_RETVAL, first, REG_NOTES (last));
3270 /* Emit code to make a call to a constant function or a library call.
3272 INSNS is a list containing all insns emitted in the call.
3273 These insns leave the result in RESULT. Our block is to copy RESULT
3274 to TARGET, which is logically equivalent to EQUIV.
3276 We first emit any insns that set a pseudo on the assumption that these are
3277 loading constants into registers; doing so allows them to be safely cse'ed
3278 between blocks. Then we emit all the other insns in the block, followed by
3279 an insn to move RESULT to TARGET. This last insn will have a REQ_EQUAL
3280 note with an operand of EQUIV.
3282 Moving assignments to pseudos outside of the block is done to improve
3283 the generated code, but is not required to generate correct code,
3284 hence being unable to move an assignment is not grounds for not making
3285 a libcall block. There are two reasons why it is safe to leave these
3286 insns inside the block: First, we know that these pseudos cannot be
3287 used in generated RTL outside the block since they are created for
3288 temporary purposes within the block. Second, CSE will not record the
3289 values of anything set inside a libcall block, so we know they must
3290 be dead at the end of the block.
3292 Except for the first group of insns (the ones setting pseudos), the
3293 block is delimited by REG_RETVAL and REG_LIBCALL notes. */
/* NOTE(review): elided listing -- braces and some statements between the
   embedded line numbers are not shown.  */
3296 emit_libcall_block (rtx insns, rtx target, rtx result, rtx equiv)
3298 rtx final_dest = target;
3299 rtx prev, next, first, last, insn;
3301 /* If this is a reg with REG_USERVAR_P set, then it could possibly turn
3302 into a MEM later. Protect the libcall block from this change. */
3303 if (! REG_P (target) || REG_USERVAR_P (target))
3304 target = gen_reg_rtx (GET_MODE (target));
3306 /* If we're using non-call exceptions, a libcall corresponding to an
3307 operation that may trap may also trap. */
3308 if (flag_non_call_exceptions && may_trap_p (equiv))
/* Strip no-throw REG_EH_REGION notes so the call is allowed to trap.  */
3310 for (insn = insns; insn; insn = NEXT_INSN (insn))
3311 if (GET_CODE (insn) == CALL_INSN)
3313 rtx note = find_reg_note (insn, REG_EH_REGION, NULL_RTX);
3315 if (note != 0 && INTVAL (XEXP (note, 0)) <= 0)
3316 remove_note (insn, note);
3320 /* look for any CALL_INSNs in this sequence, and attach a REG_EH_REGION
3321 reg note to indicate that this call cannot throw or execute a nonlocal
3322 goto (unless there is already a REG_EH_REGION note, in which case
3324 for (insn = insns; insn; insn = NEXT_INSN (insn))
3325 if (GET_CODE (insn) == CALL_INSN)
3327 rtx note = find_reg_note (insn, REG_EH_REGION, NULL_RTX);
3330 XEXP (note, 0) = GEN_INT (-1);
3332 REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_EH_REGION, GEN_INT (-1),
3336 /* First emit all insns that set pseudos. Remove them from the list as
3337 we go. Avoid insns that set pseudos which were referenced in previous
3338 insns. These can be generated by move_by_pieces, for example,
3339 to update an address. Similarly, avoid insns that reference things
3340 set in previous insns. */
3342 for (insn = insns; insn; insn = next)
3344 rtx set = single_set (insn);
3347 /* Some ports (cris) create a libcall regions at their own. We must
3348 avoid any potential nesting of LIBCALLs. */
3349 if ((note = find_reg_note (insn, REG_LIBCALL, NULL)) != NULL)
3350 remove_note (insn, note);
3351 if ((note = find_reg_note (insn, REG_RETVAL, NULL)) != NULL)
3352 remove_note (insn, note);
3354 next = NEXT_INSN (insn);
3356 if (set != 0 && GET_CODE (SET_DEST (set)) == REG
3357 && REGNO (SET_DEST (set)) >= FIRST_PSEUDO_REGISTER
3359 || ((! INSN_P(insns)
3360 || ! reg_mentioned_p (SET_DEST (set), PATTERN (insns)))
3361 && ! reg_used_between_p (SET_DEST (set), insns, insn)
3362 && ! modified_in_p (SET_SRC (set), insns)
3363 && ! modified_between_p (SET_SRC (set), insns, insn))))
/* Splice the insn out of the list; it is emitted ahead of the block.  */
3365 if (PREV_INSN (insn))
3366 NEXT_INSN (PREV_INSN (insn)) = next;
3371 PREV_INSN (next) = PREV_INSN (insn);
3376 /* Some ports use a loop to copy large arguments onto the stack.
3377 Don't move anything outside such a loop. */
3378 if (GET_CODE (insn) == CODE_LABEL)
3382 prev = get_last_insn ();
3384 /* Write the remaining insns followed by the final copy. */
3386 for (insn = insns; insn; insn = next)
3388 next = NEXT_INSN (insn);
3393 last = emit_move_insn (target, result);
3394 if (mov_optab->handlers[(int) GET_MODE (target)].insn_code
3395 != CODE_FOR_nothing)
3396 set_unique_reg_note (last, REG_EQUAL, copy_rtx (equiv));
3399 /* Remove any existing REG_EQUAL note from "last", or else it will
3400 be mistaken for a note referring to the full contents of the
3401 libcall value when found together with the REG_RETVAL note added
3402 below. An existing note can come from an insn expansion at
3404 remove_note (last, find_reg_note (last, REG_EQUAL, NULL_RTX));
3407 if (final_dest != target)
3408 emit_move_insn (final_dest, target);
3411 first = get_insns ();
3413 first = NEXT_INSN (prev);
3415 /* Encapsulate the block so it gets manipulated as a unit. */
3416 if (!flag_non_call_exceptions || !may_trap_p (equiv))
3418 /* We can't attach the REG_LIBCALL and REG_RETVAL notes
3419 when the encapsulated region would not be in one basic block,
3420 i.e. when there is a control_flow_insn_p insn between FIRST and LAST.
3422 bool attach_libcall_retval_notes = true;
3423 next = NEXT_INSN (last);
3424 for (insn = first; insn != next; insn = NEXT_INSN (insn))
3425 if (control_flow_insn_p (insn))
3427 attach_libcall_retval_notes = false;
3431 if (attach_libcall_retval_notes)
3433 REG_NOTES (first) = gen_rtx_INSN_LIST (REG_LIBCALL, last,
3435 REG_NOTES (last) = gen_rtx_INSN_LIST (REG_RETVAL, first,
3441 /* Generate code to store zero in X. */
/* Expands to a single move of the shared const0_rtx into X.  */
3444 emit_clr_insn (rtx x)
3446 emit_move_insn (x, const0_rtx);
3449 /* Generate code to store 1 in X
3450 assuming it contains zero beforehand. */
/* NOTE(review): the move stores const1_rtx unconditionally; the
   "contains zero beforehand" precondition is documentation for callers,
   not something this implementation relies on.  */
3453 emit_0_to_1_insn (rtx x)
3455 emit_move_insn (x, const1_rtx);
3458 /* Nonzero if we can perform a comparison of mode MODE straightforwardly.
3459 PURPOSE describes how this comparison will be used. CODE is the rtx
3460 comparison code we will be using.
3462 ??? Actually, CODE is slightly weaker than that. A target is still
3463 required to implement all of the normal bcc operations, but not
3464 required to implement all (or any) of the unordered bcc operations. */
/* NOTE(review): elided listing -- the enclosing do/while loop body is only
   partially shown here.  */
3467 can_compare_p (enum rtx_code code, enum machine_mode mode,
3468 enum can_compare_purpose purpose)
/* Walk from MODE through successively wider modes until some way of
   comparing is found for the requested PURPOSE.  */
3472 if (cmp_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
3474 if (purpose == ccp_jump)
3475 return bcc_gen_fctn[(int) code] != NULL;
3476 else if (purpose == ccp_store_flag)
3477 return setcc_gen_code[(int) code] != CODE_FOR_nothing;
3479 /* There's only one cmov entry point, and it's allowed to fail. */
3482 if (purpose == ccp_jump
3483 && cbranch_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
3485 if (purpose == ccp_cmov
3486 && cmov_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
3488 if (purpose == ccp_store_flag
3489 && cstore_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
3492 mode = GET_MODE_WIDER_MODE (mode);
3494 while (mode != VOIDmode);
3499 /* This function is called when we are going to emit a compare instruction that
3500 compares the values found in *PX and *PY, using the rtl operator COMPARISON.
3502 *PMODE is the mode of the inputs (in case they are const_int).
3503 *PUNSIGNEDP nonzero says that the operands are unsigned;
3504 this matters if they need to be widened.
3506 If they have mode BLKmode, then SIZE specifies the size of both operands.
3508 This function performs all the setup necessary so that the caller only has
3509 to emit a single comparison insn. This setup can involve doing a BLKmode
3510 comparison or emitting a library call to perform the comparison if no insn
3511 is available to handle it.
3512 The values which are passed in through pointers can be modified; the caller
3513 should perform the comparison on the modified values. */
/* NOTE(review): elided listing -- braces and some statements between the
   embedded line numbers are not shown.  */
3516 prepare_cmp_insn (rtx *px, rtx *py, enum rtx_code *pcomparison, rtx size,
3517 enum machine_mode *pmode, int *punsignedp,
3518 enum can_compare_purpose purpose)
3520 enum machine_mode mode = *pmode;
3521 rtx x = *px, y = *py;
3522 int unsignedp = *punsignedp;
3523 enum mode_class class;
3525 class = GET_MODE_CLASS (mode);
3527 /* They could both be VOIDmode if both args are immediate constants,
3528 but we should fold that at an earlier stage.
3529 With no special code here, this will call abort,
3530 reminding the programmer to implement such folding. */
3532 if (mode != BLKmode && flag_force_mem)
3534 /* Load duplicate non-volatile operands once. */
3535 if (rtx_equal_p (x, y) && ! volatile_refs_p (x))
/* X and Y are the same rtx: load it once and reuse for both sides.  */
3537 x = force_not_mem (x);
3542 x = force_not_mem (x);
3543 y = force_not_mem (y);
3547 /* If we are inside an appropriately-short loop and one operand is an
3548 expensive constant, force it into a register. */
3549 if (CONSTANT_P (x) && preserve_subexpressions_p ()
3550 && rtx_cost (x, COMPARE) > COSTS_N_INSNS (1))
3551 x = force_reg (mode, x);
3553 if (CONSTANT_P (y) && preserve_subexpressions_p ()
3554 && rtx_cost (y, COMPARE) > COSTS_N_INSNS (1))
3555 y = force_reg (mode, y);
3558 /* Abort if we have a non-canonical comparison. The RTL documentation
3559 states that canonical comparisons are required only for targets which
3561 if (CONSTANT_P (x) && ! CONSTANT_P (y))
3565 /* Don't let both operands fail to indicate the mode. */
3566 if (GET_MODE (x) == VOIDmode && GET_MODE (y) == VOIDmode)
3567 x = force_reg (mode, x);
3569 /* Handle all BLKmode compares. */
3571 if (mode == BLKmode)
3574 enum machine_mode result_mode;
3575 rtx opalign ATTRIBUTE_UNUSED
3576 = GEN_INT (MIN (MEM_ALIGN (x), MEM_ALIGN (y)) / BITS_PER_UNIT);
3579 x = protect_from_queue (x, 0);
3580 y = protect_from_queue (y, 0);
/* Try the target's block-compare patterns, narrowest count mode first;
   each is conditionally compiled on the pattern's availability.  */
3584 #ifdef HAVE_cmpmemqi
3586 && GET_CODE (size) == CONST_INT
3587 && INTVAL (size) < (1 << GET_MODE_BITSIZE (QImode)))
3589 result_mode = insn_data[(int) CODE_FOR_cmpmemqi].operand[0].mode;
3590 result = gen_reg_rtx (result_mode);
3591 emit_insn (gen_cmpmemqi (result, x, y, size, opalign));
3595 #ifdef HAVE_cmpmemhi
3597 && GET_CODE (size) == CONST_INT
3598 && INTVAL (size) < (1 << GET_MODE_BITSIZE (HImode)))
3600 result_mode = insn_data[(int) CODE_FOR_cmpmemhi].operand[0].mode;
3601 result = gen_reg_rtx (result_mode);
3602 emit_insn (gen_cmpmemhi (result, x, y, size, opalign));
3606 #ifdef HAVE_cmpmemsi
3609 result_mode = insn_data[(int) CODE_FOR_cmpmemsi].operand[0].mode;
3610 result = gen_reg_rtx (result_mode);
3611 size = protect_from_queue (size, 0);
3612 emit_insn (gen_cmpmemsi (result, x, y,
3613 convert_to_mode (SImode, size, 1),
3618 #ifdef HAVE_cmpstrqi
3620 && GET_CODE (size) == CONST_INT
3621 && INTVAL (size) < (1 << GET_MODE_BITSIZE (QImode)))
3623 result_mode = insn_data[(int) CODE_FOR_cmpstrqi].operand[0].mode;
3624 result = gen_reg_rtx (result_mode);
3625 emit_insn (gen_cmpstrqi (result, x, y, size, opalign));
3629 #ifdef HAVE_cmpstrhi
3631 && GET_CODE (size) == CONST_INT
3632 && INTVAL (size) < (1 << GET_MODE_BITSIZE (HImode)))
3634 result_mode = insn_data[(int) CODE_FOR_cmpstrhi].operand[0].mode;
3635 result = gen_reg_rtx (result_mode);
3636 emit_insn (gen_cmpstrhi (result, x, y, size, opalign));
3640 #ifdef HAVE_cmpstrsi
3643 result_mode = insn_data[(int) CODE_FOR_cmpstrsi].operand[0].mode;
3644 result = gen_reg_rtx (result_mode);
3645 size = protect_from_queue (size, 0);
3646 emit_insn (gen_cmpstrsi (result, x, y,
3647 convert_to_mode (SImode, size, 1),
/* No pattern matched: fall back to a memcmp/bcmp library call.  */
3653 #ifdef TARGET_MEM_FUNCTIONS
3654 result = emit_library_call_value (memcmp_libfunc, NULL_RTX, LCT_PURE_MAKE_BLOCK,
3655 TYPE_MODE (integer_type_node), 3,
3656 XEXP (x, 0), Pmode, XEXP (y, 0), Pmode,
3657 convert_to_mode (TYPE_MODE (sizetype), size,
3658 TREE_UNSIGNED (sizetype)),
3659 TYPE_MODE (sizetype));
3661 result = emit_library_call_value (bcmp_libfunc, NULL_RTX, LCT_PURE_MAKE_BLOCK,
3662 TYPE_MODE (integer_type_node), 3,
3663 XEXP (x, 0), Pmode, XEXP (y, 0), Pmode,
3664 convert_to_mode (TYPE_MODE (integer_type_node),
3666 TREE_UNSIGNED (integer_type_node)),
3667 TYPE_MODE (integer_type_node));
3670 result_mode = TYPE_MODE (integer_type_node);
3674 *pmode = result_mode;
3680 if (can_compare_p (*pcomparison, mode, purpose))
3683 /* Handle a lib call just for the mode we are using. */
3685 if (cmp_optab->handlers[(int) mode].libfunc && class != MODE_FLOAT)
3687 rtx libfunc = cmp_optab->handlers[(int) mode].libfunc;
3690 /* If we want unsigned, and this mode has a distinct unsigned
3691 comparison routine, use that. */
3692 if (unsignedp && ucmp_optab->handlers[(int) mode].libfunc)
3693 libfunc = ucmp_optab->handlers[(int) mode].libfunc;
3695 result = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST_MAKE_BLOCK,
3696 word_mode, 2, x, mode, y, mode);
3698 /* Integer comparison returns a result that must be compared against 1,
3699 so that even if we do an unsigned compare afterward,
3700 there is still a value that can represent the result "less than". */
3707 if (class == MODE_FLOAT)
3708 prepare_float_lib_cmp (px, py, pcomparison, pmode, punsignedp);
3714 /* Before emitting an insn with code ICODE, make sure that X, which is going
3715 to be used for operand OPNUM of the insn, is converted from mode MODE to
3716 WIDER_MODE (UNSIGNEDP determines whether it is an unsigned conversion), and
3717 that it is accepted by the operand predicate. Return the new value. */
3720 prepare_operand (int icode, rtx x, int opnum, enum machine_mode mode,
3721 enum machine_mode wider_mode, int unsignedp)
/* Strip any queued autoincrement side effects before using X.  */
3723 x = protect_from_queue (x, 0);
3725 if (mode != wider_mode)
3726 x = convert_modes (wider_mode, mode, x, unsignedp);
/* If the insn's operand predicate rejects X, force it into a register
   of the mode the operand expects.  */
3728 if (! (*insn_data[icode].operand[opnum].predicate)
3729 (x, insn_data[icode].operand[opnum].mode))
3733 x = copy_to_mode_reg (insn_data[icode].operand[opnum].mode, x);
3739 /* Subroutine of emit_cmp_and_jump_insns; this function is called when we know
3740 we can do the comparison.
3741 The arguments are the same as for emit_cmp_and_jump_insns; but LABEL may
3742 be NULL_RTX which indicates that only a comparison is to be generated. */
3745 emit_cmp_and_jump_insn_1 (rtx x, rtx y, enum machine_mode mode,
3746 enum rtx_code comparison, int unsignedp, rtx label)
3748 rtx test = gen_rtx_fmt_ee (comparison, mode, x, y);
3749 enum mode_class class = GET_MODE_CLASS (mode);
3750 enum machine_mode wider_mode = mode;
/* Each strategy below is retried at successively wider modes (see the
   do-while termination at the bottom) until a usable pattern is found.  */
3752 /* Try combined insns first. */
3755 enum insn_code icode;
3756 PUT_MODE (test, wider_mode);
/* Strategy 1: a combined compare-and-branch (cbranch) pattern.  */
3760 icode = cbranch_optab->handlers[(int) wider_mode].insn_code;
3762 if (icode != CODE_FOR_nothing
3763 && (*insn_data[icode].operand[0].predicate) (test, wider_mode))
3765 x = prepare_operand (icode, x, 1, mode, wider_mode, unsignedp);
3766 y = prepare_operand (icode, y, 2, mode, wider_mode, unsignedp);
3767 emit_jump_insn (GEN_FCN (icode) (test, x, y, label));
/* Strategy 2: a single-operand test pattern when Y is zero.  */
3772 /* Handle some compares against zero. */
3773 icode = (int) tst_optab->handlers[(int) wider_mode].insn_code;
3774 if (y == CONST0_RTX (mode) && icode != CODE_FOR_nothing)
3776 x = prepare_operand (icode, x, 0, mode, wider_mode, unsignedp);
3777 emit_insn (GEN_FCN (icode) (x));
3779 emit_jump_insn ((*bcc_gen_fctn[(int) comparison]) (label));
/* Strategy 3: a plain compare pattern followed by a bcc-style branch.  */
3783 /* Handle compares for which there is a directly suitable insn. */
3785 icode = (int) cmp_optab->handlers[(int) wider_mode].insn_code;
3786 if (icode != CODE_FOR_nothing)
3788 x = prepare_operand (icode, x, 0, mode, wider_mode, unsignedp);
3789 y = prepare_operand (icode, y, 1, mode, wider_mode, unsignedp);
3790 emit_insn (GEN_FCN (icode) (x, y));
3792 emit_jump_insn ((*bcc_gen_fctn[(int) comparison]) (label));
/* NOTE(review): widening appears to be attempted only for integer and
   floating classes -- the branch body here is not visible; confirm.  */
3796 if (class != MODE_INT && class != MODE_FLOAT
3797 && class != MODE_COMPLEX_FLOAT)
3800 wider_mode = GET_MODE_WIDER_MODE (wider_mode);
3802 while (wider_mode != VOIDmode);
3807 /* Generate code to compare X with Y so that the condition codes are
3808 set and to jump to LABEL if the condition is true. If X is a
3809 constant and Y is not a constant, then the comparison is swapped to
3810 ensure that the comparison RTL has the canonical form.
3812 UNSIGNEDP nonzero says that X and Y are unsigned; this matters if they
3813 need to be widened by emit_cmp_insn. UNSIGNEDP is also used to select
3814 the proper branch condition code.
3816 If X and Y have mode BLKmode, then SIZE specifies the size of both X and Y.
3818 MODE is the mode of the inputs (in case they are const_int).
3820 COMPARISON is the rtl operator to compare with (EQ, NE, GT, etc.). It will
3821 be passed unchanged to emit_cmp_insn, then potentially converted into an
3822 unsigned variant based on UNSIGNEDP to select a proper jump instruction. */
3825 emit_cmp_and_jump_insns (rtx x, rtx y, enum rtx_code comparison, rtx size,
3826 enum machine_mode mode, int unsignedp, rtx label)
3828 rtx op0 = x, op1 = y;
3830 /* Swap operands and condition to ensure canonical RTL. */
3831 if (swap_commutative_operands_p (x, y))
3833 /* If we're not emitting a branch, this means some caller
3839 comparison = swap_condition (comparison);
3843 /* If OP0 is still a constant, then both X and Y must be constants. Force
3844 X into a register to avoid aborting in emit_cmp_insn due to non-canonical
3846 if (CONSTANT_P (op0))
3847 op0 = force_reg (mode, op0);
/* Select the unsigned variant of the condition where appropriate
   (the guarding test is not visible here).  */
3852 comparison = unsigned_condition (comparison);
/* Canonicalize the operands/condition, then emit the compare (and the
   branch, unless LABEL is NULL_RTX).  */
3854 prepare_cmp_insn (&op0, &op1, &comparison, size, &mode, &unsignedp,
3856 emit_cmp_and_jump_insn_1 (op0, op1, mode, comparison, unsignedp, label);
3859 /* Like emit_cmp_and_jump_insns, but generate only the comparison. */
3862 emit_cmp_insn (rtx x, rtx y, enum rtx_code comparison, rtx size,
3863 enum machine_mode mode, int unsignedp)
/* A null (0) LABEL tells emit_cmp_and_jump_insns to omit the branch.  */
3865 emit_cmp_and_jump_insns (x, y, comparison, size, mode, unsignedp, 0);
3868 /* Emit a library call comparison between floating point X and Y.
3869 COMPARISON is the rtl operator to compare with (EQ, NE, GT, etc.). */
3872 prepare_float_lib_cmp (rtx *px, rtx *py, enum rtx_code *pcomparison,
3873 enum machine_mode *pmode, int *punsignedp)
3875 enum rtx_code comparison = *pcomparison;
3876 enum rtx_code swapped = swap_condition (comparison);
3877 rtx x = protect_from_queue (*px, 0);
3878 rtx y = protect_from_queue (*py, 0);
3879 enum machine_mode orig_mode = GET_MODE (x);
3880 enum machine_mode mode;
3881 rtx value, target, insns, equiv;
/* Find the narrowest mode, at least as wide as the operands, that has a
   library routine for COMPARISON -- or for the swapped condition, in
   which case the operands are exchanged.  */
3884 for (mode = orig_mode; mode != VOIDmode; mode = GET_MODE_WIDER_MODE (mode))
3886 if ((libfunc = code_to_optab[comparison]->handlers[mode].libfunc))
3889 if ((libfunc = code_to_optab[swapped]->handlers[mode].libfunc))
3892 tmp = x; x = y; y = tmp;
3893 comparison = swapped;
3898 if (mode == VOIDmode)
3901 if (mode != orig_mode)
3903 x = convert_to_mode (mode, x, 0);
3904 y = convert_to_mode (mode, y, 0);
3907 /* If we're optimizing, attach a REG_EQUAL note describing the semantics
3908 of the libcall to the RTL. This allows the RTL optimizers to delete
3909 the libcall if the condition can be determined at compile-time. */
3911 && ! side_effects_p (x)
3912 && ! side_effects_p (y))
/* UNORDERED means "x unordered with y", i.e. (x != x) || (y != y).  */
3914 if (comparison == UNORDERED)
3916 rtx temp = simplify_gen_relational (NE, word_mode, mode, x, x);
3917 equiv = simplify_gen_relational (NE, word_mode, mode, y, y);
3918 equiv = simplify_gen_ternary (IF_THEN_ELSE, word_mode, word_mode,
3919 temp, const_true_rtx, equiv);
3923 equiv = simplify_gen_relational (comparison, word_mode, mode, x, y);
/* If the library routine returns a three-way result rather than a
   boolean, describe the returned value per condition code (the case
   labels of this switch are not visible here).  */
3924 if (! FLOAT_LIB_COMPARE_RETURNS_BOOL (mode, comparison))
3926 rtx true_rtx, false_rtx;
3931 true_rtx = const0_rtx;
3932 false_rtx = const_true_rtx;
3936 true_rtx = const_true_rtx;
3937 false_rtx = const0_rtx;
3941 true_rtx = const1_rtx;
3942 false_rtx = const0_rtx;
3946 true_rtx = const0_rtx;
3947 false_rtx = constm1_rtx;
3951 true_rtx = constm1_rtx;
3952 false_rtx = const0_rtx;
3956 true_rtx = const0_rtx;
3957 false_rtx = const1_rtx;
3963 equiv = simplify_gen_ternary (IF_THEN_ELSE, word_mode,
3965 true_rtx, false_rtx);
/* Emit the call and wrap it in a libcall block carrying EQUIV.  */
3973 value = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
3974 word_mode, 2, x, mode, y, mode);
3975 insns = get_insns ();
3978 target = gen_reg_rtx (word_mode);
3979 emit_libcall_block (insns, target, value, equiv);
3982 if (comparison == UNORDERED
3983 || FLOAT_LIB_COMPARE_RETURNS_BOOL (mode, comparison))
3989 *pcomparison = comparison;
3993 /* Generate code to indirectly jump to a location given in the rtx LOC. */
3996 emit_indirect_jump (rtx loc)
/* If the indirect_jump pattern's predicate rejects LOC, copy it into
   a Pmode register first.  */
3998 if (! ((*insn_data[(int) CODE_FOR_indirect_jump].operand[0].predicate)
4000 loc = copy_to_mode_reg (Pmode, loc);
4002 emit_jump_insn (gen_indirect_jump (loc));
4006 #ifdef HAVE_conditional_move
4008 /* Emit a conditional move instruction if the machine supports one for that
4009 condition and machine mode.
4011 OP0 and OP1 are the operands that should be compared using CODE. CMODE is
4012 the mode to use should they be constants. If it is VOIDmode, they cannot
4015 OP2 should be stored in TARGET if the comparison is true, otherwise OP3
4016 should be stored there. MODE is the mode to use should they be constants.
4017 If it is VOIDmode, they cannot both be constants.
4019 The result is either TARGET (perhaps modified) or NULL_RTX if the operation
4020 is not supported. */
4023 emit_conditional_move (rtx target, enum rtx_code code, rtx op0, rtx op1,
4024 enum machine_mode cmode, rtx op2, rtx op3,
4025 enum machine_mode mode, int unsignedp)
4027 rtx tem, subtarget, comparison, insn;
4028 enum insn_code icode;
4029 enum rtx_code reversed;
4031 /* If one operand is constant, make it the second one. Only do this
4032 if the other operand is not constant as well. */
4034 if (swap_commutative_operands_p (op0, op1))
4039 code = swap_condition (code);
4042 /* get_condition will prefer to generate LT and GT even if the old
4043 comparison was against zero, so undo that canonicalization here since
4044 comparisons against zero are cheaper. */
4045 if (code == LT && op1 == const1_rtx)
4046 code = LE, op1 = const0_rtx;
4047 else if (code == GT && op1 == constm1_rtx)
4048 code = GE, op1 = const0_rtx;
4050 if (cmode == VOIDmode)
4051 cmode = GET_MODE (op0);
/* If OP2/OP3 should be swapped, try to use the reversed condition
   instead (only when the condition is in fact reversible).  */
4053 if (swap_commutative_operands_p (op2, op3)
4054 && ((reversed = reversed_comparison_code_parts (code, op0, op1, NULL))
4063 if (mode == VOIDmode)
4064 mode = GET_MODE (op2);
/* Give up early if the target has no conditional-move pattern for MODE.  */
4066 icode = movcc_gen_code[mode];
4068 if (icode == CODE_FOR_nothing)
4073 op2 = force_not_mem (op2);
4074 op3 = force_not_mem (op3);
4078 target = protect_from_queue (target, 1);
4080 target = gen_reg_rtx (mode);
4086 op2 = protect_from_queue (op2, 0);
4087 op3 = protect_from_queue (op3, 0);
4089 /* If the insn doesn't accept these operands, put them in pseudos. */
4091 if (! (*insn_data[icode].operand[0].predicate)
4092 (subtarget, insn_data[icode].operand[0].mode))
4093 subtarget = gen_reg_rtx (insn_data[icode].operand[0].mode);
4095 if (! (*insn_data[icode].operand[2].predicate)
4096 (op2, insn_data[icode].operand[2].mode))
4097 op2 = copy_to_mode_reg (insn_data[icode].operand[2].mode, op2);
4099 if (! (*insn_data[icode].operand[3].predicate)
4100 (op3, insn_data[icode].operand[3].mode))
4101 op3 = copy_to_mode_reg (insn_data[icode].operand[3].mode, op3);
4103 /* Everything should now be in the suitable form, so emit the compare insn
4104 and then the conditional move. */
4107 = compare_from_rtx (op0, op1, code, unsignedp, cmode, NULL_RTX);
4109 /* ??? Watch for const0_rtx (nop) and const_true_rtx (unconditional)? */
4110 /* We can get const0_rtx or const_true_rtx in some circumstances. Just
4111 return NULL and let the caller figure out how best to deal with this
4113 if (GET_CODE (comparison) != code)
4116 insn = GEN_FCN (icode) (subtarget, comparison, op2, op3);
4118 /* If that failed, then give up. */
/* If the pattern targeted a different register, copy the result back.  */
4124 if (subtarget != target)
4125 convert_move (target, subtarget, 0);
4130 /* Return nonzero if a conditional move of mode MODE is supported.
4132 This function is for combine so it can tell whether an insn that looks
4133 like a conditional move is actually supported by the hardware. If we
4134 guess wrong we lose a bit on optimization, but that's it. */
4135 /* ??? sparc64 supports conditionally moving integers values based on fp
4136 comparisons, and vice versa. How do we handle them? */
4139 can_conditionally_move_p (enum machine_mode mode)
4141 if (movcc_gen_code[mode] != CODE_FOR_nothing)
4147 #endif /* HAVE_conditional_move */
4149 /* Emit a conditional addition instruction if the machine supports one for that
4150 condition and machine mode.
4152 OP0 and OP1 are the operands that should be compared using CODE. CMODE is
4153 the mode to use should they be constants. If it is VOIDmode, they cannot
4156 OP2 should be stored in TARGET if the comparison is true, otherwise OP2+OP3
4157 should be stored there. MODE is the mode to use should they be constants.
4158 If it is VOIDmode, they cannot both be constants.
4160 The result is either TARGET (perhaps modified) or NULL_RTX if the operation
4161 is not supported. */
4164 emit_conditional_add (rtx target, enum rtx_code code, rtx op0, rtx op1,
4165 enum machine_mode cmode, rtx op2, rtx op3,
4166 enum machine_mode mode, int unsignedp)
4168 rtx tem, subtarget, comparison, insn;
4169 enum insn_code icode;
4170 enum rtx_code reversed;
4172 /* If one operand is constant, make it the second one. Only do this
4173 if the other operand is not constant as well. */
4175 if (swap_commutative_operands_p (op0, op1))
4180 code = swap_condition (code);
4183 /* get_condition will prefer to generate LT and GT even if the old
4184 comparison was against zero, so undo that canonicalization here since
4185 comparisons against zero are cheaper. */
4186 if (code == LT && op1 == const1_rtx)
4187 code = LE, op1 = const0_rtx;
4188 else if (code == GT && op1 == constm1_rtx)
4189 code = GE, op1 = const0_rtx;
4191 if (cmode == VOIDmode)
4192 cmode = GET_MODE (op0);
/* If OP2/OP3 should be swapped, try to use the reversed condition
   instead (only when the condition is in fact reversible).  */
4194 if (swap_commutative_operands_p (op2, op3)
4195 && ((reversed = reversed_comparison_code_parts (code, op0, op1, NULL))
4204 if (mode == VOIDmode)
4205 mode = GET_MODE (op2);
/* Unlike emit_conditional_move, this looks up the addcc optab.  */
4207 icode = addcc_optab->handlers[(int) mode].insn_code;
4209 if (icode == CODE_FOR_nothing)
4214 op2 = force_not_mem (op2);
4215 op3 = force_not_mem (op3);
4219 target = protect_from_queue (target, 1);
4221 target = gen_reg_rtx (mode);
4227 op2 = protect_from_queue (op2, 0);
4228 op3 = protect_from_queue (op3, 0);
4230 /* If the insn doesn't accept these operands, put them in pseudos. */
4232 if (! (*insn_data[icode].operand[0].predicate)
4233 (subtarget, insn_data[icode].operand[0].mode))
4234 subtarget = gen_reg_rtx (insn_data[icode].operand[0].mode);
4236 if (! (*insn_data[icode].operand[2].predicate)
4237 (op2, insn_data[icode].operand[2].mode))
4238 op2 = copy_to_mode_reg (insn_data[icode].operand[2].mode, op2);
4240 if (! (*insn_data[icode].operand[3].predicate)
4241 (op3, insn_data[icode].operand[3].mode))
4242 op3 = copy_to_mode_reg (insn_data[icode].operand[3].mode, op3);
4244 /* Everything should now be in the suitable form, so emit the compare insn
4245 and then the conditional add. */
4248 = compare_from_rtx (op0, op1, code, unsignedp, cmode, NULL_RTX);
4250 /* ??? Watch for const0_rtx (nop) and const_true_rtx (unconditional)? */
4251 /* We can get const0_rtx or const_true_rtx in some circumstances. Just
4252 return NULL and let the caller figure out how best to deal with this
4254 if (GET_CODE (comparison) != code)
4257 insn = GEN_FCN (icode) (subtarget, comparison, op2, op3);
4259 /* If that failed, then give up. */
/* If the pattern targeted a different register, copy the result back.  */
4265 if (subtarget != target)
4266 convert_move (target, subtarget, 0);
4271 /* These functions attempt to generate an insn body, rather than
4272 emitting the insn, but if the gen function already emits them, we
4273 make no attempt to turn them back into naked patterns.
4275 They do not protect from queued increments,
4276 because they may be used 1) in protect_from_queue itself
4277 and 2) in other passes where there is no queue. */
4279 /* Generate and return an insn body to add Y to X. */
4282 gen_add2_insn (rtx x, rtx y)
4284 int icode = (int) add_optab->handlers[(int) GET_MODE (x)].insn_code;
/* NOTE(review): unlike gen_add3_insn, ICODE is not checked against
   CODE_FOR_nothing here; callers presumably guarantee that an add
   pattern exists for X's mode -- confirm.  */
4286 if (! ((*insn_data[icode].operand[0].predicate)
4287 (x, insn_data[icode].operand[0].mode))
4288 || ! ((*insn_data[icode].operand[1].predicate)
4289 (x, insn_data[icode].operand[1].mode))
4290 || ! ((*insn_data[icode].operand[2].predicate)
4291 (y, insn_data[icode].operand[2].mode)))
/* Two-address form: X is both destination and first source.  */
4294 return (GEN_FCN (icode) (x, x, y));
4297 /* Generate and return an insn body to add r1 and c,
4298 storing the result in r0. */
4300 gen_add3_insn (rtx r0, rtx r1, rtx c)
4302 int icode = (int) add_optab->handlers[(int) GET_MODE (r0)].insn_code;
/* Fail if there is no add pattern for R0's mode, or if any operand is
   rejected by the pattern's predicates.  */
4304 if (icode == CODE_FOR_nothing
4305 || ! ((*insn_data[icode].operand[0].predicate)
4306 (r0, insn_data[icode].operand[0].mode))
4307 || ! ((*insn_data[icode].operand[1].predicate)
4308 (r1, insn_data[icode].operand[1].mode))
4309 || ! ((*insn_data[icode].operand[2].predicate)
4310 (c, insn_data[icode].operand[2].mode)))
4313 return (GEN_FCN (icode) (r0, r1, c));
/* Report whether the target has a two-address add pattern that accepts
   X (destination and first source) and Y (second source).  */
4317 have_add2_insn (rtx x, rtx y)
4321 if (GET_MODE (x) == VOIDmode)
4324 icode = (int) add_optab->handlers[(int) GET_MODE (x)].insn_code;
4326 if (icode == CODE_FOR_nothing)
/* The pattern exists; also check that its predicates accept X and Y.  */
4329 if (! ((*insn_data[icode].operand[0].predicate)
4330 (x, insn_data[icode].operand[0].mode))
4331 || ! ((*insn_data[icode].operand[1].predicate)
4332 (x, insn_data[icode].operand[1].mode))
4333 || ! ((*insn_data[icode].operand[2].predicate)
4334 (y, insn_data[icode].operand[2].mode)))
4340 /* Generate and return an insn body to subtract Y from X. */
4343 gen_sub2_insn (rtx x, rtx y)
4345 int icode = (int) sub_optab->handlers[(int) GET_MODE (x)].insn_code;
/* NOTE(review): as in gen_add2_insn, ICODE is not checked against
   CODE_FOR_nothing; callers presumably guarantee a sub pattern exists.  */
4347 if (! ((*insn_data[icode].operand[0].predicate)
4348 (x, insn_data[icode].operand[0].mode))
4349 || ! ((*insn_data[icode].operand[1].predicate)
4350 (x, insn_data[icode].operand[1].mode))
4351 || ! ((*insn_data[icode].operand[2].predicate)
4352 (y, insn_data[icode].operand[2].mode)))
/* Two-address form: X is both destination and first source.  */
4355 return (GEN_FCN (icode) (x, x, y));
4358 /* Generate and return an insn body to subtract r1 and c,
4359 storing the result in r0. */
4361 gen_sub3_insn (rtx r0, rtx r1, rtx c)
4363 int icode = (int) sub_optab->handlers[(int) GET_MODE (r0)].insn_code;
/* Fail if there is no sub pattern for R0's mode, or if any operand is
   rejected by the pattern's predicates.  */
4365 if (icode == CODE_FOR_nothing
4366 || ! ((*insn_data[icode].operand[0].predicate)
4367 (r0, insn_data[icode].operand[0].mode))
4368 || ! ((*insn_data[icode].operand[1].predicate)
4369 (r1, insn_data[icode].operand[1].mode))
4370 || ! ((*insn_data[icode].operand[2].predicate)
4371 (c, insn_data[icode].operand[2].mode)))
4374 return (GEN_FCN (icode) (r0, r1, c));
/* Report whether the target has a two-address subtract pattern that
   accepts X (destination and first source) and Y (second source).  */
4378 have_sub2_insn (rtx x, rtx y)
4382 if (GET_MODE (x) == VOIDmode)
4385 icode = (int) sub_optab->handlers[(int) GET_MODE (x)].insn_code;
4387 if (icode == CODE_FOR_nothing)
/* The pattern exists; also check that its predicates accept X and Y.  */
4390 if (! ((*insn_data[icode].operand[0].predicate)
4391 (x, insn_data[icode].operand[0].mode))
4392 || ! ((*insn_data[icode].operand[1].predicate)
4393 (x, insn_data[icode].operand[1].mode))
4394 || ! ((*insn_data[icode].operand[2].predicate)
4395 (y, insn_data[icode].operand[2].mode)))
4401 /* Generate the body of an instruction to copy Y into X.
4402 It may be a list of insns, if one insn isn't enough. */
4405 gen_move_insn (rtx x, rtx y)
/* Emit the move into a sequence and return it (sequence bookkeeping
   surrounds this call).  */
4410 emit_move_insn_1 (x, y);
4416 /* Return the insn code used to extend FROM_MODE to TO_MODE.
4417 UNSIGNEDP specifies zero-extension instead of sign-extension. If
4418 no such operation exists, CODE_FOR_nothing will be returned. */
4421 can_extend_p (enum machine_mode to_mode, enum machine_mode from_mode,
/* Targets with a ptr_extend pattern can short-circuit here.  */
4424 #ifdef HAVE_ptr_extend
4426 return CODE_FOR_ptr_extend;
4429 return extendtab[(int) to_mode][(int) from_mode][unsignedp != 0];
4432 /* Generate the body of an insn to extend Y (with mode MFROM)
4433 into X (with mode MTO). Do zero-extension if UNSIGNEDP is nonzero. */
4436 gen_extend_insn (rtx x, rtx y, enum machine_mode mto,
4437 enum machine_mode mfrom, int unsignedp)
/* Assumes the extension pattern exists; see can_extend_p.  */
4439 return (GEN_FCN (extendtab[(int) mto][(int) mfrom][unsignedp != 0]) (x, y));
4442 /* can_fix_p and can_float_p say whether the target machine
4443 can directly convert a given fixed point type to
4444 a given floating point type, or vice versa.
4445 The returned value is the CODE_FOR_... value to use,
4446 or CODE_FOR_nothing if these modes cannot be directly converted.
4448 *TRUNCP_PTR is set to 1 if it is necessary to output
4449 an explicit FTRUNC insn before the fix insn; otherwise 0. */
4451 static enum insn_code
4452 can_fix_p (enum machine_mode fixmode, enum machine_mode fltmode,
4453 int unsignedp, int *truncp_ptr)
/* Prefer a truncating fix pattern, which needs no separate FTRUNC.  */
4456 if (fixtrunctab[(int) fltmode][(int) fixmode][unsignedp != 0]
4457 != CODE_FOR_nothing)
4458 return fixtrunctab[(int) fltmode][(int) fixmode][unsignedp != 0];
/* Otherwise a plain fix pattern is usable only if we can first
   truncate in FLTMODE.  */
4460 if (ftrunc_optab->handlers[(int) fltmode].insn_code != CODE_FOR_nothing)
4463 return fixtab[(int) fltmode][(int) fixmode][unsignedp != 0];
4465 return CODE_FOR_nothing;
/* Return the insn code for converting fixed mode FIXMODE to floating
   mode FLTMODE, or CODE_FOR_nothing if no direct conversion exists.  */
4468 static enum insn_code
4469 can_float_p (enum machine_mode fltmode, enum machine_mode fixmode,
4472 return floattab[(int) fltmode][(int) fixmode][unsignedp != 0];
4475 /* Generate code to convert FROM to floating point
4476 and store in TO. FROM must be fixed point and not VOIDmode.
4477 UNSIGNEDP nonzero means regard FROM as unsigned.
4478 Normally this is done by correcting the final value
4479 if it is negative. */
4482 expand_float (rtx to, rtx from, int unsignedp)
4484 enum insn_code icode;
4486 enum machine_mode fmode, imode;
4488 /* Crash now, because we won't be able to decide which mode to use. */
4489 if (GET_MODE (from) == VOIDmode)
4492 /* Look for an insn to do the conversion. Do it in the specified
4493 modes if possible; otherwise convert either input, output or both to
4494 wider mode. If the integer mode is wider than the mode of FROM,
4495 we can do the conversion signed even if the input is unsigned. */
4497 for (fmode = GET_MODE (to); fmode != VOIDmode;
4498 fmode = GET_MODE_WIDER_MODE (fmode))
4499 for (imode = GET_MODE (from); imode != VOIDmode;
4500 imode = GET_MODE_WIDER_MODE (imode))
4502 int doing_unsigned = unsignedp;
/* Skip a wider float mode whose significand cannot represent FROM
   exactly -- converting through it would introduce double rounding.  */
4504 if (fmode != GET_MODE (to)
4505 && significand_size (fmode) < GET_MODE_BITSIZE (GET_MODE (from)))
4508 icode = can_float_p (fmode, imode, unsignedp);
/* A widened input has a spare sign bit, so a signed conversion works
   even for unsigned FROM.  */
4509 if (icode == CODE_FOR_nothing && imode != GET_MODE (from) && unsignedp)
4510 icode = can_float_p (fmode, imode, 0), doing_unsigned = 0;
4512 if (icode != CODE_FOR_nothing)
4514 to = protect_from_queue (to, 1);
4515 from = protect_from_queue (from, 0);
4517 if (imode != GET_MODE (from))
4518 from = convert_to_mode (imode, from, unsignedp);
4520 if (fmode != GET_MODE (to))
4521 target = gen_reg_rtx (fmode);
4523 emit_unop_insn (icode, target, from,
4524 doing_unsigned ? UNSIGNED_FLOAT : FLOAT);
4527 convert_move (to, target, 0);
4532 /* Unsigned integer, and no way to convert directly.
4533 Convert as signed, then conditionally adjust the result. */
4536 rtx label = gen_label_rtx ();
4538 REAL_VALUE_TYPE offset;
4542 to = protect_from_queue (to, 1);
4543 from = protect_from_queue (from, 0);
4546 from = force_not_mem (from);
4548 /* Look for a usable floating mode FMODE wider than the source and at
4549 least as wide as the target. Using FMODE will avoid rounding woes
4550 with unsigned values greater than the signed maximum value. */
4552 for (fmode = GET_MODE (to); fmode != VOIDmode;
4553 fmode = GET_MODE_WIDER_MODE (fmode))
4554 if (GET_MODE_BITSIZE (GET_MODE (from)) < GET_MODE_BITSIZE (fmode)
4555 && can_float_p (fmode, GET_MODE (from), 0) != CODE_FOR_nothing)
4558 if (fmode == VOIDmode)
4560 /* There is no such mode. Pretend the target is wide enough. */
4561 fmode = GET_MODE (to);
4563 /* Avoid double-rounding when TO is narrower than FROM. */
4564 if ((significand_size (fmode) + 1)
4565 < GET_MODE_BITSIZE (GET_MODE (from)))
4568 rtx neglabel = gen_label_rtx ();
4570 /* Don't use TARGET if it isn't a register, is a hard register,
4571 or is the wrong mode. */
4572 if (GET_CODE (target) != REG
4573 || REGNO (target) < FIRST_PSEUDO_REGISTER
4574 || GET_MODE (target) != fmode)
4575 target = gen_reg_rtx (fmode);
4577 imode = GET_MODE (from)
4578 do_pending_stack_adjust ();
4580 /* Test whether the sign bit is set. */
4581 emit_cmp_and_jump_insns (from, const0_rtx, LT, NULL_RTX, imode,
4584 /* The sign bit is not set. Convert as signed. */
4585 expand_float (target, from, 0);
4586 emit_jump_insn (gen_jump (label));
4589 /* The sign bit is set.
4590 Convert to a usable (positive signed) value by shifting right
4591 one bit, while remembering if a nonzero bit was shifted
4592 out; i.e., compute (from & 1) | (from >> 1). */
4594 emit_label (neglabel);
4595 temp = expand_binop (imode, and_optab, from, const1_rtx,
4596 NULL_RTX, 1, OPTAB_LIB_WIDEN);
4597 temp1 = expand_shift (RSHIFT_EXPR, imode, from, integer_one_node,
4599 temp = expand_binop (imode, ior_optab, temp, temp1, temp, 1,
4601 expand_float (target, temp, 0);
4603 /* Multiply by 2 to undo the shift above. */
4604 temp = expand_binop (fmode, add_optab, target, target,
4605 target, 0, OPTAB_LIB_WIDEN);
4607 emit_move_insn (target, temp);
4609 do_pending_stack_adjust ();
4615 /* If we are about to do some arithmetic to correct for an
4616 unsigned operand, do it in a pseudo-register. */
4618 if (GET_MODE (to) != fmode
4619 || GET_CODE (to) != REG || REGNO (to) < FIRST_PSEUDO_REGISTER)
4620 target = gen_reg_rtx (fmode);
4622 /* Convert as signed integer to floating. */
4623 expand_float (target, from, 0);
4625 /* If FROM is negative (and therefore TO is negative),
4626 correct its value by 2**bitwidth. */
4628 do_pending_stack_adjust ();
4629 emit_cmp_and_jump_insns (from, const0_rtx, GE, NULL_RTX, GET_MODE (from),
4633 real_2expN (&offset, GET_MODE_BITSIZE (GET_MODE (from)));
4634 temp = expand_binop (fmode, add_optab, target,
4635 CONST_DOUBLE_FROM_REAL_VALUE (offset, fmode),
4636 target, 0, OPTAB_LIB_WIDEN);
4638 emit_move_insn (target, temp);
4640 do_pending_stack_adjust ();
4645 /* No hardware instruction available; call a library routine to convert from
4646 SImode, DImode, or TImode into SFmode, DFmode, XFmode, or TFmode. */
4652 to = protect_from_queue (to, 1);
4653 from = protect_from_queue (from, 0);
/* The library routines start at SImode; widen narrower sources.  */
4655 if (GET_MODE_SIZE (GET_MODE (from)) < GET_MODE_SIZE (SImode))
4656 from = convert_to_mode (SImode, from, unsignedp);
4659 from = force_not_mem (from);
/* Select the float<from><to> library function by source/destination
   mode pair.  */
4661 if (GET_MODE (to) == SFmode)
4663 if (GET_MODE (from) == SImode)
4664 libfcn = floatsisf_libfunc;
4665 else if (GET_MODE (from) == DImode)
4666 libfcn = floatdisf_libfunc;
4667 else if (GET_MODE (from) == TImode)
4668 libfcn = floattisf_libfunc;
4672 else if (GET_MODE (to) == DFmode)
4674 if (GET_MODE (from) == SImode)
4675 libfcn = floatsidf_libfunc;
4676 else if (GET_MODE (from) == DImode)
4677 libfcn = floatdidf_libfunc;
4678 else if (GET_MODE (from) == TImode)
4679 libfcn = floattidf_libfunc;
4683 else if (GET_MODE (to) == XFmode)
4685 if (GET_MODE (from) == SImode)
4686 libfcn = floatsixf_libfunc;
4687 else if (GET_MODE (from) == DImode)
4688 libfcn = floatdixf_libfunc;
4689 else if (GET_MODE (from) == TImode)
4690 libfcn = floattixf_libfunc;
4694 else if (GET_MODE (to) == TFmode)
4696 if (GET_MODE (from) == SImode)
4697 libfcn = floatsitf_libfunc;
4698 else if (GET_MODE (from) == DImode)
4699 libfcn = floatditf_libfunc;
4700 else if (GET_MODE (from) == TImode)
4701 libfcn = floattitf_libfunc;
/* Emit the call wrapped in a libcall block carrying a FLOAT rtx as the
   REG_EQUAL equivalent.  */
4710 value = emit_library_call_value (libfcn, NULL_RTX, LCT_CONST,
4711 GET_MODE (to), 1, from,
4713 insns = get_insns ();
4716 emit_libcall_block (insns, target, value,
4717 gen_rtx_FLOAT (GET_MODE (to), from));
4722 /* Copy result to requested destination
4723 if we have been computing in a temp location. */
4727 if (GET_MODE (target) == GET_MODE (to))
4728 emit_move_insn (to, target);
4730 convert_move (to, target, 0);
4734 /* expand_fix: generate code to convert FROM to fixed point
4735 and store in TO. FROM must be floating point. */
/* Helper for expand_fix: truncate X toward zero within its own
   floating mode, via the ftrunc optab, into a fresh pseudo.  */
4740 rtx temp = gen_reg_rtx (GET_MODE (x));
4741 return expand_unop (GET_MODE (x), ftrunc_optab, x, temp, 0);
4745 expand_fix (rtx to, rtx from, int unsignedp)
4747 enum insn_code icode;
4749 enum machine_mode fmode, imode;
4753 /* We first try to find a pair of modes, one real and one integer, at
4754 least as wide as FROM and TO, respectively, in which we can open-code
4755 this conversion. If the integer mode is wider than the mode of TO,
4756 we can do the conversion either signed or unsigned. */
4758 for (fmode = GET_MODE (from); fmode != VOIDmode;
4759 fmode = GET_MODE_WIDER_MODE (fmode))
4760 for (imode = GET_MODE (to); imode != VOIDmode;
4761 imode = GET_MODE_WIDER_MODE (imode))
4763 int doing_unsigned = unsignedp;
4765 icode = can_fix_p (imode, fmode, unsignedp, &must_trunc);
/* A widened destination has room for the full unsigned range, so a
   signed conversion also works for unsigned TO.  */
4766 if (icode == CODE_FOR_nothing && imode != GET_MODE (to) && unsignedp)
4767 icode = can_fix_p (imode, fmode, 0, &must_trunc), doing_unsigned = 0;
4769 if (icode != CODE_FOR_nothing)
4771 to = protect_from_queue (to, 1);
4772 from = protect_from_queue (from, 0);
4774 if (fmode != GET_MODE (from))
4775 from = convert_to_mode (fmode, from, 0);
/* Emit an explicit FTRUNC first when can_fix_p said it is needed.  */
4778 from = ftruncify (from);
4780 if (imode != GET_MODE (to))
4781 target = gen_reg_rtx (imode);
4783 emit_unop_insn (icode, target, from,
4784 doing_unsigned ? UNSIGNED_FIX : FIX);
4786 convert_move (to, target, unsignedp);
4791 /* For an unsigned conversion, there is one more way to do it.
4792 If we have a signed conversion, we generate code that compares
4793 the real value to the largest representable positive number. If it
4794 is smaller, the conversion is done normally. Otherwise, subtract
4795 one plus the highest signed number, convert, and add it back.
4797 We only need to check all real modes, since we know we didn't find
4798 anything with a wider integer mode.
4800 This code used to extend FP value into mode wider than the destination.
4801 This is not needed. Consider, for instance conversion from SFmode
4804 The hot path through the code is dealing with inputs smaller than 2^63
4805 and doing just the conversion, so there is no bits to lose.
4807 In the other path we know the value is positive in the range 2^63..2^64-1
4808 inclusive. (as for other input overflow happens and result is undefined)
4809 So we know that the most important bit set in mantissa corresponds to
4810 2^63. The subtraction of 2^63 should not generate any rounding as it
4811 simply clears out that bit. The rest is trivial. */
4813 if (unsignedp && GET_MODE_BITSIZE (GET_MODE (to)) <= HOST_BITS_PER_WIDE_INT)
4814 for (fmode = GET_MODE (from); fmode != VOIDmode;
4815 fmode = GET_MODE_WIDER_MODE (fmode))
4816 if (CODE_FOR_nothing != can_fix_p (GET_MODE (to), fmode, 0,
4820 REAL_VALUE_TYPE offset;
4821 rtx limit, lab1, lab2, insn;
/* LIMIT is 2**(N-1), the smallest value that does not fit in the
   signed conversion.  */
4823 bitsize = GET_MODE_BITSIZE (GET_MODE (to));
4824 real_2expN (&offset, bitsize - 1);
4825 limit = CONST_DOUBLE_FROM_REAL_VALUE (offset, fmode);
4826 lab1 = gen_label_rtx ();
4827 lab2 = gen_label_rtx ();
4830 to = protect_from_queue (to, 1);
4831 from = protect_from_queue (from, 0);
4834 from = force_not_mem (from);
4836 if (fmode != GET_MODE (from))
4837 from = convert_to_mode (fmode, from, 0);
4839 /* See if we need to do the subtraction. */
4840 do_pending_stack_adjust ();
4841 emit_cmp_and_jump_insns (from, limit, GE, NULL_RTX, GET_MODE (from),
4844 /* If not, do the signed "fix" and branch around fixup code. */
4845 expand_fix (to, from, 0);
4846 emit_jump_insn (gen_jump (lab2));
4849 /* Otherwise, subtract 2**(N-1), convert to signed number,
4850 then add 2**(N-1). Do the addition using XOR since this
4851 will often generate better code. */
4853 target = expand_binop (GET_MODE (from), sub_optab, from, limit,
4854 NULL_RTX, 0, OPTAB_LIB_WIDEN);
4855 expand_fix (to, target, 0);
4856 target = expand_binop (GET_MODE (to), xor_optab, to,
4858 ((HOST_WIDE_INT) 1 << (bitsize - 1),
4860 to, 1, OPTAB_LIB_WIDEN);
4863 emit_move_insn (to, target);
/* Attach a REG_EQUAL-style note describing the whole sequence as an
   UNSIGNED_FIX, so later passes can understand it.  */
4867 if (mov_optab->handlers[(int) GET_MODE (to)].insn_code
4868 != CODE_FOR_nothing)
4870 /* Make a place for a REG_NOTE and add it. */
4871 insn = emit_move_insn (to, to);
4872 set_unique_reg_note (insn,
4874 gen_rtx_fmt_e (UNSIGNED_FIX,
4882 /* We can't do it with an insn, so use a library call. But first ensure
4883 that the mode of TO is at least as wide as SImode, since those are the
4884 only library calls we know about. */
4886 if (GET_MODE_SIZE (GET_MODE (to)) < GET_MODE_SIZE (SImode))
4888 target = gen_reg_rtx (SImode);
4890 expand_fix (target, from, unsignedp);
/* Select the fix<from><to> (or fixuns...) library function by
   source/destination mode pair.  */
4892 else if (GET_MODE (from) == SFmode)
4894 if (GET_MODE (to) == SImode)
4895 libfcn = unsignedp ? fixunssfsi_libfunc : fixsfsi_libfunc;
4896 else if (GET_MODE (to) == DImode)
4897 libfcn = unsignedp ? fixunssfdi_libfunc : fixsfdi_libfunc;
4898 else if (GET_MODE (to) == TImode)
4899 libfcn = unsignedp ? fixunssfti_libfunc : fixsfti_libfunc;
4903 else if (GET_MODE (from) == DFmode)
4905 if (GET_MODE (to) == SImode)
4906 libfcn = unsignedp ? fixunsdfsi_libfunc : fixdfsi_libfunc;
4907 else if (GET_MODE (to) == DImode)
4908 libfcn = unsignedp ? fixunsdfdi_libfunc : fixdfdi_libfunc;
4909 else if (GET_MODE (to) == TImode)
4910 libfcn = unsignedp ? fixunsdfti_libfunc : fixdfti_libfunc;
4914 else if (GET_MODE (from) == XFmode)
4916 if (GET_MODE (to) == SImode)
4917 libfcn = unsignedp ? fixunsxfsi_libfunc : fixxfsi_libfunc;
4918 else if (GET_MODE (to) == DImode)
4919 libfcn = unsignedp ? fixunsxfdi_libfunc : fixxfdi_libfunc;
4920 else if (GET_MODE (to) == TImode)
4921 libfcn = unsignedp ? fixunsxfti_libfunc : fixxfti_libfunc;
4925 else if (GET_MODE (from) == TFmode)
4927 if (GET_MODE (to) == SImode)
4928 libfcn = unsignedp ? fixunstfsi_libfunc : fixtfsi_libfunc;
4929 else if (GET_MODE (to) == DImode)
4930 libfcn = unsignedp ? fixunstfdi_libfunc : fixtfdi_libfunc;
4931 else if (GET_MODE (to) == TImode)
4932 libfcn = unsignedp ? fixunstfti_libfunc : fixtfti_libfunc;
4944 to = protect_from_queue (to, 1);
4945 from = protect_from_queue (from, 0);
4948 from = force_not_mem (from);
/* Emit the call wrapped in a libcall block carrying a (UNSIGNED_)FIX
   rtx as the REG_EQUAL equivalent.  */
4952 value = emit_library_call_value (libfcn, NULL_RTX, LCT_CONST,
4953 GET_MODE (to), 1, from,
4955 insns = get_insns ();
4958 emit_libcall_block (insns, target, value,
4959 gen_rtx_fmt_e (unsignedp ? UNSIGNED_FIX : FIX,
4960 GET_MODE (to), from));
4965 if (GET_MODE (to) == GET_MODE (target))
4966 emit_move_insn (to, target);
4968 convert_move (to, target, 0);
4972 /* Report whether we have an instruction to perform the operation
4973 specified by CODE on operands of mode MODE. */
/* Pure query: nonzero iff some optab claims CODE and that optab has a
   real insn pattern (not CODE_FOR_nothing) registered for MODE.  No
   state is modified.  */
4975 have_insn_for (enum rtx_code code, enum machine_mode mode)
/* code_to_optab[] is filled in by init_optab; a null entry means no
   optab implements this rtx code at all.  */
4977 return (code_to_optab[(int) code] != 0
4978 && (code_to_optab[(int) code]->handlers[(int) mode].insn_code
4979 != CODE_FOR_nothing));
4982 /* Create a blank optab. */
/* Allocate one optab object in GC-managed storage and mark every
   machine mode as unsupported: no insn pattern and no libcall
   fallback.  Callers (init_optab / init_optabv) install real handlers
   afterwards.  */
4987 optab op = ggc_alloc (sizeof (struct optab));
4988 for (i = 0; i < NUM_MACHINE_MODES; i++)
4990 op->handlers[i].insn_code = CODE_FOR_nothing;
4991 op->handlers[i].libfunc = 0;
4997 /* Same, but fill in its code as CODE, and write it into the
4998 code_to_optab table. */
5000 init_optab (enum rtx_code code)
5002 optab op = new_optab ();
/* Record the rtx-code -> optab mapping that have_insn_for and the
   expanders rely on.  */
5004 code_to_optab[(int) code] = op;
5008 /* Same, but fill in its code as CODE, and do _not_ write it into
5009 the code_to_optab table. */
/* Used for the overflow-trapping variants (addv, subv, negv, ...):
   they share an rtx code with the ordinary optab, which already owns
   the code_to_optab slot.  */
5011 init_optabv (enum rtx_code code)
5013 optab op = new_optab ();
5018 /* Initialize the libfunc fields of an entire group of entries in some
5019 optab. Each entry is set equal to a string consisting of a leading
5020 pair of underscores followed by a generic operation name followed by
5021 a mode name (downshifted to lower case) followed by a single character
5022 representing the number of operands for the given operation (which is
5023 usually one of the characters '2', '3', or '4').
5025 OPTABLE is the table in which libfunc fields are to be initialized.
5026 FIRST_MODE is the first machine mode index in the given optab to
5028 LAST_MODE is the last machine mode index in the given optab to
5030 OPNAME is the generic (string) name of the operation.
5031 SUFFIX is the character which specifies the number of operands for
5032 the given generic operation.
5036 init_libfuncs (optab optable, int first_mode, int last_mode,
5037 const char *opname, int suffix)
5040 unsigned opname_len = strlen (opname);
/* Walk the mode range inclusively; machine modes form a dense enum, so
   simple integer increment visits each one.  */
5042 for (mode = first_mode; (int) mode <= (int) last_mode;
5043 mode = (enum machine_mode) ((int) mode + 1))
5045 const char *mname = GET_MODE_NAME (mode);
5046 unsigned mname_len = strlen (mname);
/* Buffer sized for "__" + opname + mode name + suffix char + NUL.  */
5047 char *libfunc_name = alloca (2 + opname_len + mname_len + 1 + 1);
/* Append the operation name, then the mode name downcased, building
   e.g. "__adddf3" from "add", DFmode and '3'.  NOTE(review): the lines
   that write the leading underscores and the suffix are not visible in
   this excerpt.  */
5054 for (q = opname; *q; )
5056 for (q = mname; *q; q++)
5057 *p++ = TOLOWER (*q);
/* Intern the assembled name as this mode's libcall symbol.  */
5061 optable->handlers[(int) mode].libfunc
5062 = init_one_libfunc (ggc_alloc_string (libfunc_name, p - libfunc_name));
5066 /* Initialize the libfunc fields of an entire group of entries in some
5067 optab which correspond to all integer mode operations. The parameters
5068 have the same meaning as similarly named ones for the `init_libfuncs'
5069 routine. (See above). */
5072 init_integral_libfuncs (optab optable, const char *opname, int suffix)
/* Register libcalls for word_mode up through the widest integer mode
   the runtime provides routines for: double-word, or "long long" if
   that is wider on this target.  */
5074 int maxsize = 2*BITS_PER_WORD;
5075 if (maxsize < LONG_LONG_TYPE_SIZE)
5076 maxsize = LONG_LONG_TYPE_SIZE;
5077 init_libfuncs (optable, word_mode,
5078 mode_for_size (maxsize, MODE_INT, 0),
5082 /* Initialize the libfunc fields of an entire group of entries in some
5083 optab which correspond to all real mode operations. The parameters
5084 have the same meaning as similarly named ones for the `init_libfuncs'
5085 routine. (See above). */
5088 init_floating_libfuncs (optab optable, const char *opname, int suffix)
5090 enum machine_mode fmode, dmode, lmode;
/* Take the modes from the front end's float/double/long double type
   nodes so only the modes this target actually uses get libcalls.  A
   missing type node maps to VOIDmode and is skipped below.  */
5092 fmode = float_type_node ? TYPE_MODE (float_type_node) : VOIDmode;
5093 dmode = double_type_node ? TYPE_MODE (double_type_node) : VOIDmode;
5094 lmode = long_double_type_node ? TYPE_MODE (long_double_type_node) : VOIDmode;
/* The inequality tests avoid registering the same mode twice when two
   C types share one machine mode (e.g. double == long double).  */
5096 if (fmode != VOIDmode)
5097 init_libfuncs (optable, fmode, fmode, opname, suffix);
5098 if (dmode != fmode && dmode != VOIDmode)
5099 init_libfuncs (optable, dmode, dmode, opname, suffix);
5100 if (lmode != dmode && lmode != VOIDmode)
5101 init_libfuncs (optable, lmode, lmode, opname, suffix);
5105 init_one_libfunc (const char *name)
/* Return a SYMBOL_REF rtx for the library routine NAME, carrying
   whatever flags the target's encode_section_info hook assigns.  */
5109 /* Create a FUNCTION_DECL that can be passed to
5110 targetm.encode_section_info. */
5111 /* ??? We don't have any type information except for this is
5112 a function. Pretend this is "int foo()". */
5113 tree decl = build_decl (FUNCTION_DECL, get_identifier (name),
5114 build_function_type (integer_type_node, NULL_TREE));
/* Mark the decl as a compiler-generated external so it is handled like
   any other extern function reference.  */
5115 DECL_ARTIFICIAL (decl) = 1;
5116 DECL_EXTERNAL (decl) = 1;
5117 TREE_PUBLIC (decl) = 1;
/* DECL_RTL is (MEM (SYMBOL_REF ...)); extract just the SYMBOL_REF.  */
5119 symbol = XEXP (DECL_RTL (decl), 0);
5121 /* Zap the nonsensical SYMBOL_REF_DECL for this. What we're left with
5122 are the flags assigned by targetm.encode_section_info. */
5123 SYMBOL_REF_DECL (symbol) = 0;
5128 /* Call this to reset the function entry for one optab (OPTABLE) in mode
5129 MODE to NAME, which should be either 0 or a string constant. */
5131 set_optab_libfunc (optab optable, enum machine_mode mode, const char *name)
/* A null NAME removes the libcall entirely; otherwise install the new
   symbol built by init_one_libfunc.  */
5134 optable->handlers[mode].libfunc = init_one_libfunc (name);
5136 optable->handlers[mode].libfunc = 0;
5139 /* Call this once to initialize the contents of the optabs
5140 appropriately for the current target machine. */
/* Ordering: the conversion/pattern tables are cleared first, then each
   optab object is created, then default libcall names are installed,
   and finally the target hook may override any of them.  */
5145 unsigned int i, j, k;
5147 /* Start by initializing all tables to contain CODE_FOR_nothing. */
5149 for (i = 0; i < ARRAY_SIZE (fixtab); i++)
5150 for (j = 0; j < ARRAY_SIZE (fixtab[0]); j++)
5151 for (k = 0; k < ARRAY_SIZE (fixtab[0][0]); k++)
5152 fixtab[i][j][k] = CODE_FOR_nothing;
5154 for (i = 0; i < ARRAY_SIZE (fixtrunctab); i++)
5155 for (j = 0; j < ARRAY_SIZE (fixtrunctab[0]); j++)
5156 for (k = 0; k < ARRAY_SIZE (fixtrunctab[0][0]); k++)
5157 fixtrunctab[i][j][k] = CODE_FOR_nothing;
5159 for (i = 0; i < ARRAY_SIZE (floattab); i++)
5160 for (j = 0; j < ARRAY_SIZE (floattab[0]); j++)
5161 for (k = 0; k < ARRAY_SIZE (floattab[0][0]); k++)
5162 floattab[i][j][k] = CODE_FOR_nothing;
5164 for (i = 0; i < ARRAY_SIZE (extendtab); i++)
5165 for (j = 0; j < ARRAY_SIZE (extendtab[0]); j++)
5166 for (k = 0; k < ARRAY_SIZE (extendtab[0][0]); k++)
5167 extendtab[i][j][k] = CODE_FOR_nothing;
5169 for (i = 0; i < NUM_RTX_CODE; i++)
5170 setcc_gen_code[i] = CODE_FOR_nothing;
5172 #ifdef HAVE_conditional_move
5173 for (i = 0; i < NUM_MACHINE_MODES; i++)
5174 movcc_gen_code[i] = CODE_FOR_nothing;
/* Create an optab object for every operation the expanders know
   about.  An argument of UNKNOWN means the operation has no dedicated
   rtx code, so it is not entered into code_to_optab.  */
5177 add_optab = init_optab (PLUS);
5178 addv_optab = init_optabv (PLUS);
5179 sub_optab = init_optab (MINUS);
5180 subv_optab = init_optabv (MINUS);
5181 smul_optab = init_optab (MULT);
5182 smulv_optab = init_optabv (MULT);
5183 smul_highpart_optab = init_optab (UNKNOWN);
5184 umul_highpart_optab = init_optab (UNKNOWN);
5185 smul_widen_optab = init_optab (UNKNOWN);
5186 umul_widen_optab = init_optab (UNKNOWN);
5187 sdiv_optab = init_optab (DIV);
5188 sdivv_optab = init_optabv (DIV);
5189 sdivmod_optab = init_optab (UNKNOWN);
5190 udiv_optab = init_optab (UDIV);
5191 udivmod_optab = init_optab (UNKNOWN);
5192 smod_optab = init_optab (MOD);
5193 umod_optab = init_optab (UMOD);
5194 ftrunc_optab = init_optab (UNKNOWN);
5195 and_optab = init_optab (AND);
5196 ior_optab = init_optab (IOR);
5197 xor_optab = init_optab (XOR);
5198 ashl_optab = init_optab (ASHIFT);
5199 ashr_optab = init_optab (ASHIFTRT);
5200 lshr_optab = init_optab (LSHIFTRT);
5201 rotl_optab = init_optab (ROTATE);
5202 rotr_optab = init_optab (ROTATERT);
5203 smin_optab = init_optab (SMIN);
5204 smax_optab = init_optab (SMAX);
5205 umin_optab = init_optab (UMIN);
5206 umax_optab = init_optab (UMAX);
5207 pow_optab = init_optab (UNKNOWN);
5208 atan2_optab = init_optab (UNKNOWN);
5210 /* These three have codes assigned exclusively for the sake of
5212 mov_optab = init_optab (SET);
5213 movstrict_optab = init_optab (STRICT_LOW_PART);
5214 cmp_optab = init_optab (COMPARE);
5216 ucmp_optab = init_optab (UNKNOWN);
5217 tst_optab = init_optab (UNKNOWN);
5219 eq_optab = init_optab (EQ);
5220 ne_optab = init_optab (NE);
5221 gt_optab = init_optab (GT);
5222 ge_optab = init_optab (GE);
5223 lt_optab = init_optab (LT);
5224 le_optab = init_optab (LE);
5225 unord_optab = init_optab (UNORDERED);
5227 neg_optab = init_optab (NEG);
5228 negv_optab = init_optabv (NEG);
5229 abs_optab = init_optab (ABS);
5230 absv_optab = init_optabv (ABS);
5231 addcc_optab = init_optab (UNKNOWN);
5232 one_cmpl_optab = init_optab (NOT);
5233 ffs_optab = init_optab (FFS);
5234 clz_optab = init_optab (CLZ);
5235 ctz_optab = init_optab (CTZ);
5236 popcount_optab = init_optab (POPCOUNT);
5237 parity_optab = init_optab (PARITY);
5238 sqrt_optab = init_optab (SQRT);
5239 floor_optab = init_optab (UNKNOWN);
5240 ceil_optab = init_optab (UNKNOWN);
5241 round_optab = init_optab (UNKNOWN);
5242 trunc_optab = init_optab (UNKNOWN);
5243 nearbyint_optab = init_optab (UNKNOWN);
5244 sin_optab = init_optab (UNKNOWN);
5245 cos_optab = init_optab (UNKNOWN);
5246 exp_optab = init_optab (UNKNOWN);
5247 log_optab = init_optab (UNKNOWN);
5248 tan_optab = init_optab (UNKNOWN);
5249 atan_optab = init_optab (UNKNOWN);
5250 strlen_optab = init_optab (UNKNOWN);
5251 cbranch_optab = init_optab (UNKNOWN);
5252 cmov_optab = init_optab (UNKNOWN);
5253 cstore_optab = init_optab (UNKNOWN);
5254 push_optab = init_optab (UNKNOWN);
/* Per-mode pattern tables that are not optabs: block move/clear and
   (when the target defines them) secondary-reload patterns.  */
5256 for (i = 0; i < NUM_MACHINE_MODES; i++)
5258 movstr_optab[i] = CODE_FOR_nothing;
5259 clrstr_optab[i] = CODE_FOR_nothing;
5261 #ifdef HAVE_SECONDARY_RELOADS
5262 reload_in_optab[i] = reload_out_optab[i] = CODE_FOR_nothing;
5266 /* Fill in the optabs with the insns we support. */
5269 #ifdef FIXUNS_TRUNC_LIKE_FIX_TRUNC
5270 /* This flag says the same insns that convert to a signed fixnum
5271 also convert validly to an unsigned one. */
5272 for (i = 0; i < NUM_MACHINE_MODES; i++)
5273 for (j = 0; j < NUM_MACHINE_MODES; j++)
5274 fixtrunctab[i][j][1] = fixtrunctab[i][j][0];
5277 /* Initialize the optabs with the names of the library functions. */
5278 init_integral_libfuncs (add_optab, "add", '3');
5279 init_floating_libfuncs (add_optab, "add", '3');
5280 init_integral_libfuncs (addv_optab, "addv", '3');
5281 init_floating_libfuncs (addv_optab, "add", '3');
5282 init_integral_libfuncs (sub_optab, "sub", '3');
5283 init_floating_libfuncs (sub_optab, "sub", '3');
5284 init_integral_libfuncs (subv_optab, "subv", '3');
5285 init_floating_libfuncs (subv_optab, "sub", '3');
5286 init_integral_libfuncs (smul_optab, "mul", '3');
5287 init_floating_libfuncs (smul_optab, "mul", '3');
5288 init_integral_libfuncs (smulv_optab, "mulv", '3');
5289 init_floating_libfuncs (smulv_optab, "mul", '3');
5290 init_integral_libfuncs (sdiv_optab, "div", '3');
5291 init_floating_libfuncs (sdiv_optab, "div", '3');
5292 init_integral_libfuncs (sdivv_optab, "divv", '3');
5293 init_integral_libfuncs (udiv_optab, "udiv", '3');
5294 init_integral_libfuncs (sdivmod_optab, "divmod", '4');
5295 init_integral_libfuncs (udivmod_optab, "udivmod", '4');
5296 init_integral_libfuncs (smod_optab, "mod", '3');
5297 init_integral_libfuncs (umod_optab, "umod", '3');
5298 init_floating_libfuncs (ftrunc_optab, "ftrunc", '2');
5299 init_integral_libfuncs (and_optab, "and", '3');
5300 init_integral_libfuncs (ior_optab, "ior", '3');
5301 init_integral_libfuncs (xor_optab, "xor", '3');
5302 init_integral_libfuncs (ashl_optab, "ashl", '3');
5303 init_integral_libfuncs (ashr_optab, "ashr", '3');
5304 init_integral_libfuncs (lshr_optab, "lshr", '3');
5305 init_integral_libfuncs (smin_optab, "min", '3');
5306 init_floating_libfuncs (smin_optab, "min", '3');
5307 init_integral_libfuncs (smax_optab, "max", '3');
5308 init_floating_libfuncs (smax_optab, "max", '3');
5309 init_integral_libfuncs (umin_optab, "umin", '3');
5310 init_integral_libfuncs (umax_optab, "umax", '3');
5311 init_integral_libfuncs (neg_optab, "neg", '2');
5312 init_floating_libfuncs (neg_optab, "neg", '2');
5313 init_integral_libfuncs (negv_optab, "negv", '2');
5314 init_floating_libfuncs (negv_optab, "neg", '2');
5315 init_integral_libfuncs (one_cmpl_optab, "one_cmpl", '2');
5316 init_integral_libfuncs (ffs_optab, "ffs", '2');
5317 init_integral_libfuncs (clz_optab, "clz", '2');
5318 init_integral_libfuncs (ctz_optab, "ctz", '2');
5319 init_integral_libfuncs (popcount_optab, "popcount", '2');
5320 init_integral_libfuncs (parity_optab, "parity", '2');
5322 /* Comparison libcalls for integers MUST come in pairs, signed/unsigned. */
5323 init_integral_libfuncs (cmp_optab, "cmp", '2');
5324 init_integral_libfuncs (ucmp_optab, "ucmp", '2');
5325 init_floating_libfuncs (cmp_optab, "cmp", '2');
5327 /* EQ etc are floating point only. */
5328 init_floating_libfuncs (eq_optab, "eq", '2');
5329 init_floating_libfuncs (ne_optab, "ne", '2');
5330 init_floating_libfuncs (gt_optab, "gt", '2');
5331 init_floating_libfuncs (ge_optab, "ge", '2');
5332 init_floating_libfuncs (lt_optab, "lt", '2');
5333 init_floating_libfuncs (le_optab, "le", '2');
5334 init_floating_libfuncs (unord_optab, "unord", '2');
5336 /* Use cabs for DC complex abs, since systems generally have cabs.
5337 Don't define any libcall for SCmode, so that cabs will be used. */
5338 abs_optab->handlers[(int) DCmode].libfunc
5339 = init_one_libfunc ("cabs");
5341 /* The ffs function operates on `int'. */
5342 ffs_optab->handlers[(int) mode_for_size (INT_TYPE_SIZE, MODE_INT, 0)].libfunc
5343 = init_one_libfunc ("ffs");
/* Libcalls for widening and narrowing between float modes.  */
5345 extendsfdf2_libfunc = init_one_libfunc ("__extendsfdf2");
5346 extendsfxf2_libfunc = init_one_libfunc ("__extendsfxf2");
5347 extendsftf2_libfunc = init_one_libfunc ("__extendsftf2");
5348 extenddfxf2_libfunc = init_one_libfunc ("__extenddfxf2");
5349 extenddftf2_libfunc = init_one_libfunc ("__extenddftf2");
5351 truncdfsf2_libfunc = init_one_libfunc ("__truncdfsf2");
5352 truncxfsf2_libfunc = init_one_libfunc ("__truncxfsf2");
5353 trunctfsf2_libfunc = init_one_libfunc ("__trunctfsf2");
5354 truncxfdf2_libfunc = init_one_libfunc ("__truncxfdf2");
5355 trunctfdf2_libfunc = init_one_libfunc ("__trunctfdf2");
5357 abort_libfunc = init_one_libfunc ("abort");
5358 memcpy_libfunc = init_one_libfunc ("memcpy");
5359 memmove_libfunc = init_one_libfunc ("memmove");
5360 bcopy_libfunc = init_one_libfunc ("bcopy");
5361 memcmp_libfunc = init_one_libfunc ("memcmp");
5362 bcmp_libfunc = init_one_libfunc ("__gcc_bcmp");
5363 memset_libfunc = init_one_libfunc ("memset");
5364 bzero_libfunc = init_one_libfunc ("bzero");
5365 setbits_libfunc = init_one_libfunc ("__setbits");
5367 unwind_resume_libfunc = init_one_libfunc (USING_SJLJ_EXCEPTIONS
5368 ? "_Unwind_SjLj_Resume"
5369 : "_Unwind_Resume");
5370 #ifndef DONT_USE_BUILTIN_SETJMP
5371 setjmp_libfunc = init_one_libfunc ("__builtin_setjmp");
5372 longjmp_libfunc = init_one_libfunc ("__builtin_longjmp");
5374 setjmp_libfunc = init_one_libfunc ("setjmp");
5375 longjmp_libfunc = init_one_libfunc ("longjmp");
5377 unwind_sjlj_register_libfunc = init_one_libfunc ("_Unwind_SjLj_Register");
5378 unwind_sjlj_unregister_libfunc
5379 = init_one_libfunc ("_Unwind_SjLj_Unregister");
/* Integer <-> float conversion libcalls, signed and unsigned, for each
   float mode (SF/DF/XF/TF) and integer width (SI/DI/TI).  */
5381 floatsisf_libfunc = init_one_libfunc ("__floatsisf");
5382 floatdisf_libfunc = init_one_libfunc ("__floatdisf");
5383 floattisf_libfunc = init_one_libfunc ("__floattisf");
5385 floatsidf_libfunc = init_one_libfunc ("__floatsidf");
5386 floatdidf_libfunc = init_one_libfunc ("__floatdidf");
5387 floattidf_libfunc = init_one_libfunc ("__floattidf");
5389 floatsixf_libfunc = init_one_libfunc ("__floatsixf");
5390 floatdixf_libfunc = init_one_libfunc ("__floatdixf");
5391 floattixf_libfunc = init_one_libfunc ("__floattixf");
5393 floatsitf_libfunc = init_one_libfunc ("__floatsitf");
5394 floatditf_libfunc = init_one_libfunc ("__floatditf");
5395 floattitf_libfunc = init_one_libfunc ("__floattitf");
5397 fixsfsi_libfunc = init_one_libfunc ("__fixsfsi");
5398 fixsfdi_libfunc = init_one_libfunc ("__fixsfdi");
5399 fixsfti_libfunc = init_one_libfunc ("__fixsfti");
5401 fixdfsi_libfunc = init_one_libfunc ("__fixdfsi");
5402 fixdfdi_libfunc = init_one_libfunc ("__fixdfdi");
5403 fixdfti_libfunc = init_one_libfunc ("__fixdfti");
5405 fixxfsi_libfunc = init_one_libfunc ("__fixxfsi");
5406 fixxfdi_libfunc = init_one_libfunc ("__fixxfdi");
5407 fixxfti_libfunc = init_one_libfunc ("__fixxfti");
5409 fixtfsi_libfunc = init_one_libfunc ("__fixtfsi");
5410 fixtfdi_libfunc = init_one_libfunc ("__fixtfdi");
5411 fixtfti_libfunc = init_one_libfunc ("__fixtfti");
5413 fixunssfsi_libfunc = init_one_libfunc ("__fixunssfsi");
5414 fixunssfdi_libfunc = init_one_libfunc ("__fixunssfdi");
5415 fixunssfti_libfunc = init_one_libfunc ("__fixunssfti");
5417 fixunsdfsi_libfunc = init_one_libfunc ("__fixunsdfsi");
5418 fixunsdfdi_libfunc = init_one_libfunc ("__fixunsdfdi");
5419 fixunsdfti_libfunc = init_one_libfunc ("__fixunsdfti");
5421 fixunsxfsi_libfunc = init_one_libfunc ("__fixunsxfsi");
5422 fixunsxfdi_libfunc = init_one_libfunc ("__fixunsxfdi");
5423 fixunsxfti_libfunc = init_one_libfunc ("__fixunsxfti");
5425 fixunstfsi_libfunc = init_one_libfunc ("__fixunstfsi");
5426 fixunstfdi_libfunc = init_one_libfunc ("__fixunstfdi");
5427 fixunstfti_libfunc = init_one_libfunc ("__fixunstfti");
5429 /* For function entry/exit instrumentation. */
5430 profile_function_entry_libfunc
5431 = init_one_libfunc ("__cyg_profile_func_enter");
5432 profile_function_exit_libfunc
5433 = init_one_libfunc ("__cyg_profile_func_exit");
5435 gcov_flush_libfunc = init_one_libfunc ("__gcov_flush");
5436 gcov_init_libfunc = init_one_libfunc ("__gcov_init");
/* Shared scratch rtx used by gen_cond_trap; its code is rewritten in
   place for each trap condition.  */
5438 if (HAVE_conditional_trap)
5439 trap_rtx = gen_rtx_fmt_ee (EQ, VOIDmode, NULL_RTX, NULL_RTX);
5441 /* Allow the target to add more libcalls or rename some, etc. */
5442 targetm.init_libfuncs ();
5445 /* Generate insns to trap with code TCODE if OP1 and OP2 satisfy condition
5446 CODE. Return 0 on failure. */
5449 gen_cond_trap (enum rtx_code code ATTRIBUTE_UNUSED, rtx op1,
5450 rtx op2 ATTRIBUTE_UNUSED, rtx tcode ATTRIBUTE_UNUSED)
5452 enum machine_mode mode = GET_MODE (op1);
5453 enum insn_code icode;
/* Bail out when the target has no conditional trap at all, or when the
   operands carry no mode to compare in.  */
5456 if (!HAVE_conditional_trap)
5459 if (mode == VOIDmode)
/* A conditional trap needs a working compare pattern for MODE.  */
5462 icode = cmp_optab->handlers[(int) mode].insn_code;
5463 if (icode == CODE_FOR_nothing)
/* Force the operands into whatever form the compare pattern wants.  */
5467 op1 = prepare_operand (icode, op1, 0, mode, mode, 0);
5468 op2 = prepare_operand (icode, op2, 1, mode, mode, 0);
/* Emit the comparison, then the trap itself.  trap_rtx is the shared
   scratch rtx created in init_optabs; its code is overwritten in place
   with the requested condition before generating the trap insn.  */
5474 emit_insn (GEN_FCN (icode) (op1, op2));
5476 PUT_CODE (trap_rtx, code);
5477 insn = gen_conditional_trap (trap_rtx, tcode);
/* Collect the emitted sequence for the caller.  NOTE(review): the
   start/end-sequence bookkeeping and the return statements are not
   visible in this excerpt.  */
5481 insn = get_insns ();
5488 #include "gt-optabs.h"