1 /* Expand the basic unary and binary arithmetic operations, for GNU compiler.
2 Copyright (C) 1987, 1988, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
3 1999, 2000, 2001, 2002, 2003, 2004 Free Software Foundation, Inc.
5 This file is part of GCC.
7 GCC is free software; you can redistribute it and/or modify it under
8 the terms of the GNU General Public License as published by the Free
9 Software Foundation; either version 2, or (at your option) any later
12 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
13 WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING. If not, write to the Free
19 Software Foundation, 59 Temple Place - Suite 330, Boston, MA
25 #include "coretypes.h"
29 /* Include insn-config.h before expr.h so that HAVE_conditional_move
30 is properly defined. */
31 #include "insn-config.h"
45 #include "basic-block.h"
48 /* Each optab contains info on how this target machine
49 can perform a particular operation
50 for all sizes and kinds of operands.
52 The operation to be performed is often specified
53 by passing one of these optabs as an argument.
55 See expr.h for documentation of these optabs. */
57 optab optab_table[OTI_MAX];
59 rtx libfunc_table[LTI_MAX];
61 /* Tables of patterns for converting one mode to another. */
62 convert_optab convert_optab_table[CTI_MAX];
64 /* Contains the optab used for each rtx code. */
65 optab code_to_optab[NUM_RTX_CODE + 1];
67 /* Indexed by the rtx-code for a conditional (eg. EQ, LT,...)
68 gives the gen_function to make a branch to test that condition. */
70 rtxfun bcc_gen_fctn[NUM_RTX_CODE];
72 /* Indexed by the rtx-code for a conditional (eg. EQ, LT,...)
73 gives the insn code to make a store-condition insn
74 to test that condition. */
76 enum insn_code setcc_gen_code[NUM_RTX_CODE];
78 #ifdef HAVE_conditional_move
79 /* Indexed by the machine mode, gives the insn code to make a conditional
80 move insn. This is not indexed by the rtx-code like bcc_gen_fctn and
81 setcc_gen_code to cut down on the number of named patterns. Consider a day
82 when a lot more rtx codes are conditional (eg: for the ARM). */
84 enum insn_code movcc_gen_code[NUM_MACHINE_MODES];
87 /* The insn generating function can not take an rtx_code argument.
88 TRAP_RTX is used as an rtx argument. Its code is replaced with
89 the code to be used in the trap insn and all other fields are ignored. */
90 static GTY(()) rtx trap_rtx;
92 static int add_equal_note (rtx, rtx, enum rtx_code, rtx, rtx);
93 static rtx widen_operand (rtx, enum machine_mode, enum machine_mode, int,
95 static int expand_cmplxdiv_straight (rtx, rtx, rtx, rtx, rtx, rtx,
96 enum machine_mode, int,
97 enum optab_methods, enum mode_class,
99 static int expand_cmplxdiv_wide (rtx, rtx, rtx, rtx, rtx, rtx,
100 enum machine_mode, int, enum optab_methods,
101 enum mode_class, optab);
102 static void prepare_cmp_insn (rtx *, rtx *, enum rtx_code *, rtx,
103 enum machine_mode *, int *,
104 enum can_compare_purpose);
105 static enum insn_code can_fix_p (enum machine_mode, enum machine_mode, int,
107 static enum insn_code can_float_p (enum machine_mode, enum machine_mode, int);
108 static optab new_optab (void);
109 static convert_optab new_convert_optab (void);
110 static inline optab init_optab (enum rtx_code);
111 static inline optab init_optabv (enum rtx_code);
112 static inline convert_optab init_convert_optab (enum rtx_code);
113 static void init_libfuncs (optab, int, int, const char *, int);
114 static void init_integral_libfuncs (optab, const char *, int);
115 static void init_floating_libfuncs (optab, const char *, int);
116 static void init_interclass_conv_libfuncs (convert_optab, const char *,
117 enum mode_class, enum mode_class);
118 static void init_intraclass_conv_libfuncs (convert_optab, const char *,
119 enum mode_class, bool);
120 static void emit_cmp_and_jump_insn_1 (rtx, rtx, enum machine_mode,
121 enum rtx_code, int, rtx);
122 static void prepare_float_lib_cmp (rtx *, rtx *, enum rtx_code *,
123 enum machine_mode *, int *);
124 static rtx expand_vector_binop (enum machine_mode, optab, rtx, rtx, rtx, int,
126 static rtx expand_vector_unop (enum machine_mode, optab, rtx, rtx, int);
127 static rtx widen_clz (enum machine_mode, rtx, rtx);
128 static rtx expand_parity (enum machine_mode, rtx, rtx);
130 #ifndef HAVE_conditional_trap
131 #define HAVE_conditional_trap 0
132 #define gen_conditional_trap(a,b) (abort (), NULL_RTX)
135 /* Add a REG_EQUAL note to the last insn in INSNS. TARGET is being set to
136 the result of operation CODE applied to OP0 (and OP1 if it is a binary
139 If the last insn does not set TARGET, don't do anything, but return 1.
141 If a previous insn sets TARGET and TARGET is one of OP0 or OP1,
142 don't add the REG_EQUAL note but return 0. Our caller can then try
143 again, ensuring that TARGET is not one of the operands. */
146 add_equal_note (rtx insns, rtx target, enum rtx_code code, rtx op0, rtx op1)
148 rtx last_insn, insn, set;
153 || NEXT_INSN (insns) == NULL_RTX)
156 if (GET_RTX_CLASS (code) != RTX_COMM_ARITH
157 && GET_RTX_CLASS (code) != RTX_BIN_ARITH
158 && GET_RTX_CLASS (code) != RTX_COMM_COMPARE
159 && GET_RTX_CLASS (code) != RTX_COMPARE
160 && GET_RTX_CLASS (code) != RTX_UNARY)
163 if (GET_CODE (target) == ZERO_EXTRACT)
166 for (last_insn = insns;
167 NEXT_INSN (last_insn) != NULL_RTX;
168 last_insn = NEXT_INSN (last_insn))
171 set = single_set (last_insn);
175 if (! rtx_equal_p (SET_DEST (set), target)
176 /* For a STRICT_LOW_PART, the REG_NOTE applies to what is inside it. */
177 && (GET_CODE (SET_DEST (set)) != STRICT_LOW_PART
178 || ! rtx_equal_p (XEXP (SET_DEST (set), 0), target)))
181 /* If TARGET is in OP0 or OP1, check if anything in SEQ sets TARGET
182 besides the last insn. */
183 if (reg_overlap_mentioned_p (target, op0)
184 || (op1 && reg_overlap_mentioned_p (target, op1)))
186 insn = PREV_INSN (last_insn);
187 while (insn != NULL_RTX)
189 if (reg_set_p (target, insn))
192 insn = PREV_INSN (insn);
196 if (GET_RTX_CLASS (code) == RTX_UNARY)
197 note = gen_rtx_fmt_e (code, GET_MODE (target), copy_rtx (op0));
199 note = gen_rtx_fmt_ee (code, GET_MODE (target), copy_rtx (op0), copy_rtx (op1));
201 set_unique_reg_note (last_insn, REG_EQUAL, note);
206 /* Widen OP to MODE and return the rtx for the widened operand. UNSIGNEDP
207 says whether OP is signed or unsigned. NO_EXTEND is nonzero if we need
208 not actually do a sign-extend or zero-extend, but can leave the
209 higher-order bits of the result rtx undefined, for example, in the case
210 of logical operations, but not right shifts. */
213 widen_operand (rtx op, enum machine_mode mode, enum machine_mode oldmode,
214 int unsignedp, int no_extend)
218 /* If we don't have to extend and this is a constant, return it. */
219 if (no_extend && GET_MODE (op) == VOIDmode)
222 /* If we must extend do so. If OP is a SUBREG for a promoted object, also
223 extend since it will be more efficient to do so unless the signedness of
224 a promoted object differs from our extension. */
226 || (GET_CODE (op) == SUBREG && SUBREG_PROMOTED_VAR_P (op)
227 && SUBREG_PROMOTED_UNSIGNED_P (op) == unsignedp))
228 return convert_modes (mode, oldmode, op, unsignedp);
230 /* If MODE is no wider than a single word, we return a paradoxical
232 if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
233 return gen_rtx_SUBREG (mode, force_reg (GET_MODE (op), op), 0);
235 /* Otherwise, get an object of MODE, clobber it, and set the low-order
238 result = gen_reg_rtx (mode);
239 emit_insn (gen_rtx_CLOBBER (VOIDmode, result));
240 emit_move_insn (gen_lowpart (GET_MODE (op), result), op);
244 /* Generate code to perform a straightforward complex divide. */
247 expand_cmplxdiv_straight (rtx real0, rtx real1, rtx imag0, rtx imag1,
248 rtx realr, rtx imagr, enum machine_mode submode,
249 int unsignedp, enum optab_methods methods,
250 enum mode_class class, optab binoptab)
256 optab this_add_optab = add_optab;
257 optab this_sub_optab = sub_optab;
258 optab this_neg_optab = neg_optab;
259 optab this_mul_optab = smul_optab;
261 if (binoptab == sdivv_optab)
263 this_add_optab = addv_optab;
264 this_sub_optab = subv_optab;
265 this_neg_optab = negv_optab;
266 this_mul_optab = smulv_optab;
269 /* Don't fetch these from memory more than once. */
270 real0 = force_reg (submode, real0);
271 real1 = force_reg (submode, real1);
274 imag0 = force_reg (submode, imag0);
276 imag1 = force_reg (submode, imag1);
278 /* Divisor: c*c + d*d. */
279 temp1 = expand_binop (submode, this_mul_optab, real1, real1,
280 NULL_RTX, unsignedp, methods);
282 temp2 = expand_binop (submode, this_mul_optab, imag1, imag1,
283 NULL_RTX, unsignedp, methods);
285 if (temp1 == 0 || temp2 == 0)
288 divisor = expand_binop (submode, this_add_optab, temp1, temp2,
289 NULL_RTX, unsignedp, methods);
295 /* Mathematically, ((a)(c-id))/divisor. */
296 /* Computationally, (a+i0) / (c+id) = (ac/(cc+dd)) + i(-ad/(cc+dd)). */
298 /* Calculate the dividend. */
299 real_t = expand_binop (submode, this_mul_optab, real0, real1,
300 NULL_RTX, unsignedp, methods);
302 imag_t = expand_binop (submode, this_mul_optab, real0, imag1,
303 NULL_RTX, unsignedp, methods);
305 if (real_t == 0 || imag_t == 0)
308 imag_t = expand_unop (submode, this_neg_optab, imag_t,
309 NULL_RTX, unsignedp);
313 /* Mathematically, ((a+ib)(c-id))/divider. */
314 /* Calculate the dividend. */
315 temp1 = expand_binop (submode, this_mul_optab, real0, real1,
316 NULL_RTX, unsignedp, methods);
318 temp2 = expand_binop (submode, this_mul_optab, imag0, imag1,
319 NULL_RTX, unsignedp, methods);
321 if (temp1 == 0 || temp2 == 0)
324 real_t = expand_binop (submode, this_add_optab, temp1, temp2,
325 NULL_RTX, unsignedp, methods);
327 temp1 = expand_binop (submode, this_mul_optab, imag0, real1,
328 NULL_RTX, unsignedp, methods);
330 temp2 = expand_binop (submode, this_mul_optab, real0, imag1,
331 NULL_RTX, unsignedp, methods);
333 if (temp1 == 0 || temp2 == 0)
336 imag_t = expand_binop (submode, this_sub_optab, temp1, temp2,
337 NULL_RTX, unsignedp, methods);
339 if (real_t == 0 || imag_t == 0)
343 if (class == MODE_COMPLEX_FLOAT)
344 res = expand_binop (submode, binoptab, real_t, divisor,
345 realr, unsignedp, methods);
347 res = expand_divmod (0, TRUNC_DIV_EXPR, submode,
348 real_t, divisor, realr, unsignedp);
354 emit_move_insn (realr, res);
356 if (class == MODE_COMPLEX_FLOAT)
357 res = expand_binop (submode, binoptab, imag_t, divisor,
358 imagr, unsignedp, methods);
360 res = expand_divmod (0, TRUNC_DIV_EXPR, submode,
361 imag_t, divisor, imagr, unsignedp);
367 emit_move_insn (imagr, res);
372 /* Generate code to perform a wide-input-range-acceptable complex divide. */
375 expand_cmplxdiv_wide (rtx real0, rtx real1, rtx imag0, rtx imag1, rtx realr,
376 rtx imagr, enum machine_mode submode, int unsignedp,
377 enum optab_methods methods, enum mode_class class,
382 rtx temp1, temp2, lab1, lab2;
383 enum machine_mode mode;
385 optab this_add_optab = add_optab;
386 optab this_sub_optab = sub_optab;
387 optab this_neg_optab = neg_optab;
388 optab this_mul_optab = smul_optab;
390 if (binoptab == sdivv_optab)
392 this_add_optab = addv_optab;
393 this_sub_optab = subv_optab;
394 this_neg_optab = negv_optab;
395 this_mul_optab = smulv_optab;
398 /* Don't fetch these from memory more than once. */
399 real0 = force_reg (submode, real0);
400 real1 = force_reg (submode, real1);
403 imag0 = force_reg (submode, imag0);
405 imag1 = force_reg (submode, imag1);
407 /* XXX What's an "unsigned" complex number? */
415 temp1 = expand_abs (submode, real1, NULL_RTX, unsignedp, 1);
416 temp2 = expand_abs (submode, imag1, NULL_RTX, unsignedp, 1);
419 if (temp1 == 0 || temp2 == 0)
422 mode = GET_MODE (temp1);
423 lab1 = gen_label_rtx ();
424 emit_cmp_and_jump_insns (temp1, temp2, LT, NULL_RTX,
425 mode, unsignedp, lab1);
427 /* |c| >= |d|; use ratio d/c to scale dividend and divisor. */
429 if (class == MODE_COMPLEX_FLOAT)
430 ratio = expand_binop (submode, binoptab, imag1, real1,
431 NULL_RTX, unsignedp, methods);
433 ratio = expand_divmod (0, TRUNC_DIV_EXPR, submode,
434 imag1, real1, NULL_RTX, unsignedp);
439 /* Calculate divisor. */
441 temp1 = expand_binop (submode, this_mul_optab, imag1, ratio,
442 NULL_RTX, unsignedp, methods);
447 divisor = expand_binop (submode, this_add_optab, temp1, real1,
448 NULL_RTX, unsignedp, methods);
453 /* Calculate dividend. */
459 /* Compute a / (c+id) as a / (c+d(d/c)) + i (-a(d/c)) / (c+d(d/c)). */
461 imag_t = expand_binop (submode, this_mul_optab, real0, ratio,
462 NULL_RTX, unsignedp, methods);
467 imag_t = expand_unop (submode, this_neg_optab, imag_t,
468 NULL_RTX, unsignedp);
470 if (real_t == 0 || imag_t == 0)
475 /* Compute (a+ib)/(c+id) as
476 (a+b(d/c))/(c+d(d/c) + i(b-a(d/c))/(c+d(d/c)). */
478 temp1 = expand_binop (submode, this_mul_optab, imag0, ratio,
479 NULL_RTX, unsignedp, methods);
484 real_t = expand_binop (submode, this_add_optab, temp1, real0,
485 NULL_RTX, unsignedp, methods);
487 temp1 = expand_binop (submode, this_mul_optab, real0, ratio,
488 NULL_RTX, unsignedp, methods);
493 imag_t = expand_binop (submode, this_sub_optab, imag0, temp1,
494 NULL_RTX, unsignedp, methods);
496 if (real_t == 0 || imag_t == 0)
500 if (class == MODE_COMPLEX_FLOAT)
501 res = expand_binop (submode, binoptab, real_t, divisor,
502 realr, unsignedp, methods);
504 res = expand_divmod (0, TRUNC_DIV_EXPR, submode,
505 real_t, divisor, realr, unsignedp);
511 emit_move_insn (realr, res);
513 if (class == MODE_COMPLEX_FLOAT)
514 res = expand_binop (submode, binoptab, imag_t, divisor,
515 imagr, unsignedp, methods);
517 res = expand_divmod (0, TRUNC_DIV_EXPR, submode,
518 imag_t, divisor, imagr, unsignedp);
524 emit_move_insn (imagr, res);
526 lab2 = gen_label_rtx ();
527 emit_jump_insn (gen_jump (lab2));
532 /* |d| > |c|; use ratio c/d to scale dividend and divisor. */
534 if (class == MODE_COMPLEX_FLOAT)
535 ratio = expand_binop (submode, binoptab, real1, imag1,
536 NULL_RTX, unsignedp, methods);
538 ratio = expand_divmod (0, TRUNC_DIV_EXPR, submode,
539 real1, imag1, NULL_RTX, unsignedp);
544 /* Calculate divisor. */
546 temp1 = expand_binop (submode, this_mul_optab, real1, ratio,
547 NULL_RTX, unsignedp, methods);
552 divisor = expand_binop (submode, this_add_optab, temp1, imag1,
553 NULL_RTX, unsignedp, methods);
558 /* Calculate dividend. */
562 /* Compute a / (c+id) as a(c/d) / (c(c/d)+d) + i (-a) / (c(c/d)+d). */
564 real_t = expand_binop (submode, this_mul_optab, real0, ratio,
565 NULL_RTX, unsignedp, methods);
567 imag_t = expand_unop (submode, this_neg_optab, real0,
568 NULL_RTX, unsignedp);
570 if (real_t == 0 || imag_t == 0)
575 /* Compute (a+ib)/(c+id) as
576 (a(c/d)+b)/(c(c/d)+d) + i (b(c/d)-a)/(c(c/d)+d). */
578 temp1 = expand_binop (submode, this_mul_optab, real0, ratio,
579 NULL_RTX, unsignedp, methods);
584 real_t = expand_binop (submode, this_add_optab, temp1, imag0,
585 NULL_RTX, unsignedp, methods);
587 temp1 = expand_binop (submode, this_mul_optab, imag0, ratio,
588 NULL_RTX, unsignedp, methods);
593 imag_t = expand_binop (submode, this_sub_optab, temp1, real0,
594 NULL_RTX, unsignedp, methods);
596 if (real_t == 0 || imag_t == 0)
600 if (class == MODE_COMPLEX_FLOAT)
601 res = expand_binop (submode, binoptab, real_t, divisor,
602 realr, unsignedp, methods);
604 res = expand_divmod (0, TRUNC_DIV_EXPR, submode,
605 real_t, divisor, realr, unsignedp);
611 emit_move_insn (realr, res);
613 if (class == MODE_COMPLEX_FLOAT)
614 res = expand_binop (submode, binoptab, imag_t, divisor,
615 imagr, unsignedp, methods);
617 res = expand_divmod (0, TRUNC_DIV_EXPR, submode,
618 imag_t, divisor, imagr, unsignedp);
624 emit_move_insn (imagr, res);
631 /* Wrapper around expand_binop which takes an rtx code to specify
632 the operation to perform, not an optab pointer. All other
633 arguments are the same. */
635 expand_simple_binop (enum machine_mode mode, enum rtx_code code, rtx op0,
636 rtx op1, rtx target, int unsignedp,
637 enum optab_methods methods)
639 optab binop = code_to_optab[(int) code];
643 return expand_binop (mode, binop, op0, op1, target, unsignedp, methods);
646 /* Generate code to perform an operation specified by BINOPTAB
647 on operands OP0 and OP1, with result having machine-mode MODE.
649 UNSIGNEDP is for the case where we have to widen the operands
650 to perform the operation. It says to use zero-extension.
652 If TARGET is nonzero, the value
653 is generated there, if it is convenient to do so.
654 In all cases an rtx is returned for the locus of the value;
655 this may or may not be TARGET. */
658 expand_binop (enum machine_mode mode, optab binoptab, rtx op0, rtx op1,
659 rtx target, int unsignedp, enum optab_methods methods)
661 enum optab_methods next_methods
662 = (methods == OPTAB_LIB || methods == OPTAB_LIB_WIDEN
663 ? OPTAB_WIDEN : methods);
664 enum mode_class class;
665 enum machine_mode wider_mode;
667 int commutative_op = 0;
668 int shift_op = (binoptab->code == ASHIFT
669 || binoptab->code == ASHIFTRT
670 || binoptab->code == LSHIFTRT
671 || binoptab->code == ROTATE
672 || binoptab->code == ROTATERT);
673 rtx entry_last = get_last_insn ();
676 class = GET_MODE_CLASS (mode);
678 op0 = protect_from_queue (op0, 0);
679 op1 = protect_from_queue (op1, 0);
681 target = protect_from_queue (target, 1);
685 /* Load duplicate non-volatile operands once. */
686 if (rtx_equal_p (op0, op1) && ! volatile_refs_p (op0))
688 op0 = force_not_mem (op0);
693 op0 = force_not_mem (op0);
694 op1 = force_not_mem (op1);
698 /* If subtracting an integer constant, convert this into an addition of
699 the negated constant. */
701 if (binoptab == sub_optab && GET_CODE (op1) == CONST_INT)
703 op1 = negate_rtx (mode, op1);
704 binoptab = add_optab;
707 /* If we are inside an appropriately-short loop and one operand is an
708 expensive constant, force it into a register. */
709 if (CONSTANT_P (op0) && preserve_subexpressions_p ()
710 && rtx_cost (op0, binoptab->code) > COSTS_N_INSNS (1))
711 op0 = force_reg (mode, op0);
713 if (CONSTANT_P (op1) && preserve_subexpressions_p ()
714 && ! shift_op && rtx_cost (op1, binoptab->code) > COSTS_N_INSNS (1))
715 op1 = force_reg (mode, op1);
717 /* Record where to delete back to if we backtrack. */
718 last = get_last_insn ();
720 /* If operation is commutative,
721 try to make the first operand a register.
722 Even better, try to make it the same as the target.
723 Also try to make the last operand a constant. */
724 if (GET_RTX_CLASS (binoptab->code) == RTX_COMM_ARITH
725 || binoptab == smul_widen_optab
726 || binoptab == umul_widen_optab
727 || binoptab == smul_highpart_optab
728 || binoptab == umul_highpart_optab)
732 if (((target == 0 || GET_CODE (target) == REG)
733 ? ((GET_CODE (op1) == REG
734 && GET_CODE (op0) != REG)
736 : rtx_equal_p (op1, target))
737 || GET_CODE (op0) == CONST_INT)
745 /* If we can do it with a three-operand insn, do so. */
747 if (methods != OPTAB_MUST_WIDEN
748 && binoptab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
750 int icode = (int) binoptab->handlers[(int) mode].insn_code;
751 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
752 enum machine_mode mode1 = insn_data[icode].operand[2].mode;
754 rtx xop0 = op0, xop1 = op1;
759 temp = gen_reg_rtx (mode);
761 /* If it is a commutative operator and the modes would match
762 if we would swap the operands, we can save the conversions. */
765 if (GET_MODE (op0) != mode0 && GET_MODE (op1) != mode1
766 && GET_MODE (op0) == mode1 && GET_MODE (op1) == mode0)
770 tmp = op0; op0 = op1; op1 = tmp;
771 tmp = xop0; xop0 = xop1; xop1 = tmp;
775 /* In case the insn wants input operands in modes different from
776 those of the actual operands, convert the operands. It would
777 seem that we don't need to convert CONST_INTs, but we do, so
778 that they're properly zero-extended, sign-extended or truncated
781 if (GET_MODE (op0) != mode0 && mode0 != VOIDmode)
782 xop0 = convert_modes (mode0,
783 GET_MODE (op0) != VOIDmode
788 if (GET_MODE (op1) != mode1 && mode1 != VOIDmode)
789 xop1 = convert_modes (mode1,
790 GET_MODE (op1) != VOIDmode
795 /* Now, if insn's predicates don't allow our operands, put them into
798 if (! (*insn_data[icode].operand[1].predicate) (xop0, mode0)
799 && mode0 != VOIDmode)
800 xop0 = copy_to_mode_reg (mode0, xop0);
802 if (! (*insn_data[icode].operand[2].predicate) (xop1, mode1)
803 && mode1 != VOIDmode)
804 xop1 = copy_to_mode_reg (mode1, xop1);
806 if (! (*insn_data[icode].operand[0].predicate) (temp, mode))
807 temp = gen_reg_rtx (mode);
809 pat = GEN_FCN (icode) (temp, xop0, xop1);
812 /* If PAT is composed of more than one insn, try to add an appropriate
813 REG_EQUAL note to it. If we can't because TEMP conflicts with an
814 operand, call ourselves again, this time without a target. */
815 if (INSN_P (pat) && NEXT_INSN (pat) != NULL_RTX
816 && ! add_equal_note (pat, temp, binoptab->code, xop0, xop1))
818 delete_insns_since (last);
819 return expand_binop (mode, binoptab, op0, op1, NULL_RTX,
827 delete_insns_since (last);
830 /* If this is a multiply, see if we can do a widening operation that
831 takes operands of this mode and makes a wider mode. */
833 if (binoptab == smul_optab && GET_MODE_WIDER_MODE (mode) != VOIDmode
834 && (((unsignedp ? umul_widen_optab : smul_widen_optab)
835 ->handlers[(int) GET_MODE_WIDER_MODE (mode)].insn_code)
836 != CODE_FOR_nothing))
838 temp = expand_binop (GET_MODE_WIDER_MODE (mode),
839 unsignedp ? umul_widen_optab : smul_widen_optab,
840 op0, op1, NULL_RTX, unsignedp, OPTAB_DIRECT);
844 if (GET_MODE_CLASS (mode) == MODE_INT)
845 return gen_lowpart (mode, temp);
847 return convert_to_mode (mode, temp, unsignedp);
851 /* Look for a wider mode of the same class for which we think we
852 can open-code the operation. Check for a widening multiply at the
853 wider mode as well. */
855 if ((class == MODE_INT || class == MODE_FLOAT || class == MODE_COMPLEX_FLOAT)
856 && methods != OPTAB_DIRECT && methods != OPTAB_LIB)
857 for (wider_mode = GET_MODE_WIDER_MODE (mode); wider_mode != VOIDmode;
858 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
860 if (binoptab->handlers[(int) wider_mode].insn_code != CODE_FOR_nothing
861 || (binoptab == smul_optab
862 && GET_MODE_WIDER_MODE (wider_mode) != VOIDmode
863 && (((unsignedp ? umul_widen_optab : smul_widen_optab)
864 ->handlers[(int) GET_MODE_WIDER_MODE (wider_mode)].insn_code)
865 != CODE_FOR_nothing)))
867 rtx xop0 = op0, xop1 = op1;
870 /* For certain integer operations, we need not actually extend
871 the narrow operands, as long as we will truncate
872 the results to the same narrowness. */
874 if ((binoptab == ior_optab || binoptab == and_optab
875 || binoptab == xor_optab
876 || binoptab == add_optab || binoptab == sub_optab
877 || binoptab == smul_optab || binoptab == ashl_optab)
878 && class == MODE_INT)
881 xop0 = widen_operand (xop0, wider_mode, mode, unsignedp, no_extend);
883 /* The second operand of a shift must always be extended. */
884 xop1 = widen_operand (xop1, wider_mode, mode, unsignedp,
885 no_extend && binoptab != ashl_optab);
887 temp = expand_binop (wider_mode, binoptab, xop0, xop1, NULL_RTX,
888 unsignedp, OPTAB_DIRECT);
891 if (class != MODE_INT)
894 target = gen_reg_rtx (mode);
895 convert_move (target, temp, 0);
899 return gen_lowpart (mode, temp);
902 delete_insns_since (last);
906 /* These can be done a word at a time. */
907 if ((binoptab == and_optab || binoptab == ior_optab || binoptab == xor_optab)
909 && GET_MODE_SIZE (mode) > UNITS_PER_WORD
910 && binoptab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing)
916 /* If TARGET is the same as one of the operands, the REG_EQUAL note
917 won't be accurate, so use a new target. */
918 if (target == 0 || target == op0 || target == op1)
919 target = gen_reg_rtx (mode);
923 /* Do the actual arithmetic. */
924 for (i = 0; i < GET_MODE_BITSIZE (mode) / BITS_PER_WORD; i++)
926 rtx target_piece = operand_subword (target, i, 1, mode);
927 rtx x = expand_binop (word_mode, binoptab,
928 operand_subword_force (op0, i, mode),
929 operand_subword_force (op1, i, mode),
930 target_piece, unsignedp, next_methods);
935 if (target_piece != x)
936 emit_move_insn (target_piece, x);
939 insns = get_insns ();
942 if (i == GET_MODE_BITSIZE (mode) / BITS_PER_WORD)
944 if (binoptab->code != UNKNOWN)
946 = gen_rtx_fmt_ee (binoptab->code, mode,
947 copy_rtx (op0), copy_rtx (op1));
951 emit_no_conflict_block (insns, target, op0, op1, equiv_value);
956 /* Synthesize double word shifts from single word shifts. */
957 if ((binoptab == lshr_optab || binoptab == ashl_optab
958 || binoptab == ashr_optab)
960 && GET_CODE (op1) == CONST_INT
961 && GET_MODE_SIZE (mode) == 2 * UNITS_PER_WORD
962 && binoptab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing
963 && ashl_optab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing
964 && lshr_optab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing)
966 rtx insns, inter, equiv_value;
967 rtx into_target, outof_target;
968 rtx into_input, outof_input;
969 int shift_count, left_shift, outof_word;
971 /* If TARGET is the same as one of the operands, the REG_EQUAL note
972 won't be accurate, so use a new target. */
973 if (target == 0 || target == op0 || target == op1)
974 target = gen_reg_rtx (mode);
978 shift_count = INTVAL (op1);
980 /* OUTOF_* is the word we are shifting bits away from, and
981 INTO_* is the word that we are shifting bits towards, thus
982 they differ depending on the direction of the shift and
985 left_shift = binoptab == ashl_optab;
986 outof_word = left_shift ^ ! WORDS_BIG_ENDIAN;
988 outof_target = operand_subword (target, outof_word, 1, mode);
989 into_target = operand_subword (target, 1 - outof_word, 1, mode);
991 outof_input = operand_subword_force (op0, outof_word, mode);
992 into_input = operand_subword_force (op0, 1 - outof_word, mode);
994 if (shift_count >= BITS_PER_WORD)
996 inter = expand_binop (word_mode, binoptab,
998 GEN_INT (shift_count - BITS_PER_WORD),
999 into_target, unsignedp, next_methods);
1001 if (inter != 0 && inter != into_target)
1002 emit_move_insn (into_target, inter);
1004 /* For a signed right shift, we must fill the word we are shifting
1005 out of with copies of the sign bit. Otherwise it is zeroed. */
1006 if (inter != 0 && binoptab != ashr_optab)
1007 inter = CONST0_RTX (word_mode);
1008 else if (inter != 0)
1009 inter = expand_binop (word_mode, binoptab,
1011 GEN_INT (BITS_PER_WORD - 1),
1012 outof_target, unsignedp, next_methods);
1014 if (inter != 0 && inter != outof_target)
1015 emit_move_insn (outof_target, inter);
1020 optab reverse_unsigned_shift, unsigned_shift;
1022 /* For a shift of less then BITS_PER_WORD, to compute the carry,
1023 we must do a logical shift in the opposite direction of the
1026 reverse_unsigned_shift = (left_shift ? lshr_optab : ashl_optab);
1028 /* For a shift of less than BITS_PER_WORD, to compute the word
1029 shifted towards, we need to unsigned shift the orig value of
1032 unsigned_shift = (left_shift ? ashl_optab : lshr_optab);
1034 carries = expand_binop (word_mode, reverse_unsigned_shift,
1036 GEN_INT (BITS_PER_WORD - shift_count),
1037 0, unsignedp, next_methods);
1042 inter = expand_binop (word_mode, unsigned_shift, into_input,
1043 op1, 0, unsignedp, next_methods);
1046 inter = expand_binop (word_mode, ior_optab, carries, inter,
1047 into_target, unsignedp, next_methods);
1049 if (inter != 0 && inter != into_target)
1050 emit_move_insn (into_target, inter);
1053 inter = expand_binop (word_mode, binoptab, outof_input,
1054 op1, outof_target, unsignedp, next_methods);
1056 if (inter != 0 && inter != outof_target)
1057 emit_move_insn (outof_target, inter);
1060 insns = get_insns ();
1065 if (binoptab->code != UNKNOWN)
1066 equiv_value = gen_rtx_fmt_ee (binoptab->code, mode, op0, op1);
1070 emit_no_conflict_block (insns, target, op0, op1, equiv_value);
1075 /* Synthesize double word rotates from single word shifts. */
1076 if ((binoptab == rotl_optab || binoptab == rotr_optab)
1077 && class == MODE_INT
1078 && GET_CODE (op1) == CONST_INT
1079 && GET_MODE_SIZE (mode) == 2 * UNITS_PER_WORD
1080 && ashl_optab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing
1081 && lshr_optab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing)
1083 rtx insns, equiv_value;
1084 rtx into_target, outof_target;
1085 rtx into_input, outof_input;
1087 int shift_count, left_shift, outof_word;
1089 /* If TARGET is the same as one of the operands, the REG_EQUAL note
1090 won't be accurate, so use a new target. */
1091 if (target == 0 || target == op0 || target == op1)
1092 target = gen_reg_rtx (mode);
1096 shift_count = INTVAL (op1);
1098 /* OUTOF_* is the word we are shifting bits away from, and
1099 INTO_* is the word that we are shifting bits towards, thus
1100 they differ depending on the direction of the shift and
1101 WORDS_BIG_ENDIAN. */
1103 left_shift = (binoptab == rotl_optab);
1104 outof_word = left_shift ^ ! WORDS_BIG_ENDIAN;
1106 outof_target = operand_subword (target, outof_word, 1, mode);
1107 into_target = operand_subword (target, 1 - outof_word, 1, mode);
1109 outof_input = operand_subword_force (op0, outof_word, mode);
1110 into_input = operand_subword_force (op0, 1 - outof_word, mode);
1112 if (shift_count == BITS_PER_WORD)
1114 /* This is just a word swap. */
1115 emit_move_insn (outof_target, into_input);
1116 emit_move_insn (into_target, outof_input);
1121 rtx into_temp1, into_temp2, outof_temp1, outof_temp2;
1122 rtx first_shift_count, second_shift_count;
1123 optab reverse_unsigned_shift, unsigned_shift;
1125 reverse_unsigned_shift = (left_shift ^ (shift_count < BITS_PER_WORD)
1126 ? lshr_optab : ashl_optab);
1128 unsigned_shift = (left_shift ^ (shift_count < BITS_PER_WORD)
1129 ? ashl_optab : lshr_optab);
1131 if (shift_count > BITS_PER_WORD)
1133 first_shift_count = GEN_INT (shift_count - BITS_PER_WORD);
1134 second_shift_count = GEN_INT (2 * BITS_PER_WORD - shift_count);
1138 first_shift_count = GEN_INT (BITS_PER_WORD - shift_count);
1139 second_shift_count = GEN_INT (shift_count);
1142 into_temp1 = expand_binop (word_mode, unsigned_shift,
1143 outof_input, first_shift_count,
1144 NULL_RTX, unsignedp, next_methods);
1145 into_temp2 = expand_binop (word_mode, reverse_unsigned_shift,
1146 into_input, second_shift_count,
1147 NULL_RTX, unsignedp, next_methods);
1149 if (into_temp1 != 0 && into_temp2 != 0)
1150 inter = expand_binop (word_mode, ior_optab, into_temp1, into_temp2,
1151 into_target, unsignedp, next_methods);
1155 if (inter != 0 && inter != into_target)
1156 emit_move_insn (into_target, inter);
1158 outof_temp1 = expand_binop (word_mode, unsigned_shift,
1159 into_input, first_shift_count,
1160 NULL_RTX, unsignedp, next_methods);
1161 outof_temp2 = expand_binop (word_mode, reverse_unsigned_shift,
1162 outof_input, second_shift_count,
1163 NULL_RTX, unsignedp, next_methods);
1165 if (inter != 0 && outof_temp1 != 0 && outof_temp2 != 0)
1166 inter = expand_binop (word_mode, ior_optab,
1167 outof_temp1, outof_temp2,
1168 outof_target, unsignedp, next_methods);
1170 if (inter != 0 && inter != outof_target)
1171 emit_move_insn (outof_target, inter);
1174 insns = get_insns ();
1179 if (binoptab->code != UNKNOWN)
1180 equiv_value = gen_rtx_fmt_ee (binoptab->code, mode, op0, op1);
1184 /* We can't make this a no conflict block if this is a word swap,
1185 because the word swap case fails if the input and output values
1186 are in the same register. */
1187 if (shift_count != BITS_PER_WORD)
1188 emit_no_conflict_block (insns, target, op0, op1, equiv_value);
1197 /* These can be done a word at a time by propagating carries. */
1198 if ((binoptab == add_optab || binoptab == sub_optab)
1199 && class == MODE_INT
1200 && GET_MODE_SIZE (mode) >= 2 * UNITS_PER_WORD
1201 && binoptab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing)
1204 optab otheroptab = binoptab == add_optab ? sub_optab : add_optab;
1205 const unsigned int nwords = GET_MODE_BITSIZE (mode) / BITS_PER_WORD;
1206 rtx carry_in = NULL_RTX, carry_out = NULL_RTX;
1207 rtx xop0, xop1, xtarget;
1209 /* We can handle either a 1 or -1 value for the carry. If STORE_FLAG
1210 value is one of those, use it. Otherwise, use 1 since it is the
1211 one easiest to get. */
1212 #if STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1
1213 int normalizep = STORE_FLAG_VALUE;
1218 /* Prepare the operands. */
1219 xop0 = force_reg (mode, op0);
1220 xop1 = force_reg (mode, op1);
1222 xtarget = gen_reg_rtx (mode);
1224 if (target == 0 || GET_CODE (target) != REG)
1227 /* Indicate for flow that the entire target reg is being set. */
1228 if (GET_CODE (target) == REG)
1229 emit_insn (gen_rtx_CLOBBER (VOIDmode, xtarget));
1231 /* Do the actual arithmetic. */
1232 for (i = 0; i < nwords; i++)
1234 int index = (WORDS_BIG_ENDIAN ? nwords - i - 1 : i);
1235 rtx target_piece = operand_subword (xtarget, index, 1, mode);
1236 rtx op0_piece = operand_subword_force (xop0, index, mode);
1237 rtx op1_piece = operand_subword_force (xop1, index, mode);
1240 /* Main add/subtract of the input operands. */
1241 x = expand_binop (word_mode, binoptab,
1242 op0_piece, op1_piece,
1243 target_piece, unsignedp, next_methods);
1249 /* Store carry from main add/subtract. */
1250 carry_out = gen_reg_rtx (word_mode);
1251 carry_out = emit_store_flag_force (carry_out,
1252 (binoptab == add_optab
1255 word_mode, 1, normalizep);
1262 /* Add/subtract previous carry to main result. */
1263 newx = expand_binop (word_mode,
1264 normalizep == 1 ? binoptab : otheroptab,
1266 NULL_RTX, 1, next_methods);
1270 /* Get out carry from adding/subtracting carry in. */
1271 rtx carry_tmp = gen_reg_rtx (word_mode);
1272 carry_tmp = emit_store_flag_force (carry_tmp,
1273 (binoptab == add_optab
1276 word_mode, 1, normalizep);
1278 /* Logical-ior the two poss. carry together. */
1279 carry_out = expand_binop (word_mode, ior_optab,
1280 carry_out, carry_tmp,
1281 carry_out, 0, next_methods);
1285 emit_move_insn (target_piece, newx);
1288 carry_in = carry_out;
1291 if (i == GET_MODE_BITSIZE (mode) / (unsigned) BITS_PER_WORD)
1293 if (mov_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing
1294 || ! rtx_equal_p (target, xtarget))
1296 rtx temp = emit_move_insn (target, xtarget);
1298 set_unique_reg_note (temp,
1300 gen_rtx_fmt_ee (binoptab->code, mode,
1311 delete_insns_since (last);
1314 /* If we want to multiply two two-word values and have normal and widening
1315 multiplies of single-word values, we can do this with three smaller
1316 multiplications. Note that we do not make a REG_NO_CONFLICT block here
1317 because we are not operating on one word at a time.
1319 The multiplication proceeds as follows:
1320 _______________________
1321 [__op0_high_|__op0_low__]
1322 _______________________
1323 * [__op1_high_|__op1_low__]
1324 _______________________________________________
1325 _______________________
1326 (1) [__op0_low__*__op1_low__]
1327 _______________________
1328 (2a) [__op0_low__*__op1_high_]
1329 _______________________
1330 (2b) [__op0_high_*__op1_low__]
1331 _______________________
1332 (3) [__op0_high_*__op1_high_]
1335 This gives a 4-word result. Since we are only interested in the
1336 lower 2 words, partial result (3) and the upper words of (2a) and
1337 (2b) don't need to be calculated. Hence (2a) and (2b) can be
1338 calculated using non-widening multiplication.
1340 (1), however, needs to be calculated with an unsigned widening
1341 multiplication. If this operation is not directly supported we
1342 try using a signed widening multiplication and adjust the result.
1343 This adjustment works as follows:
1345 If both operands are positive then no adjustment is needed.
1347 If the operands have different signs, for example op0_low < 0 and
1348 op1_low >= 0, the instruction treats the most significant bit of
1349 op0_low as a sign bit instead of a bit with significance
1350 2**(BITS_PER_WORD-1), i.e. the instruction multiplies op1_low
1351 with 2**BITS_PER_WORD - op0_low, and two's complements the
1352 result. Conclusion: We need to add op1_low * 2**BITS_PER_WORD to
1355 Similarly, if both operands are negative, we need to add
1356 (op0_low + op1_low) * 2**BITS_PER_WORD.
1358 We use a trick to adjust quickly. We logically shift op0_low right
1359 (op1_low) BITS_PER_WORD-1 steps to get 0 or 1, and add this to
1360 op0_high (op1_high) before it is used to calculate 2b (2a). If no
1361 logical shift exists, we do an arithmetic right shift and subtract
1364 if (binoptab == smul_optab
1365 && class == MODE_INT
1366 && GET_MODE_SIZE (mode) == 2 * UNITS_PER_WORD
1367 && smul_optab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing
1368 && add_optab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing
1369 && ((umul_widen_optab->handlers[(int) mode].insn_code
1370 != CODE_FOR_nothing)
1371 || (smul_widen_optab->handlers[(int) mode].insn_code
1372 != CODE_FOR_nothing)))
1374 int low = (WORDS_BIG_ENDIAN ? 1 : 0);
1375 int high = (WORDS_BIG_ENDIAN ? 0 : 1);
1376 rtx op0_high = operand_subword_force (op0, high, mode);
1377 rtx op0_low = operand_subword_force (op0, low, mode);
1378 rtx op1_high = operand_subword_force (op1, high, mode);
1379 rtx op1_low = operand_subword_force (op1, low, mode);
1381 rtx op0_xhigh = NULL_RTX;
1382 rtx op1_xhigh = NULL_RTX;
1384 /* If the target is the same as one of the inputs, don't use it. This
1385 prevents problems with the REG_EQUAL note. */
1386 if (target == op0 || target == op1
1387 || (target != 0 && GET_CODE (target) != REG))
1390 /* Multiply the two lower words to get a double-word product.
1391 If unsigned widening multiplication is available, use that;
1392 otherwise use the signed form and compensate. */
1394 if (umul_widen_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
1396 product = expand_binop (mode, umul_widen_optab, op0_low, op1_low,
1397 target, 1, OPTAB_DIRECT);
1399 /* If we didn't succeed, delete everything we did so far. */
1401 delete_insns_since (last);
1403 op0_xhigh = op0_high, op1_xhigh = op1_high;
1407 && smul_widen_optab->handlers[(int) mode].insn_code
1408 != CODE_FOR_nothing)
1410 rtx wordm1 = GEN_INT (BITS_PER_WORD - 1);
1411 product = expand_binop (mode, smul_widen_optab, op0_low, op1_low,
1412 target, 1, OPTAB_DIRECT);
1413 op0_xhigh = expand_binop (word_mode, lshr_optab, op0_low, wordm1,
1414 NULL_RTX, 1, next_methods);
1416 op0_xhigh = expand_binop (word_mode, add_optab, op0_high,
1417 op0_xhigh, op0_xhigh, 0, next_methods);
1420 op0_xhigh = expand_binop (word_mode, ashr_optab, op0_low, wordm1,
1421 NULL_RTX, 0, next_methods);
1423 op0_xhigh = expand_binop (word_mode, sub_optab, op0_high,
1424 op0_xhigh, op0_xhigh, 0,
1428 op1_xhigh = expand_binop (word_mode, lshr_optab, op1_low, wordm1,
1429 NULL_RTX, 1, next_methods);
1431 op1_xhigh = expand_binop (word_mode, add_optab, op1_high,
1432 op1_xhigh, op1_xhigh, 0, next_methods);
1435 op1_xhigh = expand_binop (word_mode, ashr_optab, op1_low, wordm1,
1436 NULL_RTX, 0, next_methods);
1438 op1_xhigh = expand_binop (word_mode, sub_optab, op1_high,
1439 op1_xhigh, op1_xhigh, 0,
1444 /* If we have been able to directly compute the product of the
1445 low-order words of the operands and perform any required adjustments
1446 of the operands, we proceed by trying two more multiplications
1447 and then computing the appropriate sum.
1449 We have checked above that the required addition is provided.
1450 Full-word addition will normally always succeed, especially if
1451 it is provided at all, so we don't worry about its failure. The
1452 multiplication may well fail, however, so we do handle that. */
1454 if (product && op0_xhigh && op1_xhigh)
1456 rtx product_high = operand_subword (product, high, 1, mode);
1457 rtx temp = expand_binop (word_mode, binoptab, op0_low, op1_xhigh,
1458 NULL_RTX, 0, OPTAB_DIRECT);
1460 if (!REG_P (product_high))
1461 product_high = force_reg (word_mode, product_high);
1464 temp = expand_binop (word_mode, add_optab, temp, product_high,
1465 product_high, 0, next_methods);
1467 if (temp != 0 && temp != product_high)
1468 emit_move_insn (product_high, temp);
1471 temp = expand_binop (word_mode, binoptab, op1_low, op0_xhigh,
1472 NULL_RTX, 0, OPTAB_DIRECT);
1475 temp = expand_binop (word_mode, add_optab, temp,
1476 product_high, product_high,
1479 if (temp != 0 && temp != product_high)
1480 emit_move_insn (product_high, temp);
1482 emit_move_insn (operand_subword (product, high, 1, mode), product_high);
1486 if (mov_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
1488 temp = emit_move_insn (product, product);
1489 set_unique_reg_note (temp,
1491 gen_rtx_fmt_ee (MULT, mode,
1500 /* If we get here, we couldn't do it for some reason even though we
1501 originally thought we could. Delete anything we've emitted in
1504 delete_insns_since (last);
1507 /* Open-code the vector operations if we have no hardware support
1509 if (class == MODE_VECTOR_INT || class == MODE_VECTOR_FLOAT)
1510 return expand_vector_binop (mode, binoptab, op0, op1, target,
1511 unsignedp, methods);
1513 /* We need to open-code the complex type operations: '+, -, * and /' */
1515 /* At this point we allow operations between two similar complex
1516 numbers, and also if one of the operands is not a complex number
1517 but rather of MODE_FLOAT or MODE_INT. However, the caller
1518 must make sure that the MODE of the non-complex operand matches
1519 the SUBMODE of the complex operand. */
1521 if (class == MODE_COMPLEX_FLOAT || class == MODE_COMPLEX_INT)
1523 rtx real0 = 0, imag0 = 0;
1524 rtx real1 = 0, imag1 = 0;
1525 rtx realr, imagr, res;
1529 /* Find the correct mode for the real and imaginary parts. */
1530 enum machine_mode submode = GET_MODE_INNER (mode);
1532 if (submode == BLKmode)
1537 if (GET_MODE (op0) == mode)
1539 real0 = gen_realpart (submode, op0);
1540 imag0 = gen_imagpart (submode, op0);
1545 if (GET_MODE (op1) == mode)
1547 real1 = gen_realpart (submode, op1);
1548 imag1 = gen_imagpart (submode, op1);
1553 if (real0 == 0 || real1 == 0 || ! (imag0 != 0 || imag1 != 0))
1556 result = gen_reg_rtx (mode);
1557 realr = gen_realpart (submode, result);
1558 imagr = gen_imagpart (submode, result);
1560 switch (binoptab->code)
1563 /* (a+ib) + (c+id) = (a+c) + i(b+d) */
1565 /* (a+ib) - (c+id) = (a-c) + i(b-d) */
1566 res = expand_binop (submode, binoptab, real0, real1,
1567 realr, unsignedp, methods);
1571 else if (res != realr)
1572 emit_move_insn (realr, res);
1574 if (imag0 != 0 && imag1 != 0)
1575 res = expand_binop (submode, binoptab, imag0, imag1,
1576 imagr, unsignedp, methods);
1577 else if (imag0 != 0)
1579 else if (binoptab->code == MINUS)
1580 res = expand_unop (submode,
1581 binoptab == subv_optab ? negv_optab : neg_optab,
1582 imag1, imagr, unsignedp);
1588 else if (res != imagr)
1589 emit_move_insn (imagr, res);
1595 /* (a+ib) * (c+id) = (ac-bd) + i(ad+cb) */
1597 if (imag0 != 0 && imag1 != 0)
1601 /* Don't fetch these from memory more than once. */
1602 real0 = force_reg (submode, real0);
1603 real1 = force_reg (submode, real1);
1604 imag0 = force_reg (submode, imag0);
1605 imag1 = force_reg (submode, imag1);
1607 temp1 = expand_binop (submode, binoptab, real0, real1, NULL_RTX,
1608 unsignedp, methods);
1610 temp2 = expand_binop (submode, binoptab, imag0, imag1, NULL_RTX,
1611 unsignedp, methods);
1613 if (temp1 == 0 || temp2 == 0)
1618 binoptab == smulv_optab ? subv_optab : sub_optab,
1619 temp1, temp2, realr, unsignedp, methods));
1623 else if (res != realr)
1624 emit_move_insn (realr, res);
1626 temp1 = expand_binop (submode, binoptab, real0, imag1,
1627 NULL_RTX, unsignedp, methods);
1629 /* Avoid expanding redundant multiplication for the common
1630 case of squaring a complex number. */
1631 if (rtx_equal_p (real0, real1) && rtx_equal_p (imag0, imag1))
1634 temp2 = expand_binop (submode, binoptab, real1, imag0,
1635 NULL_RTX, unsignedp, methods);
1637 if (temp1 == 0 || temp2 == 0)
1642 binoptab == smulv_optab ? addv_optab : add_optab,
1643 temp1, temp2, imagr, unsignedp, methods));
1647 else if (res != imagr)
1648 emit_move_insn (imagr, res);
1654 /* Don't fetch these from memory more than once. */
1655 real0 = force_reg (submode, real0);
1656 real1 = force_reg (submode, real1);
1658 res = expand_binop (submode, binoptab, real0, real1,
1659 realr, unsignedp, methods);
1662 else if (res != realr)
1663 emit_move_insn (realr, res);
1666 res = expand_binop (submode, binoptab,
1667 real1, imag0, imagr, unsignedp, methods);
1669 res = expand_binop (submode, binoptab,
1670 real0, imag1, imagr, unsignedp, methods);
1674 else if (res != imagr)
1675 emit_move_insn (imagr, res);
1682 /* (a+ib) / (c+id) = ((ac+bd)/(cc+dd)) + i((bc-ad)/(cc+dd)) */
1686 /* (a+ib) / (c+i0) = (a/c) + i(b/c) */
1688 /* Don't fetch these from memory more than once. */
1689 real1 = force_reg (submode, real1);
1691 /* Simply divide the real and imaginary parts by `c' */
1692 if (class == MODE_COMPLEX_FLOAT)
1693 res = expand_binop (submode, binoptab, real0, real1,
1694 realr, unsignedp, methods);
1696 res = expand_divmod (0, TRUNC_DIV_EXPR, submode,
1697 real0, real1, realr, unsignedp);
1701 else if (res != realr)
1702 emit_move_insn (realr, res);
1704 if (class == MODE_COMPLEX_FLOAT)
1705 res = expand_binop (submode, binoptab, imag0, real1,
1706 imagr, unsignedp, methods);
1708 res = expand_divmod (0, TRUNC_DIV_EXPR, submode,
1709 imag0, real1, imagr, unsignedp);
1713 else if (res != imagr)
1714 emit_move_insn (imagr, res);
1720 switch (flag_complex_divide_method)
1723 ok = expand_cmplxdiv_straight (real0, real1, imag0, imag1,
1724 realr, imagr, submode,
1730 ok = expand_cmplxdiv_wide (real0, real1, imag0, imag1,
1731 realr, imagr, submode,
1751 rtx equiv = gen_rtx_fmt_ee (binoptab->code, mode,
1752 copy_rtx (op0), copy_rtx (op1));
1753 emit_no_conflict_block (seq, result, op0, op1, equiv);
1758 /* It can't be open-coded in this mode.
1759 Use a library call if one is available and caller says that's ok. */
1761 if (binoptab->handlers[(int) mode].libfunc
1762 && (methods == OPTAB_LIB || methods == OPTAB_LIB_WIDEN))
1766 enum machine_mode op1_mode = mode;
1773 op1_mode = word_mode;
1774 /* Specify unsigned here,
1775 since negative shift counts are meaningless. */
1776 op1x = convert_to_mode (word_mode, op1, 1);
1779 if (GET_MODE (op0) != VOIDmode
1780 && GET_MODE (op0) != mode)
1781 op0 = convert_to_mode (mode, op0, unsignedp);
1783 /* Pass 1 for NO_QUEUE so we don't lose any increments
1784 if the libcall is cse'd or moved. */
1785 value = emit_library_call_value (binoptab->handlers[(int) mode].libfunc,
1786 NULL_RTX, LCT_CONST, mode, 2,
1787 op0, mode, op1x, op1_mode);
1789 insns = get_insns ();
1792 target = gen_reg_rtx (mode);
1793 emit_libcall_block (insns, target, value,
1794 gen_rtx_fmt_ee (binoptab->code, mode, op0, op1));
1799 delete_insns_since (last);
1801 /* It can't be done in this mode. Can we do it in a wider mode? */
1803 if (! (methods == OPTAB_WIDEN || methods == OPTAB_LIB_WIDEN
1804 || methods == OPTAB_MUST_WIDEN))
1806 /* Caller says, don't even try. */
1807 delete_insns_since (entry_last);
1811 /* Compute the value of METHODS to pass to recursive calls.
1812 Don't allow widening to be tried recursively. */
1814 methods = (methods == OPTAB_LIB_WIDEN ? OPTAB_LIB : OPTAB_DIRECT);
1816 /* Look for a wider mode of the same class for which it appears we can do
1819 if (class == MODE_INT || class == MODE_FLOAT || class == MODE_COMPLEX_FLOAT)
1821 for (wider_mode = GET_MODE_WIDER_MODE (mode); wider_mode != VOIDmode;
1822 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
1824 if ((binoptab->handlers[(int) wider_mode].insn_code
1825 != CODE_FOR_nothing)
1826 || (methods == OPTAB_LIB
1827 && binoptab->handlers[(int) wider_mode].libfunc))
1829 rtx xop0 = op0, xop1 = op1;
1832 /* For certain integer operations, we need not actually extend
1833 the narrow operands, as long as we will truncate
1834 the results to the same narrowness. */
1836 if ((binoptab == ior_optab || binoptab == and_optab
1837 || binoptab == xor_optab
1838 || binoptab == add_optab || binoptab == sub_optab
1839 || binoptab == smul_optab || binoptab == ashl_optab)
1840 && class == MODE_INT)
1843 xop0 = widen_operand (xop0, wider_mode, mode,
1844 unsignedp, no_extend);
1846 /* The second operand of a shift must always be extended. */
1847 xop1 = widen_operand (xop1, wider_mode, mode, unsignedp,
1848 no_extend && binoptab != ashl_optab);
1850 temp = expand_binop (wider_mode, binoptab, xop0, xop1, NULL_RTX,
1851 unsignedp, methods);
1854 if (class != MODE_INT)
1857 target = gen_reg_rtx (mode);
1858 convert_move (target, temp, 0);
1862 return gen_lowpart (mode, temp);
1865 delete_insns_since (last);
1870 delete_insns_since (entry_last);
1874 /* Like expand_binop, but for open-coding vectors binops. */
1877 expand_vector_binop (enum machine_mode mode, optab binoptab, rtx op0,
1878 rtx op1, rtx target, int unsignedp,
1879 enum optab_methods methods)
1881 enum machine_mode submode, tmode;
1882 int size, elts, subsize, subbitsize, i;
1883 rtx t, a, b, res, seq;
1884 enum mode_class class;
1886 class = GET_MODE_CLASS (mode);
1888 size = GET_MODE_SIZE (mode);
1889 submode = GET_MODE_INNER (mode);
1891 /* Search for the widest vector mode with the same inner mode that is
1892 still narrower than MODE and that allows to open-code this operator.
1893 Note, if we find such a mode and the handler later decides it can't
1894 do the expansion, we'll be called recursively with the narrower mode. */
1895 for (tmode = GET_CLASS_NARROWEST_MODE (class);
1896 GET_MODE_SIZE (tmode) < GET_MODE_SIZE (mode);
1897 tmode = GET_MODE_WIDER_MODE (tmode))
1899 if (GET_MODE_INNER (tmode) == GET_MODE_INNER (mode)
1900 && binoptab->handlers[(int) tmode].insn_code != CODE_FOR_nothing)
1904 switch (binoptab->code)
1909 tmode = int_mode_for_mode (mode);
1910 if (tmode != BLKmode)
1916 subsize = GET_MODE_SIZE (submode);
1917 subbitsize = GET_MODE_BITSIZE (submode);
1918 elts = size / subsize;
1920 /* If METHODS is OPTAB_DIRECT, we don't insist on the exact mode,
1921 but that we operate on more than one element at a time. */
1922 if (subsize == GET_MODE_UNIT_SIZE (mode) && methods == OPTAB_DIRECT)
1927 /* Errors can leave us with a const0_rtx as operand. */
1928 if (GET_MODE (op0) != mode)
1929 op0 = copy_to_mode_reg (mode, op0);
1930 if (GET_MODE (op1) != mode)
1931 op1 = copy_to_mode_reg (mode, op1);
1934 target = gen_reg_rtx (mode);
1936 for (i = 0; i < elts; ++i)
1938 /* If this is part of a register, and not the first item in the
1939 word, we can't store using a SUBREG - that would clobber
1941 And storing with a SUBREG is only possible for the least
1942 significant part, hence we can't do it for big endian
1943 (unless we want to permute the evaluation order. */
1944 if (GET_CODE (target) == REG
1945 && (BYTES_BIG_ENDIAN
1946 ? subsize < UNITS_PER_WORD
1947 : ((i * subsize) % UNITS_PER_WORD) != 0))
1950 t = simplify_gen_subreg (submode, target, mode, i * subsize);
1951 if (CONSTANT_P (op0))
1952 a = simplify_gen_subreg (submode, op0, mode, i * subsize);
1954 a = extract_bit_field (op0, subbitsize, i * subbitsize, unsignedp,
1955 NULL_RTX, submode, submode, size);
1956 if (CONSTANT_P (op1))
1957 b = simplify_gen_subreg (submode, op1, mode, i * subsize);
1959 b = extract_bit_field (op1, subbitsize, i * subbitsize, unsignedp,
1960 NULL_RTX, submode, submode, size);
1962 if (binoptab->code == DIV)
1964 if (class == MODE_VECTOR_FLOAT)
1965 res = expand_binop (submode, binoptab, a, b, t,
1966 unsignedp, methods);
1968 res = expand_divmod (0, TRUNC_DIV_EXPR, submode,
1969 a, b, t, unsignedp);
1972 res = expand_binop (submode, binoptab, a, b, t,
1973 unsignedp, methods);
1979 emit_move_insn (t, res);
1981 store_bit_field (target, subbitsize, i * subbitsize, submode, res,
1997 /* Like expand_unop but for open-coding vector unops. */
2000 expand_vector_unop (enum machine_mode mode, optab unoptab, rtx op0,
2001 rtx target, int unsignedp)
2003 enum machine_mode submode, tmode;
2004 int size, elts, subsize, subbitsize, i;
2007 size = GET_MODE_SIZE (mode);
2008 submode = GET_MODE_INNER (mode);
2010 /* Search for the widest vector mode with the same inner mode that is
2011 still narrower than MODE and that allows to open-code this operator.
2012 Note, if we find such a mode and the handler later decides it can't
2013 do the expansion, we'll be called recursively with the narrower mode. */
2014 for (tmode = GET_CLASS_NARROWEST_MODE (GET_MODE_CLASS (mode));
2015 GET_MODE_SIZE (tmode) < GET_MODE_SIZE (mode);
2016 tmode = GET_MODE_WIDER_MODE (tmode))
2018 if (GET_MODE_INNER (tmode) == GET_MODE_INNER (mode)
2019 && unoptab->handlers[(int) tmode].insn_code != CODE_FOR_nothing)
2022 /* If there is no negate operation, try doing a subtract from zero. */
2023 if (unoptab == neg_optab && GET_MODE_CLASS (submode) == MODE_INT
2024 /* Avoid infinite recursion when an
2025 error has left us with the wrong mode. */
2026 && GET_MODE (op0) == mode)
2029 temp = expand_binop (mode, sub_optab, CONST0_RTX (mode), op0,
2030 target, unsignedp, OPTAB_DIRECT);
2035 if (unoptab == one_cmpl_optab)
2037 tmode = int_mode_for_mode (mode);
2038 if (tmode != BLKmode)
2042 subsize = GET_MODE_SIZE (submode);
2043 subbitsize = GET_MODE_BITSIZE (submode);
2044 elts = size / subsize;
2046 /* Errors can leave us with a const0_rtx as operand. */
2047 if (GET_MODE (op0) != mode)
2048 op0 = copy_to_mode_reg (mode, op0);
2051 target = gen_reg_rtx (mode);
2055 for (i = 0; i < elts; ++i)
2057 /* If this is part of a register, and not the first item in the
2058 word, we can't store using a SUBREG - that would clobber
2060 And storing with a SUBREG is only possible for the least
2061 significant part, hence we can't do it for big endian
2062 (unless we want to permute the evaluation order. */
2063 if (GET_CODE (target) == REG
2064 && (BYTES_BIG_ENDIAN
2065 ? subsize < UNITS_PER_WORD
2066 : ((i * subsize) % UNITS_PER_WORD) != 0))
2069 t = simplify_gen_subreg (submode, target, mode, i * subsize);
2070 if (CONSTANT_P (op0))
2071 a = simplify_gen_subreg (submode, op0, mode, i * subsize);
2073 a = extract_bit_field (op0, subbitsize, i * subbitsize, unsignedp,
2074 t, submode, submode, size);
2076 res = expand_unop (submode, unoptab, a, t, unsignedp);
2079 emit_move_insn (t, res);
2081 store_bit_field (target, subbitsize, i * subbitsize, submode, res,
2092 /* Expand a binary operator which has both signed and unsigned forms.
2093 UOPTAB is the optab for unsigned operations, and SOPTAB is for
2096 If we widen unsigned operands, we may use a signed wider operation instead
2097 of an unsigned wider operation, since the result would be the same. */
2100 sign_expand_binop (enum machine_mode mode, optab uoptab, optab soptab,
2101 rtx op0, rtx op1, rtx target, int unsignedp,
2102 enum optab_methods methods)
2105 optab direct_optab = unsignedp ? uoptab : soptab;
2106 struct optab wide_soptab;
2108 /* Do it without widening, if possible. */
2109 temp = expand_binop (mode, direct_optab, op0, op1, target,
2110 unsignedp, OPTAB_DIRECT);
2111 if (temp || methods == OPTAB_DIRECT)
2114 /* Try widening to a signed int. Make a fake signed optab that
2115 hides any signed insn for direct use. */
2116 wide_soptab = *soptab;
2117 wide_soptab.handlers[(int) mode].insn_code = CODE_FOR_nothing;
2118 wide_soptab.handlers[(int) mode].libfunc = 0;
2120 temp = expand_binop (mode, &wide_soptab, op0, op1, target,
2121 unsignedp, OPTAB_WIDEN);
2123 /* For unsigned operands, try widening to an unsigned int. */
2124 if (temp == 0 && unsignedp)
2125 temp = expand_binop (mode, uoptab, op0, op1, target,
2126 unsignedp, OPTAB_WIDEN);
2127 if (temp || methods == OPTAB_WIDEN)
2130 /* Use the right width lib call if that exists. */
2131 temp = expand_binop (mode, direct_optab, op0, op1, target, unsignedp, OPTAB_LIB);
2132 if (temp || methods == OPTAB_LIB)
2135 /* Must widen and use a lib call, use either signed or unsigned. */
2136 temp = expand_binop (mode, &wide_soptab, op0, op1, target,
2137 unsignedp, methods);
2141 return expand_binop (mode, uoptab, op0, op1, target,
2142 unsignedp, methods);
2146 /* Generate code to perform an operation specified by BINOPTAB
2147 on operands OP0 and OP1, with two results to TARG1 and TARG2.
2148 We assume that the order of the operands for the instruction
2149 is TARG0, OP0, OP1, TARG1, which would fit a pattern like
2150 [(set TARG0 (operate OP0 OP1)) (set TARG1 (operate ...))].
2152 Either TARG0 or TARG1 may be zero, but what that means is that
2153 the result is not actually wanted. We will generate it into
2154 a dummy pseudo-reg and discard it. They may not both be zero.
2156 Returns 1 if this operation can be performed; 0 if not. */
2159 expand_twoval_binop (optab binoptab, rtx op0, rtx op1, rtx targ0, rtx targ1,
2162 enum machine_mode mode = GET_MODE (targ0 ? targ0 : targ1);
2163 enum mode_class class;
2164 enum machine_mode wider_mode;
2165 rtx entry_last = get_last_insn ();
2168 class = GET_MODE_CLASS (mode);
2170 op0 = protect_from_queue (op0, 0);
2171 op1 = protect_from_queue (op1, 0);
2175 op0 = force_not_mem (op0);
2176 op1 = force_not_mem (op1);
2179 /* If we are inside an appropriately-short loop and one operand is an
2180 expensive constant, force it into a register. */
2181 if (CONSTANT_P (op0) && preserve_subexpressions_p ()
2182 && rtx_cost (op0, binoptab->code) > COSTS_N_INSNS (1))
2183 op0 = force_reg (mode, op0);
2185 if (CONSTANT_P (op1) && preserve_subexpressions_p ()
2186 && rtx_cost (op1, binoptab->code) > COSTS_N_INSNS (1))
2187 op1 = force_reg (mode, op1);
2190 targ0 = protect_from_queue (targ0, 1);
2192 targ0 = gen_reg_rtx (mode);
2194 targ1 = protect_from_queue (targ1, 1);
2196 targ1 = gen_reg_rtx (mode);
2198 /* Record where to go back to if we fail. */
2199 last = get_last_insn ();
2201 if (binoptab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
2203 int icode = (int) binoptab->handlers[(int) mode].insn_code;
2204 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
2205 enum machine_mode mode1 = insn_data[icode].operand[2].mode;
2207 rtx xop0 = op0, xop1 = op1;
2209 /* In case the insn wants input operands in modes different from
2210 those of the actual operands, convert the operands. It would
2211 seem that we don't need to convert CONST_INTs, but we do, so
2212 that they're properly zero-extended, sign-extended or truncated
2215 if (GET_MODE (op0) != mode0 && mode0 != VOIDmode)
2216 xop0 = convert_modes (mode0,
2217 GET_MODE (op0) != VOIDmode
2222 if (GET_MODE (op1) != mode1 && mode1 != VOIDmode)
2223 xop1 = convert_modes (mode1,
2224 GET_MODE (op1) != VOIDmode
2229 /* Now, if insn doesn't accept these operands, put them into pseudos. */
2230 if (! (*insn_data[icode].operand[1].predicate) (xop0, mode0))
2231 xop0 = copy_to_mode_reg (mode0, xop0);
2233 if (! (*insn_data[icode].operand[2].predicate) (xop1, mode1))
2234 xop1 = copy_to_mode_reg (mode1, xop1);
2236 /* We could handle this, but we should always be called with a pseudo
2237 for our targets and all insns should take them as outputs. */
2238 if (! (*insn_data[icode].operand[0].predicate) (targ0, mode)
2239 || ! (*insn_data[icode].operand[3].predicate) (targ1, mode))
2242 pat = GEN_FCN (icode) (targ0, xop0, xop1, targ1);
2249 delete_insns_since (last);
2252 /* It can't be done in this mode. Can we do it in a wider mode? */
2254 if (class == MODE_INT || class == MODE_FLOAT || class == MODE_COMPLEX_FLOAT)
2256 for (wider_mode = GET_MODE_WIDER_MODE (mode); wider_mode != VOIDmode;
2257 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
2259 if (binoptab->handlers[(int) wider_mode].insn_code
2260 != CODE_FOR_nothing)
2262 rtx t0 = gen_reg_rtx (wider_mode);
2263 rtx t1 = gen_reg_rtx (wider_mode);
2264 rtx cop0 = convert_modes (wider_mode, mode, op0, unsignedp);
2265 rtx cop1 = convert_modes (wider_mode, mode, op1, unsignedp);
2267 if (expand_twoval_binop (binoptab, cop0, cop1,
2270 convert_move (targ0, t0, unsignedp);
2271 convert_move (targ1, t1, unsignedp);
2275 delete_insns_since (last);
2280 delete_insns_since (entry_last);
2284 /* Wrapper around expand_unop which takes an rtx code to specify
2285 the operation to perform, not an optab pointer. All other
2286 arguments are the same. */
2288 expand_simple_unop (enum machine_mode mode, enum rtx_code code, rtx op0,
2289 rtx target, int unsignedp)
2291 optab unop = code_to_optab[(int) code];
2295 return expand_unop (mode, unop, op0, target, unsignedp);
2301 (clz:wide (zero_extend:wide x)) - ((width wide) - (width narrow)). */
2303 widen_clz (enum machine_mode mode, rtx op0, rtx target)
2305 enum mode_class class = GET_MODE_CLASS (mode);
2306 if (class == MODE_INT || class == MODE_FLOAT || class == MODE_COMPLEX_FLOAT)
2308 enum machine_mode wider_mode;
2309 for (wider_mode = GET_MODE_WIDER_MODE (mode); wider_mode != VOIDmode;
2310 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
2312 if (clz_optab->handlers[(int) wider_mode].insn_code
2313 != CODE_FOR_nothing)
2315 rtx xop0, temp, last;
2317 last = get_last_insn ();
2320 target = gen_reg_rtx (mode);
2321 xop0 = widen_operand (op0, wider_mode, mode, true, false);
2322 temp = expand_unop (wider_mode, clz_optab, xop0, NULL_RTX, true);
2324 temp = expand_binop (wider_mode, sub_optab, temp,
2325 GEN_INT (GET_MODE_BITSIZE (wider_mode)
2326 - GET_MODE_BITSIZE (mode)),
2327 target, true, OPTAB_DIRECT);
2329 delete_insns_since (last);
2338 /* Try calculating (parity x) as (and (popcount x) 1), where
2339 popcount can also be done in a wider mode. */
2341 expand_parity (enum machine_mode mode, rtx op0, rtx target)
2343 enum mode_class class = GET_MODE_CLASS (mode);
2344 if (class == MODE_INT || class == MODE_FLOAT || class == MODE_COMPLEX_FLOAT)
2346 enum machine_mode wider_mode;
2347 for (wider_mode = mode; wider_mode != VOIDmode;
2348 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
2350 if (popcount_optab->handlers[(int) wider_mode].insn_code
2351 != CODE_FOR_nothing)
2353 rtx xop0, temp, last;
2355 last = get_last_insn ();
2358 target = gen_reg_rtx (mode);
2359 xop0 = widen_operand (op0, wider_mode, mode, true, false);
2360 temp = expand_unop (wider_mode, popcount_optab, xop0, NULL_RTX,
2363 temp = expand_binop (wider_mode, and_optab, temp, const1_rtx,
2364 target, true, OPTAB_DIRECT);
2366 delete_insns_since (last);
2375 /* Generate code to perform an operation specified by UNOPTAB
2376 on operand OP0, with result having machine-mode MODE.
2378 UNSIGNEDP is for the case where we have to widen the operands
2379 to perform the operation. It says to use zero-extension.
2381 If TARGET is nonzero, the value
2382 is generated there, if it is convenient to do so.
2383 In all cases an rtx is returned for the locus of the value;
2384 this may or may not be TARGET. */
2387 expand_unop (enum machine_mode mode, optab unoptab, rtx op0, rtx target,
2390 enum mode_class class;
2391 enum machine_mode wider_mode;
2393 rtx last = get_last_insn ();
2396 class = GET_MODE_CLASS (mode);
2398 op0 = protect_from_queue (op0, 0);
2402 op0 = force_not_mem (op0);
2406 target = protect_from_queue (target, 1);
2408 if (unoptab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
2410 int icode = (int) unoptab->handlers[(int) mode].insn_code;
2411 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
2417 temp = gen_reg_rtx (mode);
2419 if (GET_MODE (xop0) != VOIDmode
2420 && GET_MODE (xop0) != mode0)
2421 xop0 = convert_to_mode (mode0, xop0, unsignedp);
2423 /* Now, if insn doesn't accept our operand, put it into a pseudo. */
2425 if (! (*insn_data[icode].operand[1].predicate) (xop0, mode0))
2426 xop0 = copy_to_mode_reg (mode0, xop0);
2428 if (! (*insn_data[icode].operand[0].predicate) (temp, mode))
2429 temp = gen_reg_rtx (mode);
2431 pat = GEN_FCN (icode) (temp, xop0);
2434 if (INSN_P (pat) && NEXT_INSN (pat) != NULL_RTX
2435 && ! add_equal_note (pat, temp, unoptab->code, xop0, NULL_RTX))
2437 delete_insns_since (last);
2438 return expand_unop (mode, unoptab, op0, NULL_RTX, unsignedp);
2446 delete_insns_since (last);
2449 /* It can't be done in this mode. Can we open-code it in a wider mode? */
2451 /* Widening clz needs special treatment. */
2452 if (unoptab == clz_optab)
2454 temp = widen_clz (mode, op0, target);
2461 if (class == MODE_INT || class == MODE_FLOAT || class == MODE_COMPLEX_FLOAT)
2462 for (wider_mode = GET_MODE_WIDER_MODE (mode); wider_mode != VOIDmode;
2463 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
2465 if (unoptab->handlers[(int) wider_mode].insn_code != CODE_FOR_nothing)
2469 /* For certain operations, we need not actually extend
2470 the narrow operand, as long as we will truncate the
2471 results to the same narrowness. */
2473 xop0 = widen_operand (xop0, wider_mode, mode, unsignedp,
2474 (unoptab == neg_optab
2475 || unoptab == one_cmpl_optab)
2476 && class == MODE_INT);
2478 temp = expand_unop (wider_mode, unoptab, xop0, NULL_RTX,
2483 if (class != MODE_INT)
2486 target = gen_reg_rtx (mode);
2487 convert_move (target, temp, 0);
2491 return gen_lowpart (mode, temp);
2494 delete_insns_since (last);
2498 /* These can be done a word at a time. */
2499 if (unoptab == one_cmpl_optab
2500 && class == MODE_INT
2501 && GET_MODE_SIZE (mode) > UNITS_PER_WORD
2502 && unoptab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing)
2507 if (target == 0 || target == op0)
2508 target = gen_reg_rtx (mode);
2512 /* Do the actual arithmetic. */
2513 for (i = 0; i < GET_MODE_BITSIZE (mode) / BITS_PER_WORD; i++)
2515 rtx target_piece = operand_subword (target, i, 1, mode);
2516 rtx x = expand_unop (word_mode, unoptab,
2517 operand_subword_force (op0, i, mode),
2518 target_piece, unsignedp);
2520 if (target_piece != x)
2521 emit_move_insn (target_piece, x);
2524 insns = get_insns ();
2527 emit_no_conflict_block (insns, target, op0, NULL_RTX,
2528 gen_rtx_fmt_e (unoptab->code, mode,
2533 /* Open-code the complex negation operation. */
2534 else if (unoptab->code == NEG
2535 && (class == MODE_COMPLEX_FLOAT || class == MODE_COMPLEX_INT))
2541 /* Find the correct mode for the real and imaginary parts. */
2542 enum machine_mode submode = GET_MODE_INNER (mode);
2544 if (submode == BLKmode)
2548 target = gen_reg_rtx (mode);
2552 target_piece = gen_imagpart (submode, target);
2553 x = expand_unop (submode, unoptab,
2554 gen_imagpart (submode, op0),
2555 target_piece, unsignedp);
2556 if (target_piece != x)
2557 emit_move_insn (target_piece, x);
2559 target_piece = gen_realpart (submode, target);
2560 x = expand_unop (submode, unoptab,
2561 gen_realpart (submode, op0),
2562 target_piece, unsignedp);
2563 if (target_piece != x)
2564 emit_move_insn (target_piece, x);
2569 emit_no_conflict_block (seq, target, op0, 0,
2570 gen_rtx_fmt_e (unoptab->code, mode,
2575 /* Try negating floating point values by flipping the sign bit. */
2576 if (unoptab->code == NEG && class == MODE_FLOAT
2577 && GET_MODE_BITSIZE (mode) <= 2 * HOST_BITS_PER_WIDE_INT)
2579 const struct real_format *fmt = REAL_MODE_FORMAT (mode);
2580 enum machine_mode imode = int_mode_for_mode (mode);
2581 int bitpos = (fmt != 0) ? fmt->signbit : -1;
2583 if (imode != BLKmode && bitpos >= 0 && fmt->has_signed_zero)
2585 HOST_WIDE_INT hi, lo;
2586 rtx last = get_last_insn ();
2588 /* Handle targets with different FP word orders. */
2589 if (FLOAT_WORDS_BIG_ENDIAN != WORDS_BIG_ENDIAN)
2591 int nwords = GET_MODE_BITSIZE (mode) / BITS_PER_WORD;
2592 int word = nwords - (bitpos / BITS_PER_WORD) - 1;
2593 bitpos = word * BITS_PER_WORD + bitpos % BITS_PER_WORD;
2596 if (bitpos < HOST_BITS_PER_WIDE_INT)
2599 lo = (HOST_WIDE_INT) 1 << bitpos;
2603 hi = (HOST_WIDE_INT) 1 << (bitpos - HOST_BITS_PER_WIDE_INT);
2606 temp = expand_binop (imode, xor_optab,
2607 gen_lowpart (imode, op0),
2608 immed_double_const (lo, hi, imode),
2609 NULL_RTX, 1, OPTAB_LIB_WIDEN);
2614 target = gen_reg_rtx (mode);
2615 insn = emit_move_insn (target, gen_lowpart (mode, temp));
2616 set_unique_reg_note (insn, REG_EQUAL,
2617 gen_rtx_fmt_e (NEG, mode,
2621 delete_insns_since (last);
2625 /* Try calculating parity (x) as popcount (x) % 2. */
2626 if (unoptab == parity_optab)
2628 temp = expand_parity (mode, op0, target);
2634 /* Now try a library call in this mode. */
2635 if (unoptab->handlers[(int) mode].libfunc)
2639 enum machine_mode outmode = mode;
2641 /* All of these functions return small values. Thus we choose to
2642 have them return something that isn't a double-word. */
2643 if (unoptab == ffs_optab || unoptab == clz_optab || unoptab == ctz_optab
2644 || unoptab == popcount_optab || unoptab == parity_optab)
2646 = GET_MODE (hard_libcall_value (TYPE_MODE (integer_type_node)));
2650 /* Pass 1 for NO_QUEUE so we don't lose any increments
2651 if the libcall is cse'd or moved. */
2652 value = emit_library_call_value (unoptab->handlers[(int) mode].libfunc,
2653 NULL_RTX, LCT_CONST, outmode,
2655 insns = get_insns ();
2658 target = gen_reg_rtx (outmode);
2659 emit_libcall_block (insns, target, value,
2660 gen_rtx_fmt_e (unoptab->code, mode, op0));
2665 if (class == MODE_VECTOR_FLOAT || class == MODE_VECTOR_INT)
2666 return expand_vector_unop (mode, unoptab, op0, target, unsignedp);
2668 /* It can't be done in this mode. Can we do it in a wider mode? */
2670 if (class == MODE_INT || class == MODE_FLOAT || class == MODE_COMPLEX_FLOAT)
2672 for (wider_mode = GET_MODE_WIDER_MODE (mode); wider_mode != VOIDmode;
2673 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
2675 if ((unoptab->handlers[(int) wider_mode].insn_code
2676 != CODE_FOR_nothing)
2677 || unoptab->handlers[(int) wider_mode].libfunc)
2681 /* For certain operations, we need not actually extend
2682 the narrow operand, as long as we will truncate the
2683 results to the same narrowness. */
2685 xop0 = widen_operand (xop0, wider_mode, mode, unsignedp,
2686 (unoptab == neg_optab
2687 || unoptab == one_cmpl_optab)
2688 && class == MODE_INT);
2690 temp = expand_unop (wider_mode, unoptab, xop0, NULL_RTX,
2693 /* If we are generating clz using wider mode, adjust the
2695 if (unoptab == clz_optab && temp != 0)
2696 temp = expand_binop (wider_mode, sub_optab, temp,
2697 GEN_INT (GET_MODE_BITSIZE (wider_mode)
2698 - GET_MODE_BITSIZE (mode)),
2699 target, true, OPTAB_DIRECT);
2703 if (class != MODE_INT)
2706 target = gen_reg_rtx (mode);
2707 convert_move (target, temp, 0);
2711 return gen_lowpart (mode, temp);
2714 delete_insns_since (last);
2719 /* If there is no negate operation, try doing a subtract from zero.
2720 The US Software GOFAST library needs this. */
2721 if (unoptab->code == NEG)
2724 temp = expand_binop (mode,
2725 unoptab == negv_optab ? subv_optab : sub_optab,
2726 CONST0_RTX (mode), op0,
2727 target, unsignedp, OPTAB_LIB_WIDEN);
2735 /* Emit code to compute the absolute value of OP0, with result to
2736 TARGET if convenient. (TARGET may be 0.) The return value says
2737 where the result actually is to be found.
2739 MODE is the mode of the operand; the mode of the result is
2740 different but can be deduced from MODE.
2745 expand_abs_nojump (enum machine_mode mode, rtx op0, rtx target,
2746 int result_unsignedp)
2751 result_unsignedp = 1;
2753 /* First try to do it with a special abs instruction. */
2754 temp = expand_unop (mode, result_unsignedp ? abs_optab : absv_optab,
2759 /* For floating point modes, try clearing the sign bit. */
2760 if (GET_MODE_CLASS (mode) == MODE_FLOAT
2761 && GET_MODE_BITSIZE (mode) <= 2 * HOST_BITS_PER_WIDE_INT)
2763 const struct real_format *fmt = REAL_MODE_FORMAT (mode);
2764 enum machine_mode imode = int_mode_for_mode (mode);
2765 int bitpos = (fmt != 0) ? fmt->signbit : -1;
2767 if (imode != BLKmode && bitpos >= 0)
2769 HOST_WIDE_INT hi, lo;
2770 rtx last = get_last_insn ();
2772 /* Handle targets with different FP word orders. */
2773 if (FLOAT_WORDS_BIG_ENDIAN != WORDS_BIG_ENDIAN)
2775 int nwords = GET_MODE_BITSIZE (mode) / BITS_PER_WORD;
2776 int word = nwords - (bitpos / BITS_PER_WORD) - 1;
2777 bitpos = word * BITS_PER_WORD + bitpos % BITS_PER_WORD;
2780 if (bitpos < HOST_BITS_PER_WIDE_INT)
2783 lo = (HOST_WIDE_INT) 1 << bitpos;
2787 hi = (HOST_WIDE_INT) 1 << (bitpos - HOST_BITS_PER_WIDE_INT);
2790 temp = expand_binop (imode, and_optab,
2791 gen_lowpart (imode, op0),
2792 immed_double_const (~lo, ~hi, imode),
2793 NULL_RTX, 1, OPTAB_LIB_WIDEN);
2798 target = gen_reg_rtx (mode);
2799 insn = emit_move_insn (target, gen_lowpart (mode, temp));
2800 set_unique_reg_note (insn, REG_EQUAL,
2801 gen_rtx_fmt_e (ABS, mode,
2805 delete_insns_since (last);
2809 /* If we have a MAX insn, we can do this as MAX (x, -x). */
2810 if (smax_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
2812 rtx last = get_last_insn ();
2814 temp = expand_unop (mode, neg_optab, op0, NULL_RTX, 0);
2816 temp = expand_binop (mode, smax_optab, op0, temp, target, 0,
2822 delete_insns_since (last);
2825 /* If this machine has expensive jumps, we can do integer absolute
2826 value of X as (((signed) x >> (W-1)) ^ x) - ((signed) x >> (W-1)),
2827 where W is the width of MODE. */
2829 if (GET_MODE_CLASS (mode) == MODE_INT && BRANCH_COST >= 2)
2831 rtx extended = expand_shift (RSHIFT_EXPR, mode, op0,
2832 size_int (GET_MODE_BITSIZE (mode) - 1),
2835 temp = expand_binop (mode, xor_optab, extended, op0, target, 0,
2838 temp = expand_binop (mode, result_unsignedp ? sub_optab : subv_optab,
2839 temp, extended, target, 0, OPTAB_LIB_WIDEN);
2849 expand_abs (enum machine_mode mode, rtx op0, rtx target,
2850 int result_unsignedp, int safe)
2855 result_unsignedp = 1;
2857 temp = expand_abs_nojump (mode, op0, target, result_unsignedp);
2861 /* If that does not win, use conditional jump and negate. */
2863 /* It is safe to use the target if it is the same
2864 as the source if this is also a pseudo register */
2865 if (op0 == target && GET_CODE (op0) == REG
2866 && REGNO (op0) >= FIRST_PSEUDO_REGISTER)
2869 op1 = gen_label_rtx ();
2870 if (target == 0 || ! safe
2871 || GET_MODE (target) != mode
2872 || (GET_CODE (target) == MEM && MEM_VOLATILE_P (target))
2873 || (GET_CODE (target) == REG
2874 && REGNO (target) < FIRST_PSEUDO_REGISTER))
2875 target = gen_reg_rtx (mode);
2877 emit_move_insn (target, op0);
2880 /* If this mode is an integer too wide to compare properly,
2881 compare word by word. Rely on CSE to optimize constant cases. */
2882 if (GET_MODE_CLASS (mode) == MODE_INT
2883 && ! can_compare_p (GE, mode, ccp_jump))
2884 do_jump_by_parts_greater_rtx (mode, 0, target, const0_rtx,
2887 do_compare_rtx_and_jump (target, CONST0_RTX (mode), GE, 0, mode,
2888 NULL_RTX, NULL_RTX, op1);
2890 op0 = expand_unop (mode, result_unsignedp ? neg_optab : negv_optab,
2893 emit_move_insn (target, op0);
2899 /* Emit code to compute the absolute value of OP0, with result to
2900 TARGET if convenient. (TARGET may be 0.) The return value says
2901 where the result actually is to be found.
2903 MODE is the mode of the operand; the mode of the result is
2904 different but can be deduced from MODE.
2906 UNSIGNEDP is relevant for complex integer modes. */
2909 expand_complex_abs (enum machine_mode mode, rtx op0, rtx target,
2912 enum mode_class class = GET_MODE_CLASS (mode);
2913 enum machine_mode wider_mode;
2915 rtx entry_last = get_last_insn ();
2918 optab this_abs_optab;
2920 /* Find the correct mode for the real and imaginary parts. */
2921 enum machine_mode submode = GET_MODE_INNER (mode);
2923 if (submode == BLKmode)
2926 op0 = protect_from_queue (op0, 0);
2930 op0 = force_not_mem (op0);
2933 last = get_last_insn ();
2936 target = protect_from_queue (target, 1);
2938 this_abs_optab = ! unsignedp && flag_trapv
2939 && (GET_MODE_CLASS(mode) == MODE_INT)
2940 ? absv_optab : abs_optab;
2942 if (this_abs_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
2944 int icode = (int) this_abs_optab->handlers[(int) mode].insn_code;
2945 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
2951 temp = gen_reg_rtx (submode);
2953 if (GET_MODE (xop0) != VOIDmode
2954 && GET_MODE (xop0) != mode0)
2955 xop0 = convert_to_mode (mode0, xop0, unsignedp);
2957 /* Now, if insn doesn't accept our operand, put it into a pseudo. */
2959 if (! (*insn_data[icode].operand[1].predicate) (xop0, mode0))
2960 xop0 = copy_to_mode_reg (mode0, xop0);
2962 if (! (*insn_data[icode].operand[0].predicate) (temp, submode))
2963 temp = gen_reg_rtx (submode);
2965 pat = GEN_FCN (icode) (temp, xop0);
2968 if (INSN_P (pat) && NEXT_INSN (pat) != NULL_RTX
2969 && ! add_equal_note (pat, temp, this_abs_optab->code, xop0,
2972 delete_insns_since (last);
2973 return expand_unop (mode, this_abs_optab, op0, NULL_RTX,
2982 delete_insns_since (last);
2985 /* It can't be done in this mode. Can we open-code it in a wider mode? */
2987 for (wider_mode = GET_MODE_WIDER_MODE (mode); wider_mode != VOIDmode;
2988 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
2990 if (this_abs_optab->handlers[(int) wider_mode].insn_code
2991 != CODE_FOR_nothing)
2995 xop0 = convert_modes (wider_mode, mode, xop0, unsignedp);
2996 temp = expand_complex_abs (wider_mode, xop0, NULL_RTX, unsignedp);
3000 if (class != MODE_COMPLEX_INT)
3003 target = gen_reg_rtx (submode);
3004 convert_move (target, temp, 0);
3008 return gen_lowpart (submode, temp);
3011 delete_insns_since (last);
3015 /* Open-code the complex absolute-value operation
3016 if we can open-code sqrt. Otherwise it's not worth while. */
3017 if (sqrt_optab->handlers[(int) submode].insn_code != CODE_FOR_nothing
3020 rtx real, imag, total;
3022 real = gen_realpart (submode, op0);
3023 imag = gen_imagpart (submode, op0);
3025 /* Square both parts. */
3026 real = expand_mult (submode, real, real, NULL_RTX, 0);
3027 imag = expand_mult (submode, imag, imag, NULL_RTX, 0);
3029 /* Sum the parts. */
3030 total = expand_binop (submode, add_optab, real, imag, NULL_RTX,
3031 0, OPTAB_LIB_WIDEN);
3033 /* Get sqrt in TARGET. Set TARGET to where the result is. */
3034 target = expand_unop (submode, sqrt_optab, total, target, 0);
3036 delete_insns_since (last);
3041 /* Now try a library call in this mode. */
3042 if (this_abs_optab->handlers[(int) mode].libfunc)
3049 /* Pass 1 for NO_QUEUE so we don't lose any increments
3050 if the libcall is cse'd or moved. */
3051 value = emit_library_call_value (abs_optab->handlers[(int) mode].libfunc,
3052 NULL_RTX, LCT_CONST, submode, 1, op0, mode);
3053 insns = get_insns ();
3056 target = gen_reg_rtx (submode);
3057 emit_libcall_block (insns, target, value,
3058 gen_rtx_fmt_e (this_abs_optab->code, mode, op0));
3063 /* It can't be done in this mode. Can we do it in a wider mode? */
3065 for (wider_mode = GET_MODE_WIDER_MODE (mode); wider_mode != VOIDmode;
3066 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
3068 if ((this_abs_optab->handlers[(int) wider_mode].insn_code
3069 != CODE_FOR_nothing)
3070 || this_abs_optab->handlers[(int) wider_mode].libfunc)
3074 xop0 = convert_modes (wider_mode, mode, xop0, unsignedp);
3076 temp = expand_complex_abs (wider_mode, xop0, NULL_RTX, unsignedp);
3080 if (class != MODE_COMPLEX_INT)
3083 target = gen_reg_rtx (submode);
3084 convert_move (target, temp, 0);
3088 return gen_lowpart (submode, temp);
3091 delete_insns_since (last);
3095 delete_insns_since (entry_last);
3099 /* Generate an instruction whose insn-code is INSN_CODE,
3100 with two operands: an output TARGET and an input OP0.
3101 TARGET *must* be nonzero, and the output is always stored there.
3102 CODE is an rtx code such that (CODE OP0) is an rtx that describes
3103 the value that is stored into TARGET. */
3106 emit_unop_insn (int icode, rtx target, rtx op0, enum rtx_code code)
3109 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
3112 temp = target = protect_from_queue (target, 1);
3114 op0 = protect_from_queue (op0, 0);
3116 /* Sign and zero extension from memory is often done specially on
3117 RISC machines, so forcing into a register here can pessimize
3119 if (flag_force_mem && code != SIGN_EXTEND && code != ZERO_EXTEND)
3120 op0 = force_not_mem (op0);
3122 /* Now, if insn does not accept our operands, put them into pseudos. */
3124 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
3125 op0 = copy_to_mode_reg (mode0, op0);
3127 if (! (*insn_data[icode].operand[0].predicate) (temp, GET_MODE (temp))
3128 || (flag_force_mem && GET_CODE (temp) == MEM))
3129 temp = gen_reg_rtx (GET_MODE (temp));
3131 pat = GEN_FCN (icode) (temp, op0);
3133 if (INSN_P (pat) && NEXT_INSN (pat) != NULL_RTX && code != UNKNOWN)
3134 add_equal_note (pat, temp, code, op0, NULL_RTX);
3139 emit_move_insn (target, temp);
3142 /* Emit code to perform a series of operations on a multi-word quantity, one
3145 Such a block is preceded by a CLOBBER of the output, consists of multiple
3146 insns, each setting one word of the output, and followed by a SET copying
3147 the output to itself.
3149 Each of the insns setting words of the output receives a REG_NO_CONFLICT
3150 note indicating that it doesn't conflict with the (also multi-word)
3151 inputs. The entire block is surrounded by REG_LIBCALL and REG_RETVAL
3154 INSNS is a block of code generated to perform the operation, not including
3155 the CLOBBER and final copy. All insns that compute intermediate values
3156 are first emitted, followed by the block as described above.
3158 TARGET, OP0, and OP1 are the output and inputs of the operations,
3159 respectively. OP1 may be zero for a unary operation.
3161 EQUIV, if nonzero, is an expression to be placed into a REG_EQUAL note
3164 If TARGET is not a register, INSNS is simply emitted with no special
3165 processing. Likewise if anything in INSNS is not an INSN or if
3166 there is a libcall block inside INSNS.
3168 The final insn emitted is returned. */
3171 emit_no_conflict_block (rtx insns, rtx target, rtx op0, rtx op1, rtx equiv)
3173 rtx prev, next, first, last, insn;
3175 if (GET_CODE (target) != REG || reload_in_progress)
3176 return emit_insn (insns);
3178 for (insn = insns; insn; insn = NEXT_INSN (insn))
3179 if (GET_CODE (insn) != INSN
3180 || find_reg_note (insn, REG_LIBCALL, NULL_RTX))
3181 return emit_insn (insns);
3183 /* First emit all insns that do not store into words of the output and remove
3184 these from the list. */
3185 for (insn = insns; insn; insn = next)
3190 next = NEXT_INSN (insn);
3192 /* Some ports (cris) create a libcall regions at their own. We must
3193 avoid any potential nesting of LIBCALLs. */
3194 if ((note = find_reg_note (insn, REG_LIBCALL, NULL)) != NULL)
3195 remove_note (insn, note);
3196 if ((note = find_reg_note (insn, REG_RETVAL, NULL)) != NULL)
3197 remove_note (insn, note);
3199 if (GET_CODE (PATTERN (insn)) == SET || GET_CODE (PATTERN (insn)) == USE
3200 || GET_CODE (PATTERN (insn)) == CLOBBER)
3201 set = PATTERN (insn);
3202 else if (GET_CODE (PATTERN (insn)) == PARALLEL)
3204 for (i = 0; i < XVECLEN (PATTERN (insn), 0); i++)
3205 if (GET_CODE (XVECEXP (PATTERN (insn), 0, i)) == SET)
3207 set = XVECEXP (PATTERN (insn), 0, i);
3215 if (! reg_overlap_mentioned_p (target, SET_DEST (set)))
3217 if (PREV_INSN (insn))
3218 NEXT_INSN (PREV_INSN (insn)) = next;
3223 PREV_INSN (next) = PREV_INSN (insn);
3229 prev = get_last_insn ();
3231 /* Now write the CLOBBER of the output, followed by the setting of each
3232 of the words, followed by the final copy. */
3233 if (target != op0 && target != op1)
3234 emit_insn (gen_rtx_CLOBBER (VOIDmode, target));
3236 for (insn = insns; insn; insn = next)
3238 next = NEXT_INSN (insn);
3241 if (op1 && GET_CODE (op1) == REG)
3242 REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_NO_CONFLICT, op1,
3245 if (op0 && GET_CODE (op0) == REG)
3246 REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_NO_CONFLICT, op0,
3250 if (mov_optab->handlers[(int) GET_MODE (target)].insn_code
3251 != CODE_FOR_nothing)
3253 last = emit_move_insn (target, target);
3255 set_unique_reg_note (last, REG_EQUAL, equiv);
3259 last = get_last_insn ();
3261 /* Remove any existing REG_EQUAL note from "last", or else it will
3262 be mistaken for a note referring to the full contents of the
3263 alleged libcall value when found together with the REG_RETVAL
3264 note added below. An existing note can come from an insn
3265 expansion at "last". */
3266 remove_note (last, find_reg_note (last, REG_EQUAL, NULL_RTX));
3270 first = get_insns ();
3272 first = NEXT_INSN (prev);
3274 /* Encapsulate the block so it gets manipulated as a unit. */
3275 REG_NOTES (first) = gen_rtx_INSN_LIST (REG_LIBCALL, last,
3277 REG_NOTES (last) = gen_rtx_INSN_LIST (REG_RETVAL, first, REG_NOTES (last));
3282 /* Emit code to make a call to a constant function or a library call.
3284 INSNS is a list containing all insns emitted in the call.
3285 These insns leave the result in RESULT. Our block is to copy RESULT
3286 to TARGET, which is logically equivalent to EQUIV.
3288 We first emit any insns that set a pseudo on the assumption that these are
3289 loading constants into registers; doing so allows them to be safely cse'ed
3290 between blocks. Then we emit all the other insns in the block, followed by
3291 an insn to move RESULT to TARGET. This last insn will have a REQ_EQUAL
3292 note with an operand of EQUIV.
3294 Moving assignments to pseudos outside of the block is done to improve
3295 the generated code, but is not required to generate correct code,
3296 hence being unable to move an assignment is not grounds for not making
3297 a libcall block. There are two reasons why it is safe to leave these
3298 insns inside the block: First, we know that these pseudos cannot be
3299 used in generated RTL outside the block since they are created for
3300 temporary purposes within the block. Second, CSE will not record the
3301 values of anything set inside a libcall block, so we know they must
3302 be dead at the end of the block.
3304 Except for the first group of insns (the ones setting pseudos), the
3305 block is delimited by REG_RETVAL and REG_LIBCALL notes. */
3308 emit_libcall_block (rtx insns, rtx target, rtx result, rtx equiv)
3310 rtx final_dest = target;
3311 rtx prev, next, first, last, insn;
3313 /* If this is a reg with REG_USERVAR_P set, then it could possibly turn
3314 into a MEM later. Protect the libcall block from this change. */
3315 if (! REG_P (target) || REG_USERVAR_P (target))
3316 target = gen_reg_rtx (GET_MODE (target));
3318 /* If we're using non-call exceptions, a libcall corresponding to an
3319 operation that may trap may also trap. */
3320 if (flag_non_call_exceptions && may_trap_p (equiv))
3322 for (insn = insns; insn; insn = NEXT_INSN (insn))
3323 if (GET_CODE (insn) == CALL_INSN)
3325 rtx note = find_reg_note (insn, REG_EH_REGION, NULL_RTX);
3327 if (note != 0 && INTVAL (XEXP (note, 0)) <= 0)
3328 remove_note (insn, note);
3332 /* look for any CALL_INSNs in this sequence, and attach a REG_EH_REGION
3333 reg note to indicate that this call cannot throw or execute a nonlocal
3334 goto (unless there is already a REG_EH_REGION note, in which case
3336 for (insn = insns; insn; insn = NEXT_INSN (insn))
3337 if (GET_CODE (insn) == CALL_INSN)
3339 rtx note = find_reg_note (insn, REG_EH_REGION, NULL_RTX);
3342 XEXP (note, 0) = constm1_rtx;
3344 REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_EH_REGION, constm1_rtx,
3348 /* First emit all insns that set pseudos. Remove them from the list as
3349 we go. Avoid insns that set pseudos which were referenced in previous
3350 insns. These can be generated by move_by_pieces, for example,
3351 to update an address. Similarly, avoid insns that reference things
3352 set in previous insns. */
3354 for (insn = insns; insn; insn = next)
3356 rtx set = single_set (insn);
3359 /* Some ports (cris) create a libcall regions at their own. We must
3360 avoid any potential nesting of LIBCALLs. */
3361 if ((note = find_reg_note (insn, REG_LIBCALL, NULL)) != NULL)
3362 remove_note (insn, note);
3363 if ((note = find_reg_note (insn, REG_RETVAL, NULL)) != NULL)
3364 remove_note (insn, note);
3366 next = NEXT_INSN (insn);
3368 if (set != 0 && GET_CODE (SET_DEST (set)) == REG
3369 && REGNO (SET_DEST (set)) >= FIRST_PSEUDO_REGISTER
3371 || ((! INSN_P(insns)
3372 || ! reg_mentioned_p (SET_DEST (set), PATTERN (insns)))
3373 && ! reg_used_between_p (SET_DEST (set), insns, insn)
3374 && ! modified_in_p (SET_SRC (set), insns)
3375 && ! modified_between_p (SET_SRC (set), insns, insn))))
3377 if (PREV_INSN (insn))
3378 NEXT_INSN (PREV_INSN (insn)) = next;
3383 PREV_INSN (next) = PREV_INSN (insn);
3388 /* Some ports use a loop to copy large arguments onto the stack.
3389 Don't move anything outside such a loop. */
3390 if (GET_CODE (insn) == CODE_LABEL)
3394 prev = get_last_insn ();
3396 /* Write the remaining insns followed by the final copy. */
3398 for (insn = insns; insn; insn = next)
3400 next = NEXT_INSN (insn);
3405 last = emit_move_insn (target, result);
3406 if (mov_optab->handlers[(int) GET_MODE (target)].insn_code
3407 != CODE_FOR_nothing)
3408 set_unique_reg_note (last, REG_EQUAL, copy_rtx (equiv));
3411 /* Remove any existing REG_EQUAL note from "last", or else it will
3412 be mistaken for a note referring to the full contents of the
3413 libcall value when found together with the REG_RETVAL note added
3414 below. An existing note can come from an insn expansion at
3416 remove_note (last, find_reg_note (last, REG_EQUAL, NULL_RTX));
3419 if (final_dest != target)
3420 emit_move_insn (final_dest, target);
3423 first = get_insns ();
3425 first = NEXT_INSN (prev);
3427 /* Encapsulate the block so it gets manipulated as a unit. */
3428 if (!flag_non_call_exceptions || !may_trap_p (equiv))
3430 /* We can't attach the REG_LIBCALL and REG_RETVAL notes
3431 when the encapsulated region would not be in one basic block,
3432 i.e. when there is a control_flow_insn_p insn between FIRST and LAST.
3434 bool attach_libcall_retval_notes = true;
3435 next = NEXT_INSN (last);
3436 for (insn = first; insn != next; insn = NEXT_INSN (insn))
3437 if (control_flow_insn_p (insn))
3439 attach_libcall_retval_notes = false;
3443 if (attach_libcall_retval_notes)
3445 REG_NOTES (first) = gen_rtx_INSN_LIST (REG_LIBCALL, last,
3447 REG_NOTES (last) = gen_rtx_INSN_LIST (REG_RETVAL, first,
3453 /* Generate code to store zero in X. */
3456 emit_clr_insn (rtx x)
3458 emit_move_insn (x, const0_rtx);
3461 /* Generate code to store 1 in X
3462 assuming it contains zero beforehand. */
3465 emit_0_to_1_insn (rtx x)
3467 emit_move_insn (x, const1_rtx);
3470 /* Nonzero if we can perform a comparison of mode MODE straightforwardly.
3471 PURPOSE describes how this comparison will be used. CODE is the rtx
3472 comparison code we will be using.
3474 ??? Actually, CODE is slightly weaker than that. A target is still
3475 required to implement all of the normal bcc operations, but not
3476 required to implement all (or any) of the unordered bcc operations. */
3479 can_compare_p (enum rtx_code code, enum machine_mode mode,
3480 enum can_compare_purpose purpose)
3484 if (cmp_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
3486 if (purpose == ccp_jump)
3487 return bcc_gen_fctn[(int) code] != NULL;
3488 else if (purpose == ccp_store_flag)
3489 return setcc_gen_code[(int) code] != CODE_FOR_nothing;
3491 /* There's only one cmov entry point, and it's allowed to fail. */
3494 if (purpose == ccp_jump
3495 && cbranch_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
3497 if (purpose == ccp_cmov
3498 && cmov_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
3500 if (purpose == ccp_store_flag
3501 && cstore_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
3504 mode = GET_MODE_WIDER_MODE (mode);
3506 while (mode != VOIDmode);
3511 /* This function is called when we are going to emit a compare instruction that
3512 compares the values found in *PX and *PY, using the rtl operator COMPARISON.
3514 *PMODE is the mode of the inputs (in case they are const_int).
3515 *PUNSIGNEDP nonzero says that the operands are unsigned;
3516 this matters if they need to be widened.
3518 If they have mode BLKmode, then SIZE specifies the size of both operands.
3520 This function performs all the setup necessary so that the caller only has
3521 to emit a single comparison insn. This setup can involve doing a BLKmode
3522 comparison or emitting a library call to perform the comparison if no insn
3523 is available to handle it.
3524 The values which are passed in through pointers can be modified; the caller
3525 should perform the comparison on the modified values. */
3528 prepare_cmp_insn (rtx *px, rtx *py, enum rtx_code *pcomparison, rtx size,
3529 enum machine_mode *pmode, int *punsignedp,
3530 enum can_compare_purpose purpose)
3532 enum machine_mode mode = *pmode;
3533 rtx x = *px, y = *py;
3534 int unsignedp = *punsignedp;
3535 enum mode_class class;
3537 class = GET_MODE_CLASS (mode);
3539 /* They could both be VOIDmode if both args are immediate constants,
3540 but we should fold that at an earlier stage.
3541 With no special code here, this will call abort,
3542 reminding the programmer to implement such folding. */
3544 if (mode != BLKmode && flag_force_mem)
3546 /* Load duplicate non-volatile operands once. */
3547 if (rtx_equal_p (x, y) && ! volatile_refs_p (x))
3549 x = force_not_mem (x);
3554 x = force_not_mem (x);
3555 y = force_not_mem (y);
3559 /* If we are inside an appropriately-short loop and one operand is an
3560 expensive constant, force it into a register. */
3561 if (CONSTANT_P (x) && preserve_subexpressions_p ()
3562 && rtx_cost (x, COMPARE) > COSTS_N_INSNS (1))
3563 x = force_reg (mode, x);
3565 if (CONSTANT_P (y) && preserve_subexpressions_p ()
3566 && rtx_cost (y, COMPARE) > COSTS_N_INSNS (1))
3567 y = force_reg (mode, y);
3570 /* Abort if we have a non-canonical comparison. The RTL documentation
3571 states that canonical comparisons are required only for targets which
3573 if (CONSTANT_P (x) && ! CONSTANT_P (y))
3577 /* Don't let both operands fail to indicate the mode. */
3578 if (GET_MODE (x) == VOIDmode && GET_MODE (y) == VOIDmode)
3579 x = force_reg (mode, x);
3581 /* Handle all BLKmode compares. */
3583 if (mode == BLKmode)
3585 enum machine_mode cmp_mode, result_mode;
3586 enum insn_code cmp_code;
3591 = GEN_INT (MIN (MEM_ALIGN (x), MEM_ALIGN (y)) / BITS_PER_UNIT);
3597 x = protect_from_queue (x, 0);
3598 y = protect_from_queue (y, 0);
3599 size = protect_from_queue (size, 0);
3601 /* Try to use a memory block compare insn - either cmpstr
3602 or cmpmem will do. */
3603 for (cmp_mode = GET_CLASS_NARROWEST_MODE (MODE_INT);
3604 cmp_mode != VOIDmode;
3605 cmp_mode = GET_MODE_WIDER_MODE (cmp_mode))
3607 cmp_code = cmpmem_optab[cmp_mode];
3608 if (cmp_code == CODE_FOR_nothing)
3609 cmp_code = cmpstr_optab[cmp_mode];
3610 if (cmp_code == CODE_FOR_nothing)
3613 /* Must make sure the size fits the insn's mode. */
3614 if ((GET_CODE (size) == CONST_INT
3615 && INTVAL (size) >= (1 << GET_MODE_BITSIZE (cmp_mode)))
3616 || (GET_MODE_BITSIZE (GET_MODE (size))
3617 > GET_MODE_BITSIZE (cmp_mode)))
3620 result_mode = insn_data[cmp_code].operand[0].mode;
3621 result = gen_reg_rtx (result_mode);
3622 size = convert_to_mode (cmp_mode, size, 1);
3623 emit_insn (GEN_FCN (cmp_code) (result, x, y, size, opalign));
3627 *pmode = result_mode;
3631 /* Otherwise call a library function, memcmp if we've got it,
3633 #ifdef TARGET_MEM_FUNCTIONS
3634 libfunc = memcmp_libfunc;
3635 length_type = sizetype;
3637 libfunc = bcmp_libfunc;
3638 length_type = integer_type_node;
3640 result_mode = TYPE_MODE (integer_type_node);
3641 cmp_mode = TYPE_MODE (length_type);
3642 size = convert_to_mode (TYPE_MODE (length_type), size,
3643 TREE_UNSIGNED (length_type));
3645 result = emit_library_call_value (libfunc, 0, LCT_PURE_MAKE_BLOCK,
3652 *pmode = result_mode;
3656 /* Don't allow operands to the compare to trap, as that can put the
3657 compare and branch in different basic blocks. */
3658 if (flag_non_call_exceptions)
3661 x = force_reg (mode, x);
3663 y = force_reg (mode, y);
3668 if (can_compare_p (*pcomparison, mode, purpose))
3671 /* Handle a lib call just for the mode we are using. */
3673 if (cmp_optab->handlers[(int) mode].libfunc && class != MODE_FLOAT)
3675 rtx libfunc = cmp_optab->handlers[(int) mode].libfunc;
3678 /* If we want unsigned, and this mode has a distinct unsigned
3679 comparison routine, use that. */
3680 if (unsignedp && ucmp_optab->handlers[(int) mode].libfunc)
3681 libfunc = ucmp_optab->handlers[(int) mode].libfunc;
3683 result = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST_MAKE_BLOCK,
3684 word_mode, 2, x, mode, y, mode);
3686 /* Integer comparison returns a result that must be compared against 1,
3687 so that even if we do an unsigned compare afterward,
3688 there is still a value that can represent the result "less than". */
3695 if (class == MODE_FLOAT)
3696 prepare_float_lib_cmp (px, py, pcomparison, pmode, punsignedp);
3702 /* Before emitting an insn with code ICODE, make sure that X, which is going
3703 to be used for operand OPNUM of the insn, is converted from mode MODE to
3704 WIDER_MODE (UNSIGNEDP determines whether it is an unsigned conversion), and
3705 that it is accepted by the operand predicate. Return the new value. */
3708 prepare_operand (int icode, rtx x, int opnum, enum machine_mode mode,
3709 enum machine_mode wider_mode, int unsignedp)
3711 x = protect_from_queue (x, 0);
3713 if (mode != wider_mode)
3714 x = convert_modes (wider_mode, mode, x, unsignedp);
3716 if (! (*insn_data[icode].operand[opnum].predicate)
3717 (x, insn_data[icode].operand[opnum].mode))
3721 x = copy_to_mode_reg (insn_data[icode].operand[opnum].mode, x);
3727 /* Subroutine of emit_cmp_and_jump_insns; this function is called when we know
3728 we can do the comparison.
3729 The arguments are the same as for emit_cmp_and_jump_insns; but LABEL may
3730 be NULL_RTX which indicates that only a comparison is to be generated. */
3733 emit_cmp_and_jump_insn_1 (rtx x, rtx y, enum machine_mode mode,
3734 enum rtx_code comparison, int unsignedp, rtx label)
3736 rtx test = gen_rtx_fmt_ee (comparison, mode, x, y);
3737 enum mode_class class = GET_MODE_CLASS (mode);
3738 enum machine_mode wider_mode = mode;
3740 /* Try combined insns first. */
3743 enum insn_code icode;
3744 PUT_MODE (test, wider_mode);
3748 icode = cbranch_optab->handlers[(int) wider_mode].insn_code;
3750 if (icode != CODE_FOR_nothing
3751 && (*insn_data[icode].operand[0].predicate) (test, wider_mode))
3753 x = prepare_operand (icode, x, 1, mode, wider_mode, unsignedp);
3754 y = prepare_operand (icode, y, 2, mode, wider_mode, unsignedp);
3755 emit_jump_insn (GEN_FCN (icode) (test, x, y, label));
3760 /* Handle some compares against zero. */
3761 icode = (int) tst_optab->handlers[(int) wider_mode].insn_code;
3762 if (y == CONST0_RTX (mode) && icode != CODE_FOR_nothing)
3764 x = prepare_operand (icode, x, 0, mode, wider_mode, unsignedp);
3765 emit_insn (GEN_FCN (icode) (x));
3767 emit_jump_insn ((*bcc_gen_fctn[(int) comparison]) (label));
3771 /* Handle compares for which there is a directly suitable insn. */
3773 icode = (int) cmp_optab->handlers[(int) wider_mode].insn_code;
3774 if (icode != CODE_FOR_nothing)
3776 x = prepare_operand (icode, x, 0, mode, wider_mode, unsignedp);
3777 y = prepare_operand (icode, y, 1, mode, wider_mode, unsignedp);
3778 emit_insn (GEN_FCN (icode) (x, y));
3780 emit_jump_insn ((*bcc_gen_fctn[(int) comparison]) (label));
3784 if (class != MODE_INT && class != MODE_FLOAT
3785 && class != MODE_COMPLEX_FLOAT)
3788 wider_mode = GET_MODE_WIDER_MODE (wider_mode);
3790 while (wider_mode != VOIDmode);
3795 /* Generate code to compare X with Y so that the condition codes are
3796 set and to jump to LABEL if the condition is true. If X is a
3797 constant and Y is not a constant, then the comparison is swapped to
3798 ensure that the comparison RTL has the canonical form.
3800 UNSIGNEDP nonzero says that X and Y are unsigned; this matters if they
3801 need to be widened by emit_cmp_insn. UNSIGNEDP is also used to select
3802 the proper branch condition code.
3804 If X and Y have mode BLKmode, then SIZE specifies the size of both X and Y.
3806 MODE is the mode of the inputs (in case they are const_int).
3808 COMPARISON is the rtl operator to compare with (EQ, NE, GT, etc.). It will
3809 be passed unchanged to emit_cmp_insn, then potentially converted into an
3810 unsigned variant based on UNSIGNEDP to select a proper jump instruction. */
3813 emit_cmp_and_jump_insns (rtx x, rtx y, enum rtx_code comparison, rtx size,
3814 enum machine_mode mode, int unsignedp, rtx label)
3816 rtx op0 = x, op1 = y;
3818 /* Swap operands and condition to ensure canonical RTL. */
3819 if (swap_commutative_operands_p (x, y))
3821 /* If we're not emitting a branch, this means some caller
3827 comparison = swap_condition (comparison);
3831 /* If OP0 is still a constant, then both X and Y must be constants. Force
3832 X into a register to avoid aborting in emit_cmp_insn due to non-canonical
3834 if (CONSTANT_P (op0))
3835 op0 = force_reg (mode, op0);
3840 comparison = unsigned_condition (comparison);
3842 prepare_cmp_insn (&op0, &op1, &comparison, size, &mode, &unsignedp,
3844 emit_cmp_and_jump_insn_1 (op0, op1, mode, comparison, unsignedp, label);
3847 /* Like emit_cmp_and_jump_insns, but generate only the comparison. */
3850 emit_cmp_insn (rtx x, rtx y, enum rtx_code comparison, rtx size,
3851 enum machine_mode mode, int unsignedp)
3853 emit_cmp_and_jump_insns (x, y, comparison, size, mode, unsignedp, 0);
3856 /* Emit a library call comparison between floating point X and Y.
3857 COMPARISON is the rtl operator to compare with (EQ, NE, GT, etc.). */
3860 prepare_float_lib_cmp (rtx *px, rtx *py, enum rtx_code *pcomparison,
3861 enum machine_mode *pmode, int *punsignedp)
3863 enum rtx_code comparison = *pcomparison;
3864 enum rtx_code swapped = swap_condition (comparison);
3865 rtx x = protect_from_queue (*px, 0);
3866 rtx y = protect_from_queue (*py, 0);
3867 enum machine_mode orig_mode = GET_MODE (x);
3868 enum machine_mode mode;
3869 rtx value, target, insns, equiv;
3872 for (mode = orig_mode; mode != VOIDmode; mode = GET_MODE_WIDER_MODE (mode))
3874 if ((libfunc = code_to_optab[comparison]->handlers[mode].libfunc))
3877 if ((libfunc = code_to_optab[swapped]->handlers[mode].libfunc))
3880 tmp = x; x = y; y = tmp;
3881 comparison = swapped;
3886 if (mode == VOIDmode)
3889 if (mode != orig_mode)
3891 x = convert_to_mode (mode, x, 0);
3892 y = convert_to_mode (mode, y, 0);
3895 /* Attach a REG_EQUAL note describing the semantics of the libcall to
3896 the RTL. The allows the RTL optimizers to delete the libcall if the
3897 condition can be determined at compile-time. */
3898 if (comparison == UNORDERED)
3900 rtx temp = simplify_gen_relational (NE, word_mode, mode, x, x);
3901 equiv = simplify_gen_relational (NE, word_mode, mode, y, y);
3902 equiv = simplify_gen_ternary (IF_THEN_ELSE, word_mode, word_mode,
3903 temp, const_true_rtx, equiv);
3907 equiv = simplify_gen_relational (comparison, word_mode, mode, x, y);
3908 if (! FLOAT_LIB_COMPARE_RETURNS_BOOL (mode, comparison))
3910 rtx true_rtx, false_rtx;
3915 true_rtx = const0_rtx;
3916 false_rtx = const_true_rtx;
3920 true_rtx = const_true_rtx;
3921 false_rtx = const0_rtx;
3925 true_rtx = const1_rtx;
3926 false_rtx = const0_rtx;
3930 true_rtx = const0_rtx;
3931 false_rtx = constm1_rtx;
3935 true_rtx = constm1_rtx;
3936 false_rtx = const0_rtx;
3940 true_rtx = const0_rtx;
3941 false_rtx = const1_rtx;
3947 equiv = simplify_gen_ternary (IF_THEN_ELSE, word_mode, word_mode,
3948 equiv, true_rtx, false_rtx);
3953 value = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
3954 word_mode, 2, x, mode, y, mode);
3955 insns = get_insns ();
3958 target = gen_reg_rtx (word_mode);
3959 emit_libcall_block (insns, target, value, equiv);
3962 if (comparison == UNORDERED
3963 || FLOAT_LIB_COMPARE_RETURNS_BOOL (mode, comparison))
3969 *pcomparison = comparison;
3973 /* Generate code to indirectly jump to a location given in the rtx LOC. */
3976 emit_indirect_jump (rtx loc)
3978 if (! ((*insn_data[(int) CODE_FOR_indirect_jump].operand[0].predicate)
3980 loc = copy_to_mode_reg (Pmode, loc);
3982 emit_jump_insn (gen_indirect_jump (loc));
3986 #ifdef HAVE_conditional_move
3988 /* Emit a conditional move instruction if the machine supports one for that
3989 condition and machine mode.
3991 OP0 and OP1 are the operands that should be compared using CODE. CMODE is
3992 the mode to use should they be constants. If it is VOIDmode, they cannot
3995 OP2 should be stored in TARGET if the comparison is true, otherwise OP3
3996 should be stored there. MODE is the mode to use should they be constants.
3997 If it is VOIDmode, they cannot both be constants.
3999 The result is either TARGET (perhaps modified) or NULL_RTX if the operation
4000 is not supported. */
4003 emit_conditional_move (rtx target, enum rtx_code code, rtx op0, rtx op1,
4004 enum machine_mode cmode, rtx op2, rtx op3,
4005 enum machine_mode mode, int unsignedp)
4007 rtx tem, subtarget, comparison, insn;
4008 enum insn_code icode;
4009 enum rtx_code reversed;
4011 /* If one operand is constant, make it the second one. Only do this
4012 if the other operand is not constant as well. */
4014 if (swap_commutative_operands_p (op0, op1))
4019 code = swap_condition (code);
4022 /* get_condition will prefer to generate LT and GT even if the old
4023 comparison was against zero, so undo that canonicalization here since
4024 comparisons against zero are cheaper. */
4025 if (code == LT && op1 == const1_rtx)
4026 code = LE, op1 = const0_rtx;
4027 else if (code == GT && op1 == constm1_rtx)
4028 code = GE, op1 = const0_rtx;
4030 if (cmode == VOIDmode)
4031 cmode = GET_MODE (op0);
4033 if (swap_commutative_operands_p (op2, op3)
4034 && ((reversed = reversed_comparison_code_parts (code, op0, op1, NULL))
4043 if (mode == VOIDmode)
4044 mode = GET_MODE (op2);
4046 icode = movcc_gen_code[mode];
4048 if (icode == CODE_FOR_nothing)
4053 op2 = force_not_mem (op2);
4054 op3 = force_not_mem (op3);
4058 target = protect_from_queue (target, 1);
4060 target = gen_reg_rtx (mode);
4066 op2 = protect_from_queue (op2, 0);
4067 op3 = protect_from_queue (op3, 0);
4069 /* If the insn doesn't accept these operands, put them in pseudos. */
4071 if (! (*insn_data[icode].operand[0].predicate)
4072 (subtarget, insn_data[icode].operand[0].mode))
4073 subtarget = gen_reg_rtx (insn_data[icode].operand[0].mode);
4075 if (! (*insn_data[icode].operand[2].predicate)
4076 (op2, insn_data[icode].operand[2].mode))
4077 op2 = copy_to_mode_reg (insn_data[icode].operand[2].mode, op2);
4079 if (! (*insn_data[icode].operand[3].predicate)
4080 (op3, insn_data[icode].operand[3].mode))
4081 op3 = copy_to_mode_reg (insn_data[icode].operand[3].mode, op3);
4083 /* Everything should now be in the suitable form, so emit the compare insn
4084 and then the conditional move. */
4087 = compare_from_rtx (op0, op1, code, unsignedp, cmode, NULL_RTX);
4089 /* ??? Watch for const0_rtx (nop) and const_true_rtx (unconditional)? */
4090 /* We can get const0_rtx or const_true_rtx in some circumstances. Just
4091 return NULL and let the caller figure out how best to deal with this
4093 if (GET_CODE (comparison) != code)
4096 insn = GEN_FCN (icode) (subtarget, comparison, op2, op3);
4098 /* If that failed, then give up. */
4104 if (subtarget != target)
4105 convert_move (target, subtarget, 0);
4110 /* Return nonzero if a conditional move of mode MODE is supported.
4112 This function is for combine so it can tell whether an insn that looks
4113 like a conditional move is actually supported by the hardware. If we
4114 guess wrong we lose a bit on optimization, but that's it. */
4115 /* ??? sparc64 supports conditionally moving integers values based on fp
4116 comparisons, and vice versa. How do we handle them? */
4119 can_conditionally_move_p (enum machine_mode mode)
4121 if (movcc_gen_code[mode] != CODE_FOR_nothing)
4127 #endif /* HAVE_conditional_move */
4129 /* Emit a conditional addition instruction if the machine supports one for that
4130 condition and machine mode.
4132 OP0 and OP1 are the operands that should be compared using CODE. CMODE is
4133 the mode to use should they be constants. If it is VOIDmode, they cannot
4136 OP2 should be stored in TARGET if the comparison is true, otherwise OP2+OP3
4137 should be stored there. MODE is the mode to use should they be constants.
4138 If it is VOIDmode, they cannot both be constants.
4140 The result is either TARGET (perhaps modified) or NULL_RTX if the operation
4141 is not supported. */
4144 emit_conditional_add (rtx target, enum rtx_code code, rtx op0, rtx op1,
4145 enum machine_mode cmode, rtx op2, rtx op3,
4146 enum machine_mode mode, int unsignedp)
4148 rtx tem, subtarget, comparison, insn;
4149 enum insn_code icode;
4150 enum rtx_code reversed;
4152 /* If one operand is constant, make it the second one. Only do this
4153 if the other operand is not constant as well. */
4155 if (swap_commutative_operands_p (op0, op1))
4160 code = swap_condition (code);
4163 /* get_condition will prefer to generate LT and GT even if the old
4164 comparison was against zero, so undo that canonicalization here since
4165 comparisons against zero are cheaper. */
4166 if (code == LT && op1 == const1_rtx)
4167 code = LE, op1 = const0_rtx;
4168 else if (code == GT && op1 == constm1_rtx)
4169 code = GE, op1 = const0_rtx;
4171 if (cmode == VOIDmode)
4172 cmode = GET_MODE (op0);
4174 if (swap_commutative_operands_p (op2, op3)
4175 && ((reversed = reversed_comparison_code_parts (code, op0, op1, NULL))
4184 if (mode == VOIDmode)
4185 mode = GET_MODE (op2);
4187 icode = addcc_optab->handlers[(int) mode].insn_code;
4189 if (icode == CODE_FOR_nothing)
4194 op2 = force_not_mem (op2);
4195 op3 = force_not_mem (op3);
4199 target = protect_from_queue (target, 1);
4201 target = gen_reg_rtx (mode);
4207 op2 = protect_from_queue (op2, 0);
4208 op3 = protect_from_queue (op3, 0);
4210 /* If the insn doesn't accept these operands, put them in pseudos. */
4212 if (! (*insn_data[icode].operand[0].predicate)
4213 (subtarget, insn_data[icode].operand[0].mode))
4214 subtarget = gen_reg_rtx (insn_data[icode].operand[0].mode);
4216 if (! (*insn_data[icode].operand[2].predicate)
4217 (op2, insn_data[icode].operand[2].mode))
4218 op2 = copy_to_mode_reg (insn_data[icode].operand[2].mode, op2);
4220 if (! (*insn_data[icode].operand[3].predicate)
4221 (op3, insn_data[icode].operand[3].mode))
4222 op3 = copy_to_mode_reg (insn_data[icode].operand[3].mode, op3);
4224 /* Everything should now be in the suitable form, so emit the compare insn
4225 and then the conditional move. */
4228 = compare_from_rtx (op0, op1, code, unsignedp, cmode, NULL_RTX);
4230 /* ??? Watch for const0_rtx (nop) and const_true_rtx (unconditional)? */
4231 /* We can get const0_rtx or const_true_rtx in some circumstances. Just
4232 return NULL and let the caller figure out how best to deal with this
4234 if (GET_CODE (comparison) != code)
4237 insn = GEN_FCN (icode) (subtarget, comparison, op2, op3);
4239 /* If that failed, then give up. */
4245 if (subtarget != target)
4246 convert_move (target, subtarget, 0);
4251 /* These functions attempt to generate an insn body, rather than
4252 emitting the insn, but if the gen function already emits them, we
4253 make no attempt to turn them back into naked patterns.
4255 They do not protect from queued increments,
4256 because they may be used 1) in protect_from_queue itself
4257 and 2) in other passes where there is no queue. */
4259 /* Generate and return an insn body to add Y to X. */
4262 gen_add2_insn (rtx x, rtx y)
4264 int icode = (int) add_optab->handlers[(int) GET_MODE (x)].insn_code;
4266 if (! ((*insn_data[icode].operand[0].predicate)
4267 (x, insn_data[icode].operand[0].mode))
4268 || ! ((*insn_data[icode].operand[1].predicate)
4269 (x, insn_data[icode].operand[1].mode))
4270 || ! ((*insn_data[icode].operand[2].predicate)
4271 (y, insn_data[icode].operand[2].mode)))
4274 return (GEN_FCN (icode) (x, x, y));
4277 /* Generate and return an insn body to add r1 and c,
4278 storing the result in r0. */
4280 gen_add3_insn (rtx r0, rtx r1, rtx c)
4282 int icode = (int) add_optab->handlers[(int) GET_MODE (r0)].insn_code;
4284 if (icode == CODE_FOR_nothing
4285 || ! ((*insn_data[icode].operand[0].predicate)
4286 (r0, insn_data[icode].operand[0].mode))
4287 || ! ((*insn_data[icode].operand[1].predicate)
4288 (r1, insn_data[icode].operand[1].mode))
4289 || ! ((*insn_data[icode].operand[2].predicate)
4290 (c, insn_data[icode].operand[2].mode)))
4293 return (GEN_FCN (icode) (r0, r1, c));
4297 have_add2_insn (rtx x, rtx y)
4301 if (GET_MODE (x) == VOIDmode)
4304 icode = (int) add_optab->handlers[(int) GET_MODE (x)].insn_code;
4306 if (icode == CODE_FOR_nothing)
4309 if (! ((*insn_data[icode].operand[0].predicate)
4310 (x, insn_data[icode].operand[0].mode))
4311 || ! ((*insn_data[icode].operand[1].predicate)
4312 (x, insn_data[icode].operand[1].mode))
4313 || ! ((*insn_data[icode].operand[2].predicate)
4314 (y, insn_data[icode].operand[2].mode)))
4320 /* Generate and return an insn body to subtract Y from X. */
4323 gen_sub2_insn (rtx x, rtx y)
4325 int icode = (int) sub_optab->handlers[(int) GET_MODE (x)].insn_code;
4327 if (! ((*insn_data[icode].operand[0].predicate)
4328 (x, insn_data[icode].operand[0].mode))
4329 || ! ((*insn_data[icode].operand[1].predicate)
4330 (x, insn_data[icode].operand[1].mode))
4331 || ! ((*insn_data[icode].operand[2].predicate)
4332 (y, insn_data[icode].operand[2].mode)))
4335 return (GEN_FCN (icode) (x, x, y));
4338 /* Generate and return an insn body to subtract r1 and c,
4339 storing the result in r0. */
4341 gen_sub3_insn (rtx r0, rtx r1, rtx c)
4343 int icode = (int) sub_optab->handlers[(int) GET_MODE (r0)].insn_code;
4345 if (icode == CODE_FOR_nothing
4346 || ! ((*insn_data[icode].operand[0].predicate)
4347 (r0, insn_data[icode].operand[0].mode))
4348 || ! ((*insn_data[icode].operand[1].predicate)
4349 (r1, insn_data[icode].operand[1].mode))
4350 || ! ((*insn_data[icode].operand[2].predicate)
4351 (c, insn_data[icode].operand[2].mode)))
4354 return (GEN_FCN (icode) (r0, r1, c));
4358 have_sub2_insn (rtx x, rtx y)
4362 if (GET_MODE (x) == VOIDmode)
4365 icode = (int) sub_optab->handlers[(int) GET_MODE (x)].insn_code;
4367 if (icode == CODE_FOR_nothing)
4370 if (! ((*insn_data[icode].operand[0].predicate)
4371 (x, insn_data[icode].operand[0].mode))
4372 || ! ((*insn_data[icode].operand[1].predicate)
4373 (x, insn_data[icode].operand[1].mode))
4374 || ! ((*insn_data[icode].operand[2].predicate)
4375 (y, insn_data[icode].operand[2].mode)))
4381 /* Generate the body of an instruction to copy Y into X.
4382 It may be a list of insns, if one insn isn't enough. */
4385 gen_move_insn (rtx x, rtx y)
4390 emit_move_insn_1 (x, y);
4396 /* Return the insn code used to extend FROM_MODE to TO_MODE.
4397 UNSIGNEDP specifies zero-extension instead of sign-extension. If
4398 no such operation exists, CODE_FOR_nothing will be returned. */
4401 can_extend_p (enum machine_mode to_mode, enum machine_mode from_mode,
4405 #ifdef HAVE_ptr_extend
4407 return CODE_FOR_ptr_extend;
4410 tab = unsignedp ? zext_optab : sext_optab;
4411 return tab->handlers[to_mode][from_mode].insn_code;
4414 /* Generate the body of an insn to extend Y (with mode MFROM)
4415 into X (with mode MTO). Do zero-extension if UNSIGNEDP is nonzero. */
4418 gen_extend_insn (rtx x, rtx y, enum machine_mode mto,
4419 enum machine_mode mfrom, int unsignedp)
4421 enum insn_code icode = can_extend_p (mto, mfrom, unsignedp);
4422 return GEN_FCN (icode) (x, y);
4425 /* can_fix_p and can_float_p say whether the target machine
4426 can directly convert a given fixed point type to
4427 a given floating point type, or vice versa.
4428 The returned value is the CODE_FOR_... value to use,
4429 or CODE_FOR_nothing if these modes cannot be directly converted.
4431 *TRUNCP_PTR is set to 1 if it is necessary to output
4432 an explicit FTRUNC insn before the fix insn; otherwise 0. */
4434 static enum insn_code
4435 can_fix_p (enum machine_mode fixmode, enum machine_mode fltmode,
4436 int unsignedp, int *truncp_ptr)
4439 enum insn_code icode;
4441 tab = unsignedp ? ufixtrunc_optab : sfixtrunc_optab;
4442 icode = tab->handlers[fixmode][fltmode].insn_code;
4443 if (icode != CODE_FOR_nothing)
4449 /* FIXME: This requires a port to define both FIX and FTRUNC pattern
4450 for this to work. We need to rework the fix* and ftrunc* patterns
4451 and documentation. */
4452 tab = unsignedp ? ufix_optab : sfix_optab;
4453 icode = tab->handlers[fixmode][fltmode].insn_code;
4454 if (icode != CODE_FOR_nothing
4455 && ftrunc_optab->handlers[fltmode].insn_code != CODE_FOR_nothing)
4462 return CODE_FOR_nothing;
4465 static enum insn_code
4466 can_float_p (enum machine_mode fltmode, enum machine_mode fixmode,
4471 tab = unsignedp ? ufloat_optab : sfloat_optab;
4472 return tab->handlers[fltmode][fixmode].insn_code;
4475 /* Generate code to convert FROM to floating point
4476 and store in TO. FROM must be fixed point and not VOIDmode.
4477 UNSIGNEDP nonzero means regard FROM as unsigned.
4478 Normally this is done by correcting the final value
4479 if it is negative. */
4482 expand_float (rtx to, rtx from, int unsignedp)
4484 enum insn_code icode;
4486 enum machine_mode fmode, imode;
4488 /* Crash now, because we won't be able to decide which mode to use. */
4489 if (GET_MODE (from) == VOIDmode)
4492 /* Look for an insn to do the conversion. Do it in the specified
4493 modes if possible; otherwise convert either input, output or both to
4494 wider mode. If the integer mode is wider than the mode of FROM,
4495 we can do the conversion signed even if the input is unsigned. */
4497 for (fmode = GET_MODE (to); fmode != VOIDmode;
4498 fmode = GET_MODE_WIDER_MODE (fmode))
4499 for (imode = GET_MODE (from); imode != VOIDmode;
4500 imode = GET_MODE_WIDER_MODE (imode))
4502 int doing_unsigned = unsignedp;
4504 if (fmode != GET_MODE (to)
4505 && significand_size (fmode) < GET_MODE_BITSIZE (GET_MODE (from)))
4508 icode = can_float_p (fmode, imode, unsignedp);
4509 if (icode == CODE_FOR_nothing && imode != GET_MODE (from) && unsignedp)
4510 icode = can_float_p (fmode, imode, 0), doing_unsigned = 0;
4512 if (icode != CODE_FOR_nothing)
4514 to = protect_from_queue (to, 1);
4515 from = protect_from_queue (from, 0);
4517 if (imode != GET_MODE (from))
4518 from = convert_to_mode (imode, from, unsignedp);
4520 if (fmode != GET_MODE (to))
4521 target = gen_reg_rtx (fmode);
4523 emit_unop_insn (icode, target, from,
4524 doing_unsigned ? UNSIGNED_FLOAT : FLOAT);
4527 convert_move (to, target, 0);
4532 /* Unsigned integer, and no way to convert directly.
4533 Convert as signed, then conditionally adjust the result. */
4536 rtx label = gen_label_rtx ();
4538 REAL_VALUE_TYPE offset;
4542 to = protect_from_queue (to, 1);
4543 from = protect_from_queue (from, 0);
4546 from = force_not_mem (from);
4548 /* Look for a usable floating mode FMODE wider than the source and at
4549 least as wide as the target. Using FMODE will avoid rounding woes
4550 with unsigned values greater than the signed maximum value. */
4552 for (fmode = GET_MODE (to); fmode != VOIDmode;
4553 fmode = GET_MODE_WIDER_MODE (fmode))
4554 if (GET_MODE_BITSIZE (GET_MODE (from)) < GET_MODE_BITSIZE (fmode)
4555 && can_float_p (fmode, GET_MODE (from), 0) != CODE_FOR_nothing)
4558 if (fmode == VOIDmode)
4560 /* There is no such mode. Pretend the target is wide enough. */
4561 fmode = GET_MODE (to);
4563 /* Avoid double-rounding when TO is narrower than FROM. */
4564 if ((significand_size (fmode) + 1)
4565 < GET_MODE_BITSIZE (GET_MODE (from)))
4568 rtx neglabel = gen_label_rtx ();
4570 /* Don't use TARGET if it isn't a register, is a hard register,
4571 or is the wrong mode. */
4572 if (GET_CODE (target) != REG
4573 || REGNO (target) < FIRST_PSEUDO_REGISTER
4574 || GET_MODE (target) != fmode)
4575 target = gen_reg_rtx (fmode);
4577 imode = GET_MODE (from);
4578 do_pending_stack_adjust ();
4580 /* Test whether the sign bit is set. */
4581 emit_cmp_and_jump_insns (from, const0_rtx, LT, NULL_RTX, imode,
4584 /* The sign bit is not set. Convert as signed. */
4585 expand_float (target, from, 0);
4586 emit_jump_insn (gen_jump (label));
4589 /* The sign bit is set.
4590 Convert to a usable (positive signed) value by shifting right
4591 one bit, while remembering if a nonzero bit was shifted
4592 out; i.e., compute (from & 1) | (from >> 1). */
4594 emit_label (neglabel);
4595 temp = expand_binop (imode, and_optab, from, const1_rtx,
4596 NULL_RTX, 1, OPTAB_LIB_WIDEN);
4597 temp1 = expand_shift (RSHIFT_EXPR, imode, from, integer_one_node,
4599 temp = expand_binop (imode, ior_optab, temp, temp1, temp, 1,
4601 expand_float (target, temp, 0);
4603 /* Multiply by 2 to undo the shift above. */
4604 temp = expand_binop (fmode, add_optab, target, target,
4605 target, 0, OPTAB_LIB_WIDEN);
4607 emit_move_insn (target, temp);
4609 do_pending_stack_adjust ();
4615 /* If we are about to do some arithmetic to correct for an
4616 unsigned operand, do it in a pseudo-register. */
4618 if (GET_MODE (to) != fmode
4619 || GET_CODE (to) != REG || REGNO (to) < FIRST_PSEUDO_REGISTER)
4620 target = gen_reg_rtx (fmode);
4622 /* Convert as signed integer to floating. */
4623 expand_float (target, from, 0);
4625 /* If FROM is negative (and therefore TO is negative),
4626 correct its value by 2**bitwidth. */
4628 do_pending_stack_adjust ();
4629 emit_cmp_and_jump_insns (from, const0_rtx, GE, NULL_RTX, GET_MODE (from),
4633 real_2expN (&offset, GET_MODE_BITSIZE (GET_MODE (from)));
4634 temp = expand_binop (fmode, add_optab, target,
4635 CONST_DOUBLE_FROM_REAL_VALUE (offset, fmode),
4636 target, 0, OPTAB_LIB_WIDEN);
4638 emit_move_insn (target, temp);
4640 do_pending_stack_adjust ();
4645 /* No hardware instruction available; call a library routine. */
4650 convert_optab tab = unsignedp ? ufloat_optab : sfloat_optab;
4652 to = protect_from_queue (to, 1);
4653 from = protect_from_queue (from, 0);
4655 if (GET_MODE_SIZE (GET_MODE (from)) < GET_MODE_SIZE (SImode))
4656 from = convert_to_mode (SImode, from, unsignedp);
4659 from = force_not_mem (from);
4661 libfunc = tab->handlers[GET_MODE (to)][GET_MODE (from)].libfunc;
4667 value = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
4668 GET_MODE (to), 1, from,
4670 insns = get_insns ();
4673 emit_libcall_block (insns, target, value,
4674 gen_rtx_FLOAT (GET_MODE (to), from));
4679 /* Copy result to requested destination
4680 if we have been computing in a temp location. */
4684 if (GET_MODE (target) == GET_MODE (to))
4685 emit_move_insn (to, target);
4687 convert_move (to, target, 0);
4691 /* Generate code to convert FROM to fixed point and store in TO. FROM
4692 must be floating point. */
4695 expand_fix (rtx to, rtx from, int unsignedp)
4697 enum insn_code icode;
4699 enum machine_mode fmode, imode;
4702 /* We first try to find a pair of modes, one real and one integer, at
4703 least as wide as FROM and TO, respectively, in which we can open-code
4704 this conversion. If the integer mode is wider than the mode of TO,
4705 we can do the conversion either signed or unsigned. */
4707 for (fmode = GET_MODE (from); fmode != VOIDmode;
4708 fmode = GET_MODE_WIDER_MODE (fmode))
4709 for (imode = GET_MODE (to); imode != VOIDmode;
4710 imode = GET_MODE_WIDER_MODE (imode))
4712 int doing_unsigned = unsignedp;
4714 icode = can_fix_p (imode, fmode, unsignedp, &must_trunc);
4715 if (icode == CODE_FOR_nothing && imode != GET_MODE (to) && unsignedp)
4716 icode = can_fix_p (imode, fmode, 0, &must_trunc), doing_unsigned = 0;
4718 if (icode != CODE_FOR_nothing)
4720 to = protect_from_queue (to, 1);
4721 from = protect_from_queue (from, 0);
4723 if (fmode != GET_MODE (from))
4724 from = convert_to_mode (fmode, from, 0);
4728 rtx temp = gen_reg_rtx (GET_MODE (from));
4729 from = expand_unop (GET_MODE (from), ftrunc_optab, from,
4733 if (imode != GET_MODE (to))
4734 target = gen_reg_rtx (imode);
4736 emit_unop_insn (icode, target, from,
4737 doing_unsigned ? UNSIGNED_FIX : FIX);
4739 convert_move (to, target, unsignedp);
4744 /* For an unsigned conversion, there is one more way to do it.
4745 If we have a signed conversion, we generate code that compares
4746 the real value to the largest representable positive number. If if
4747 is smaller, the conversion is done normally. Otherwise, subtract
4748 one plus the highest signed number, convert, and add it back.
4750 We only need to check all real modes, since we know we didn't find
4751 anything with a wider integer mode.
4753 This code used to extend FP value into mode wider than the destination.
4754 This is not needed. Consider, for instance conversion from SFmode
4757 The hot path trought the code is dealing with inputs smaller than 2^63
4758 and doing just the conversion, so there is no bits to lose.
4760 In the other path we know the value is positive in the range 2^63..2^64-1
4761 inclusive. (as for other imput overflow happens and result is undefined)
4762 So we know that the most important bit set in mantissa corresponds to
4763 2^63. The subtraction of 2^63 should not generate any rounding as it
4764 simply clears out that bit. The rest is trivial. */
4766 if (unsignedp && GET_MODE_BITSIZE (GET_MODE (to)) <= HOST_BITS_PER_WIDE_INT)
4767 for (fmode = GET_MODE (from); fmode != VOIDmode;
4768 fmode = GET_MODE_WIDER_MODE (fmode))
4769 if (CODE_FOR_nothing != can_fix_p (GET_MODE (to), fmode, 0,
4773 REAL_VALUE_TYPE offset;
4774 rtx limit, lab1, lab2, insn;
4776 bitsize = GET_MODE_BITSIZE (GET_MODE (to));
4777 real_2expN (&offset, bitsize - 1);
4778 limit = CONST_DOUBLE_FROM_REAL_VALUE (offset, fmode);
4779 lab1 = gen_label_rtx ();
4780 lab2 = gen_label_rtx ();
4783 to = protect_from_queue (to, 1);
4784 from = protect_from_queue (from, 0);
4787 from = force_not_mem (from);
4789 if (fmode != GET_MODE (from))
4790 from = convert_to_mode (fmode, from, 0);
4792 /* See if we need to do the subtraction. */
4793 do_pending_stack_adjust ();
4794 emit_cmp_and_jump_insns (from, limit, GE, NULL_RTX, GET_MODE (from),
4797 /* If not, do the signed "fix" and branch around fixup code. */
4798 expand_fix (to, from, 0);
4799 emit_jump_insn (gen_jump (lab2));
4802 /* Otherwise, subtract 2**(N-1), convert to signed number,
4803 then add 2**(N-1). Do the addition using XOR since this
4804 will often generate better code. */
4806 target = expand_binop (GET_MODE (from), sub_optab, from, limit,
4807 NULL_RTX, 0, OPTAB_LIB_WIDEN);
4808 expand_fix (to, target, 0);
4809 target = expand_binop (GET_MODE (to), xor_optab, to,
4811 ((HOST_WIDE_INT) 1 << (bitsize - 1),
4813 to, 1, OPTAB_LIB_WIDEN);
4816 emit_move_insn (to, target);
4820 if (mov_optab->handlers[(int) GET_MODE (to)].insn_code
4821 != CODE_FOR_nothing)
4823 /* Make a place for a REG_NOTE and add it. */
4824 insn = emit_move_insn (to, to);
4825 set_unique_reg_note (insn,
4827 gen_rtx_fmt_e (UNSIGNED_FIX,
4835 /* We can't do it with an insn, so use a library call. But first ensure
4836 that the mode of TO is at least as wide as SImode, since those are the
4837 only library calls we know about. */
4839 if (GET_MODE_SIZE (GET_MODE (to)) < GET_MODE_SIZE (SImode))
4841 target = gen_reg_rtx (SImode);
4843 expand_fix (target, from, unsignedp);
4851 convert_optab tab = unsignedp ? ufix_optab : sfix_optab;
4852 libfunc = tab->handlers[GET_MODE (to)][GET_MODE (from)].libfunc;
4856 to = protect_from_queue (to, 1);
4857 from = protect_from_queue (from, 0);
4860 from = force_not_mem (from);
4864 value = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
4865 GET_MODE (to), 1, from,
4867 insns = get_insns ();
4870 emit_libcall_block (insns, target, value,
4871 gen_rtx_fmt_e (unsignedp ? UNSIGNED_FIX : FIX,
4872 GET_MODE (to), from));
4877 if (GET_MODE (to) == GET_MODE (target))
4878 emit_move_insn (to, target);
4880 convert_move (to, target, 0);
4884 /* Report whether we have an instruction to perform the operation
4885 specified by CODE on operands of mode MODE. */
4887 have_insn_for (enum rtx_code code, enum machine_mode mode)
4889 return (code_to_optab[(int) code] != 0
4890 && (code_to_optab[(int) code]->handlers[(int) mode].insn_code
4891 != CODE_FOR_nothing));
4894 /* Create a blank optab. */
4899 optab op = ggc_alloc (sizeof (struct optab));
4900 for (i = 0; i < NUM_MACHINE_MODES; i++)
4902 op->handlers[i].insn_code = CODE_FOR_nothing;
4903 op->handlers[i].libfunc = 0;
4909 static convert_optab
4910 new_convert_optab (void)
4913 convert_optab op = ggc_alloc (sizeof (struct convert_optab));
4914 for (i = 0; i < NUM_MACHINE_MODES; i++)
4915 for (j = 0; j < NUM_MACHINE_MODES; j++)
4917 op->handlers[i][j].insn_code = CODE_FOR_nothing;
4918 op->handlers[i][j].libfunc = 0;
4923 /* Same, but fill in its code as CODE, and write it into the
4924 code_to_optab table. */
4926 init_optab (enum rtx_code code)
4928 optab op = new_optab ();
4930 code_to_optab[(int) code] = op;
4934 /* Same, but fill in its code as CODE, and do _not_ write it into
4935 the code_to_optab table. */
4937 init_optabv (enum rtx_code code)
4939 optab op = new_optab ();
4944 /* Conversion optabs never go in the code_to_optab table. */
4945 static inline convert_optab
4946 init_convert_optab (enum rtx_code code)
4948 convert_optab op = new_convert_optab ();
4953 /* Initialize the libfunc fields of an entire group of entries in some
4954 optab. Each entry is set equal to a string consisting of a leading
4955 pair of underscores followed by a generic operation name followed by
4956 a mode name (downshifted to lowercase) followed by a single character
4957 representing the number of operands for the given operation (which is
4958 usually one of the characters '2', '3', or '4').
4960 OPTABLE is the table in which libfunc fields are to be initialized.
4961 FIRST_MODE is the first machine mode index in the given optab to
4963 LAST_MODE is the last machine mode index in the given optab to
4965 OPNAME is the generic (string) name of the operation.
4966 SUFFIX is the character which specifies the number of operands for
4967 the given generic operation.
4971 init_libfuncs (optab optable, int first_mode, int last_mode,
4972 const char *opname, int suffix)
4975 unsigned opname_len = strlen (opname);
4977 for (mode = first_mode; (int) mode <= (int) last_mode;
4978 mode = (enum machine_mode) ((int) mode + 1))
4980 const char *mname = GET_MODE_NAME (mode);
4981 unsigned mname_len = strlen (mname);
4982 char *libfunc_name = alloca (2 + opname_len + mname_len + 1 + 1);
4989 for (q = opname; *q; )
4991 for (q = mname; *q; q++)
4992 *p++ = TOLOWER (*q);
4996 optable->handlers[(int) mode].libfunc
4997 = init_one_libfunc (ggc_alloc_string (libfunc_name, p - libfunc_name));
5001 /* Initialize the libfunc fields of an entire group of entries in some
5002 optab which correspond to all integer mode operations. The parameters
5003 have the same meaning as similarly named ones for the `init_libfuncs'
5004 routine. (See above). */
5007 init_integral_libfuncs (optab optable, const char *opname, int suffix)
5009 int maxsize = 2*BITS_PER_WORD;
5010 if (maxsize < LONG_LONG_TYPE_SIZE)
5011 maxsize = LONG_LONG_TYPE_SIZE;
5012 init_libfuncs (optable, word_mode,
5013 mode_for_size (maxsize, MODE_INT, 0),
5017 /* Initialize the libfunc fields of an entire group of entries in some
5018 optab which correspond to all real mode operations. The parameters
5019 have the same meaning as similarly named ones for the `init_libfuncs'
5020 routine. (See above). */
5023 init_floating_libfuncs (optab optable, const char *opname, int suffix)
5025 init_libfuncs (optable, MIN_MODE_FLOAT, MAX_MODE_FLOAT, opname, suffix);
5028 /* Initialize the libfunc fields of an entire group of entries of an
5029 inter-mode-class conversion optab. The string formation rules are
5030 similar to the ones for init_libfuncs, above, but instead of having
5031 a mode name and an operand count these functions have two mode names
5032 and no operand count. */
5034 init_interclass_conv_libfuncs (convert_optab tab, const char *opname,
5035 enum mode_class from_class,
5036 enum mode_class to_class)
5038 enum machine_mode first_from_mode = GET_CLASS_NARROWEST_MODE (from_class);
5039 enum machine_mode first_to_mode = GET_CLASS_NARROWEST_MODE (to_class);
5040 size_t opname_len = strlen (opname);
5041 size_t max_mname_len = 0;
5043 enum machine_mode fmode, tmode;
5044 const char *fname, *tname;
5046 char *libfunc_name, *suffix;
5049 for (fmode = first_from_mode;
5051 fmode = GET_MODE_WIDER_MODE (fmode))
5052 max_mname_len = MAX (max_mname_len, strlen (GET_MODE_NAME (fmode)));
5054 for (tmode = first_to_mode;
5056 tmode = GET_MODE_WIDER_MODE (tmode))
5057 max_mname_len = MAX (max_mname_len, strlen (GET_MODE_NAME (tmode)));
5059 libfunc_name = alloca (2 + opname_len + 2*max_mname_len + 1 + 1);
5060 libfunc_name[0] = '_';
5061 libfunc_name[1] = '_';
5062 memcpy (&libfunc_name[2], opname, opname_len);
5063 suffix = libfunc_name + opname_len + 2;
5065 for (fmode = first_from_mode; fmode != VOIDmode;
5066 fmode = GET_MODE_WIDER_MODE (fmode))
5067 for (tmode = first_to_mode; tmode != VOIDmode;
5068 tmode = GET_MODE_WIDER_MODE (tmode))
5070 fname = GET_MODE_NAME (fmode);
5071 tname = GET_MODE_NAME (tmode);
5074 for (q = fname; *q; p++, q++)
5076 for (q = tname; *q; p++, q++)
5081 tab->handlers[tmode][fmode].libfunc
5082 = init_one_libfunc (ggc_alloc_string (libfunc_name,
5087 /* Initialize the libfunc fields of an entire group of entries of an
5088 intra-mode-class conversion optab. The string formation rules are
5089 similar to the ones for init_libfunc, above. WIDENING says whether
5090 the optab goes from narrow to wide modes or vice versa. These functions
5091 have two mode names _and_ an operand count. */
5093 init_intraclass_conv_libfuncs (convert_optab tab, const char *opname,
5094 enum mode_class class, bool widening)
5096 enum machine_mode first_mode = GET_CLASS_NARROWEST_MODE (class);
5097 size_t opname_len = strlen (opname);
5098 size_t max_mname_len = 0;
5100 enum machine_mode nmode, wmode;
5101 const char *nname, *wname;
5103 char *libfunc_name, *suffix;
5106 for (nmode = first_mode; nmode != VOIDmode;
5107 nmode = GET_MODE_WIDER_MODE (nmode))
5108 max_mname_len = MAX (max_mname_len, strlen (GET_MODE_NAME (nmode)));
5110 libfunc_name = alloca (2 + opname_len + 2*max_mname_len + 1 + 1);
5111 libfunc_name[0] = '_';
5112 libfunc_name[1] = '_';
5113 memcpy (&libfunc_name[2], opname, opname_len);
5114 suffix = libfunc_name + opname_len + 2;
5116 for (nmode = first_mode; nmode != VOIDmode;
5117 nmode = GET_MODE_WIDER_MODE (nmode))
5118 for (wmode = GET_MODE_WIDER_MODE (nmode); wmode != VOIDmode;
5119 wmode = GET_MODE_WIDER_MODE (wmode))
5121 nname = GET_MODE_NAME (nmode);
5122 wname = GET_MODE_NAME (wmode);
5125 for (q = widening ? nname : wname; *q; p++, q++)
5127 for (q = widening ? wname : nname; *q; p++, q++)
5133 tab->handlers[widening ? wmode : nmode]
5134 [widening ? nmode : wmode].libfunc
5135 = init_one_libfunc (ggc_alloc_string (libfunc_name,
5142 init_one_libfunc (const char *name)
5146 /* Create a FUNCTION_DECL that can be passed to
5147 targetm.encode_section_info. */
5148 /* ??? We don't have any type information except for this is
5149 a function. Pretend this is "int foo()". */
5150 tree decl = build_decl (FUNCTION_DECL, get_identifier (name),
5151 build_function_type (integer_type_node, NULL_TREE));
5152 DECL_ARTIFICIAL (decl) = 1;
5153 DECL_EXTERNAL (decl) = 1;
5154 TREE_PUBLIC (decl) = 1;
5156 symbol = XEXP (DECL_RTL (decl), 0);
5158 /* Zap the nonsensical SYMBOL_REF_DECL for this. What we're left with
5159 are the flags assigned by targetm.encode_section_info. */
5160 SYMBOL_REF_DECL (symbol) = 0;
5165 /* Call this to reset the function entry for one optab (OPTABLE) in mode
5166 MODE to NAME, which should be either 0 or a string constant. */
5168 set_optab_libfunc (optab optable, enum machine_mode mode, const char *name)
5171 optable->handlers[mode].libfunc = init_one_libfunc (name);
5173 optable->handlers[mode].libfunc = 0;
5176 /* Call this to reset the function entry for one conversion optab
5177 (OPTABLE) from mode FMODE to mode TMODE to NAME, which should be
5178 either 0 or a string constant. */
5180 set_conv_libfunc (convert_optab optable, enum machine_mode tmode,
5181 enum machine_mode fmode, const char *name)
5184 optable->handlers[tmode][fmode].libfunc = init_one_libfunc (name);
5186 optable->handlers[tmode][fmode].libfunc = 0;
5189 /* Call this once to initialize the contents of the optabs
5190 appropriately for the current target machine. */
5197 /* Start by initializing all tables to contain CODE_FOR_nothing. */
5199 for (i = 0; i < NUM_RTX_CODE; i++)
5200 setcc_gen_code[i] = CODE_FOR_nothing;
5202 #ifdef HAVE_conditional_move
5203 for (i = 0; i < NUM_MACHINE_MODES; i++)
5204 movcc_gen_code[i] = CODE_FOR_nothing;
5207 add_optab = init_optab (PLUS);
5208 addv_optab = init_optabv (PLUS);
5209 sub_optab = init_optab (MINUS);
5210 subv_optab = init_optabv (MINUS);
5211 smul_optab = init_optab (MULT);
5212 smulv_optab = init_optabv (MULT);
5213 smul_highpart_optab = init_optab (UNKNOWN);
5214 umul_highpart_optab = init_optab (UNKNOWN);
5215 smul_widen_optab = init_optab (UNKNOWN);
5216 umul_widen_optab = init_optab (UNKNOWN);
5217 sdiv_optab = init_optab (DIV);
5218 sdivv_optab = init_optabv (DIV);
5219 sdivmod_optab = init_optab (UNKNOWN);
5220 udiv_optab = init_optab (UDIV);
5221 udivmod_optab = init_optab (UNKNOWN);
5222 smod_optab = init_optab (MOD);
5223 umod_optab = init_optab (UMOD);
5224 ftrunc_optab = init_optab (UNKNOWN);
5225 and_optab = init_optab (AND);
5226 ior_optab = init_optab (IOR);
5227 xor_optab = init_optab (XOR);
5228 ashl_optab = init_optab (ASHIFT);
5229 ashr_optab = init_optab (ASHIFTRT);
5230 lshr_optab = init_optab (LSHIFTRT);
5231 rotl_optab = init_optab (ROTATE);
5232 rotr_optab = init_optab (ROTATERT);
5233 smin_optab = init_optab (SMIN);
5234 smax_optab = init_optab (SMAX);
5235 umin_optab = init_optab (UMIN);
5236 umax_optab = init_optab (UMAX);
5237 pow_optab = init_optab (UNKNOWN);
5238 atan2_optab = init_optab (UNKNOWN);
5240 /* These three have codes assigned exclusively for the sake of
5242 mov_optab = init_optab (SET);
5243 movstrict_optab = init_optab (STRICT_LOW_PART);
5244 cmp_optab = init_optab (COMPARE);
5246 ucmp_optab = init_optab (UNKNOWN);
5247 tst_optab = init_optab (UNKNOWN);
5249 eq_optab = init_optab (EQ);
5250 ne_optab = init_optab (NE);
5251 gt_optab = init_optab (GT);
5252 ge_optab = init_optab (GE);
5253 lt_optab = init_optab (LT);
5254 le_optab = init_optab (LE);
5255 unord_optab = init_optab (UNORDERED);
5257 neg_optab = init_optab (NEG);
5258 negv_optab = init_optabv (NEG);
5259 abs_optab = init_optab (ABS);
5260 absv_optab = init_optabv (ABS);
5261 addcc_optab = init_optab (UNKNOWN);
5262 one_cmpl_optab = init_optab (NOT);
5263 ffs_optab = init_optab (FFS);
5264 clz_optab = init_optab (CLZ);
5265 ctz_optab = init_optab (CTZ);
5266 popcount_optab = init_optab (POPCOUNT);
5267 parity_optab = init_optab (PARITY);
5268 sqrt_optab = init_optab (SQRT);
5269 floor_optab = init_optab (UNKNOWN);
5270 ceil_optab = init_optab (UNKNOWN);
5271 round_optab = init_optab (UNKNOWN);
5272 btrunc_optab = init_optab (UNKNOWN);
5273 nearbyint_optab = init_optab (UNKNOWN);
5274 sin_optab = init_optab (UNKNOWN);
5275 cos_optab = init_optab (UNKNOWN);
5276 exp_optab = init_optab (UNKNOWN);
5277 log_optab = init_optab (UNKNOWN);
5278 log10_optab = init_optab (UNKNOWN);
5279 log2_optab = init_optab (UNKNOWN);
5280 tan_optab = init_optab (UNKNOWN);
5281 atan_optab = init_optab (UNKNOWN);
5282 strlen_optab = init_optab (UNKNOWN);
5283 cbranch_optab = init_optab (UNKNOWN);
5284 cmov_optab = init_optab (UNKNOWN);
5285 cstore_optab = init_optab (UNKNOWN);
5286 push_optab = init_optab (UNKNOWN);
5288 vec_extract_optab = init_optab (UNKNOWN);
5289 vec_set_optab = init_optab (UNKNOWN);
5290 vec_init_optab = init_optab (UNKNOWN);
5292 sext_optab = init_convert_optab (SIGN_EXTEND);
5293 zext_optab = init_convert_optab (ZERO_EXTEND);
5294 trunc_optab = init_convert_optab (TRUNCATE);
5295 sfix_optab = init_convert_optab (FIX);
5296 ufix_optab = init_convert_optab (UNSIGNED_FIX);
5297 sfixtrunc_optab = init_convert_optab (UNKNOWN);
5298 ufixtrunc_optab = init_convert_optab (UNKNOWN);
5299 sfloat_optab = init_convert_optab (FLOAT);
5300 ufloat_optab = init_convert_optab (UNSIGNED_FLOAT);
5302 for (i = 0; i < NUM_MACHINE_MODES; i++)
5304 movstr_optab[i] = CODE_FOR_nothing;
5305 clrstr_optab[i] = CODE_FOR_nothing;
5306 cmpstr_optab[i] = CODE_FOR_nothing;
5307 cmpmem_optab[i] = CODE_FOR_nothing;
5309 #ifdef HAVE_SECONDARY_RELOADS
5310 reload_in_optab[i] = reload_out_optab[i] = CODE_FOR_nothing;
5314 /* Fill in the optabs with the insns we support. */
5317 /* Initialize the optabs with the names of the library functions. */
5318 init_integral_libfuncs (add_optab, "add", '3');
5319 init_floating_libfuncs (add_optab, "add", '3');
5320 init_integral_libfuncs (addv_optab, "addv", '3');
5321 init_floating_libfuncs (addv_optab, "add", '3');
5322 init_integral_libfuncs (sub_optab, "sub", '3');
5323 init_floating_libfuncs (sub_optab, "sub", '3');
5324 init_integral_libfuncs (subv_optab, "subv", '3');
5325 init_floating_libfuncs (subv_optab, "sub", '3');
5326 init_integral_libfuncs (smul_optab, "mul", '3');
5327 init_floating_libfuncs (smul_optab, "mul", '3');
5328 init_integral_libfuncs (smulv_optab, "mulv", '3');
5329 init_floating_libfuncs (smulv_optab, "mul", '3');
5330 init_integral_libfuncs (sdiv_optab, "div", '3');
5331 init_floating_libfuncs (sdiv_optab, "div", '3');
5332 init_integral_libfuncs (sdivv_optab, "divv", '3');
5333 init_integral_libfuncs (udiv_optab, "udiv", '3');
5334 init_integral_libfuncs (sdivmod_optab, "divmod", '4');
5335 init_integral_libfuncs (udivmod_optab, "udivmod", '4');
5336 init_integral_libfuncs (smod_optab, "mod", '3');
5337 init_integral_libfuncs (umod_optab, "umod", '3');
5338 init_floating_libfuncs (ftrunc_optab, "ftrunc", '2');
5339 init_integral_libfuncs (and_optab, "and", '3');
5340 init_integral_libfuncs (ior_optab, "ior", '3');
5341 init_integral_libfuncs (xor_optab, "xor", '3');
5342 init_integral_libfuncs (ashl_optab, "ashl", '3');
5343 init_integral_libfuncs (ashr_optab, "ashr", '3');
5344 init_integral_libfuncs (lshr_optab, "lshr", '3');
5345 init_integral_libfuncs (smin_optab, "min", '3');
5346 init_floating_libfuncs (smin_optab, "min", '3');
5347 init_integral_libfuncs (smax_optab, "max", '3');
5348 init_floating_libfuncs (smax_optab, "max", '3');
5349 init_integral_libfuncs (umin_optab, "umin", '3');
5350 init_integral_libfuncs (umax_optab, "umax", '3');
5351 init_integral_libfuncs (neg_optab, "neg", '2');
5352 init_floating_libfuncs (neg_optab, "neg", '2');
5353 init_integral_libfuncs (negv_optab, "negv", '2');
5354 init_floating_libfuncs (negv_optab, "neg", '2');
5355 init_integral_libfuncs (one_cmpl_optab, "one_cmpl", '2');
5356 init_integral_libfuncs (ffs_optab, "ffs", '2');
5357 init_integral_libfuncs (clz_optab, "clz", '2');
5358 init_integral_libfuncs (ctz_optab, "ctz", '2');
5359 init_integral_libfuncs (popcount_optab, "popcount", '2');
5360 init_integral_libfuncs (parity_optab, "parity", '2');
5362 /* Comparison libcalls for integers MUST come in pairs, signed/unsigned. */
5363 init_integral_libfuncs (cmp_optab, "cmp", '2');
5364 init_integral_libfuncs (ucmp_optab, "ucmp", '2');
5365 init_floating_libfuncs (cmp_optab, "cmp", '2');
5367 /* EQ etc are floating point only. */
5368 init_floating_libfuncs (eq_optab, "eq", '2');
5369 init_floating_libfuncs (ne_optab, "ne", '2');
5370 init_floating_libfuncs (gt_optab, "gt", '2');
5371 init_floating_libfuncs (ge_optab, "ge", '2');
5372 init_floating_libfuncs (lt_optab, "lt", '2');
5373 init_floating_libfuncs (le_optab, "le", '2');
5374 init_floating_libfuncs (unord_optab, "unord", '2');
5377 init_interclass_conv_libfuncs (sfloat_optab, "float", MODE_INT, MODE_FLOAT);
5378 init_interclass_conv_libfuncs (sfix_optab, "fix", MODE_FLOAT, MODE_INT);
5379 init_interclass_conv_libfuncs (ufix_optab, "fixuns", MODE_FLOAT, MODE_INT);
5381 /* sext_optab is also used for FLOAT_EXTEND. */
5382 init_intraclass_conv_libfuncs (sext_optab, "extend", MODE_FLOAT, true);
5383 init_intraclass_conv_libfuncs (trunc_optab, "trunc", MODE_FLOAT, false);
5385 /* Use cabs for double complex abs, since systems generally have cabs.
5386 Don't define any libcall for float complex, so that cabs will be used. */
5387 if (complex_double_type_node)
5388 abs_optab->handlers[TYPE_MODE (complex_double_type_node)].libfunc
5389 = init_one_libfunc ("cabs");
5391 /* The ffs function operates on `int'. */
5392 ffs_optab->handlers[(int) mode_for_size (INT_TYPE_SIZE, MODE_INT, 0)].libfunc
5393 = init_one_libfunc ("ffs");
5395 abort_libfunc = init_one_libfunc ("abort");
5396 memcpy_libfunc = init_one_libfunc ("memcpy");
5397 memmove_libfunc = init_one_libfunc ("memmove");
5398 bcopy_libfunc = init_one_libfunc ("bcopy");
5399 memcmp_libfunc = init_one_libfunc ("memcmp");
5400 bcmp_libfunc = init_one_libfunc ("__gcc_bcmp");
5401 memset_libfunc = init_one_libfunc ("memset");
5402 bzero_libfunc = init_one_libfunc ("bzero");
5403 setbits_libfunc = init_one_libfunc ("__setbits");
5405 unwind_resume_libfunc = init_one_libfunc (USING_SJLJ_EXCEPTIONS
5406 ? "_Unwind_SjLj_Resume"
5407 : "_Unwind_Resume");
5408 #ifndef DONT_USE_BUILTIN_SETJMP
5409 setjmp_libfunc = init_one_libfunc ("__builtin_setjmp");
5410 longjmp_libfunc = init_one_libfunc ("__builtin_longjmp");
5412 setjmp_libfunc = init_one_libfunc ("setjmp");
5413 longjmp_libfunc = init_one_libfunc ("longjmp");
5415 unwind_sjlj_register_libfunc = init_one_libfunc ("_Unwind_SjLj_Register");
5416 unwind_sjlj_unregister_libfunc
5417 = init_one_libfunc ("_Unwind_SjLj_Unregister");
5419 /* For function entry/exit instrumentation. */
5420 profile_function_entry_libfunc
5421 = init_one_libfunc ("__cyg_profile_func_enter");
5422 profile_function_exit_libfunc
5423 = init_one_libfunc ("__cyg_profile_func_exit");
5425 gcov_flush_libfunc = init_one_libfunc ("__gcov_flush");
5426 gcov_init_libfunc = init_one_libfunc ("__gcov_init");
5428 if (HAVE_conditional_trap)
5429 trap_rtx = gen_rtx_fmt_ee (EQ, VOIDmode, NULL_RTX, NULL_RTX);
5431 /* Allow the target to add more libcalls or rename some, etc. */
5432 targetm.init_libfuncs ();
5435 /* Generate insns to trap with code TCODE if OP1 and OP2 satisfy condition
5436 CODE. Return 0 on failure. */
5439 gen_cond_trap (enum rtx_code code ATTRIBUTE_UNUSED, rtx op1,
5440 rtx op2 ATTRIBUTE_UNUSED, rtx tcode ATTRIBUTE_UNUSED)
5442 enum machine_mode mode = GET_MODE (op1);
5443 enum insn_code icode;
5446 if (!HAVE_conditional_trap)
5449 if (mode == VOIDmode)
5452 icode = cmp_optab->handlers[(int) mode].insn_code;
5453 if (icode == CODE_FOR_nothing)
5457 op1 = prepare_operand (icode, op1, 0, mode, mode, 0);
5458 op2 = prepare_operand (icode, op2, 1, mode, mode, 0);
5464 emit_insn (GEN_FCN (icode) (op1, op2));
5466 PUT_CODE (trap_rtx, code);
5467 insn = gen_conditional_trap (trap_rtx, tcode);
5471 insn = get_insns ();
5478 #include "gt-optabs.h"