1 /* Expand the basic unary and binary arithmetic operations, for GNU compiler.
2 Copyright (C) 1987, 1988, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
3 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010,
4 2011 Free Software Foundation, Inc.
6 This file is part of GCC.
8 GCC is free software; you can redistribute it and/or modify it under
9 the terms of the GNU General Public License as published by the Free
10 Software Foundation; either version 3, or (at your option) any later
13 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
14 WARRANTY; without even the implied warranty of MERCHANTABILITY or
15 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING3. If not see
20 <http://www.gnu.org/licenses/>. */
25 #include "coretypes.h"
27 #include "diagnostic-core.h"
29 /* Include insn-config.h before expr.h so that HAVE_conditional_move
30 is properly defined. */
31 #include "insn-config.h"
44 #include "basic-block.h"
47 struct target_optabs default_target_optabs;
48 struct target_libfuncs default_target_libfuncs;
50 struct target_optabs *this_target_optabs = &default_target_optabs;
51 struct target_libfuncs *this_target_libfuncs = &default_target_libfuncs;
54 #define libfunc_hash \
55 (this_target_libfuncs->x_libfunc_hash)
57 /* Contains the optab used for each rtx code. */
58 optab code_to_optab[NUM_RTX_CODE + 1];
60 static void prepare_float_lib_cmp (rtx, rtx, enum rtx_code, rtx *,
62 static rtx expand_unop_direct (enum machine_mode, optab, rtx, rtx, int);
64 /* Debug facility for use in GDB. */
65 void debug_optab_libfuncs (void);
67 /* Prefixes for the current version of decimal floating point (BID vs. DPD) */
68 #if ENABLE_DECIMAL_BID_FORMAT
69 #define DECIMAL_PREFIX "bid_"
71 #define DECIMAL_PREFIX "dpd_"
74 /* Used for libfunc_hash. */
77 hash_libfunc (const void *p)
79 const struct libfunc_entry *const e = (const struct libfunc_entry *) p;
81 return (((int) e->mode1 + (int) e->mode2 * NUM_MACHINE_MODES)
85 /* Used for libfunc_hash. */
88 eq_libfunc (const void *p, const void *q)
90 const struct libfunc_entry *const e1 = (const struct libfunc_entry *) p;
91 const struct libfunc_entry *const e2 = (const struct libfunc_entry *) q;
93 return (e1->optab == e2->optab
94 && e1->mode1 == e2->mode1
95 && e1->mode2 == e2->mode2);
98 /* Return libfunc corresponding operation defined by OPTAB converting
99 from MODE2 to MODE1. Trigger lazy initialization if needed, return NULL
100 if no libfunc is available. */
102 convert_optab_libfunc (convert_optab optab, enum machine_mode mode1,
103 enum machine_mode mode2)
105 struct libfunc_entry e;
106 struct libfunc_entry **slot;
108 e.optab = (size_t) (optab - &convert_optab_table[0]);
111 slot = (struct libfunc_entry **) htab_find_slot (libfunc_hash, &e, NO_INSERT);
114 if (optab->libcall_gen)
116 optab->libcall_gen (optab, optab->libcall_basename, mode1, mode2);
117 slot = (struct libfunc_entry **) htab_find_slot (libfunc_hash, &e, NO_INSERT);
119 return (*slot)->libfunc;
125 return (*slot)->libfunc;
128 /* Return libfunc corresponding operation defined by OPTAB in MODE.
129 Trigger lazy initialization if needed, return NULL if no libfunc is
132 optab_libfunc (optab optab, enum machine_mode mode)
134 struct libfunc_entry e;
135 struct libfunc_entry **slot;
137 e.optab = (size_t) (optab - &optab_table[0]);
140 slot = (struct libfunc_entry **) htab_find_slot (libfunc_hash, &e, NO_INSERT);
143 if (optab->libcall_gen)
145 optab->libcall_gen (optab, optab->libcall_basename,
146 optab->libcall_suffix, mode);
147 slot = (struct libfunc_entry **) htab_find_slot (libfunc_hash,
150 return (*slot)->libfunc;
156 return (*slot)->libfunc;
160 /* Add a REG_EQUAL note to the last insn in INSNS. TARGET is being set to
161 the result of operation CODE applied to OP0 (and OP1 if it is a binary
164 If the last insn does not set TARGET, don't do anything, but return 1.
166 If a previous insn sets TARGET and TARGET is one of OP0 or OP1,
167 don't add the REG_EQUAL note but return 0. Our caller can then try
168 again, ensuring that TARGET is not one of the operands. */
171 add_equal_note (rtx insns, rtx target, enum rtx_code code, rtx op0, rtx op1)
173 rtx last_insn, insn, set;
176 gcc_assert (insns && INSN_P (insns) && NEXT_INSN (insns));
178 if (GET_RTX_CLASS (code) != RTX_COMM_ARITH
179 && GET_RTX_CLASS (code) != RTX_BIN_ARITH
180 && GET_RTX_CLASS (code) != RTX_COMM_COMPARE
181 && GET_RTX_CLASS (code) != RTX_COMPARE
182 && GET_RTX_CLASS (code) != RTX_UNARY)
185 if (GET_CODE (target) == ZERO_EXTRACT)
188 for (last_insn = insns;
189 NEXT_INSN (last_insn) != NULL_RTX;
190 last_insn = NEXT_INSN (last_insn))
193 set = single_set (last_insn);
197 if (! rtx_equal_p (SET_DEST (set), target)
198 /* For a STRICT_LOW_PART, the REG_NOTE applies to what is inside it. */
199 && (GET_CODE (SET_DEST (set)) != STRICT_LOW_PART
200 || ! rtx_equal_p (XEXP (SET_DEST (set), 0), target)))
203 /* If TARGET is in OP0 or OP1, check if anything in SEQ sets TARGET
204 besides the last insn. */
205 if (reg_overlap_mentioned_p (target, op0)
206 || (op1 && reg_overlap_mentioned_p (target, op1)))
208 insn = PREV_INSN (last_insn);
209 while (insn != NULL_RTX)
211 if (reg_set_p (target, insn))
214 insn = PREV_INSN (insn);
218 if (GET_RTX_CLASS (code) == RTX_UNARY)
228 if (GET_MODE (op0) != VOIDmode && GET_MODE (target) != GET_MODE (op0))
230 note = gen_rtx_fmt_e (code, GET_MODE (op0), copy_rtx (op0));
231 if (GET_MODE_SIZE (GET_MODE (op0))
232 > GET_MODE_SIZE (GET_MODE (target)))
233 note = simplify_gen_unary (TRUNCATE, GET_MODE (target),
234 note, GET_MODE (op0));
236 note = simplify_gen_unary (ZERO_EXTEND, GET_MODE (target),
237 note, GET_MODE (op0));
242 note = gen_rtx_fmt_e (code, GET_MODE (target), copy_rtx (op0));
246 note = gen_rtx_fmt_ee (code, GET_MODE (target), copy_rtx (op0), copy_rtx (op1));
248 set_unique_reg_note (last_insn, REG_EQUAL, note);
253 /* Given two input operands, OP0 and OP1, determine what the correct from_mode
254 for a widening operation would be. In most cases this would be OP0, but if
255 that's a constant it'll be VOIDmode, which isn't useful. */
257 static enum machine_mode
258 widened_mode (enum machine_mode to_mode, rtx op0, rtx op1)
260 enum machine_mode m0 = GET_MODE (op0);
261 enum machine_mode m1 = GET_MODE (op1);
262 enum machine_mode result;
264 if (m0 == VOIDmode && m1 == VOIDmode)
266 else if (m0 == VOIDmode || GET_MODE_SIZE (m0) < GET_MODE_SIZE (m1))
271 if (GET_MODE_SIZE (result) > GET_MODE_SIZE (to_mode))
277 /* Find a widening optab even if it doesn't widen as much as we want.
278 E.g. if from_mode is HImode, and to_mode is DImode, and there is no
279 direct HI->SI insn, then return SI->DI, if that exists.
280 If PERMIT_NON_WIDENING is non-zero then this can be used with
281 non-widening optabs also. */
284 find_widening_optab_handler_and_mode (optab op, enum machine_mode to_mode,
285 enum machine_mode from_mode,
286 int permit_non_widening,
287 enum machine_mode *found_mode)
289 for (; (permit_non_widening || from_mode != to_mode)
290 && GET_MODE_SIZE (from_mode) <= GET_MODE_SIZE (to_mode)
291 && from_mode != VOIDmode;
292 from_mode = GET_MODE_WIDER_MODE (from_mode))
294 enum insn_code handler = widening_optab_handler (op, to_mode,
297 if (handler != CODE_FOR_nothing)
300 *found_mode = from_mode;
305 return CODE_FOR_nothing;
308 /* Widen OP to MODE and return the rtx for the widened operand. UNSIGNEDP
309 says whether OP is signed or unsigned. NO_EXTEND is nonzero if we need
310 not actually do a sign-extend or zero-extend, but can leave the
311 higher-order bits of the result rtx undefined, for example, in the case
312 of logical operations, but not right shifts. */
315 widen_operand (rtx op, enum machine_mode mode, enum machine_mode oldmode,
316 int unsignedp, int no_extend)
320 /* If we don't have to extend and this is a constant, return it. */
321 if (no_extend && GET_MODE (op) == VOIDmode)
324 /* If we must extend do so. If OP is a SUBREG for a promoted object, also
325 extend since it will be more efficient to do so unless the signedness of
326 a promoted object differs from our extension. */
328 || (GET_CODE (op) == SUBREG && SUBREG_PROMOTED_VAR_P (op)
329 && SUBREG_PROMOTED_UNSIGNED_P (op) == unsignedp))
330 return convert_modes (mode, oldmode, op, unsignedp);
332 /* If MODE is no wider than a single word, we return a paradoxical
334 if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
335 return gen_rtx_SUBREG (mode, force_reg (GET_MODE (op), op), 0);
337 /* Otherwise, get an object of MODE, clobber it, and set the low-order
340 result = gen_reg_rtx (mode);
341 emit_clobber (result);
342 emit_move_insn (gen_lowpart (GET_MODE (op), result), op);
346 /* Return the optab used for computing the operation given by the tree code,
347 CODE and the tree EXP. This function is not always usable (for example, it
348 cannot give complete results for multiplication or division) but probably
349 ought to be relied on more widely throughout the expander. */
351 optab_for_tree_code (enum tree_code code, const_tree type,
352 enum optab_subtype subtype)
364 return one_cmpl_optab;
373 return TYPE_UNSIGNED (type) ? umod_optab : smod_optab;
381 if (TYPE_SATURATING(type))
382 return TYPE_UNSIGNED(type) ? usdiv_optab : ssdiv_optab;
383 return TYPE_UNSIGNED (type) ? udiv_optab : sdiv_optab;
386 if (TREE_CODE (type) == VECTOR_TYPE)
388 if (subtype == optab_vector)
389 return TYPE_SATURATING (type) ? NULL : vashl_optab;
391 gcc_assert (subtype == optab_scalar);
393 if (TYPE_SATURATING(type))
394 return TYPE_UNSIGNED(type) ? usashl_optab : ssashl_optab;
398 if (TREE_CODE (type) == VECTOR_TYPE)
400 if (subtype == optab_vector)
401 return TYPE_UNSIGNED (type) ? vlshr_optab : vashr_optab;
403 gcc_assert (subtype == optab_scalar);
405 return TYPE_UNSIGNED (type) ? lshr_optab : ashr_optab;
408 if (TREE_CODE (type) == VECTOR_TYPE)
410 if (subtype == optab_vector)
413 gcc_assert (subtype == optab_scalar);
418 if (TREE_CODE (type) == VECTOR_TYPE)
420 if (subtype == optab_vector)
423 gcc_assert (subtype == optab_scalar);
428 return TYPE_UNSIGNED (type) ? umax_optab : smax_optab;
431 return TYPE_UNSIGNED (type) ? umin_optab : smin_optab;
433 case REALIGN_LOAD_EXPR:
434 return vec_realign_load_optab;
437 return TYPE_UNSIGNED (type) ? usum_widen_optab : ssum_widen_optab;
440 return TYPE_UNSIGNED (type) ? udot_prod_optab : sdot_prod_optab;
442 case WIDEN_MULT_PLUS_EXPR:
443 return (TYPE_UNSIGNED (type)
444 ? (TYPE_SATURATING (type)
445 ? usmadd_widen_optab : umadd_widen_optab)
446 : (TYPE_SATURATING (type)
447 ? ssmadd_widen_optab : smadd_widen_optab));
449 case WIDEN_MULT_MINUS_EXPR:
450 return (TYPE_UNSIGNED (type)
451 ? (TYPE_SATURATING (type)
452 ? usmsub_widen_optab : umsub_widen_optab)
453 : (TYPE_SATURATING (type)
454 ? ssmsub_widen_optab : smsub_widen_optab));
460 return TYPE_UNSIGNED (type) ? reduc_umax_optab : reduc_smax_optab;
463 return TYPE_UNSIGNED (type) ? reduc_umin_optab : reduc_smin_optab;
465 case REDUC_PLUS_EXPR:
466 return TYPE_UNSIGNED (type) ? reduc_uplus_optab : reduc_splus_optab;
468 case VEC_LSHIFT_EXPR:
469 return vec_shl_optab;
471 case VEC_RSHIFT_EXPR:
472 return vec_shr_optab;
474 case VEC_WIDEN_MULT_HI_EXPR:
475 return TYPE_UNSIGNED (type) ?
476 vec_widen_umult_hi_optab : vec_widen_smult_hi_optab;
478 case VEC_WIDEN_MULT_LO_EXPR:
479 return TYPE_UNSIGNED (type) ?
480 vec_widen_umult_lo_optab : vec_widen_smult_lo_optab;
482 case VEC_WIDEN_LSHIFT_HI_EXPR:
483 return TYPE_UNSIGNED (type) ?
484 vec_widen_ushiftl_hi_optab : vec_widen_sshiftl_hi_optab;
486 case VEC_WIDEN_LSHIFT_LO_EXPR:
487 return TYPE_UNSIGNED (type) ?
488 vec_widen_ushiftl_lo_optab : vec_widen_sshiftl_lo_optab;
490 case VEC_UNPACK_HI_EXPR:
491 return TYPE_UNSIGNED (type) ?
492 vec_unpacku_hi_optab : vec_unpacks_hi_optab;
494 case VEC_UNPACK_LO_EXPR:
495 return TYPE_UNSIGNED (type) ?
496 vec_unpacku_lo_optab : vec_unpacks_lo_optab;
498 case VEC_UNPACK_FLOAT_HI_EXPR:
499 /* The signedness is determined from input operand. */
500 return TYPE_UNSIGNED (type) ?
501 vec_unpacku_float_hi_optab : vec_unpacks_float_hi_optab;
503 case VEC_UNPACK_FLOAT_LO_EXPR:
504 /* The signedness is determined from input operand. */
505 return TYPE_UNSIGNED (type) ?
506 vec_unpacku_float_lo_optab : vec_unpacks_float_lo_optab;
508 case VEC_PACK_TRUNC_EXPR:
509 return vec_pack_trunc_optab;
511 case VEC_PACK_SAT_EXPR:
512 return TYPE_UNSIGNED (type) ? vec_pack_usat_optab : vec_pack_ssat_optab;
514 case VEC_PACK_FIX_TRUNC_EXPR:
515 /* The signedness is determined from output operand. */
516 return TYPE_UNSIGNED (type) ?
517 vec_pack_ufix_trunc_optab : vec_pack_sfix_trunc_optab;
523 trapv = INTEGRAL_TYPE_P (type) && TYPE_OVERFLOW_TRAPS (type);
526 case POINTER_PLUS_EXPR:
528 if (TYPE_SATURATING(type))
529 return TYPE_UNSIGNED(type) ? usadd_optab : ssadd_optab;
530 return trapv ? addv_optab : add_optab;
533 if (TYPE_SATURATING(type))
534 return TYPE_UNSIGNED(type) ? ussub_optab : sssub_optab;
535 return trapv ? subv_optab : sub_optab;
538 if (TYPE_SATURATING(type))
539 return TYPE_UNSIGNED(type) ? usmul_optab : ssmul_optab;
540 return trapv ? smulv_optab : smul_optab;
543 if (TYPE_SATURATING(type))
544 return TYPE_UNSIGNED(type) ? usneg_optab : ssneg_optab;
545 return trapv ? negv_optab : neg_optab;
548 return trapv ? absv_optab : abs_optab;
550 case VEC_EXTRACT_EVEN_EXPR:
551 return vec_extract_even_optab;
553 case VEC_EXTRACT_ODD_EXPR:
554 return vec_extract_odd_optab;
562 /* Expand vector widening operations.
564 There are two different classes of operations handled here:
565 1) Operations whose result is wider than all the arguments to the operation.
566 Examples: VEC_UNPACK_HI/LO_EXPR, VEC_WIDEN_MULT_HI/LO_EXPR
567 In this case OP0 and optionally OP1 would be initialized,
568 but WIDE_OP wouldn't (not relevant for this case).
569 2) Operations whose result is of the same size as the last argument to the
570 operation, but wider than all the other arguments to the operation.
571 Examples: WIDEN_SUM_EXPR, VEC_DOT_PROD_EXPR.
572 In the case WIDE_OP, OP0 and optionally OP1 would be initialized.
574 E.g, when called to expand the following operations, this is how
575 the arguments will be initialized:
577 widening-sum 2 oprnd0 - oprnd1
578 widening-dot-product 3 oprnd0 oprnd1 oprnd2
579 widening-mult 2 oprnd0 oprnd1 -
580 type-promotion (vec-unpack) 1 oprnd0 - - */
583 expand_widen_pattern_expr (sepops ops, rtx op0, rtx op1, rtx wide_op,
584 rtx target, int unsignedp)
586 struct expand_operand eops[4];
587 tree oprnd0, oprnd1, oprnd2;
588 enum machine_mode wmode = VOIDmode, tmode0, tmode1 = VOIDmode;
589 optab widen_pattern_optab;
590 enum insn_code icode;
591 int nops = TREE_CODE_LENGTH (ops->code);
595 tmode0 = TYPE_MODE (TREE_TYPE (oprnd0));
596 widen_pattern_optab =
597 optab_for_tree_code (ops->code, TREE_TYPE (oprnd0), optab_default);
598 if (ops->code == WIDEN_MULT_PLUS_EXPR
599 || ops->code == WIDEN_MULT_MINUS_EXPR)
600 icode = find_widening_optab_handler (widen_pattern_optab,
601 TYPE_MODE (TREE_TYPE (ops->op2)),
604 icode = optab_handler (widen_pattern_optab, tmode0);
605 gcc_assert (icode != CODE_FOR_nothing);
610 tmode1 = TYPE_MODE (TREE_TYPE (oprnd1));
613 /* The last operand is of a wider mode than the rest of the operands. */
618 gcc_assert (tmode1 == tmode0);
621 wmode = TYPE_MODE (TREE_TYPE (oprnd2));
625 create_output_operand (&eops[op++], target, TYPE_MODE (ops->type));
626 create_convert_operand_from (&eops[op++], op0, tmode0, unsignedp);
628 create_convert_operand_from (&eops[op++], op1, tmode1, unsignedp);
630 create_convert_operand_from (&eops[op++], wide_op, wmode, unsignedp);
631 expand_insn (icode, op, eops);
632 return eops[0].value;
635 /* Generate code to perform an operation specified by TERNARY_OPTAB
636 on operands OP0, OP1 and OP2, with result having machine-mode MODE.
638 UNSIGNEDP is for the case where we have to widen the operands
639 to perform the operation. It says to use zero-extension.
641 If TARGET is nonzero, the value
642 is generated there, if it is convenient to do so.
643 In all cases an rtx is returned for the locus of the value;
644 this may or may not be TARGET. */
647 expand_ternary_op (enum machine_mode mode, optab ternary_optab, rtx op0,
648 rtx op1, rtx op2, rtx target, int unsignedp)
650 struct expand_operand ops[4];
651 enum insn_code icode = optab_handler (ternary_optab, mode);
653 gcc_assert (optab_handler (ternary_optab, mode) != CODE_FOR_nothing);
655 create_output_operand (&ops[0], target, mode);
656 create_convert_operand_from (&ops[1], op0, mode, unsignedp);
657 create_convert_operand_from (&ops[2], op1, mode, unsignedp);
658 create_convert_operand_from (&ops[3], op2, mode, unsignedp);
659 expand_insn (icode, 4, ops);
664 /* Like expand_binop, but return a constant rtx if the result can be
665 calculated at compile time. The arguments and return value are
666 otherwise the same as for expand_binop. */
669 simplify_expand_binop (enum machine_mode mode, optab binoptab,
670 rtx op0, rtx op1, rtx target, int unsignedp,
671 enum optab_methods methods)
673 if (CONSTANT_P (op0) && CONSTANT_P (op1))
675 rtx x = simplify_binary_operation (binoptab->code, mode, op0, op1);
681 return expand_binop (mode, binoptab, op0, op1, target, unsignedp, methods);
684 /* Like simplify_expand_binop, but always put the result in TARGET.
685 Return true if the expansion succeeded. */
688 force_expand_binop (enum machine_mode mode, optab binoptab,
689 rtx op0, rtx op1, rtx target, int unsignedp,
690 enum optab_methods methods)
692 rtx x = simplify_expand_binop (mode, binoptab, op0, op1,
693 target, unsignedp, methods);
697 emit_move_insn (target, x);
701 /* Generate insns for VEC_LSHIFT_EXPR, VEC_RSHIFT_EXPR. */
704 expand_vec_shift_expr (sepops ops, rtx target)
706 struct expand_operand eops[3];
707 enum insn_code icode;
708 rtx rtx_op1, rtx_op2;
709 enum machine_mode mode = TYPE_MODE (ops->type);
710 tree vec_oprnd = ops->op0;
711 tree shift_oprnd = ops->op1;
716 case VEC_RSHIFT_EXPR:
717 shift_optab = vec_shr_optab;
719 case VEC_LSHIFT_EXPR:
720 shift_optab = vec_shl_optab;
726 icode = optab_handler (shift_optab, mode);
727 gcc_assert (icode != CODE_FOR_nothing);
729 rtx_op1 = expand_normal (vec_oprnd);
730 rtx_op2 = expand_normal (shift_oprnd);
732 create_output_operand (&eops[0], target, mode);
733 create_input_operand (&eops[1], rtx_op1, GET_MODE (rtx_op1));
734 create_convert_operand_from_type (&eops[2], rtx_op2, TREE_TYPE (shift_oprnd));
735 expand_insn (icode, 3, eops);
737 return eops[0].value;
740 /* Create a new vector value in VMODE with all elements set to OP. The
741 mode of OP must be the element mode of VMODE. If OP is a constant,
742 then the return value will be a constant. */
745 expand_vector_broadcast (enum machine_mode vmode, rtx op)
747 enum insn_code icode;
752 gcc_checking_assert (VECTOR_MODE_P (vmode));
754 n = GET_MODE_NUNITS (vmode);
755 vec = rtvec_alloc (n);
756 for (i = 0; i < n; ++i)
757 RTVEC_ELT (vec, i) = op;
760 return gen_rtx_CONST_VECTOR (vmode, vec);
762 /* ??? If the target doesn't have a vec_init, then we have no easy way
763 of performing this operation. Most of this sort of generic support
764 is hidden away in the vector lowering support in gimple. */
765 icode = optab_handler (vec_init_optab, vmode);
766 if (icode == CODE_FOR_nothing)
769 ret = gen_reg_rtx (vmode);
770 emit_insn (GEN_FCN (icode) (ret, gen_rtx_PARALLEL (vmode, vec)));
775 /* This subroutine of expand_doubleword_shift handles the cases in which
776 the effective shift value is >= BITS_PER_WORD. The arguments and return
777 value are the same as for the parent routine, except that SUPERWORD_OP1
778 is the shift count to use when shifting OUTOF_INPUT into INTO_TARGET.
779 INTO_TARGET may be null if the caller has decided to calculate it. */
782 expand_superword_shift (optab binoptab, rtx outof_input, rtx superword_op1,
783 rtx outof_target, rtx into_target,
784 int unsignedp, enum optab_methods methods)
786 if (into_target != 0)
787 if (!force_expand_binop (word_mode, binoptab, outof_input, superword_op1,
788 into_target, unsignedp, methods))
791 if (outof_target != 0)
793 /* For a signed right shift, we must fill OUTOF_TARGET with copies
794 of the sign bit, otherwise we must fill it with zeros. */
795 if (binoptab != ashr_optab)
796 emit_move_insn (outof_target, CONST0_RTX (word_mode));
798 if (!force_expand_binop (word_mode, binoptab,
799 outof_input, GEN_INT (BITS_PER_WORD - 1),
800 outof_target, unsignedp, methods))
806 /* This subroutine of expand_doubleword_shift handles the cases in which
807 the effective shift value is < BITS_PER_WORD. The arguments and return
808 value are the same as for the parent routine. */
811 expand_subword_shift (enum machine_mode op1_mode, optab binoptab,
812 rtx outof_input, rtx into_input, rtx op1,
813 rtx outof_target, rtx into_target,
814 int unsignedp, enum optab_methods methods,
815 unsigned HOST_WIDE_INT shift_mask)
817 optab reverse_unsigned_shift, unsigned_shift;
820 reverse_unsigned_shift = (binoptab == ashl_optab ? lshr_optab : ashl_optab);
821 unsigned_shift = (binoptab == ashl_optab ? ashl_optab : lshr_optab);
823 /* The low OP1 bits of INTO_TARGET come from the high bits of OUTOF_INPUT.
824 We therefore need to shift OUTOF_INPUT by (BITS_PER_WORD - OP1) bits in
825 the opposite direction to BINOPTAB. */
826 if (CONSTANT_P (op1) || shift_mask >= BITS_PER_WORD)
828 carries = outof_input;
829 tmp = immed_double_const (BITS_PER_WORD, 0, op1_mode);
830 tmp = simplify_expand_binop (op1_mode, sub_optab, tmp, op1,
835 /* We must avoid shifting by BITS_PER_WORD bits since that is either
836 the same as a zero shift (if shift_mask == BITS_PER_WORD - 1) or
837 has unknown behavior. Do a single shift first, then shift by the
838 remainder. It's OK to use ~OP1 as the remainder if shift counts
839 are truncated to the mode size. */
840 carries = expand_binop (word_mode, reverse_unsigned_shift,
841 outof_input, const1_rtx, 0, unsignedp, methods);
842 if (shift_mask == BITS_PER_WORD - 1)
844 tmp = immed_double_const (-1, -1, op1_mode);
845 tmp = simplify_expand_binop (op1_mode, xor_optab, op1, tmp,
850 tmp = immed_double_const (BITS_PER_WORD - 1, 0, op1_mode);
851 tmp = simplify_expand_binop (op1_mode, sub_optab, tmp, op1,
855 if (tmp == 0 || carries == 0)
857 carries = expand_binop (word_mode, reverse_unsigned_shift,
858 carries, tmp, 0, unsignedp, methods);
862 /* Shift INTO_INPUT logically by OP1. This is the last use of INTO_INPUT
863 so the result can go directly into INTO_TARGET if convenient. */
864 tmp = expand_binop (word_mode, unsigned_shift, into_input, op1,
865 into_target, unsignedp, methods);
869 /* Now OR in the bits carried over from OUTOF_INPUT. */
870 if (!force_expand_binop (word_mode, ior_optab, tmp, carries,
871 into_target, unsignedp, methods))
874 /* Use a standard word_mode shift for the out-of half. */
875 if (outof_target != 0)
876 if (!force_expand_binop (word_mode, binoptab, outof_input, op1,
877 outof_target, unsignedp, methods))
884 #ifdef HAVE_conditional_move
885 /* Try implementing expand_doubleword_shift using conditional moves.
886 The shift is by < BITS_PER_WORD if (CMP_CODE CMP1 CMP2) is true,
887 otherwise it is by >= BITS_PER_WORD. SUBWORD_OP1 and SUPERWORD_OP1
888 are the shift counts to use in the former and latter case. All other
889 arguments are the same as the parent routine. */
892 expand_doubleword_shift_condmove (enum machine_mode op1_mode, optab binoptab,
893 enum rtx_code cmp_code, rtx cmp1, rtx cmp2,
894 rtx outof_input, rtx into_input,
895 rtx subword_op1, rtx superword_op1,
896 rtx outof_target, rtx into_target,
897 int unsignedp, enum optab_methods methods,
898 unsigned HOST_WIDE_INT shift_mask)
900 rtx outof_superword, into_superword;
902 /* Put the superword version of the output into OUTOF_SUPERWORD and
904 outof_superword = outof_target != 0 ? gen_reg_rtx (word_mode) : 0;
905 if (outof_target != 0 && subword_op1 == superword_op1)
907 /* The value INTO_TARGET >> SUBWORD_OP1, which we later store in
908 OUTOF_TARGET, is the same as the value of INTO_SUPERWORD. */
909 into_superword = outof_target;
910 if (!expand_superword_shift (binoptab, outof_input, superword_op1,
911 outof_superword, 0, unsignedp, methods))
916 into_superword = gen_reg_rtx (word_mode);
917 if (!expand_superword_shift (binoptab, outof_input, superword_op1,
918 outof_superword, into_superword,
923 /* Put the subword version directly in OUTOF_TARGET and INTO_TARGET. */
924 if (!expand_subword_shift (op1_mode, binoptab,
925 outof_input, into_input, subword_op1,
926 outof_target, into_target,
927 unsignedp, methods, shift_mask))
930 /* Select between them. Do the INTO half first because INTO_SUPERWORD
931 might be the current value of OUTOF_TARGET. */
932 if (!emit_conditional_move (into_target, cmp_code, cmp1, cmp2, op1_mode,
933 into_target, into_superword, word_mode, false))
936 if (outof_target != 0)
937 if (!emit_conditional_move (outof_target, cmp_code, cmp1, cmp2, op1_mode,
938 outof_target, outof_superword,
946 /* Expand a doubleword shift (ashl, ashr or lshr) using word-mode shifts.
947 OUTOF_INPUT and INTO_INPUT are the two word-sized halves of the first
948 input operand; the shift moves bits in the direction OUTOF_INPUT->
949 INTO_TARGET. OUTOF_TARGET and INTO_TARGET are the equivalent words
950 of the target. OP1 is the shift count and OP1_MODE is its mode.
951 If OP1 is constant, it will have been truncated as appropriate
952 and is known to be nonzero.
954 If SHIFT_MASK is zero, the result of word shifts is undefined when the
955 shift count is outside the range [0, BITS_PER_WORD). This routine must
956 avoid generating such shifts for OP1s in the range [0, BITS_PER_WORD * 2).
958 If SHIFT_MASK is nonzero, all word-mode shift counts are effectively
959 masked by it and shifts in the range [BITS_PER_WORD, SHIFT_MASK) will
960 fill with zeros or sign bits as appropriate.
962 If SHIFT_MASK is BITS_PER_WORD - 1, this routine will synthesize
963 a doubleword shift whose equivalent mask is BITS_PER_WORD * 2 - 1.
964 Doing this preserves semantics required by SHIFT_COUNT_TRUNCATED.
965 In all other cases, shifts by values outside [0, BITS_PER_UNIT * 2)
968 BINOPTAB, UNSIGNEDP and METHODS are as for expand_binop. This function
969 may not use INTO_INPUT after modifying INTO_TARGET, and similarly for
970 OUTOF_INPUT and OUTOF_TARGET. OUTOF_TARGET can be null if the parent
971 function wants to calculate it itself.
973 Return true if the shift could be successfully synthesized. */
976 expand_doubleword_shift (enum machine_mode op1_mode, optab binoptab,
977 rtx outof_input, rtx into_input, rtx op1,
978 rtx outof_target, rtx into_target,
979 int unsignedp, enum optab_methods methods,
980 unsigned HOST_WIDE_INT shift_mask)
982 rtx superword_op1, tmp, cmp1, cmp2;
983 rtx subword_label, done_label;
984 enum rtx_code cmp_code;
986 /* See if word-mode shifts by BITS_PER_WORD...BITS_PER_WORD * 2 - 1 will
987 fill the result with sign or zero bits as appropriate. If so, the value
988 of OUTOF_TARGET will always be (SHIFT OUTOF_INPUT OP1). Recursively call
989 this routine to calculate INTO_TARGET (which depends on both OUTOF_INPUT
990 and INTO_INPUT), then emit code to set up OUTOF_TARGET.
992 This isn't worthwhile for constant shifts since the optimizers will
993 cope better with in-range shift counts. */
994 if (shift_mask >= BITS_PER_WORD
996 && !CONSTANT_P (op1))
998 if (!expand_doubleword_shift (op1_mode, binoptab,
999 outof_input, into_input, op1,
1001 unsignedp, methods, shift_mask))
1003 if (!force_expand_binop (word_mode, binoptab, outof_input, op1,
1004 outof_target, unsignedp, methods))
1009 /* Set CMP_CODE, CMP1 and CMP2 so that the rtx (CMP_CODE CMP1 CMP2)
1010 is true when the effective shift value is less than BITS_PER_WORD.
1011 Set SUPERWORD_OP1 to the shift count that should be used to shift
1012 OUTOF_INPUT into INTO_TARGET when the condition is false. */
1013 tmp = immed_double_const (BITS_PER_WORD, 0, op1_mode);
1014 if (!CONSTANT_P (op1) && shift_mask == BITS_PER_WORD - 1)
1016 /* Set CMP1 to OP1 & BITS_PER_WORD. The result is zero iff OP1
1017 is a subword shift count. */
1018 cmp1 = simplify_expand_binop (op1_mode, and_optab, op1, tmp,
1020 cmp2 = CONST0_RTX (op1_mode);
1022 superword_op1 = op1;
1026 /* Set CMP1 to OP1 - BITS_PER_WORD. */
1027 cmp1 = simplify_expand_binop (op1_mode, sub_optab, op1, tmp,
1029 cmp2 = CONST0_RTX (op1_mode);
1031 superword_op1 = cmp1;
1036 /* If we can compute the condition at compile time, pick the
1037 appropriate subroutine. */
1038 tmp = simplify_relational_operation (cmp_code, SImode, op1_mode, cmp1, cmp2);
1039 if (tmp != 0 && CONST_INT_P (tmp))
1041 if (tmp == const0_rtx)
1042 return expand_superword_shift (binoptab, outof_input, superword_op1,
1043 outof_target, into_target,
1044 unsignedp, methods);
1046 return expand_subword_shift (op1_mode, binoptab,
1047 outof_input, into_input, op1,
1048 outof_target, into_target,
1049 unsignedp, methods, shift_mask);
1052 #ifdef HAVE_conditional_move
1053 /* Try using conditional moves to generate straight-line code. */
1055 rtx start = get_last_insn ();
1056 if (expand_doubleword_shift_condmove (op1_mode, binoptab,
1057 cmp_code, cmp1, cmp2,
1058 outof_input, into_input,
1060 outof_target, into_target,
1061 unsignedp, methods, shift_mask))
1063 delete_insns_since (start);
1067 /* As a last resort, use branches to select the correct alternative. */
1068 subword_label = gen_label_rtx ();
1069 done_label = gen_label_rtx ();
1072 do_compare_rtx_and_jump (cmp1, cmp2, cmp_code, false, op1_mode,
1073 0, 0, subword_label, -1);
1076 if (!expand_superword_shift (binoptab, outof_input, superword_op1,
1077 outof_target, into_target,
1078 unsignedp, methods))
1081 emit_jump_insn (gen_jump (done_label));
1083 emit_label (subword_label);
1085 if (!expand_subword_shift (op1_mode, binoptab,
1086 outof_input, into_input, op1,
1087 outof_target, into_target,
1088 unsignedp, methods, shift_mask))
1091 emit_label (done_label);
1095 /* Subroutine of expand_binop. Perform a double word multiplication of
1096 operands OP0 and OP1 both of mode MODE, which is exactly twice as wide
1097 as the target's word_mode. This function return NULL_RTX if anything
1098 goes wrong, in which case it may have already emitted instructions
1099 which need to be deleted.
1101 If we want to multiply two two-word values and have normal and widening
1102 multiplies of single-word values, we can do this with three smaller
1105 The multiplication proceeds as follows:
1106 _______________________
1107 [__op0_high_|__op0_low__]
1108 _______________________
1109 * [__op1_high_|__op1_low__]
1110 _______________________________________________
1111 _______________________
1112 (1) [__op0_low__*__op1_low__]
1113 _______________________
1114 (2a) [__op0_low__*__op1_high_]
1115 _______________________
1116 (2b) [__op0_high_*__op1_low__]
1117 _______________________
1118 (3) [__op0_high_*__op1_high_]
1121 This gives a 4-word result. Since we are only interested in the
1122 lower 2 words, partial result (3) and the upper words of (2a) and
1123 (2b) don't need to be calculated. Hence (2a) and (2b) can be
1124 calculated using non-widening multiplication.
1126 (1), however, needs to be calculated with an unsigned widening
1127 multiplication. If this operation is not directly supported we
1128 try using a signed widening multiplication and adjust the result.
1129 This adjustment works as follows:
1131 If both operands are positive then no adjustment is needed.
1133 If the operands have different signs, for example op0_low < 0 and
1134 op1_low >= 0, the instruction treats the most significant bit of
1135 op0_low as a sign bit instead of a bit with significance
1136 2**(BITS_PER_WORD-1), i.e. the instruction multiplies op1_low
1137 with 2**BITS_PER_WORD - op0_low, and two's complements the
1138 result. Conclusion: We need to add op1_low * 2**BITS_PER_WORD to
1141 Similarly, if both operands are negative, we need to add
1142 (op0_low + op1_low) * 2**BITS_PER_WORD.
1144 We use a trick to adjust quickly. We logically shift op0_low right
1145 (op1_low) BITS_PER_WORD-1 steps to get 0 or 1, and add this to
1146 op0_high (op1_high) before it is used to calculate 2b (2a). If no
1147 logical shift exists, we do an arithmetic right shift and subtract
1151 expand_doubleword_mult (enum machine_mode mode, rtx op0, rtx op1, rtx target,
1152 bool umulp, enum optab_methods methods)
1154 int low = (WORDS_BIG_ENDIAN ? 1 : 0);
1155 int high = (WORDS_BIG_ENDIAN ? 0 : 1);
1156 rtx wordm1 = umulp ? NULL_RTX : GEN_INT (BITS_PER_WORD - 1);
1157 rtx product, adjust, product_high, temp;
1159 rtx op0_high = operand_subword_force (op0, high, mode);
1160 rtx op0_low = operand_subword_force (op0, low, mode);
1161 rtx op1_high = operand_subword_force (op1, high, mode);
1162 rtx op1_low = operand_subword_force (op1, low, mode);
1164 /* If we're using an unsigned multiply to directly compute the product
1165 of the low-order words of the operands and perform any required
1166 adjustments of the operands, we begin by trying two more multiplications
1167 and then computing the appropriate sum.
1169 We have checked above that the required addition is provided.
1170 Full-word addition will normally always succeed, especially if
1171 it is provided at all, so we don't worry about its failure. The
1172 multiplication may well fail, however, so we do handle that. */
1176 /* ??? This could be done with emit_store_flag where available. */
1177 temp = expand_binop (word_mode, lshr_optab, op0_low, wordm1,
1178 NULL_RTX, 1, methods);
1180 op0_high = expand_binop (word_mode, add_optab, op0_high, temp,
1181 NULL_RTX, 0, OPTAB_DIRECT);
1184 temp = expand_binop (word_mode, ashr_optab, op0_low, wordm1,
1185 NULL_RTX, 0, methods);
1188 op0_high = expand_binop (word_mode, sub_optab, op0_high, temp,
1189 NULL_RTX, 0, OPTAB_DIRECT);
1196 adjust = expand_binop (word_mode, smul_optab, op0_high, op1_low,
1197 NULL_RTX, 0, OPTAB_DIRECT);
1201 /* OP0_HIGH should now be dead. */
1205 /* ??? This could be done with emit_store_flag where available. */
1206 temp = expand_binop (word_mode, lshr_optab, op1_low, wordm1,
1207 NULL_RTX, 1, methods);
1209 op1_high = expand_binop (word_mode, add_optab, op1_high, temp,
1210 NULL_RTX, 0, OPTAB_DIRECT);
1213 temp = expand_binop (word_mode, ashr_optab, op1_low, wordm1,
1214 NULL_RTX, 0, methods);
1217 op1_high = expand_binop (word_mode, sub_optab, op1_high, temp,
1218 NULL_RTX, 0, OPTAB_DIRECT);
1225 temp = expand_binop (word_mode, smul_optab, op1_high, op0_low,
1226 NULL_RTX, 0, OPTAB_DIRECT);
1230 /* OP1_HIGH should now be dead. */
1232 adjust = expand_binop (word_mode, add_optab, adjust, temp,
1233 NULL_RTX, 0, OPTAB_DIRECT);
1235 if (target && !REG_P (target))
1239 product = expand_binop (mode, umul_widen_optab, op0_low, op1_low,
1240 target, 1, OPTAB_DIRECT);
1242 product = expand_binop (mode, smul_widen_optab, op0_low, op1_low,
1243 target, 1, OPTAB_DIRECT);
1248 product_high = operand_subword (product, high, 1, mode);
1249 adjust = expand_binop (word_mode, add_optab, product_high, adjust,
1250 NULL_RTX, 0, OPTAB_DIRECT);
1251 emit_move_insn (product_high, adjust);
1255 /* Wrapper around expand_binop which takes an rtx code to specify
1256 the operation to perform, not an optab pointer. All other
1257 arguments are the same. */
1259 expand_simple_binop (enum machine_mode mode, enum rtx_code code, rtx op0,
1260 rtx op1, rtx target, int unsignedp,
1261 enum optab_methods methods)
1263 optab binop = code_to_optab[(int) code];
1266 return expand_binop (mode, binop, op0, op1, target, unsignedp, methods);
1269 /* Return whether OP0 and OP1 should be swapped when expanding a commutative
1270 binop. Order them according to commutative_operand_precedence and, if
1271 possible, try to put TARGET or a pseudo first. */
1273 swap_commutative_operands_with_target (rtx target, rtx op0, rtx op1)
1275 int op0_prec = commutative_operand_precedence (op0);
1276 int op1_prec = commutative_operand_precedence (op1);
1278 if (op0_prec < op1_prec)
1281 if (op0_prec > op1_prec)
1284 /* With equal precedence, both orders are ok, but it is better if the
1285 first operand is TARGET, or if both TARGET and OP0 are pseudos. */
1286 if (target == 0 || REG_P (target))
1287 return (REG_P (op1) && !REG_P (op0)) || target == op1;
1289 return rtx_equal_p (op1, target);
1292 /* Return true if BINOPTAB implements a shift operation. */
1295 shift_optab_p (optab binoptab)
1297 switch (binoptab->code)
1313 /* Return true if BINOPTAB implements a commutative binary operation. */
1316 commutative_optab_p (optab binoptab)
1318 return (GET_RTX_CLASS (binoptab->code) == RTX_COMM_ARITH
1319 || binoptab == smul_widen_optab
1320 || binoptab == umul_widen_optab
1321 || binoptab == smul_highpart_optab
1322 || binoptab == umul_highpart_optab);
1325 /* X is to be used in mode MODE as operand OPN to BINOPTAB. If we're
1326 optimizing, and if the operand is a constant that costs more than
1327 1 instruction, force the constant into a register and return that
1328 register. Return X otherwise. UNSIGNEDP says whether X is unsigned. */
1331 avoid_expensive_constant (enum machine_mode mode, optab binoptab,
1332 int opn, rtx x, bool unsignedp)
1334 bool speed = optimize_insn_for_speed_p ();
1336 if (mode != VOIDmode
1339 && rtx_cost (x, binoptab->code, opn, speed) > set_src_cost (x, speed))
1341 if (CONST_INT_P (x))
1343 HOST_WIDE_INT intval = trunc_int_for_mode (INTVAL (x), mode);
1344 if (intval != INTVAL (x))
1345 x = GEN_INT (intval);
1348 x = convert_modes (mode, VOIDmode, x, unsignedp);
1349 x = force_reg (mode, x);
1354 /* Helper function for expand_binop: handle the case where there
1355 is an insn that directly implements the indicated operation.
1356 Returns null if this is not possible. */
1358 expand_binop_directly (enum machine_mode mode, optab binoptab,
1360 rtx target, int unsignedp, enum optab_methods methods,
1363 enum machine_mode from_mode = widened_mode (mode, op0, op1);
1364 enum insn_code icode = find_widening_optab_handler (binoptab, mode,
1366 enum machine_mode xmode0 = insn_data[(int) icode].operand[1].mode;
1367 enum machine_mode xmode1 = insn_data[(int) icode].operand[2].mode;
1368 enum machine_mode mode0, mode1, tmp_mode;
1369 struct expand_operand ops[3];
1372 rtx xop0 = op0, xop1 = op1;
1375 /* If it is a commutative operator and the modes would match
1376 if we would swap the operands, we can save the conversions. */
1377 commutative_p = commutative_optab_p (binoptab);
1379 && GET_MODE (xop0) != xmode0 && GET_MODE (xop1) != xmode1
1380 && GET_MODE (xop0) == xmode1 && GET_MODE (xop1) == xmode1)
1387 /* If we are optimizing, force expensive constants into a register. */
1388 xop0 = avoid_expensive_constant (xmode0, binoptab, 0, xop0, unsignedp);
1389 if (!shift_optab_p (binoptab))
1390 xop1 = avoid_expensive_constant (xmode1, binoptab, 1, xop1, unsignedp);
1392 /* In case the insn wants input operands in modes different from
1393 those of the actual operands, convert the operands. It would
1394 seem that we don't need to convert CONST_INTs, but we do, so
1395 that they're properly zero-extended, sign-extended or truncated
1398 mode0 = GET_MODE (xop0) != VOIDmode ? GET_MODE (xop0) : mode;
1399 if (xmode0 != VOIDmode && xmode0 != mode0)
1401 xop0 = convert_modes (xmode0, mode0, xop0, unsignedp);
1405 mode1 = GET_MODE (xop1) != VOIDmode ? GET_MODE (xop1) : mode;
1406 if (xmode1 != VOIDmode && xmode1 != mode1)
1408 xop1 = convert_modes (xmode1, mode1, xop1, unsignedp);
1412 /* If operation is commutative,
1413 try to make the first operand a register.
1414 Even better, try to make it the same as the target.
1415 Also try to make the last operand a constant. */
1417 && swap_commutative_operands_with_target (target, xop0, xop1))
1424 /* Now, if insn's predicates don't allow our operands, put them into
1427 if (binoptab == vec_pack_trunc_optab
1428 || binoptab == vec_pack_usat_optab
1429 || binoptab == vec_pack_ssat_optab
1430 || binoptab == vec_pack_ufix_trunc_optab
1431 || binoptab == vec_pack_sfix_trunc_optab)
1433 /* The mode of the result is different then the mode of the
1435 tmp_mode = insn_data[(int) icode].operand[0].mode;
1436 if (GET_MODE_NUNITS (tmp_mode) != 2 * GET_MODE_NUNITS (mode))
1438 delete_insns_since (last);
1445 create_output_operand (&ops[0], target, tmp_mode);
1446 create_input_operand (&ops[1], xop0, mode0);
1447 create_input_operand (&ops[2], xop1, mode1);
1448 pat = maybe_gen_insn (icode, 3, ops);
1451 /* If PAT is composed of more than one insn, try to add an appropriate
1452 REG_EQUAL note to it. If we can't because TEMP conflicts with an
1453 operand, call expand_binop again, this time without a target. */
1454 if (INSN_P (pat) && NEXT_INSN (pat) != NULL_RTX
1455 && ! add_equal_note (pat, ops[0].value, binoptab->code,
1456 ops[1].value, ops[2].value))
1458 delete_insns_since (last);
1459 return expand_binop (mode, binoptab, op0, op1, NULL_RTX,
1460 unsignedp, methods);
1464 return ops[0].value;
1466 delete_insns_since (last);
1470 /* Generate code to perform an operation specified by BINOPTAB
1471 on operands OP0 and OP1, with result having machine-mode MODE.
1473 UNSIGNEDP is for the case where we have to widen the operands
1474 to perform the operation. It says to use zero-extension.
1476 If TARGET is nonzero, the value
1477 is generated there, if it is convenient to do so.
1478 In all cases an rtx is returned for the locus of the value;
1479 this may or may not be TARGET. */
1482 expand_binop (enum machine_mode mode, optab binoptab, rtx op0, rtx op1,
1483 rtx target, int unsignedp, enum optab_methods methods)
1485 enum optab_methods next_methods
1486 = (methods == OPTAB_LIB || methods == OPTAB_LIB_WIDEN
1487 ? OPTAB_WIDEN : methods);
1488 enum mode_class mclass;
1489 enum machine_mode wider_mode;
1492 rtx entry_last = get_last_insn ();
1495 mclass = GET_MODE_CLASS (mode);
1497 /* If subtracting an integer constant, convert this into an addition of
1498 the negated constant. */
1500 if (binoptab == sub_optab && CONST_INT_P (op1))
1502 op1 = negate_rtx (mode, op1);
1503 binoptab = add_optab;
1506 /* Record where to delete back to if we backtrack. */
1507 last = get_last_insn ();
1509 /* If we can do it with a three-operand insn, do so. */
1511 if (methods != OPTAB_MUST_WIDEN
1512 && find_widening_optab_handler (binoptab, mode,
1513 widened_mode (mode, op0, op1), 1)
1514 != CODE_FOR_nothing)
1516 temp = expand_binop_directly (mode, binoptab, op0, op1, target,
1517 unsignedp, methods, last);
1522 /* If we were trying to rotate, and that didn't work, try rotating
1523 the other direction before falling back to shifts and bitwise-or. */
1524 if (((binoptab == rotl_optab
1525 && optab_handler (rotr_optab, mode) != CODE_FOR_nothing)
1526 || (binoptab == rotr_optab
1527 && optab_handler (rotl_optab, mode) != CODE_FOR_nothing))
1528 && mclass == MODE_INT)
1530 optab otheroptab = (binoptab == rotl_optab ? rotr_optab : rotl_optab);
1532 unsigned int bits = GET_MODE_PRECISION (mode);
1534 if (CONST_INT_P (op1))
1535 newop1 = GEN_INT (bits - INTVAL (op1));
1536 else if (targetm.shift_truncation_mask (mode) == bits - 1)
1537 newop1 = negate_rtx (GET_MODE (op1), op1);
1539 newop1 = expand_binop (GET_MODE (op1), sub_optab,
1540 GEN_INT (bits), op1,
1541 NULL_RTX, unsignedp, OPTAB_DIRECT);
1543 temp = expand_binop_directly (mode, otheroptab, op0, newop1,
1544 target, unsignedp, methods, last);
1549 /* If this is a multiply, see if we can do a widening operation that
1550 takes operands of this mode and makes a wider mode. */
1552 if (binoptab == smul_optab
1553 && GET_MODE_2XWIDER_MODE (mode) != VOIDmode
1554 && (widening_optab_handler ((unsignedp ? umul_widen_optab
1555 : smul_widen_optab),
1556 GET_MODE_2XWIDER_MODE (mode), mode)
1557 != CODE_FOR_nothing))
1559 temp = expand_binop (GET_MODE_2XWIDER_MODE (mode),
1560 unsignedp ? umul_widen_optab : smul_widen_optab,
1561 op0, op1, NULL_RTX, unsignedp, OPTAB_DIRECT);
1565 if (GET_MODE_CLASS (mode) == MODE_INT
1566 && TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (temp)))
1567 return gen_lowpart (mode, temp);
1569 return convert_to_mode (mode, temp, unsignedp);
1573 /* If this is a vector shift by a scalar, see if we can do a vector
1574 shift by a vector. If so, broadcast the scalar into a vector. */
1575 if (mclass == MODE_VECTOR_INT)
1577 optab otheroptab = NULL;
1579 if (binoptab == ashl_optab)
1580 otheroptab = vashl_optab;
1581 else if (binoptab == ashr_optab)
1582 otheroptab = vashr_optab;
1583 else if (binoptab == lshr_optab)
1584 otheroptab = vlshr_optab;
1585 else if (binoptab == rotl_optab)
1586 otheroptab = vrotl_optab;
1587 else if (binoptab == rotr_optab)
1588 otheroptab = vrotr_optab;
1590 if (otheroptab && optab_handler (otheroptab, mode) != CODE_FOR_nothing)
1592 rtx vop1 = expand_vector_broadcast (mode, op1);
1595 temp = expand_binop_directly (mode, otheroptab, op0, vop1,
1596 target, unsignedp, methods, last);
1603 /* Certain vector operations can be implemented with vector permutation. */
1604 if (VECTOR_MODE_P (mode))
1606 enum tree_code tcode = ERROR_MARK;
1609 if (binoptab == vec_extract_even_optab)
1610 tcode = VEC_EXTRACT_EVEN_EXPR;
1611 else if (binoptab == vec_extract_odd_optab)
1612 tcode = VEC_EXTRACT_ODD_EXPR;
1614 if (tcode != ERROR_MARK
1615 && can_vec_perm_for_code_p (tcode, mode, &sel))
1617 temp = expand_vec_perm (mode, op0, op1, sel, target);
1618 gcc_assert (temp != NULL);
1623 /* Look for a wider mode of the same class for which we think we
1624 can open-code the operation. Check for a widening multiply at the
1625 wider mode as well. */
1627 if (CLASS_HAS_WIDER_MODES_P (mclass)
1628 && methods != OPTAB_DIRECT && methods != OPTAB_LIB)
1629 for (wider_mode = GET_MODE_WIDER_MODE (mode);
1630 wider_mode != VOIDmode;
1631 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
1633 if (optab_handler (binoptab, wider_mode) != CODE_FOR_nothing
1634 || (binoptab == smul_optab
1635 && GET_MODE_WIDER_MODE (wider_mode) != VOIDmode
1636 && (find_widening_optab_handler ((unsignedp
1638 : smul_widen_optab),
1639 GET_MODE_WIDER_MODE (wider_mode),
1641 != CODE_FOR_nothing)))
1643 rtx xop0 = op0, xop1 = op1;
1646 /* For certain integer operations, we need not actually extend
1647 the narrow operands, as long as we will truncate
1648 the results to the same narrowness. */
1650 if ((binoptab == ior_optab || binoptab == and_optab
1651 || binoptab == xor_optab
1652 || binoptab == add_optab || binoptab == sub_optab
1653 || binoptab == smul_optab || binoptab == ashl_optab)
1654 && mclass == MODE_INT)
1657 xop0 = avoid_expensive_constant (mode, binoptab, 0,
1659 if (binoptab != ashl_optab)
1660 xop1 = avoid_expensive_constant (mode, binoptab, 1,
1664 xop0 = widen_operand (xop0, wider_mode, mode, unsignedp, no_extend);
1666 /* The second operand of a shift must always be extended. */
1667 xop1 = widen_operand (xop1, wider_mode, mode, unsignedp,
1668 no_extend && binoptab != ashl_optab);
1670 temp = expand_binop (wider_mode, binoptab, xop0, xop1, NULL_RTX,
1671 unsignedp, OPTAB_DIRECT);
1674 if (mclass != MODE_INT
1675 || !TRULY_NOOP_TRUNCATION_MODES_P (mode, wider_mode))
1678 target = gen_reg_rtx (mode);
1679 convert_move (target, temp, 0);
1683 return gen_lowpart (mode, temp);
1686 delete_insns_since (last);
1690 /* If operation is commutative,
1691 try to make the first operand a register.
1692 Even better, try to make it the same as the target.
1693 Also try to make the last operand a constant. */
1694 if (commutative_optab_p (binoptab)
1695 && swap_commutative_operands_with_target (target, op0, op1))
1702 /* These can be done a word at a time. */
1703 if ((binoptab == and_optab || binoptab == ior_optab || binoptab == xor_optab)
1704 && mclass == MODE_INT
1705 && GET_MODE_SIZE (mode) > UNITS_PER_WORD
1706 && optab_handler (binoptab, word_mode) != CODE_FOR_nothing)
1711 /* If TARGET is the same as one of the operands, the REG_EQUAL note
1712 won't be accurate, so use a new target. */
1716 || !valid_multiword_target_p (target))
1717 target = gen_reg_rtx (mode);
1721 /* Do the actual arithmetic. */
1722 for (i = 0; i < GET_MODE_BITSIZE (mode) / BITS_PER_WORD; i++)
1724 rtx target_piece = operand_subword (target, i, 1, mode);
1725 rtx x = expand_binop (word_mode, binoptab,
1726 operand_subword_force (op0, i, mode),
1727 operand_subword_force (op1, i, mode),
1728 target_piece, unsignedp, next_methods);
1733 if (target_piece != x)
1734 emit_move_insn (target_piece, x);
1737 insns = get_insns ();
1740 if (i == GET_MODE_BITSIZE (mode) / BITS_PER_WORD)
1747 /* Synthesize double word shifts from single word shifts. */
1748 if ((binoptab == lshr_optab || binoptab == ashl_optab
1749 || binoptab == ashr_optab)
1750 && mclass == MODE_INT
1751 && (CONST_INT_P (op1) || optimize_insn_for_speed_p ())
1752 && GET_MODE_SIZE (mode) == 2 * UNITS_PER_WORD
1753 && GET_MODE_PRECISION (mode) == GET_MODE_BITSIZE (mode)
1754 && optab_handler (binoptab, word_mode) != CODE_FOR_nothing
1755 && optab_handler (ashl_optab, word_mode) != CODE_FOR_nothing
1756 && optab_handler (lshr_optab, word_mode) != CODE_FOR_nothing)
1758 unsigned HOST_WIDE_INT shift_mask, double_shift_mask;
1759 enum machine_mode op1_mode;
1761 double_shift_mask = targetm.shift_truncation_mask (mode);
1762 shift_mask = targetm.shift_truncation_mask (word_mode);
1763 op1_mode = GET_MODE (op1) != VOIDmode ? GET_MODE (op1) : word_mode;
1765 /* Apply the truncation to constant shifts. */
1766 if (double_shift_mask > 0 && CONST_INT_P (op1))
1767 op1 = GEN_INT (INTVAL (op1) & double_shift_mask);
1769 if (op1 == CONST0_RTX (op1_mode))
1772 /* Make sure that this is a combination that expand_doubleword_shift
1773 can handle. See the comments there for details. */
1774 if (double_shift_mask == 0
1775 || (shift_mask == BITS_PER_WORD - 1
1776 && double_shift_mask == BITS_PER_WORD * 2 - 1))
1779 rtx into_target, outof_target;
1780 rtx into_input, outof_input;
1781 int left_shift, outof_word;
1783 /* If TARGET is the same as one of the operands, the REG_EQUAL note
1784 won't be accurate, so use a new target. */
1788 || !valid_multiword_target_p (target))
1789 target = gen_reg_rtx (mode);
1793 /* OUTOF_* is the word we are shifting bits away from, and
1794 INTO_* is the word that we are shifting bits towards, thus
1795 they differ depending on the direction of the shift and
1796 WORDS_BIG_ENDIAN. */
1798 left_shift = binoptab == ashl_optab;
1799 outof_word = left_shift ^ ! WORDS_BIG_ENDIAN;
1801 outof_target = operand_subword (target, outof_word, 1, mode);
1802 into_target = operand_subword (target, 1 - outof_word, 1, mode);
1804 outof_input = operand_subword_force (op0, outof_word, mode);
1805 into_input = operand_subword_force (op0, 1 - outof_word, mode);
1807 if (expand_doubleword_shift (op1_mode, binoptab,
1808 outof_input, into_input, op1,
1809 outof_target, into_target,
1810 unsignedp, next_methods, shift_mask))
1812 insns = get_insns ();
1822 /* Synthesize double word rotates from single word shifts. */
1823 if ((binoptab == rotl_optab || binoptab == rotr_optab)
1824 && mclass == MODE_INT
1825 && CONST_INT_P (op1)
1826 && GET_MODE_PRECISION (mode) == 2 * BITS_PER_WORD
1827 && optab_handler (ashl_optab, word_mode) != CODE_FOR_nothing
1828 && optab_handler (lshr_optab, word_mode) != CODE_FOR_nothing)
1831 rtx into_target, outof_target;
1832 rtx into_input, outof_input;
1834 int shift_count, left_shift, outof_word;
1836 /* If TARGET is the same as one of the operands, the REG_EQUAL note
1837 won't be accurate, so use a new target. Do this also if target is not
1838 a REG, first because having a register instead may open optimization
1839 opportunities, and second because if target and op0 happen to be MEMs
1840 designating the same location, we would risk clobbering it too early
1841 in the code sequence we generate below. */
1846 || !valid_multiword_target_p (target))
1847 target = gen_reg_rtx (mode);
1851 shift_count = INTVAL (op1);
1853 /* OUTOF_* is the word we are shifting bits away from, and
1854 INTO_* is the word that we are shifting bits towards, thus
1855 they differ depending on the direction of the shift and
1856 WORDS_BIG_ENDIAN. */
1858 left_shift = (binoptab == rotl_optab);
1859 outof_word = left_shift ^ ! WORDS_BIG_ENDIAN;
1861 outof_target = operand_subword (target, outof_word, 1, mode);
1862 into_target = operand_subword (target, 1 - outof_word, 1, mode);
1864 outof_input = operand_subword_force (op0, outof_word, mode);
1865 into_input = operand_subword_force (op0, 1 - outof_word, mode);
1867 if (shift_count == BITS_PER_WORD)
1869 /* This is just a word swap. */
1870 emit_move_insn (outof_target, into_input);
1871 emit_move_insn (into_target, outof_input);
1876 rtx into_temp1, into_temp2, outof_temp1, outof_temp2;
1877 rtx first_shift_count, second_shift_count;
1878 optab reverse_unsigned_shift, unsigned_shift;
1880 reverse_unsigned_shift = (left_shift ^ (shift_count < BITS_PER_WORD)
1881 ? lshr_optab : ashl_optab);
1883 unsigned_shift = (left_shift ^ (shift_count < BITS_PER_WORD)
1884 ? ashl_optab : lshr_optab);
1886 if (shift_count > BITS_PER_WORD)
1888 first_shift_count = GEN_INT (shift_count - BITS_PER_WORD);
1889 second_shift_count = GEN_INT (2 * BITS_PER_WORD - shift_count);
1893 first_shift_count = GEN_INT (BITS_PER_WORD - shift_count);
1894 second_shift_count = GEN_INT (shift_count);
1897 into_temp1 = expand_binop (word_mode, unsigned_shift,
1898 outof_input, first_shift_count,
1899 NULL_RTX, unsignedp, next_methods);
1900 into_temp2 = expand_binop (word_mode, reverse_unsigned_shift,
1901 into_input, second_shift_count,
1902 NULL_RTX, unsignedp, next_methods);
1904 if (into_temp1 != 0 && into_temp2 != 0)
1905 inter = expand_binop (word_mode, ior_optab, into_temp1, into_temp2,
1906 into_target, unsignedp, next_methods);
1910 if (inter != 0 && inter != into_target)
1911 emit_move_insn (into_target, inter);
1913 outof_temp1 = expand_binop (word_mode, unsigned_shift,
1914 into_input, first_shift_count,
1915 NULL_RTX, unsignedp, next_methods);
1916 outof_temp2 = expand_binop (word_mode, reverse_unsigned_shift,
1917 outof_input, second_shift_count,
1918 NULL_RTX, unsignedp, next_methods);
1920 if (inter != 0 && outof_temp1 != 0 && outof_temp2 != 0)
1921 inter = expand_binop (word_mode, ior_optab,
1922 outof_temp1, outof_temp2,
1923 outof_target, unsignedp, next_methods);
1925 if (inter != 0 && inter != outof_target)
1926 emit_move_insn (outof_target, inter);
1929 insns = get_insns ();
1939 /* These can be done a word at a time by propagating carries. */
1940 if ((binoptab == add_optab || binoptab == sub_optab)
1941 && mclass == MODE_INT
1942 && GET_MODE_SIZE (mode) >= 2 * UNITS_PER_WORD
1943 && optab_handler (binoptab, word_mode) != CODE_FOR_nothing)
1946 optab otheroptab = binoptab == add_optab ? sub_optab : add_optab;
1947 const unsigned int nwords = GET_MODE_BITSIZE (mode) / BITS_PER_WORD;
1948 rtx carry_in = NULL_RTX, carry_out = NULL_RTX;
1949 rtx xop0, xop1, xtarget;
1951 /* We can handle either a 1 or -1 value for the carry. If STORE_FLAG
1952 value is one of those, use it. Otherwise, use 1 since it is the
1953 one easiest to get. */
1954 #if STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1
1955 int normalizep = STORE_FLAG_VALUE;
1960 /* Prepare the operands. */
1961 xop0 = force_reg (mode, op0);
1962 xop1 = force_reg (mode, op1);
1964 xtarget = gen_reg_rtx (mode);
1966 if (target == 0 || !REG_P (target) || !valid_multiword_target_p (target))
1969 /* Indicate for flow that the entire target reg is being set. */
1971 emit_clobber (xtarget);
1973 /* Do the actual arithmetic. */
1974 for (i = 0; i < nwords; i++)
1976 int index = (WORDS_BIG_ENDIAN ? nwords - i - 1 : i);
1977 rtx target_piece = operand_subword (xtarget, index, 1, mode);
1978 rtx op0_piece = operand_subword_force (xop0, index, mode);
1979 rtx op1_piece = operand_subword_force (xop1, index, mode);
1982 /* Main add/subtract of the input operands. */
1983 x = expand_binop (word_mode, binoptab,
1984 op0_piece, op1_piece,
1985 target_piece, unsignedp, next_methods);
1991 /* Store carry from main add/subtract. */
1992 carry_out = gen_reg_rtx (word_mode);
1993 carry_out = emit_store_flag_force (carry_out,
1994 (binoptab == add_optab
1997 word_mode, 1, normalizep);
2004 /* Add/subtract previous carry to main result. */
2005 newx = expand_binop (word_mode,
2006 normalizep == 1 ? binoptab : otheroptab,
2008 NULL_RTX, 1, next_methods);
2012 /* Get out carry from adding/subtracting carry in. */
2013 rtx carry_tmp = gen_reg_rtx (word_mode);
2014 carry_tmp = emit_store_flag_force (carry_tmp,
2015 (binoptab == add_optab
2018 word_mode, 1, normalizep);
2020 /* Logical-ior the two poss. carry together. */
2021 carry_out = expand_binop (word_mode, ior_optab,
2022 carry_out, carry_tmp,
2023 carry_out, 0, next_methods);
2027 emit_move_insn (target_piece, newx);
2031 if (x != target_piece)
2032 emit_move_insn (target_piece, x);
2035 carry_in = carry_out;
2038 if (i == GET_MODE_BITSIZE (mode) / (unsigned) BITS_PER_WORD)
2040 if (optab_handler (mov_optab, mode) != CODE_FOR_nothing
2041 || ! rtx_equal_p (target, xtarget))
2043 rtx temp = emit_move_insn (target, xtarget);
2045 set_dst_reg_note (temp, REG_EQUAL,
2046 gen_rtx_fmt_ee (binoptab->code, mode,
2058 delete_insns_since (last);
2061 /* Attempt to synthesize double word multiplies using a sequence of word
2062 mode multiplications. We first attempt to generate a sequence using a
2063 more efficient unsigned widening multiply, and if that fails we then
2064 try using a signed widening multiply. */
2066 if (binoptab == smul_optab
2067 && mclass == MODE_INT
2068 && GET_MODE_SIZE (mode) == 2 * UNITS_PER_WORD
2069 && optab_handler (smul_optab, word_mode) != CODE_FOR_nothing
2070 && optab_handler (add_optab, word_mode) != CODE_FOR_nothing)
2072 rtx product = NULL_RTX;
2073 if (widening_optab_handler (umul_widen_optab, mode, word_mode)
2074 != CODE_FOR_nothing)
2076 product = expand_doubleword_mult (mode, op0, op1, target,
2079 delete_insns_since (last);
2082 if (product == NULL_RTX
2083 && widening_optab_handler (smul_widen_optab, mode, word_mode)
2084 != CODE_FOR_nothing)
2086 product = expand_doubleword_mult (mode, op0, op1, target,
2089 delete_insns_since (last);
2092 if (product != NULL_RTX)
2094 if (optab_handler (mov_optab, mode) != CODE_FOR_nothing)
2096 temp = emit_move_insn (target ? target : product, product);
2097 set_dst_reg_note (temp,
2099 gen_rtx_fmt_ee (MULT, mode,
2102 target ? target : product);
2108 /* It can't be open-coded in this mode.
2109 Use a library call if one is available and caller says that's ok. */
2111 libfunc = optab_libfunc (binoptab, mode);
2113 && (methods == OPTAB_LIB || methods == OPTAB_LIB_WIDEN))
2117 enum machine_mode op1_mode = mode;
2122 if (shift_optab_p (binoptab))
2124 op1_mode = targetm.libgcc_shift_count_mode ();
2125 /* Specify unsigned here,
2126 since negative shift counts are meaningless. */
2127 op1x = convert_to_mode (op1_mode, op1, 1);
2130 if (GET_MODE (op0) != VOIDmode
2131 && GET_MODE (op0) != mode)
2132 op0 = convert_to_mode (mode, op0, unsignedp);
2134 /* Pass 1 for NO_QUEUE so we don't lose any increments
2135 if the libcall is cse'd or moved. */
2136 value = emit_library_call_value (libfunc,
2137 NULL_RTX, LCT_CONST, mode, 2,
2138 op0, mode, op1x, op1_mode);
2140 insns = get_insns ();
2143 target = gen_reg_rtx (mode);
2144 emit_libcall_block (insns, target, value,
2145 gen_rtx_fmt_ee (binoptab->code, mode, op0, op1));
2150 delete_insns_since (last);
2152 /* It can't be done in this mode. Can we do it in a wider mode? */
2154 if (! (methods == OPTAB_WIDEN || methods == OPTAB_LIB_WIDEN
2155 || methods == OPTAB_MUST_WIDEN))
2157 /* Caller says, don't even try. */
2158 delete_insns_since (entry_last);
2162 /* Compute the value of METHODS to pass to recursive calls.
2163 Don't allow widening to be tried recursively. */
2165 methods = (methods == OPTAB_LIB_WIDEN ? OPTAB_LIB : OPTAB_DIRECT);
2167 /* Look for a wider mode of the same class for which it appears we can do
2170 if (CLASS_HAS_WIDER_MODES_P (mclass))
2172 for (wider_mode = GET_MODE_WIDER_MODE (mode);
2173 wider_mode != VOIDmode;
2174 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
2176 if (find_widening_optab_handler (binoptab, wider_mode, mode, 1)
2178 || (methods == OPTAB_LIB
2179 && optab_libfunc (binoptab, wider_mode)))
2181 rtx xop0 = op0, xop1 = op1;
2184 /* For certain integer operations, we need not actually extend
2185 the narrow operands, as long as we will truncate
2186 the results to the same narrowness. */
2188 if ((binoptab == ior_optab || binoptab == and_optab
2189 || binoptab == xor_optab
2190 || binoptab == add_optab || binoptab == sub_optab
2191 || binoptab == smul_optab || binoptab == ashl_optab)
2192 && mclass == MODE_INT)
2195 xop0 = widen_operand (xop0, wider_mode, mode,
2196 unsignedp, no_extend);
2198 /* The second operand of a shift must always be extended. */
2199 xop1 = widen_operand (xop1, wider_mode, mode, unsignedp,
2200 no_extend && binoptab != ashl_optab);
2202 temp = expand_binop (wider_mode, binoptab, xop0, xop1, NULL_RTX,
2203 unsignedp, methods);
2206 if (mclass != MODE_INT
2207 || !TRULY_NOOP_TRUNCATION_MODES_P (mode, wider_mode))
2210 target = gen_reg_rtx (mode);
2211 convert_move (target, temp, 0);
2215 return gen_lowpart (mode, temp);
2218 delete_insns_since (last);
2223 delete_insns_since (entry_last);
2227 /* Expand a binary operator which has both signed and unsigned forms.
2228 UOPTAB is the optab for unsigned operations, and SOPTAB is for
2231 If we widen unsigned operands, we may use a signed wider operation instead
2232 of an unsigned wider operation, since the result would be the same. */
2235 sign_expand_binop (enum machine_mode mode, optab uoptab, optab soptab,
2236 rtx op0, rtx op1, rtx target, int unsignedp,
2237 enum optab_methods methods)
2240 optab direct_optab = unsignedp ? uoptab : soptab;
2241 struct optab_d wide_soptab;
2243 /* Do it without widening, if possible. */
2244 temp = expand_binop (mode, direct_optab, op0, op1, target,
2245 unsignedp, OPTAB_DIRECT);
2246 if (temp || methods == OPTAB_DIRECT)
2249 /* Try widening to a signed int. Make a fake signed optab that
2250 hides any signed insn for direct use. */
2251 wide_soptab = *soptab;
2252 set_optab_handler (&wide_soptab, mode, CODE_FOR_nothing);
2253 /* We don't want to generate new hash table entries from this fake
2255 wide_soptab.libcall_gen = NULL;
2257 temp = expand_binop (mode, &wide_soptab, op0, op1, target,
2258 unsignedp, OPTAB_WIDEN);
2260 /* For unsigned operands, try widening to an unsigned int. */
2261 if (temp == 0 && unsignedp)
2262 temp = expand_binop (mode, uoptab, op0, op1, target,
2263 unsignedp, OPTAB_WIDEN);
2264 if (temp || methods == OPTAB_WIDEN)
2267 /* Use the right width libcall if that exists. */
2268 temp = expand_binop (mode, direct_optab, op0, op1, target, unsignedp, OPTAB_LIB);
2269 if (temp || methods == OPTAB_LIB)
2272 /* Must widen and use a libcall, use either signed or unsigned. */
2273 temp = expand_binop (mode, &wide_soptab, op0, op1, target,
2274 unsignedp, methods);
2278 return expand_binop (mode, uoptab, op0, op1, target,
2279 unsignedp, methods);
2283 /* Generate code to perform an operation specified by UNOPPTAB
2284 on operand OP0, with two results to TARG0 and TARG1.
2285 We assume that the order of the operands for the instruction
2286 is TARG0, TARG1, OP0.
2288 Either TARG0 or TARG1 may be zero, but what that means is that
2289 the result is not actually wanted. We will generate it into
2290 a dummy pseudo-reg and discard it. They may not both be zero.
2292 Returns 1 if this operation can be performed; 0 if not. */
2295 expand_twoval_unop (optab unoptab, rtx op0, rtx targ0, rtx targ1,
2298 enum machine_mode mode = GET_MODE (targ0 ? targ0 : targ1);
2299 enum mode_class mclass;
2300 enum machine_mode wider_mode;
2301 rtx entry_last = get_last_insn ();
2304 mclass = GET_MODE_CLASS (mode);
2307 targ0 = gen_reg_rtx (mode);
2309 targ1 = gen_reg_rtx (mode);
2311 /* Record where to go back to if we fail. */
2312 last = get_last_insn ();
2314 if (optab_handler (unoptab, mode) != CODE_FOR_nothing)
2316 struct expand_operand ops[3];
2317 enum insn_code icode = optab_handler (unoptab, mode);
2319 create_fixed_operand (&ops[0], targ0);
2320 create_fixed_operand (&ops[1], targ1);
2321 create_convert_operand_from (&ops[2], op0, mode, unsignedp);
2322 if (maybe_expand_insn (icode, 3, ops))
2326 /* It can't be done in this mode. Can we do it in a wider mode? */
2328 if (CLASS_HAS_WIDER_MODES_P (mclass))
2330 for (wider_mode = GET_MODE_WIDER_MODE (mode);
2331 wider_mode != VOIDmode;
2332 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
2334 if (optab_handler (unoptab, wider_mode) != CODE_FOR_nothing)
2336 rtx t0 = gen_reg_rtx (wider_mode);
2337 rtx t1 = gen_reg_rtx (wider_mode);
2338 rtx cop0 = convert_modes (wider_mode, mode, op0, unsignedp);
2340 if (expand_twoval_unop (unoptab, cop0, t0, t1, unsignedp))
2342 convert_move (targ0, t0, unsignedp);
2343 convert_move (targ1, t1, unsignedp);
2347 delete_insns_since (last);
2352 delete_insns_since (entry_last);
2356 /* Generate code to perform an operation specified by BINOPTAB
2357 on operands OP0 and OP1, with two results to TARG1 and TARG2.
2358 We assume that the order of the operands for the instruction
2359 is TARG0, OP0, OP1, TARG1, which would fit a pattern like
2360 [(set TARG0 (operate OP0 OP1)) (set TARG1 (operate ...))].
2362 Either TARG0 or TARG1 may be zero, but what that means is that
2363 the result is not actually wanted. We will generate it into
2364 a dummy pseudo-reg and discard it. They may not both be zero.
2366 Returns 1 if this operation can be performed; 0 if not. */
2369 expand_twoval_binop (optab binoptab, rtx op0, rtx op1, rtx targ0, rtx targ1,
2372 enum machine_mode mode = GET_MODE (targ0 ? targ0 : targ1);
2373 enum mode_class mclass;
2374 enum machine_mode wider_mode;
2375 rtx entry_last = get_last_insn ();
2378 mclass = GET_MODE_CLASS (mode);
2381 targ0 = gen_reg_rtx (mode);
2383 targ1 = gen_reg_rtx (mode);
2385 /* Record where to go back to if we fail. */
2386 last = get_last_insn ();
2388 if (optab_handler (binoptab, mode) != CODE_FOR_nothing)
2390 struct expand_operand ops[4];
2391 enum insn_code icode = optab_handler (binoptab, mode);
2392 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
2393 enum machine_mode mode1 = insn_data[icode].operand[2].mode;
2394 rtx xop0 = op0, xop1 = op1;
2396 /* If we are optimizing, force expensive constants into a register. */
2397 xop0 = avoid_expensive_constant (mode0, binoptab, 0, xop0, unsignedp);
2398 xop1 = avoid_expensive_constant (mode1, binoptab, 1, xop1, unsignedp);
2400 create_fixed_operand (&ops[0], targ0);
2401 create_convert_operand_from (&ops[1], op0, mode, unsignedp);
2402 create_convert_operand_from (&ops[2], op1, mode, unsignedp);
2403 create_fixed_operand (&ops[3], targ1);
2404 if (maybe_expand_insn (icode, 4, ops))
2406 delete_insns_since (last);
2409 /* It can't be done in this mode. Can we do it in a wider mode? */
2411 if (CLASS_HAS_WIDER_MODES_P (mclass))
2413 for (wider_mode = GET_MODE_WIDER_MODE (mode);
2414 wider_mode != VOIDmode;
2415 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
2417 if (optab_handler (binoptab, wider_mode) != CODE_FOR_nothing)
2419 rtx t0 = gen_reg_rtx (wider_mode);
2420 rtx t1 = gen_reg_rtx (wider_mode);
2421 rtx cop0 = convert_modes (wider_mode, mode, op0, unsignedp);
2422 rtx cop1 = convert_modes (wider_mode, mode, op1, unsignedp);
2424 if (expand_twoval_binop (binoptab, cop0, cop1,
2427 convert_move (targ0, t0, unsignedp);
2428 convert_move (targ1, t1, unsignedp);
2432 delete_insns_since (last);
2437 delete_insns_since (entry_last);
2441 /* Expand the two-valued library call indicated by BINOPTAB, but
2442 preserve only one of the values. If TARG0 is non-NULL, the first
2443 value is placed into TARG0; otherwise the second value is placed
2444 into TARG1. Exactly one of TARG0 and TARG1 must be non-NULL. The
2445 value stored into TARG0 or TARG1 is equivalent to (CODE OP0 OP1).
2446 This routine assumes that the value returned by the library call is
2447 as if the return value was of an integral mode twice as wide as the
2448 mode of OP0. Returns 1 if the call was successful. */
2451 expand_twoval_binop_libfunc (optab binoptab, rtx op0, rtx op1,
2452 rtx targ0, rtx targ1, enum rtx_code code)
2454 enum machine_mode mode;
2455 enum machine_mode libval_mode;
2460 /* Exactly one of TARG0 or TARG1 should be non-NULL. */
2461 gcc_assert (!targ0 != !targ1);
2463 mode = GET_MODE (op0);
2464 libfunc = optab_libfunc (binoptab, mode);
2468 /* The value returned by the library function will have twice as
2469 many bits as the nominal MODE. */
2470 libval_mode = smallest_mode_for_size (2 * GET_MODE_BITSIZE (mode),
2473 libval = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
2477 /* Get the part of VAL containing the value that we want. */
2478 libval = simplify_gen_subreg (mode, libval, libval_mode,
2479 targ0 ? 0 : GET_MODE_SIZE (mode));
2480 insns = get_insns ();
2482 /* Move the into the desired location. */
2483 emit_libcall_block (insns, targ0 ? targ0 : targ1, libval,
2484 gen_rtx_fmt_ee (code, mode, op0, op1));
2490 /* Wrapper around expand_unop which takes an rtx code to specify
2491 the operation to perform, not an optab pointer. All other
2492 arguments are the same. */
2494 expand_simple_unop (enum machine_mode mode, enum rtx_code code, rtx op0,
2495 rtx target, int unsignedp)
2497 optab unop = code_to_optab[(int) code];
2500 return expand_unop (mode, unop, op0, target, unsignedp);
2506 (clz:wide (zero_extend:wide x)) - ((width wide) - (width narrow)).
2508 A similar operation can be used for clrsb. UNOPTAB says which operation
2509 we are trying to expand. */
2511 widen_leading (enum machine_mode mode, rtx op0, rtx target, optab unoptab)
2513 enum mode_class mclass = GET_MODE_CLASS (mode);
2514 if (CLASS_HAS_WIDER_MODES_P (mclass))
2516 enum machine_mode wider_mode;
2517 for (wider_mode = GET_MODE_WIDER_MODE (mode);
2518 wider_mode != VOIDmode;
2519 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
2521 if (optab_handler (unoptab, wider_mode) != CODE_FOR_nothing)
2523 rtx xop0, temp, last;
2525 last = get_last_insn ();
2528 target = gen_reg_rtx (mode);
2529 xop0 = widen_operand (op0, wider_mode, mode,
2530 unoptab != clrsb_optab, false);
2531 temp = expand_unop (wider_mode, unoptab, xop0, NULL_RTX,
2532 unoptab != clrsb_optab);
2534 temp = expand_binop (wider_mode, sub_optab, temp,
2535 GEN_INT (GET_MODE_PRECISION (wider_mode)
2536 - GET_MODE_PRECISION (mode)),
2537 target, true, OPTAB_DIRECT);
2539 delete_insns_since (last);
2548 /* Try calculating clz of a double-word quantity as two clz's of word-sized
2549 quantities, choosing which based on whether the high word is nonzero. */
2551 expand_doubleword_clz (enum machine_mode mode, rtx op0, rtx target)
2553 rtx xop0 = force_reg (mode, op0);
2554 rtx subhi = gen_highpart (word_mode, xop0);
2555 rtx sublo = gen_lowpart (word_mode, xop0);
2556 rtx hi0_label = gen_label_rtx ();
2557 rtx after_label = gen_label_rtx ();
2558 rtx seq, temp, result;
2560 /* If we were not given a target, use a word_mode register, not a
2561 'mode' register. The result will fit, and nobody is expecting
2562 anything bigger (the return type of __builtin_clz* is int). */
2564 target = gen_reg_rtx (word_mode);
2566 /* In any case, write to a word_mode scratch in both branches of the
2567 conditional, so we can ensure there is a single move insn setting
2568 'target' to tag a REG_EQUAL note on. */
2569 result = gen_reg_rtx (word_mode);
2573 /* If the high word is not equal to zero,
2574 then clz of the full value is clz of the high word. */
2575 emit_cmp_and_jump_insns (subhi, CONST0_RTX (word_mode), EQ, 0,
2576 word_mode, true, hi0_label);
2578 temp = expand_unop_direct (word_mode, clz_optab, subhi, result, true);
2583 convert_move (result, temp, true);
2585 emit_jump_insn (gen_jump (after_label));
2588 /* Else clz of the full value is clz of the low word plus the number
2589 of bits in the high word. */
2590 emit_label (hi0_label);
2592 temp = expand_unop_direct (word_mode, clz_optab, sublo, 0, true);
2595 temp = expand_binop (word_mode, add_optab, temp,
2596 GEN_INT (GET_MODE_BITSIZE (word_mode)),
2597 result, true, OPTAB_DIRECT);
2601 convert_move (result, temp, true);
2603 emit_label (after_label);
2604 convert_move (target, result, true);
2609 add_equal_note (seq, target, CLZ, xop0, 0);
2621 (lshiftrt:wide (bswap:wide x) ((width wide) - (width narrow))). */
2623 widen_bswap (enum machine_mode mode, rtx op0, rtx target)
2625 enum mode_class mclass = GET_MODE_CLASS (mode);
2626 enum machine_mode wider_mode;
2629 if (!CLASS_HAS_WIDER_MODES_P (mclass))
2632 for (wider_mode = GET_MODE_WIDER_MODE (mode);
2633 wider_mode != VOIDmode;
2634 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
2635 if (optab_handler (bswap_optab, wider_mode) != CODE_FOR_nothing)
2640 last = get_last_insn ();
2642 x = widen_operand (op0, wider_mode, mode, true, true);
2643 x = expand_unop (wider_mode, bswap_optab, x, NULL_RTX, true);
2645 gcc_assert (GET_MODE_PRECISION (wider_mode) == GET_MODE_BITSIZE (wider_mode)
2646 && GET_MODE_PRECISION (mode) == GET_MODE_BITSIZE (mode));
2648 x = expand_shift (RSHIFT_EXPR, wider_mode, x,
2649 GET_MODE_BITSIZE (wider_mode)
2650 - GET_MODE_BITSIZE (mode),
2656 target = gen_reg_rtx (mode);
2657 emit_move_insn (target, gen_lowpart (mode, x));
2660 delete_insns_since (last);
2665 /* Try calculating bswap as two bswaps of two word-sized operands. */
2668 expand_doubleword_bswap (enum machine_mode mode, rtx op, rtx target)
2672 t1 = expand_unop (word_mode, bswap_optab,
2673 operand_subword_force (op, 0, mode), NULL_RTX, true);
2674 t0 = expand_unop (word_mode, bswap_optab,
2675 operand_subword_force (op, 1, mode), NULL_RTX, true);
2677 if (target == 0 || !valid_multiword_target_p (target))
2678 target = gen_reg_rtx (mode);
2680 emit_clobber (target);
2681 emit_move_insn (operand_subword (target, 0, 1, mode), t0);
2682 emit_move_insn (operand_subword (target, 1, 1, mode), t1);
2687 /* Try calculating (parity x) as (and (popcount x) 1), where
2688 popcount can also be done in a wider mode. */
2690 expand_parity (enum machine_mode mode, rtx op0, rtx target)
2692 enum mode_class mclass = GET_MODE_CLASS (mode);
2693 if (CLASS_HAS_WIDER_MODES_P (mclass))
2695 enum machine_mode wider_mode;
2696 for (wider_mode = mode; wider_mode != VOIDmode;
2697 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
2699 if (optab_handler (popcount_optab, wider_mode) != CODE_FOR_nothing)
2701 rtx xop0, temp, last;
2703 last = get_last_insn ();
2706 target = gen_reg_rtx (mode);
2707 xop0 = widen_operand (op0, wider_mode, mode, true, false);
2708 temp = expand_unop (wider_mode, popcount_optab, xop0, NULL_RTX,
2711 temp = expand_binop (wider_mode, and_optab, temp, const1_rtx,
2712 target, true, OPTAB_DIRECT);
2714 delete_insns_since (last);
2723 /* Try calculating ctz(x) as K - clz(x & -x) ,
2724 where K is GET_MODE_PRECISION(mode) - 1.
2726 Both __builtin_ctz and __builtin_clz are undefined at zero, so we
2727 don't have to worry about what the hardware does in that case. (If
2728 the clz instruction produces the usual value at 0, which is K, the
2729 result of this code sequence will be -1; expand_ffs, below, relies
2730 on this. It might be nice to have it be K instead, for consistency
2731 with the (very few) processors that provide a ctz with a defined
2732 value, but that would take one more instruction, and it would be
2733 less convenient for expand_ffs anyway. */
2736 expand_ctz (enum machine_mode mode, rtx op0, rtx target)
2740 if (optab_handler (clz_optab, mode) == CODE_FOR_nothing)
2745 temp = expand_unop_direct (mode, neg_optab, op0, NULL_RTX, true);
2747 temp = expand_binop (mode, and_optab, op0, temp, NULL_RTX,
2748 true, OPTAB_DIRECT);
2750 temp = expand_unop_direct (mode, clz_optab, temp, NULL_RTX, true);
2752 temp = expand_binop (mode, sub_optab, GEN_INT (GET_MODE_PRECISION (mode) - 1),
2754 true, OPTAB_DIRECT);
2764 add_equal_note (seq, temp, CTZ, op0, 0);
2770 /* Try calculating ffs(x) using ctz(x) if we have that instruction, or
2771 else with the sequence used by expand_clz.
2773 The ffs builtin promises to return zero for a zero value and ctz/clz
2774 may have an undefined value in that case. If they do not give us a
2775 convenient value, we have to generate a test and branch. */
2777 expand_ffs (enum machine_mode mode, rtx op0, rtx target)
2779 HOST_WIDE_INT val = 0;
2780 bool defined_at_zero = false;
2783 if (optab_handler (ctz_optab, mode) != CODE_FOR_nothing)
2787 temp = expand_unop_direct (mode, ctz_optab, op0, 0, true);
2791 defined_at_zero = (CTZ_DEFINED_VALUE_AT_ZERO (mode, val) == 2);
2793 else if (optab_handler (clz_optab, mode) != CODE_FOR_nothing)
2796 temp = expand_ctz (mode, op0, 0);
2800 if (CLZ_DEFINED_VALUE_AT_ZERO (mode, val) == 2)
2802 defined_at_zero = true;
2803 val = (GET_MODE_PRECISION (mode) - 1) - val;
2809 if (defined_at_zero && val == -1)
2810 /* No correction needed at zero. */;
2813 /* We don't try to do anything clever with the situation found
2814 on some processors (eg Alpha) where ctz(0:mode) ==
2815 bitsize(mode). If someone can think of a way to send N to -1
2816 and leave alone all values in the range 0..N-1 (where N is a
2817 power of two), cheaper than this test-and-branch, please add it.
2819 The test-and-branch is done after the operation itself, in case
2820 the operation sets condition codes that can be recycled for this.
2821 (This is true on i386, for instance.) */
2823 rtx nonzero_label = gen_label_rtx ();
2824 emit_cmp_and_jump_insns (op0, CONST0_RTX (mode), NE, 0,
2825 mode, true, nonzero_label);
2827 convert_move (temp, GEN_INT (-1), false);
2828 emit_label (nonzero_label);
2831 /* temp now has a value in the range -1..bitsize-1. ffs is supposed
2832 to produce a value in the range 0..bitsize. */
2833 temp = expand_binop (mode, add_optab, temp, GEN_INT (1),
2834 target, false, OPTAB_DIRECT);
2841 add_equal_note (seq, temp, FFS, op0, 0);
2850 /* Extract the OMODE lowpart from VAL, which has IMODE. Under certain
2851 conditions, VAL may already be a SUBREG against which we cannot generate
2852 a further SUBREG. In this case, we expect forcing the value into a
2853 register will work around the situation. */
2856 lowpart_subreg_maybe_copy (enum machine_mode omode, rtx val,
2857 enum machine_mode imode)
2860 ret = lowpart_subreg (omode, val, imode);
2863 val = force_reg (imode, val);
2864 ret = lowpart_subreg (omode, val, imode);
2865 gcc_assert (ret != NULL);
2870 /* Expand a floating point absolute value or negation operation via a
2871 logical operation on the sign bit. */
2874 expand_absneg_bit (enum rtx_code code, enum machine_mode mode,
2875 rtx op0, rtx target)
2877 const struct real_format *fmt;
2878 int bitpos, word, nwords, i;
2879 enum machine_mode imode;
2883 /* The format has to have a simple sign bit. */
2884 fmt = REAL_MODE_FORMAT (mode);
2888 bitpos = fmt->signbit_rw;
2892 /* Don't create negative zeros if the format doesn't support them. */
2893 if (code == NEG && !fmt->has_signed_zero)
2896 if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
2898 imode = int_mode_for_mode (mode);
2899 if (imode == BLKmode)
2908 if (FLOAT_WORDS_BIG_ENDIAN)
2909 word = (GET_MODE_BITSIZE (mode) - bitpos) / BITS_PER_WORD;
2911 word = bitpos / BITS_PER_WORD;
2912 bitpos = bitpos % BITS_PER_WORD;
2913 nwords = (GET_MODE_BITSIZE (mode) + BITS_PER_WORD - 1) / BITS_PER_WORD;
2916 mask = double_int_setbit (double_int_zero, bitpos);
2918 mask = double_int_not (mask);
2922 || (nwords > 1 && !valid_multiword_target_p (target)))
2923 target = gen_reg_rtx (mode);
2929 for (i = 0; i < nwords; ++i)
2931 rtx targ_piece = operand_subword (target, i, 1, mode);
2932 rtx op0_piece = operand_subword_force (op0, i, mode);
2936 temp = expand_binop (imode, code == ABS ? and_optab : xor_optab,
2938 immed_double_int_const (mask, imode),
2939 targ_piece, 1, OPTAB_LIB_WIDEN);
2940 if (temp != targ_piece)
2941 emit_move_insn (targ_piece, temp);
2944 emit_move_insn (targ_piece, op0_piece);
2947 insns = get_insns ();
2954 temp = expand_binop (imode, code == ABS ? and_optab : xor_optab,
2955 gen_lowpart (imode, op0),
2956 immed_double_int_const (mask, imode),
2957 gen_lowpart (imode, target), 1, OPTAB_LIB_WIDEN);
2958 target = lowpart_subreg_maybe_copy (mode, temp, imode);
2960 set_dst_reg_note (get_last_insn (), REG_EQUAL,
2961 gen_rtx_fmt_e (code, mode, copy_rtx (op0)),
2968 /* As expand_unop, but will fail rather than attempt the operation in a
2969 different mode or with a libcall. */
2971 expand_unop_direct (enum machine_mode mode, optab unoptab, rtx op0, rtx target,
2974 if (optab_handler (unoptab, mode) != CODE_FOR_nothing)
2976 struct expand_operand ops[2];
2977 enum insn_code icode = optab_handler (unoptab, mode);
2978 rtx last = get_last_insn ();
2981 create_output_operand (&ops[0], target, mode);
2982 create_convert_operand_from (&ops[1], op0, mode, unsignedp);
2983 pat = maybe_gen_insn (icode, 2, ops);
2986 if (INSN_P (pat) && NEXT_INSN (pat) != NULL_RTX
2987 && ! add_equal_note (pat, ops[0].value, unoptab->code,
2988 ops[1].value, NULL_RTX))
2990 delete_insns_since (last);
2991 return expand_unop (mode, unoptab, op0, NULL_RTX, unsignedp);
2996 return ops[0].value;
3002 /* Generate code to perform an operation specified by UNOPTAB
3003 on operand OP0, with result having machine-mode MODE.
3005 UNSIGNEDP is for the case where we have to widen the operands
3006 to perform the operation. It says to use zero-extension.
3008 If TARGET is nonzero, the value
3009 is generated there, if it is convenient to do so.
3010 In all cases an rtx is returned for the locus of the value;
3011 this may or may not be TARGET. */
3014 expand_unop (enum machine_mode mode, optab unoptab, rtx op0, rtx target,
3017 enum mode_class mclass = GET_MODE_CLASS (mode);
3018 enum machine_mode wider_mode;
3022 temp = expand_unop_direct (mode, unoptab, op0, target, unsignedp);
3026 /* It can't be done in this mode. Can we open-code it in a wider mode? */
3028 /* Widening (or narrowing) clz needs special treatment. */
3029 if (unoptab == clz_optab)
3031 temp = widen_leading (mode, op0, target, unoptab);
3035 if (GET_MODE_SIZE (mode) == 2 * UNITS_PER_WORD
3036 && optab_handler (unoptab, word_mode) != CODE_FOR_nothing)
3038 temp = expand_doubleword_clz (mode, op0, target);
3046 if (unoptab == clrsb_optab)
3048 temp = widen_leading (mode, op0, target, unoptab);
3054 /* Widening (or narrowing) bswap needs special treatment. */
3055 if (unoptab == bswap_optab)
3057 temp = widen_bswap (mode, op0, target);
3061 if (GET_MODE_SIZE (mode) == 2 * UNITS_PER_WORD
3062 && optab_handler (unoptab, word_mode) != CODE_FOR_nothing)
3064 temp = expand_doubleword_bswap (mode, op0, target);
3072 if (CLASS_HAS_WIDER_MODES_P (mclass))
3073 for (wider_mode = GET_MODE_WIDER_MODE (mode);
3074 wider_mode != VOIDmode;
3075 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
3077 if (optab_handler (unoptab, wider_mode) != CODE_FOR_nothing)
3080 rtx last = get_last_insn ();
3082 /* For certain operations, we need not actually extend
3083 the narrow operand, as long as we will truncate the
3084 results to the same narrowness. */
3086 xop0 = widen_operand (xop0, wider_mode, mode, unsignedp,
3087 (unoptab == neg_optab
3088 || unoptab == one_cmpl_optab)
3089 && mclass == MODE_INT);
3091 temp = expand_unop (wider_mode, unoptab, xop0, NULL_RTX,
3096 if (mclass != MODE_INT
3097 || !TRULY_NOOP_TRUNCATION_MODES_P (mode, wider_mode))
3100 target = gen_reg_rtx (mode);
3101 convert_move (target, temp, 0);
3105 return gen_lowpart (mode, temp);
3108 delete_insns_since (last);
3112 /* These can be done a word at a time. */
3113 if (unoptab == one_cmpl_optab
3114 && mclass == MODE_INT
3115 && GET_MODE_SIZE (mode) > UNITS_PER_WORD
3116 && optab_handler (unoptab, word_mode) != CODE_FOR_nothing)
3121 if (target == 0 || target == op0 || !valid_multiword_target_p (target))
3122 target = gen_reg_rtx (mode);
3126 /* Do the actual arithmetic. */
3127 for (i = 0; i < GET_MODE_BITSIZE (mode) / BITS_PER_WORD; i++)
3129 rtx target_piece = operand_subword (target, i, 1, mode);
3130 rtx x = expand_unop (word_mode, unoptab,
3131 operand_subword_force (op0, i, mode),
3132 target_piece, unsignedp);
3134 if (target_piece != x)
3135 emit_move_insn (target_piece, x);
3138 insns = get_insns ();
3145 if (unoptab->code == NEG)
3147 /* Try negating floating point values by flipping the sign bit. */
3148 if (SCALAR_FLOAT_MODE_P (mode))
3150 temp = expand_absneg_bit (NEG, mode, op0, target);
3155 /* If there is no negation pattern, and we have no negative zero,
3156 try subtracting from zero. */
3157 if (!HONOR_SIGNED_ZEROS (mode))
3159 temp = expand_binop (mode, (unoptab == negv_optab
3160 ? subv_optab : sub_optab),
3161 CONST0_RTX (mode), op0, target,
3162 unsignedp, OPTAB_DIRECT);
3168 /* Try calculating parity (x) as popcount (x) % 2. */
3169 if (unoptab == parity_optab)
3171 temp = expand_parity (mode, op0, target);
3176 /* Try implementing ffs (x) in terms of clz (x). */
3177 if (unoptab == ffs_optab)
3179 temp = expand_ffs (mode, op0, target);
3184 /* Try implementing ctz (x) in terms of clz (x). */
3185 if (unoptab == ctz_optab)
3187 temp = expand_ctz (mode, op0, target);
3193 /* Now try a library call in this mode. */
3194 libfunc = optab_libfunc (unoptab, mode);
3200 enum machine_mode outmode = mode;
3202 /* All of these functions return small values. Thus we choose to
3203 have them return something that isn't a double-word. */
3204 if (unoptab == ffs_optab || unoptab == clz_optab || unoptab == ctz_optab
3205 || unoptab == clrsb_optab || unoptab == popcount_optab
3206 || unoptab == parity_optab)
3208 = GET_MODE (hard_libcall_value (TYPE_MODE (integer_type_node),
3209 optab_libfunc (unoptab, mode)));
3213 /* Pass 1 for NO_QUEUE so we don't lose any increments
3214 if the libcall is cse'd or moved. */
3215 value = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST, outmode,
3217 insns = get_insns ();
3220 target = gen_reg_rtx (outmode);
3221 eq_value = gen_rtx_fmt_e (unoptab->code, mode, op0);
3222 if (GET_MODE_SIZE (outmode) < GET_MODE_SIZE (mode))
3223 eq_value = simplify_gen_unary (TRUNCATE, outmode, eq_value, mode);
3224 else if (GET_MODE_SIZE (outmode) > GET_MODE_SIZE (mode))
3225 eq_value = simplify_gen_unary (ZERO_EXTEND, outmode, eq_value, mode);
3226 emit_libcall_block (insns, target, value, eq_value);
3231 /* It can't be done in this mode. Can we do it in a wider mode? */
3233 if (CLASS_HAS_WIDER_MODES_P (mclass))
3235 for (wider_mode = GET_MODE_WIDER_MODE (mode);
3236 wider_mode != VOIDmode;
3237 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
3239 if (optab_handler (unoptab, wider_mode) != CODE_FOR_nothing
3240 || optab_libfunc (unoptab, wider_mode))
3243 rtx last = get_last_insn ();
3245 /* For certain operations, we need not actually extend
3246 the narrow operand, as long as we will truncate the
3247 results to the same narrowness. */
3249 xop0 = widen_operand (xop0, wider_mode, mode, unsignedp,
3250 (unoptab == neg_optab
3251 || unoptab == one_cmpl_optab)
3252 && mclass == MODE_INT);
3254 temp = expand_unop (wider_mode, unoptab, xop0, NULL_RTX,
3257 /* If we are generating clz using wider mode, adjust the
3258 result. Similarly for clrsb. */
3259 if ((unoptab == clz_optab || unoptab == clrsb_optab)
3261 temp = expand_binop (wider_mode, sub_optab, temp,
3262 GEN_INT (GET_MODE_PRECISION (wider_mode)
3263 - GET_MODE_PRECISION (mode)),
3264 target, true, OPTAB_DIRECT);
3268 if (mclass != MODE_INT)
3271 target = gen_reg_rtx (mode);
3272 convert_move (target, temp, 0);
3276 return gen_lowpart (mode, temp);
3279 delete_insns_since (last);
3284 /* One final attempt at implementing negation via subtraction,
3285 this time allowing widening of the operand. */
3286 if (unoptab->code == NEG && !HONOR_SIGNED_ZEROS (mode))
3289 temp = expand_binop (mode,
3290 unoptab == negv_optab ? subv_optab : sub_optab,
3291 CONST0_RTX (mode), op0,
3292 target, unsignedp, OPTAB_LIB_WIDEN);
3300 /* Emit code to compute the absolute value of OP0, with result to
3301 TARGET if convenient. (TARGET may be 0.) The return value says
3302 where the result actually is to be found.
3304 MODE is the mode of the operand; the mode of the result is
3305 different but can be deduced from MODE.
3310 expand_abs_nojump (enum machine_mode mode, rtx op0, rtx target,
3311 int result_unsignedp)
3316 result_unsignedp = 1;
3318 /* First try to do it with a special abs instruction. */
3319 temp = expand_unop (mode, result_unsignedp ? abs_optab : absv_optab,
3324 /* For floating point modes, try clearing the sign bit. */
3325 if (SCALAR_FLOAT_MODE_P (mode))
3327 temp = expand_absneg_bit (ABS, mode, op0, target);
3332 /* If we have a MAX insn, we can do this as MAX (x, -x). */
3333 if (optab_handler (smax_optab, mode) != CODE_FOR_nothing
3334 && !HONOR_SIGNED_ZEROS (mode))
3336 rtx last = get_last_insn ();
3338 temp = expand_unop (mode, neg_optab, op0, NULL_RTX, 0);
3340 temp = expand_binop (mode, smax_optab, op0, temp, target, 0,
3346 delete_insns_since (last);
3349 /* If this machine has expensive jumps, we can do integer absolute
3350 value of X as (((signed) x >> (W-1)) ^ x) - ((signed) x >> (W-1)),
3351 where W is the width of MODE. */
3353 if (GET_MODE_CLASS (mode) == MODE_INT
3354 && BRANCH_COST (optimize_insn_for_speed_p (),
3357 rtx extended = expand_shift (RSHIFT_EXPR, mode, op0,
3358 GET_MODE_PRECISION (mode) - 1,
3361 temp = expand_binop (mode, xor_optab, extended, op0, target, 0,
3364 temp = expand_binop (mode, result_unsignedp ? sub_optab : subv_optab,
3365 temp, extended, target, 0, OPTAB_LIB_WIDEN);
3375 expand_abs (enum machine_mode mode, rtx op0, rtx target,
3376 int result_unsignedp, int safe)
3381 result_unsignedp = 1;
3383 temp = expand_abs_nojump (mode, op0, target, result_unsignedp);
3387 /* If that does not win, use conditional jump and negate. */
3389 /* It is safe to use the target if it is the same
3390 as the source if this is also a pseudo register */
3391 if (op0 == target && REG_P (op0)
3392 && REGNO (op0) >= FIRST_PSEUDO_REGISTER)
3395 op1 = gen_label_rtx ();
3396 if (target == 0 || ! safe
3397 || GET_MODE (target) != mode
3398 || (MEM_P (target) && MEM_VOLATILE_P (target))
3400 && REGNO (target) < FIRST_PSEUDO_REGISTER))
3401 target = gen_reg_rtx (mode);
3403 emit_move_insn (target, op0);
3406 do_compare_rtx_and_jump (target, CONST0_RTX (mode), GE, 0, mode,
3407 NULL_RTX, NULL_RTX, op1, -1);
3409 op0 = expand_unop (mode, result_unsignedp ? neg_optab : negv_optab,
3412 emit_move_insn (target, op0);
3418 /* Emit code to compute the one's complement absolute value of OP0
3419 (if (OP0 < 0) OP0 = ~OP0), with result to TARGET if convenient.
3420 (TARGET may be NULL_RTX.) The return value says where the result
3421 actually is to be found.
3423 MODE is the mode of the operand; the mode of the result is
3424 different but can be deduced from MODE. */
3427 expand_one_cmpl_abs_nojump (enum machine_mode mode, rtx op0, rtx target)
3431 /* Not applicable for floating point modes. */
3432 if (FLOAT_MODE_P (mode))
3435 /* If we have a MAX insn, we can do this as MAX (x, ~x). */
3436 if (optab_handler (smax_optab, mode) != CODE_FOR_nothing)
3438 rtx last = get_last_insn ();
3440 temp = expand_unop (mode, one_cmpl_optab, op0, NULL_RTX, 0);
3442 temp = expand_binop (mode, smax_optab, op0, temp, target, 0,
3448 delete_insns_since (last);
3451 /* If this machine has expensive jumps, we can do one's complement
3452 absolute value of X as (((signed) x >> (W-1)) ^ x). */
3454 if (GET_MODE_CLASS (mode) == MODE_INT
3455 && BRANCH_COST (optimize_insn_for_speed_p (),
3458 rtx extended = expand_shift (RSHIFT_EXPR, mode, op0,
3459 GET_MODE_PRECISION (mode) - 1,
3462 temp = expand_binop (mode, xor_optab, extended, op0, target, 0,
3472 /* A subroutine of expand_copysign, perform the copysign operation using the
3473 abs and neg primitives advertised to exist on the target. The assumption
3474 is that we have a split register file, and leaving op0 in fp registers,
3475 and not playing with subregs so much, will help the register allocator. */
3478 expand_copysign_absneg (enum machine_mode mode, rtx op0, rtx op1, rtx target,
3479 int bitpos, bool op0_is_abs)
3481 enum machine_mode imode;
3482 enum insn_code icode;
3488 /* Check if the back end provides an insn that handles signbit for the
3490 icode = optab_handler (signbit_optab, mode);
3491 if (icode != CODE_FOR_nothing)
3493 imode = insn_data[(int) icode].operand[0].mode;
3494 sign = gen_reg_rtx (imode);
3495 emit_unop_insn (icode, sign, op1, UNKNOWN);
3501 if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
3503 imode = int_mode_for_mode (mode);
3504 if (imode == BLKmode)
3506 op1 = gen_lowpart (imode, op1);
3513 if (FLOAT_WORDS_BIG_ENDIAN)
3514 word = (GET_MODE_BITSIZE (mode) - bitpos) / BITS_PER_WORD;
3516 word = bitpos / BITS_PER_WORD;
3517 bitpos = bitpos % BITS_PER_WORD;
3518 op1 = operand_subword_force (op1, word, mode);
3521 mask = double_int_setbit (double_int_zero, bitpos);
3523 sign = expand_binop (imode, and_optab, op1,
3524 immed_double_int_const (mask, imode),
3525 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3530 op0 = expand_unop (mode, abs_optab, op0, target, 0);
3537 if (target == NULL_RTX)
3538 target = copy_to_reg (op0);
3540 emit_move_insn (target, op0);
3543 label = gen_label_rtx ();
3544 emit_cmp_and_jump_insns (sign, const0_rtx, EQ, NULL_RTX, imode, 1, label);
3546 if (GET_CODE (op0) == CONST_DOUBLE)
3547 op0 = simplify_unary_operation (NEG, mode, op0, mode);
3549 op0 = expand_unop (mode, neg_optab, op0, target, 0);
3551 emit_move_insn (target, op0);
3559 /* A subroutine of expand_copysign, perform the entire copysign operation
3560 with integer bitmasks. BITPOS is the position of the sign bit; OP0_IS_ABS
3561 is true if op0 is known to have its sign bit clear. */
3564 expand_copysign_bit (enum machine_mode mode, rtx op0, rtx op1, rtx target,
3565 int bitpos, bool op0_is_abs)
3567 enum machine_mode imode;
3569 int word, nwords, i;
3572 if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
3574 imode = int_mode_for_mode (mode);
3575 if (imode == BLKmode)
3584 if (FLOAT_WORDS_BIG_ENDIAN)
3585 word = (GET_MODE_BITSIZE (mode) - bitpos) / BITS_PER_WORD;
3587 word = bitpos / BITS_PER_WORD;
3588 bitpos = bitpos % BITS_PER_WORD;
3589 nwords = (GET_MODE_BITSIZE (mode) + BITS_PER_WORD - 1) / BITS_PER_WORD;
3592 mask = double_int_setbit (double_int_zero, bitpos);
3597 || (nwords > 1 && !valid_multiword_target_p (target)))
3598 target = gen_reg_rtx (mode);
3604 for (i = 0; i < nwords; ++i)
3606 rtx targ_piece = operand_subword (target, i, 1, mode);
3607 rtx op0_piece = operand_subword_force (op0, i, mode);
3613 = expand_binop (imode, and_optab, op0_piece,
3614 immed_double_int_const (double_int_not (mask),
3616 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3618 op1 = expand_binop (imode, and_optab,
3619 operand_subword_force (op1, i, mode),
3620 immed_double_int_const (mask, imode),
3621 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3623 temp = expand_binop (imode, ior_optab, op0_piece, op1,
3624 targ_piece, 1, OPTAB_LIB_WIDEN);
3625 if (temp != targ_piece)
3626 emit_move_insn (targ_piece, temp);
3629 emit_move_insn (targ_piece, op0_piece);
3632 insns = get_insns ();
3639 op1 = expand_binop (imode, and_optab, gen_lowpart (imode, op1),
3640 immed_double_int_const (mask, imode),
3641 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3643 op0 = gen_lowpart (imode, op0);
3645 op0 = expand_binop (imode, and_optab, op0,
3646 immed_double_int_const (double_int_not (mask),
3648 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3650 temp = expand_binop (imode, ior_optab, op0, op1,
3651 gen_lowpart (imode, target), 1, OPTAB_LIB_WIDEN);
3652 target = lowpart_subreg_maybe_copy (mode, temp, imode);
3658 /* Expand the C99 copysign operation. OP0 and OP1 must be the same
3659 scalar floating point mode. Return NULL if we do not know how to
3660 expand the operation inline. */
3663 expand_copysign (rtx op0, rtx op1, rtx target)
3665 enum machine_mode mode = GET_MODE (op0);
3666 const struct real_format *fmt;
3670 gcc_assert (SCALAR_FLOAT_MODE_P (mode));
3671 gcc_assert (GET_MODE (op1) == mode);
3673 /* First try to do it with a special instruction. */
3674 temp = expand_binop (mode, copysign_optab, op0, op1,
3675 target, 0, OPTAB_DIRECT);
3679 fmt = REAL_MODE_FORMAT (mode);
3680 if (fmt == NULL || !fmt->has_signed_zero)
3684 if (GET_CODE (op0) == CONST_DOUBLE)
3686 if (real_isneg (CONST_DOUBLE_REAL_VALUE (op0)))
3687 op0 = simplify_unary_operation (ABS, mode, op0, mode);
3691 if (fmt->signbit_ro >= 0
3692 && (GET_CODE (op0) == CONST_DOUBLE
3693 || (optab_handler (neg_optab, mode) != CODE_FOR_nothing
3694 && optab_handler (abs_optab, mode) != CODE_FOR_nothing)))
3696 temp = expand_copysign_absneg (mode, op0, op1, target,
3697 fmt->signbit_ro, op0_is_abs);
3702 if (fmt->signbit_rw < 0)
3704 return expand_copysign_bit (mode, op0, op1, target,
3705 fmt->signbit_rw, op0_is_abs);
3708 /* Generate an instruction whose insn-code is INSN_CODE,
3709 with two operands: an output TARGET and an input OP0.
3710 TARGET *must* be nonzero, and the output is always stored there.
3711 CODE is an rtx code such that (CODE OP0) is an rtx that describes
3712 the value that is stored into TARGET.
3714 Return false if expansion failed. */
3717 maybe_emit_unop_insn (enum insn_code icode, rtx target, rtx op0,
3720 struct expand_operand ops[2];
3723 create_output_operand (&ops[0], target, GET_MODE (target));
3724 create_input_operand (&ops[1], op0, GET_MODE (op0));
3725 pat = maybe_gen_insn (icode, 2, ops);
3729 if (INSN_P (pat) && NEXT_INSN (pat) != NULL_RTX && code != UNKNOWN)
3730 add_equal_note (pat, ops[0].value, code, ops[1].value, NULL_RTX);
3734 if (ops[0].value != target)
3735 emit_move_insn (target, ops[0].value);
3738 /* Generate an instruction whose insn-code is INSN_CODE,
3739 with two operands: an output TARGET and an input OP0.
3740 TARGET *must* be nonzero, and the output is always stored there.
3741 CODE is an rtx code such that (CODE OP0) is an rtx that describes
3742 the value that is stored into TARGET. */
3745 emit_unop_insn (enum insn_code icode, rtx target, rtx op0, enum rtx_code code)
3747 bool ok = maybe_emit_unop_insn (icode, target, op0, code);
3751 struct no_conflict_data
3753 rtx target, first, insn;
3757 /* Called via note_stores by emit_libcall_block. Set P->must_stay if
3758 the currently examined clobber / store has to stay in the list of
3759 insns that constitute the actual libcall block. */
3761 no_conflict_move_test (rtx dest, const_rtx set, void *p0)
3763 struct no_conflict_data *p= (struct no_conflict_data *) p0;
3765 /* If this inns directly contributes to setting the target, it must stay. */
3766 if (reg_overlap_mentioned_p (p->target, dest))
3767 p->must_stay = true;
3768 /* If we haven't committed to keeping any other insns in the list yet,
3769 there is nothing more to check. */
3770 else if (p->insn == p->first)
3772 /* If this insn sets / clobbers a register that feeds one of the insns
3773 already in the list, this insn has to stay too. */
3774 else if (reg_overlap_mentioned_p (dest, PATTERN (p->first))
3775 || (CALL_P (p->first) && (find_reg_fusage (p->first, USE, dest)))
3776 || reg_used_between_p (dest, p->first, p->insn)
3777 /* Likewise if this insn depends on a register set by a previous
3778 insn in the list, or if it sets a result (presumably a hard
3779 register) that is set or clobbered by a previous insn.
3780 N.B. the modified_*_p (SET_DEST...) tests applied to a MEM
3781 SET_DEST perform the former check on the address, and the latter
3782 check on the MEM. */
3783 || (GET_CODE (set) == SET
3784 && (modified_in_p (SET_SRC (set), p->first)
3785 || modified_in_p (SET_DEST (set), p->first)
3786 || modified_between_p (SET_SRC (set), p->first, p->insn)
3787 || modified_between_p (SET_DEST (set), p->first, p->insn))))
3788 p->must_stay = true;
3792 /* Emit code to make a call to a constant function or a library call.
3794 INSNS is a list containing all insns emitted in the call.
3795 These insns leave the result in RESULT. Our block is to copy RESULT
3796 to TARGET, which is logically equivalent to EQUIV.
3798 We first emit any insns that set a pseudo on the assumption that these are
3799 loading constants into registers; doing so allows them to be safely cse'ed
3800 between blocks. Then we emit all the other insns in the block, followed by
3801 an insn to move RESULT to TARGET. This last insn will have a REQ_EQUAL
3802 note with an operand of EQUIV. */
3805 emit_libcall_block (rtx insns, rtx target, rtx result, rtx equiv)
3807 rtx final_dest = target;
3808 rtx next, last, insn;
3810 /* If this is a reg with REG_USERVAR_P set, then it could possibly turn
3811 into a MEM later. Protect the libcall block from this change. */
3812 if (! REG_P (target) || REG_USERVAR_P (target))
3813 target = gen_reg_rtx (GET_MODE (target));
3815 /* If we're using non-call exceptions, a libcall corresponding to an
3816 operation that may trap may also trap. */
3817 /* ??? See the comment in front of make_reg_eh_region_note. */
3818 if (cfun->can_throw_non_call_exceptions && may_trap_p (equiv))
3820 for (insn = insns; insn; insn = NEXT_INSN (insn))
3823 rtx note = find_reg_note (insn, REG_EH_REGION, NULL_RTX);
3826 int lp_nr = INTVAL (XEXP (note, 0));
3827 if (lp_nr == 0 || lp_nr == INT_MIN)
3828 remove_note (insn, note);
3834 /* Look for any CALL_INSNs in this sequence, and attach a REG_EH_REGION
3835 reg note to indicate that this call cannot throw or execute a nonlocal
3836 goto (unless there is already a REG_EH_REGION note, in which case
3838 for (insn = insns; insn; insn = NEXT_INSN (insn))
3840 make_reg_eh_region_note_nothrow_nononlocal (insn);
3843 /* First emit all insns that set pseudos. Remove them from the list as
3844 we go. Avoid insns that set pseudos which were referenced in previous
3845 insns. These can be generated by move_by_pieces, for example,
3846 to update an address. Similarly, avoid insns that reference things
3847 set in previous insns. */
3849 for (insn = insns; insn; insn = next)
3851 rtx set = single_set (insn);
3853 next = NEXT_INSN (insn);
3855 if (set != 0 && REG_P (SET_DEST (set))
3856 && REGNO (SET_DEST (set)) >= FIRST_PSEUDO_REGISTER)
3858 struct no_conflict_data data;
3860 data.target = const0_rtx;
3864 note_stores (PATTERN (insn), no_conflict_move_test, &data);
3865 if (! data.must_stay)
3867 if (PREV_INSN (insn))
3868 NEXT_INSN (PREV_INSN (insn)) = next;
3873 PREV_INSN (next) = PREV_INSN (insn);
3879 /* Some ports use a loop to copy large arguments onto the stack.
3880 Don't move anything outside such a loop. */
3885 /* Write the remaining insns followed by the final copy. */
3886 for (insn = insns; insn; insn = next)
3888 next = NEXT_INSN (insn);
3893 last = emit_move_insn (target, result);
3894 set_dst_reg_note (last, REG_EQUAL, copy_rtx (equiv), target);
3896 if (final_dest != target)
3897 emit_move_insn (final_dest, target);
3900 /* Nonzero if we can perform a comparison of mode MODE straightforwardly.
3901 PURPOSE describes how this comparison will be used. CODE is the rtx
3902 comparison code we will be using.
3904 ??? Actually, CODE is slightly weaker than that. A target is still
3905 required to implement all of the normal bcc operations, but not
3906 required to implement all (or any) of the unordered bcc operations. */
3909 can_compare_p (enum rtx_code code, enum machine_mode mode,
3910 enum can_compare_purpose purpose)
3913 test = gen_rtx_fmt_ee (code, mode, const0_rtx, const0_rtx);
3916 enum insn_code icode;
3918 if (purpose == ccp_jump
3919 && (icode = optab_handler (cbranch_optab, mode)) != CODE_FOR_nothing
3920 && insn_operand_matches (icode, 0, test))
3922 if (purpose == ccp_store_flag
3923 && (icode = optab_handler (cstore_optab, mode)) != CODE_FOR_nothing
3924 && insn_operand_matches (icode, 1, test))
3926 if (purpose == ccp_cmov
3927 && optab_handler (cmov_optab, mode) != CODE_FOR_nothing)
3930 mode = GET_MODE_WIDER_MODE (mode);
3931 PUT_MODE (test, mode);
3933 while (mode != VOIDmode);
3938 /* This function is called when we are going to emit a compare instruction that
3939 compares the values found in *PX and *PY, using the rtl operator COMPARISON.
3941 *PMODE is the mode of the inputs (in case they are const_int).
3942 *PUNSIGNEDP nonzero says that the operands are unsigned;
3943 this matters if they need to be widened (as given by METHODS).
3945 If they have mode BLKmode, then SIZE specifies the size of both operands.
3947 This function performs all the setup necessary so that the caller only has
3948 to emit a single comparison insn. This setup can involve doing a BLKmode
3949 comparison or emitting a library call to perform the comparison if no insn
3950 is available to handle it.
3951 The values which are passed in through pointers can be modified; the caller
3952 should perform the comparison on the modified values. Constant
3953 comparisons must have already been folded. */
3956 prepare_cmp_insn (rtx x, rtx y, enum rtx_code comparison, rtx size,
3957 int unsignedp, enum optab_methods methods,
3958 rtx *ptest, enum machine_mode *pmode)
3960 enum machine_mode mode = *pmode;
3962 enum machine_mode cmp_mode;
3963 enum mode_class mclass;
3965 /* The other methods are not needed. */
3966 gcc_assert (methods == OPTAB_DIRECT || methods == OPTAB_WIDEN
3967 || methods == OPTAB_LIB_WIDEN);
3969 /* If we are optimizing, force expensive constants into a register. */
3970 if (CONSTANT_P (x) && optimize
3971 && (rtx_cost (x, COMPARE, 0, optimize_insn_for_speed_p ())
3972 > COSTS_N_INSNS (1)))
3973 x = force_reg (mode, x);
3975 if (CONSTANT_P (y) && optimize
3976 && (rtx_cost (y, COMPARE, 1, optimize_insn_for_speed_p ())
3977 > COSTS_N_INSNS (1)))
3978 y = force_reg (mode, y);
3981 /* Make sure if we have a canonical comparison. The RTL
3982 documentation states that canonical comparisons are required only
3983 for targets which have cc0. */
3984 gcc_assert (!CONSTANT_P (x) || CONSTANT_P (y));
3987 /* Don't let both operands fail to indicate the mode. */
3988 if (GET_MODE (x) == VOIDmode && GET_MODE (y) == VOIDmode)
3989 x = force_reg (mode, x);
3990 if (mode == VOIDmode)
3991 mode = GET_MODE (x) != VOIDmode ? GET_MODE (x) : GET_MODE (y);
3993 /* Handle all BLKmode compares. */
3995 if (mode == BLKmode)
3997 enum machine_mode result_mode;
3998 enum insn_code cmp_code;
4003 = GEN_INT (MIN (MEM_ALIGN (x), MEM_ALIGN (y)) / BITS_PER_UNIT);
4007 /* Try to use a memory block compare insn - either cmpstr
4008 or cmpmem will do. */
4009 for (cmp_mode = GET_CLASS_NARROWEST_MODE (MODE_INT);
4010 cmp_mode != VOIDmode;
4011 cmp_mode = GET_MODE_WIDER_MODE (cmp_mode))
4013 cmp_code = direct_optab_handler (cmpmem_optab, cmp_mode);
4014 if (cmp_code == CODE_FOR_nothing)
4015 cmp_code = direct_optab_handler (cmpstr_optab, cmp_mode);
4016 if (cmp_code == CODE_FOR_nothing)
4017 cmp_code = direct_optab_handler (cmpstrn_optab, cmp_mode);
4018 if (cmp_code == CODE_FOR_nothing)
4021 /* Must make sure the size fits the insn's mode. */
4022 if ((CONST_INT_P (size)
4023 && INTVAL (size) >= (1 << GET_MODE_BITSIZE (cmp_mode)))
4024 || (GET_MODE_BITSIZE (GET_MODE (size))
4025 > GET_MODE_BITSIZE (cmp_mode)))
4028 result_mode = insn_data[cmp_code].operand[0].mode;
4029 result = gen_reg_rtx (result_mode);
4030 size = convert_to_mode (cmp_mode, size, 1);
4031 emit_insn (GEN_FCN (cmp_code) (result, x, y, size, opalign));
4033 *ptest = gen_rtx_fmt_ee (comparison, VOIDmode, result, const0_rtx);
4034 *pmode = result_mode;
4038 if (methods != OPTAB_LIB && methods != OPTAB_LIB_WIDEN)
4041 /* Otherwise call a library function, memcmp. */
4042 libfunc = memcmp_libfunc;
4043 length_type = sizetype;
4044 result_mode = TYPE_MODE (integer_type_node);
4045 cmp_mode = TYPE_MODE (length_type);
4046 size = convert_to_mode (TYPE_MODE (length_type), size,
4047 TYPE_UNSIGNED (length_type));
4049 result = emit_library_call_value (libfunc, 0, LCT_PURE,
4055 *ptest = gen_rtx_fmt_ee (comparison, VOIDmode, result, const0_rtx);
4056 *pmode = result_mode;
4060 /* Don't allow operands to the compare to trap, as that can put the
4061 compare and branch in different basic blocks. */
4062 if (cfun->can_throw_non_call_exceptions)
4065 x = force_reg (mode, x);
4067 y = force_reg (mode, y);
4070 if (GET_MODE_CLASS (mode) == MODE_CC)
4072 gcc_assert (can_compare_p (comparison, CCmode, ccp_jump));
4073 *ptest = gen_rtx_fmt_ee (comparison, VOIDmode, x, y);
4077 mclass = GET_MODE_CLASS (mode);
4078 test = gen_rtx_fmt_ee (comparison, VOIDmode, x, y);
4082 enum insn_code icode;
4083 icode = optab_handler (cbranch_optab, cmp_mode);
4084 if (icode != CODE_FOR_nothing
4085 && insn_operand_matches (icode, 0, test))
4087 rtx last = get_last_insn ();
4088 rtx op0 = prepare_operand (icode, x, 1, mode, cmp_mode, unsignedp);
4089 rtx op1 = prepare_operand (icode, y, 2, mode, cmp_mode, unsignedp);
4091 && insn_operand_matches (icode, 1, op0)
4092 && insn_operand_matches (icode, 2, op1))
4094 XEXP (test, 0) = op0;
4095 XEXP (test, 1) = op1;
4100 delete_insns_since (last);
4103 if (methods == OPTAB_DIRECT || !CLASS_HAS_WIDER_MODES_P (mclass))
4105 cmp_mode = GET_MODE_WIDER_MODE (cmp_mode);
4107 while (cmp_mode != VOIDmode);
4109 if (methods != OPTAB_LIB_WIDEN)
4112 if (!SCALAR_FLOAT_MODE_P (mode))
4116 /* Handle a libcall just for the mode we are using. */
4117 libfunc = optab_libfunc (cmp_optab, mode);
4118 gcc_assert (libfunc);
4120 /* If we want unsigned, and this mode has a distinct unsigned
4121 comparison routine, use that. */
4124 rtx ulibfunc = optab_libfunc (ucmp_optab, mode);
4129 result = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
4130 targetm.libgcc_cmp_return_mode (),
4131 2, x, mode, y, mode);
4133 /* There are two kinds of comparison routines. Biased routines
4134 return 0/1/2, and unbiased routines return -1/0/1. Other parts
4135 of gcc expect that the comparison operation is equivalent
4136 to the modified comparison. For signed comparisons compare the
4137 result against 1 in the biased case, and zero in the unbiased
4138 case. For unsigned comparisons always compare against 1 after
4139 biasing the unbiased result by adding 1. This gives us a way to
4141 The comparisons in the fixed-point helper library are always
4146 if (!TARGET_LIB_INT_CMP_BIASED && !ALL_FIXED_POINT_MODE_P (mode))
4149 x = plus_constant (result, 1);
4155 prepare_cmp_insn (x, y, comparison, NULL_RTX, unsignedp, methods,
4159 prepare_float_lib_cmp (x, y, comparison, ptest, pmode);
4167 /* Before emitting an insn with code ICODE, make sure that X, which is going
4168 to be used for operand OPNUM of the insn, is converted from mode MODE to
4169 WIDER_MODE (UNSIGNEDP determines whether it is an unsigned conversion), and
4170 that it is accepted by the operand predicate. Return the new value. */
4173 prepare_operand (enum insn_code icode, rtx x, int opnum, enum machine_mode mode,
4174 enum machine_mode wider_mode, int unsignedp)
4176 if (mode != wider_mode)
4177 x = convert_modes (wider_mode, mode, x, unsignedp);
4179 if (!insn_operand_matches (icode, opnum, x))
4181 if (reload_completed)
4183 x = copy_to_mode_reg (insn_data[(int) icode].operand[opnum].mode, x);
4189 /* Subroutine of emit_cmp_and_jump_insns; this function is called when we know
4190 we can do the branch. */
4193 emit_cmp_and_jump_insn_1 (rtx test, enum machine_mode mode, rtx label)
4195 enum machine_mode optab_mode;
4196 enum mode_class mclass;
4197 enum insn_code icode;
4199 mclass = GET_MODE_CLASS (mode);
4200 optab_mode = (mclass == MODE_CC) ? CCmode : mode;
4201 icode = optab_handler (cbranch_optab, optab_mode);
4203 gcc_assert (icode != CODE_FOR_nothing);
4204 gcc_assert (insn_operand_matches (icode, 0, test));
4205 emit_jump_insn (GEN_FCN (icode) (test, XEXP (test, 0), XEXP (test, 1), label));
4208 /* Generate code to compare X with Y so that the condition codes are
4209 set and to jump to LABEL if the condition is true. If X is a
4210 constant and Y is not a constant, then the comparison is swapped to
4211 ensure that the comparison RTL has the canonical form.
4213 UNSIGNEDP nonzero says that X and Y are unsigned; this matters if they
4214 need to be widened. UNSIGNEDP is also used to select the proper
4215 branch condition code.
4217 If X and Y have mode BLKmode, then SIZE specifies the size of both X and Y.
4219 MODE is the mode of the inputs (in case they are const_int).
4221 COMPARISON is the rtl operator to compare with (EQ, NE, GT, etc.).
4222 It will be potentially converted into an unsigned variant based on
4223 UNSIGNEDP to select a proper jump instruction. */
4226 emit_cmp_and_jump_insns (rtx x, rtx y, enum rtx_code comparison, rtx size,
4227 enum machine_mode mode, int unsignedp, rtx label)
4229 rtx op0 = x, op1 = y;
4232 /* Swap operands and condition to ensure canonical RTL. */
4233 if (swap_commutative_operands_p (x, y)
4234 && can_compare_p (swap_condition (comparison), mode, ccp_jump))
4237 comparison = swap_condition (comparison);
4240 /* If OP0 is still a constant, then both X and Y must be constants
4241 or the opposite comparison is not supported. Force X into a register
4242 to create canonical RTL. */
4243 if (CONSTANT_P (op0))
4244 op0 = force_reg (mode, op0);
4247 comparison = unsigned_condition (comparison);
4249 prepare_cmp_insn (op0, op1, comparison, size, unsignedp, OPTAB_LIB_WIDEN,
4251 emit_cmp_and_jump_insn_1 (test, mode, label);
4255 /* Emit a library call comparison between floating point X and Y.
4256 COMPARISON is the rtl operator to compare with (EQ, NE, GT, etc.). */
4259 prepare_float_lib_cmp (rtx x, rtx y, enum rtx_code comparison,
4260 rtx *ptest, enum machine_mode *pmode)
4262 enum rtx_code swapped = swap_condition (comparison);
4263 enum rtx_code reversed = reverse_condition_maybe_unordered (comparison);
4264 enum machine_mode orig_mode = GET_MODE (x);
4265 enum machine_mode mode, cmp_mode;
4266 rtx true_rtx, false_rtx;
4267 rtx value, target, insns, equiv;
4269 bool reversed_p = false;
4270 cmp_mode = targetm.libgcc_cmp_return_mode ();
4272 for (mode = orig_mode;
4274 mode = GET_MODE_WIDER_MODE (mode))
4276 if (code_to_optab[comparison]
4277 && (libfunc = optab_libfunc (code_to_optab[comparison], mode)))
4280 if (code_to_optab[swapped]
4281 && (libfunc = optab_libfunc (code_to_optab[swapped], mode)))
4284 tmp = x; x = y; y = tmp;
4285 comparison = swapped;
4289 if (code_to_optab[reversed]
4290 && (libfunc = optab_libfunc (code_to_optab[reversed], mode)))
4292 comparison = reversed;
4298 gcc_assert (mode != VOIDmode);
4300 if (mode != orig_mode)
4302 x = convert_to_mode (mode, x, 0);
4303 y = convert_to_mode (mode, y, 0);
4306 /* Attach a REG_EQUAL note describing the semantics of the libcall to
4307 the RTL. The allows the RTL optimizers to delete the libcall if the
4308 condition can be determined at compile-time. */
4309 if (comparison == UNORDERED
4310 || FLOAT_LIB_COMPARE_RETURNS_BOOL (mode, comparison))
4312 true_rtx = const_true_rtx;
4313 false_rtx = const0_rtx;
4320 true_rtx = const0_rtx;
4321 false_rtx = const_true_rtx;
4325 true_rtx = const_true_rtx;
4326 false_rtx = const0_rtx;
4330 true_rtx = const1_rtx;
4331 false_rtx = const0_rtx;
4335 true_rtx = const0_rtx;
4336 false_rtx = constm1_rtx;
4340 true_rtx = constm1_rtx;
4341 false_rtx = const0_rtx;
4345 true_rtx = const0_rtx;
4346 false_rtx = const1_rtx;
4354 if (comparison == UNORDERED)
4356 rtx temp = simplify_gen_relational (NE, cmp_mode, mode, x, x);
4357 equiv = simplify_gen_relational (NE, cmp_mode, mode, y, y);
4358 equiv = simplify_gen_ternary (IF_THEN_ELSE, cmp_mode, cmp_mode,
4359 temp, const_true_rtx, equiv);
4363 equiv = simplify_gen_relational (comparison, cmp_mode, mode, x, y);
4364 if (! FLOAT_LIB_COMPARE_RETURNS_BOOL (mode, comparison))
4365 equiv = simplify_gen_ternary (IF_THEN_ELSE, cmp_mode, cmp_mode,
4366 equiv, true_rtx, false_rtx);
4370 value = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
4371 cmp_mode, 2, x, mode, y, mode);
4372 insns = get_insns ();
4375 target = gen_reg_rtx (cmp_mode);
4376 emit_libcall_block (insns, target, value, equiv);
4378 if (comparison == UNORDERED
4379 || FLOAT_LIB_COMPARE_RETURNS_BOOL (mode, comparison)
4381 *ptest = gen_rtx_fmt_ee (reversed_p ? EQ : NE, VOIDmode, target, false_rtx);
4383 *ptest = gen_rtx_fmt_ee (comparison, VOIDmode, target, const0_rtx);
4388 /* Generate code to indirectly jump to a location given in the rtx LOC. */
4391 emit_indirect_jump (rtx loc)
4393 struct expand_operand ops[1];
4395 create_address_operand (&ops[0], loc);
4396 expand_jump_insn (CODE_FOR_indirect_jump, 1, ops);
4400 #ifdef HAVE_conditional_move
4402 /* Emit a conditional move instruction if the machine supports one for that
4403 condition and machine mode.
4405 OP0 and OP1 are the operands that should be compared using CODE. CMODE is
4406 the mode to use should they be constants. If it is VOIDmode, they cannot
4409 OP2 should be stored in TARGET if the comparison is true, otherwise OP3
4410 should be stored there. MODE is the mode to use should they be constants.
4411 If it is VOIDmode, they cannot both be constants.
4413 The result is either TARGET (perhaps modified) or NULL_RTX if the operation
4414 is not supported. */
4417 emit_conditional_move (rtx target, enum rtx_code code, rtx op0, rtx op1,
4418 enum machine_mode cmode, rtx op2, rtx op3,
4419 enum machine_mode mode, int unsignedp)
4421 rtx tem, comparison, last;
4422 enum insn_code icode;
4423 enum rtx_code reversed;
4425 /* If one operand is constant, make it the second one. Only do this
4426 if the other operand is not constant as well. */
4428 if (swap_commutative_operands_p (op0, op1))
4433 code = swap_condition (code);
4436 /* get_condition will prefer to generate LT and GT even if the old
4437 comparison was against zero, so undo that canonicalization here since
4438 comparisons against zero are cheaper. */
4439 if (code == LT && op1 == const1_rtx)
4440 code = LE, op1 = const0_rtx;
4441 else if (code == GT && op1 == constm1_rtx)
4442 code = GE, op1 = const0_rtx;
4444 if (cmode == VOIDmode)
4445 cmode = GET_MODE (op0);
4447 if (swap_commutative_operands_p (op2, op3)
4448 && ((reversed = reversed_comparison_code_parts (code, op0, op1, NULL))
4457 if (mode == VOIDmode)
4458 mode = GET_MODE (op2);
4460 icode = direct_optab_handler (movcc_optab, mode);
4462 if (icode == CODE_FOR_nothing)
4466 target = gen_reg_rtx (mode);
4468 code = unsignedp ? unsigned_condition (code) : code;
4469 comparison = simplify_gen_relational (code, VOIDmode, cmode, op0, op1);
4471 /* We can get const0_rtx or const_true_rtx in some circumstances. Just
4472 return NULL and let the caller figure out how best to deal with this
4474 if (!COMPARISON_P (comparison))
4477 do_pending_stack_adjust ();
4478 last = get_last_insn ();
4479 prepare_cmp_insn (XEXP (comparison, 0), XEXP (comparison, 1),
4480 GET_CODE (comparison), NULL_RTX, unsignedp, OPTAB_WIDEN,
4481 &comparison, &cmode);
4484 struct expand_operand ops[4];
4486 create_output_operand (&ops[0], target, mode);
4487 create_fixed_operand (&ops[1], comparison);
4488 create_input_operand (&ops[2], op2, mode);
4489 create_input_operand (&ops[3], op3, mode);
4490 if (maybe_expand_insn (icode, 4, ops))
4492 if (ops[0].value != target)
4493 convert_move (target, ops[0].value, false);
4497 delete_insns_since (last);
4501 /* Return nonzero if a conditional move of mode MODE is supported.
4503 This function is for combine so it can tell whether an insn that looks
4504 like a conditional move is actually supported by the hardware. If we
4505 guess wrong we lose a bit on optimization, but that's it. */
4506 /* ??? sparc64 supports conditionally moving integers values based on fp
4507 comparisons, and vice versa. How do we handle them? */
4510 can_conditionally_move_p (enum machine_mode mode)
4512 if (direct_optab_handler (movcc_optab, mode) != CODE_FOR_nothing)
4518 #endif /* HAVE_conditional_move */
4520 /* Emit a conditional addition instruction if the machine supports one for that
4521 condition and machine mode.
4523 OP0 and OP1 are the operands that should be compared using CODE. CMODE is
4524 the mode to use should they be constants. If it is VOIDmode, they cannot
4527 OP2 should be stored in TARGET if the comparison is true, otherwise OP2+OP3
4528 should be stored there. MODE is the mode to use should they be constants.
4529 If it is VOIDmode, they cannot both be constants.
4531 The result is either TARGET (perhaps modified) or NULL_RTX if the operation
4532 is not supported. */
4535 emit_conditional_add (rtx target, enum rtx_code code, rtx op0, rtx op1,
4536 enum machine_mode cmode, rtx op2, rtx op3,
4537 enum machine_mode mode, int unsignedp)
4539 rtx tem, comparison, last;
4540 enum insn_code icode;
4541 enum rtx_code reversed;
4543 /* If one operand is constant, make it the second one. Only do this
4544 if the other operand is not constant as well. */
4546 if (swap_commutative_operands_p (op0, op1))
4551 code = swap_condition (code);
4554 /* get_condition will prefer to generate LT and GT even if the old
4555 comparison was against zero, so undo that canonicalization here since
4556 comparisons against zero are cheaper. */
4557 if (code == LT && op1 == const1_rtx)
4558 code = LE, op1 = const0_rtx;
4559 else if (code == GT && op1 == constm1_rtx)
4560 code = GE, op1 = const0_rtx;
4562 if (cmode == VOIDmode)
4563 cmode = GET_MODE (op0);
4565 if (swap_commutative_operands_p (op2, op3)
4566 && ((reversed = reversed_comparison_code_parts (code, op0, op1, NULL))
4575 if (mode == VOIDmode)
4576 mode = GET_MODE (op2);
4578 icode = optab_handler (addcc_optab, mode);
4580 if (icode == CODE_FOR_nothing)
4584 target = gen_reg_rtx (mode);
4586 code = unsignedp ? unsigned_condition (code) : code;
4587 comparison = simplify_gen_relational (code, VOIDmode, cmode, op0, op1);
4589 /* We can get const0_rtx or const_true_rtx in some circumstances. Just
4590 return NULL and let the caller figure out how best to deal with this
4592 if (!COMPARISON_P (comparison))
4595 do_pending_stack_adjust ();
4596 last = get_last_insn ();
4597 prepare_cmp_insn (XEXP (comparison, 0), XEXP (comparison, 1),
4598 GET_CODE (comparison), NULL_RTX, unsignedp, OPTAB_WIDEN,
4599 &comparison, &cmode);
4602 struct expand_operand ops[4];
4604 create_output_operand (&ops[0], target, mode);
4605 create_fixed_operand (&ops[1], comparison);
4606 create_input_operand (&ops[2], op2, mode);
4607 create_input_operand (&ops[3], op3, mode);
4608 if (maybe_expand_insn (icode, 4, ops))
4610 if (ops[0].value != target)
4611 convert_move (target, ops[0].value, false);
4615 delete_insns_since (last);
4619 /* These functions attempt to generate an insn body, rather than
4620 emitting the insn, but if the gen function already emits them, we
4621 make no attempt to turn them back into naked patterns. */
4623 /* Generate and return an insn body to add Y to X. */
4626 gen_add2_insn (rtx x, rtx y)
4628 enum insn_code icode = optab_handler (add_optab, GET_MODE (x));
4630 gcc_assert (insn_operand_matches (icode, 0, x));
4631 gcc_assert (insn_operand_matches (icode, 1, x));
4632 gcc_assert (insn_operand_matches (icode, 2, y));
4634 return GEN_FCN (icode) (x, x, y);
4637 /* Generate and return an insn body to add r1 and c,
4638 storing the result in r0. */
4641 gen_add3_insn (rtx r0, rtx r1, rtx c)
4643 enum insn_code icode = optab_handler (add_optab, GET_MODE (r0));
4645 if (icode == CODE_FOR_nothing
4646 || !insn_operand_matches (icode, 0, r0)
4647 || !insn_operand_matches (icode, 1, r1)
4648 || !insn_operand_matches (icode, 2, c))
4651 return GEN_FCN (icode) (r0, r1, c);
4655 have_add2_insn (rtx x, rtx y)
4657 enum insn_code icode;
4659 gcc_assert (GET_MODE (x) != VOIDmode);
4661 icode = optab_handler (add_optab, GET_MODE (x));
4663 if (icode == CODE_FOR_nothing)
4666 if (!insn_operand_matches (icode, 0, x)
4667 || !insn_operand_matches (icode, 1, x)
4668 || !insn_operand_matches (icode, 2, y))
4674 /* Generate and return an insn body to subtract Y from X. */
4677 gen_sub2_insn (rtx x, rtx y)
4679 enum insn_code icode = optab_handler (sub_optab, GET_MODE (x));
4681 gcc_assert (insn_operand_matches (icode, 0, x));
4682 gcc_assert (insn_operand_matches (icode, 1, x));
4683 gcc_assert (insn_operand_matches (icode, 2, y));
4685 return GEN_FCN (icode) (x, x, y);
4688 /* Generate and return an insn body to subtract r1 and c,
4689 storing the result in r0. */
4692 gen_sub3_insn (rtx r0, rtx r1, rtx c)
4694 enum insn_code icode = optab_handler (sub_optab, GET_MODE (r0));
4696 if (icode == CODE_FOR_nothing
4697 || !insn_operand_matches (icode, 0, r0)
4698 || !insn_operand_matches (icode, 1, r1)
4699 || !insn_operand_matches (icode, 2, c))
4702 return GEN_FCN (icode) (r0, r1, c);
4706 have_sub2_insn (rtx x, rtx y)
4708 enum insn_code icode;
4710 gcc_assert (GET_MODE (x) != VOIDmode);
4712 icode = optab_handler (sub_optab, GET_MODE (x));
4714 if (icode == CODE_FOR_nothing)
4717 if (!insn_operand_matches (icode, 0, x)
4718 || !insn_operand_matches (icode, 1, x)
4719 || !insn_operand_matches (icode, 2, y))
4725 /* Generate the body of an instruction to copy Y into X.
4726 It may be a list of insns, if one insn isn't enough. */
4729 gen_move_insn (rtx x, rtx y)
4734 emit_move_insn_1 (x, y);
4740 /* Return the insn code used to extend FROM_MODE to TO_MODE.
4741 UNSIGNEDP specifies zero-extension instead of sign-extension. If
4742 no such operation exists, CODE_FOR_nothing will be returned. */
4745 can_extend_p (enum machine_mode to_mode, enum machine_mode from_mode,
4749 #ifdef HAVE_ptr_extend
4751 return CODE_FOR_ptr_extend;
4754 tab = unsignedp ? zext_optab : sext_optab;
4755 return convert_optab_handler (tab, to_mode, from_mode);
4758 /* Generate the body of an insn to extend Y (with mode MFROM)
4759 into X (with mode MTO). Do zero-extension if UNSIGNEDP is nonzero. */
4762 gen_extend_insn (rtx x, rtx y, enum machine_mode mto,
4763 enum machine_mode mfrom, int unsignedp)
4765 enum insn_code icode = can_extend_p (mto, mfrom, unsignedp);
4766 return GEN_FCN (icode) (x, y);
4769 /* can_fix_p and can_float_p say whether the target machine
4770 can directly convert a given fixed point type to
4771 a given floating point type, or vice versa.
4772 The returned value is the CODE_FOR_... value to use,
4773 or CODE_FOR_nothing if these modes cannot be directly converted.
4775 *TRUNCP_PTR is set to 1 if it is necessary to output
4776 an explicit FTRUNC insn before the fix insn; otherwise 0. */
4778 static enum insn_code
4779 can_fix_p (enum machine_mode fixmode, enum machine_mode fltmode,
4780 int unsignedp, int *truncp_ptr)
4783 enum insn_code icode;
4785 tab = unsignedp ? ufixtrunc_optab : sfixtrunc_optab;
4786 icode = convert_optab_handler (tab, fixmode, fltmode);
4787 if (icode != CODE_FOR_nothing)
4793 /* FIXME: This requires a port to define both FIX and FTRUNC pattern
4794 for this to work. We need to rework the fix* and ftrunc* patterns
4795 and documentation. */
4796 tab = unsignedp ? ufix_optab : sfix_optab;
4797 icode = convert_optab_handler (tab, fixmode, fltmode);
4798 if (icode != CODE_FOR_nothing
4799 && optab_handler (ftrunc_optab, fltmode) != CODE_FOR_nothing)
4806 return CODE_FOR_nothing;
4810 can_float_p (enum machine_mode fltmode, enum machine_mode fixmode,
4815 tab = unsignedp ? ufloat_optab : sfloat_optab;
4816 return convert_optab_handler (tab, fltmode, fixmode);
4819 /* Function supportable_convert_operation
4821 Check whether an operation represented by the code CODE is a
4822 convert operation that is supported by the target platform in
4823 vector form (i.e., when operating on arguments of type VECTYPE_IN
4824 producing a result of type VECTYPE_OUT).
4826 Convert operations we currently support directly are FIX_TRUNC and FLOAT.
4827 This function checks if these operations are supported
4828 by the target platform either directly (via vector tree-codes), or via
4832 - CODE1 is code of vector operation to be used when
4833 vectorizing the operation, if available.
4834 - DECL is decl of target builtin functions to be used
4835 when vectorizing the operation, if available. In this case,
4836 CODE1 is CALL_EXPR. */
4839 supportable_convert_operation (enum tree_code code,
4840 tree vectype_out, tree vectype_in,
4841 tree *decl, enum tree_code *code1)
4843 enum machine_mode m1,m2;
4846 m1 = TYPE_MODE (vectype_out);
4847 m2 = TYPE_MODE (vectype_in);
4849 /* First check if we can done conversion directly. */
4850 if ((code == FIX_TRUNC_EXPR
4851 && can_fix_p (m1,m2,TYPE_UNSIGNED (vectype_out), &truncp)
4852 != CODE_FOR_nothing)
4853 || (code == FLOAT_EXPR
4854 && can_float_p (m1,m2,TYPE_UNSIGNED (vectype_in))
4855 != CODE_FOR_nothing))
4861 /* Now check for builtin. */
4862 if (targetm.vectorize.builtin_conversion
4863 && targetm.vectorize.builtin_conversion (code, vectype_out, vectype_in))
4866 *decl = targetm.vectorize.builtin_conversion (code, vectype_out, vectype_in);
4873 /* Generate code to convert FROM to floating point
4874 and store in TO. FROM must be fixed point and not VOIDmode.
4875 UNSIGNEDP nonzero means regard FROM as unsigned.
4876 Normally this is done by correcting the final value
4877 if it is negative. */
4880 expand_float (rtx to, rtx from, int unsignedp)
4882 enum insn_code icode;
4884 enum machine_mode fmode, imode;
4885 bool can_do_signed = false;
4887 /* Crash now, because we won't be able to decide which mode to use. */
4888 gcc_assert (GET_MODE (from) != VOIDmode);
4890 /* Look for an insn to do the conversion. Do it in the specified
4891 modes if possible; otherwise convert either input, output or both to
4892 wider mode. If the integer mode is wider than the mode of FROM,
4893 we can do the conversion signed even if the input is unsigned. */
4895 for (fmode = GET_MODE (to); fmode != VOIDmode;
4896 fmode = GET_MODE_WIDER_MODE (fmode))
4897 for (imode = GET_MODE (from); imode != VOIDmode;
4898 imode = GET_MODE_WIDER_MODE (imode))
4900 int doing_unsigned = unsignedp;
4902 if (fmode != GET_MODE (to)
4903 && significand_size (fmode) < GET_MODE_PRECISION (GET_MODE (from)))
4906 icode = can_float_p (fmode, imode, unsignedp);
4907 if (icode == CODE_FOR_nothing && unsignedp)
4909 enum insn_code scode = can_float_p (fmode, imode, 0);
4910 if (scode != CODE_FOR_nothing)
4911 can_do_signed = true;
4912 if (imode != GET_MODE (from))
4913 icode = scode, doing_unsigned = 0;
4916 if (icode != CODE_FOR_nothing)
4918 if (imode != GET_MODE (from))
4919 from = convert_to_mode (imode, from, unsignedp);
4921 if (fmode != GET_MODE (to))
4922 target = gen_reg_rtx (fmode);
4924 emit_unop_insn (icode, target, from,
4925 doing_unsigned ? UNSIGNED_FLOAT : FLOAT);
4928 convert_move (to, target, 0);
4933 /* Unsigned integer, and no way to convert directly. Convert as signed,
4934 then unconditionally adjust the result. */
4935 if (unsignedp && can_do_signed)
4937 rtx label = gen_label_rtx ();
4939 REAL_VALUE_TYPE offset;
4941 /* Look for a usable floating mode FMODE wider than the source and at
4942 least as wide as the target. Using FMODE will avoid rounding woes
4943 with unsigned values greater than the signed maximum value. */
4945 for (fmode = GET_MODE (to); fmode != VOIDmode;
4946 fmode = GET_MODE_WIDER_MODE (fmode))
4947 if (GET_MODE_PRECISION (GET_MODE (from)) < GET_MODE_BITSIZE (fmode)
4948 && can_float_p (fmode, GET_MODE (from), 0) != CODE_FOR_nothing)
4951 if (fmode == VOIDmode)
4953 /* There is no such mode. Pretend the target is wide enough. */
4954 fmode = GET_MODE (to);
4956 /* Avoid double-rounding when TO is narrower than FROM. */
4957 if ((significand_size (fmode) + 1)
4958 < GET_MODE_PRECISION (GET_MODE (from)))
4961 rtx neglabel = gen_label_rtx ();
4963 /* Don't use TARGET if it isn't a register, is a hard register,
4964 or is the wrong mode. */
4966 || REGNO (target) < FIRST_PSEUDO_REGISTER
4967 || GET_MODE (target) != fmode)
4968 target = gen_reg_rtx (fmode);
4970 imode = GET_MODE (from);
4971 do_pending_stack_adjust ();
4973 /* Test whether the sign bit is set. */
4974 emit_cmp_and_jump_insns (from, const0_rtx, LT, NULL_RTX, imode,
4977 /* The sign bit is not set. Convert as signed. */
4978 expand_float (target, from, 0);
4979 emit_jump_insn (gen_jump (label));
4982 /* The sign bit is set.
4983 Convert to a usable (positive signed) value by shifting right
4984 one bit, while remembering if a nonzero bit was shifted
4985 out; i.e., compute (from & 1) | (from >> 1). */
4987 emit_label (neglabel);
4988 temp = expand_binop (imode, and_optab, from, const1_rtx,
4989 NULL_RTX, 1, OPTAB_LIB_WIDEN);
4990 temp1 = expand_shift (RSHIFT_EXPR, imode, from, 1, NULL_RTX, 1);
4991 temp = expand_binop (imode, ior_optab, temp, temp1, temp, 1,
4993 expand_float (target, temp, 0);
4995 /* Multiply by 2 to undo the shift above. */
4996 temp = expand_binop (fmode, add_optab, target, target,
4997 target, 0, OPTAB_LIB_WIDEN);
4999 emit_move_insn (target, temp);
5001 do_pending_stack_adjust ();
5007 /* If we are about to do some arithmetic to correct for an
5008 unsigned operand, do it in a pseudo-register. */
5010 if (GET_MODE (to) != fmode
5011 || !REG_P (to) || REGNO (to) < FIRST_PSEUDO_REGISTER)
5012 target = gen_reg_rtx (fmode);
5014 /* Convert as signed integer to floating. */
5015 expand_float (target, from, 0);
5017 /* If FROM is negative (and therefore TO is negative),
5018 correct its value by 2**bitwidth. */
5020 do_pending_stack_adjust ();
5021 emit_cmp_and_jump_insns (from, const0_rtx, GE, NULL_RTX, GET_MODE (from),
5025 real_2expN (&offset, GET_MODE_PRECISION (GET_MODE (from)), fmode);
5026 temp = expand_binop (fmode, add_optab, target,
5027 CONST_DOUBLE_FROM_REAL_VALUE (offset, fmode),
5028 target, 0, OPTAB_LIB_WIDEN);
5030 emit_move_insn (target, temp);
5032 do_pending_stack_adjust ();
5037 /* No hardware instruction available; call a library routine. */
5042 convert_optab tab = unsignedp ? ufloat_optab : sfloat_optab;
5044 if (GET_MODE_SIZE (GET_MODE (from)) < GET_MODE_SIZE (SImode))
5045 from = convert_to_mode (SImode, from, unsignedp);
5047 libfunc = convert_optab_libfunc (tab, GET_MODE (to), GET_MODE (from));
5048 gcc_assert (libfunc);
5052 value = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
5053 GET_MODE (to), 1, from,
5055 insns = get_insns ();
5058 emit_libcall_block (insns, target, value,
5059 gen_rtx_fmt_e (unsignedp ? UNSIGNED_FLOAT : FLOAT,
5060 GET_MODE (to), from));
5065 /* Copy result to requested destination
5066 if we have been computing in a temp location. */
5070 if (GET_MODE (target) == GET_MODE (to))
5071 emit_move_insn (to, target);
5073 convert_move (to, target, 0);
5077 /* Generate code to convert FROM to fixed point and store in TO. FROM
5078 must be floating point. */
5081 expand_fix (rtx to, rtx from, int unsignedp)
5083 enum insn_code icode;
5085 enum machine_mode fmode, imode;
5088 /* We first try to find a pair of modes, one real and one integer, at
5089 least as wide as FROM and TO, respectively, in which we can open-code
5090 this conversion. If the integer mode is wider than the mode of TO,
5091 we can do the conversion either signed or unsigned. */
5093 for (fmode = GET_MODE (from); fmode != VOIDmode;
5094 fmode = GET_MODE_WIDER_MODE (fmode))
5095 for (imode = GET_MODE (to); imode != VOIDmode;
5096 imode = GET_MODE_WIDER_MODE (imode))
5098 int doing_unsigned = unsignedp;
5100 icode = can_fix_p (imode, fmode, unsignedp, &must_trunc);
5101 if (icode == CODE_FOR_nothing && imode != GET_MODE (to) && unsignedp)
5102 icode = can_fix_p (imode, fmode, 0, &must_trunc), doing_unsigned = 0;
5104 if (icode != CODE_FOR_nothing)
5106 rtx last = get_last_insn ();
5107 if (fmode != GET_MODE (from))
5108 from = convert_to_mode (fmode, from, 0);
5112 rtx temp = gen_reg_rtx (GET_MODE (from));
5113 from = expand_unop (GET_MODE (from), ftrunc_optab, from,
5117 if (imode != GET_MODE (to))
5118 target = gen_reg_rtx (imode);
5120 if (maybe_emit_unop_insn (icode, target, from,
5121 doing_unsigned ? UNSIGNED_FIX : FIX))
5124 convert_move (to, target, unsignedp);
5127 delete_insns_since (last);
5131 /* For an unsigned conversion, there is one more way to do it.
5132 If we have a signed conversion, we generate code that compares
5133 the real value to the largest representable positive number. If if
5134 is smaller, the conversion is done normally. Otherwise, subtract
5135 one plus the highest signed number, convert, and add it back.
5137 We only need to check all real modes, since we know we didn't find
5138 anything with a wider integer mode.
5140 This code used to extend FP value into mode wider than the destination.
5141 This is needed for decimal float modes which cannot accurately
5142 represent one plus the highest signed number of the same size, but
5143 not for binary modes. Consider, for instance conversion from SFmode
5146 The hot path through the code is dealing with inputs smaller than 2^63
5147 and doing just the conversion, so there is no bits to lose.
5149 In the other path we know the value is positive in the range 2^63..2^64-1
5150 inclusive. (as for other input overflow happens and result is undefined)
5151 So we know that the most important bit set in mantissa corresponds to
5152 2^63. The subtraction of 2^63 should not generate any rounding as it
5153 simply clears out that bit. The rest is trivial. */
5155 if (unsignedp && GET_MODE_PRECISION (GET_MODE (to)) <= HOST_BITS_PER_WIDE_INT)
5156 for (fmode = GET_MODE (from); fmode != VOIDmode;
5157 fmode = GET_MODE_WIDER_MODE (fmode))
5158 if (CODE_FOR_nothing != can_fix_p (GET_MODE (to), fmode, 0, &must_trunc)
5159 && (!DECIMAL_FLOAT_MODE_P (fmode)
5160 || GET_MODE_BITSIZE (fmode) > GET_MODE_PRECISION (GET_MODE (to))))
5163 REAL_VALUE_TYPE offset;
5164 rtx limit, lab1, lab2, insn;
5166 bitsize = GET_MODE_PRECISION (GET_MODE (to));
5167 real_2expN (&offset, bitsize - 1, fmode);
5168 limit = CONST_DOUBLE_FROM_REAL_VALUE (offset, fmode);
5169 lab1 = gen_label_rtx ();
5170 lab2 = gen_label_rtx ();
5172 if (fmode != GET_MODE (from))
5173 from = convert_to_mode (fmode, from, 0);
5175 /* See if we need to do the subtraction. */
5176 do_pending_stack_adjust ();
5177 emit_cmp_and_jump_insns (from, limit, GE, NULL_RTX, GET_MODE (from),
5180 /* If not, do the signed "fix" and branch around fixup code. */
5181 expand_fix (to, from, 0);
5182 emit_jump_insn (gen_jump (lab2));
5185 /* Otherwise, subtract 2**(N-1), convert to signed number,
5186 then add 2**(N-1). Do the addition using XOR since this
5187 will often generate better code. */
5189 target = expand_binop (GET_MODE (from), sub_optab, from, limit,
5190 NULL_RTX, 0, OPTAB_LIB_WIDEN);
5191 expand_fix (to, target, 0);
5192 target = expand_binop (GET_MODE (to), xor_optab, to,
5194 ((HOST_WIDE_INT) 1 << (bitsize - 1),
5196 to, 1, OPTAB_LIB_WIDEN);
5199 emit_move_insn (to, target);
5203 if (optab_handler (mov_optab, GET_MODE (to)) != CODE_FOR_nothing)
5205 /* Make a place for a REG_NOTE and add it. */
5206 insn = emit_move_insn (to, to);
5207 set_dst_reg_note (insn, REG_EQUAL,
5208 gen_rtx_fmt_e (UNSIGNED_FIX, GET_MODE (to),
5216 /* We can't do it with an insn, so use a library call. But first ensure
5217 that the mode of TO is at least as wide as SImode, since those are the
5218 only library calls we know about. */
5220 if (GET_MODE_SIZE (GET_MODE (to)) < GET_MODE_SIZE (SImode))
5222 target = gen_reg_rtx (SImode);
5224 expand_fix (target, from, unsignedp);
5232 convert_optab tab = unsignedp ? ufix_optab : sfix_optab;
5233 libfunc = convert_optab_libfunc (tab, GET_MODE (to), GET_MODE (from));
5234 gcc_assert (libfunc);
5238 value = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
5239 GET_MODE (to), 1, from,
5241 insns = get_insns ();
5244 emit_libcall_block (insns, target, value,
5245 gen_rtx_fmt_e (unsignedp ? UNSIGNED_FIX : FIX,
5246 GET_MODE (to), from));
5251 if (GET_MODE (to) == GET_MODE (target))
5252 emit_move_insn (to, target);
5254 convert_move (to, target, 0);
5258 /* Generate code to convert FROM or TO a fixed-point.
5259 If UINTP is true, either TO or FROM is an unsigned integer.
5260 If SATP is true, we need to saturate the result. */
5263 expand_fixed_convert (rtx to, rtx from, int uintp, int satp)
5265 enum machine_mode to_mode = GET_MODE (to);
5266 enum machine_mode from_mode = GET_MODE (from);
5268 enum rtx_code this_code;
5269 enum insn_code code;
5273 if (to_mode == from_mode)
5275 emit_move_insn (to, from);
5281 tab = satp ? satfractuns_optab : fractuns_optab;
5282 this_code = satp ? UNSIGNED_SAT_FRACT : UNSIGNED_FRACT_CONVERT;
5286 tab = satp ? satfract_optab : fract_optab;
5287 this_code = satp ? SAT_FRACT : FRACT_CONVERT;
5289 code = convert_optab_handler (tab, to_mode, from_mode);
5290 if (code != CODE_FOR_nothing)
5292 emit_unop_insn (code, to, from, this_code);
5296 libfunc = convert_optab_libfunc (tab, to_mode, from_mode);
5297 gcc_assert (libfunc);
5300 value = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST, to_mode,
5301 1, from, from_mode);
5302 insns = get_insns ();
5305 emit_libcall_block (insns, to, value,
5306 gen_rtx_fmt_e (tab->code, to_mode, from));
5309 /* Generate code to convert FROM to fixed point and store in TO. FROM
5310 must be floating point, TO must be signed. Use the conversion optab
5311 TAB to do the conversion. */
5314 expand_sfix_optab (rtx to, rtx from, convert_optab tab)
5316 enum insn_code icode;
5318 enum machine_mode fmode, imode;
5320 /* We first try to find a pair of modes, one real and one integer, at
5321 least as wide as FROM and TO, respectively, in which we can open-code
5322 this conversion. If the integer mode is wider than the mode of TO,
5323 we can do the conversion either signed or unsigned. */
5325 for (fmode = GET_MODE (from); fmode != VOIDmode;
5326 fmode = GET_MODE_WIDER_MODE (fmode))
5327 for (imode = GET_MODE (to); imode != VOIDmode;
5328 imode = GET_MODE_WIDER_MODE (imode))
5330 icode = convert_optab_handler (tab, imode, fmode);
5331 if (icode != CODE_FOR_nothing)
5333 rtx last = get_last_insn ();
5334 if (fmode != GET_MODE (from))
5335 from = convert_to_mode (fmode, from, 0);
5337 if (imode != GET_MODE (to))
5338 target = gen_reg_rtx (imode);
5340 if (!maybe_emit_unop_insn (icode, target, from, UNKNOWN))
5342 delete_insns_since (last);
5346 convert_move (to, target, 0);
5354 /* Report whether we have an instruction to perform the operation
5355 specified by CODE on operands of mode MODE. */
5357 have_insn_for (enum rtx_code code, enum machine_mode mode)
5359 return (code_to_optab[(int) code] != 0
5360 && (optab_handler (code_to_optab[(int) code], mode)
5361 != CODE_FOR_nothing));
5364 /* Set all insn_code fields to CODE_FOR_nothing. */
5367 init_insn_codes (void)
5369 memset (optab_table, 0, sizeof (optab_table));
5370 memset (convert_optab_table, 0, sizeof (convert_optab_table));
5371 memset (direct_optab_table, 0, sizeof (direct_optab_table));
5374 /* Initialize OP's code to CODE, and write it into the code_to_optab table. */
5376 init_optab (optab op, enum rtx_code code)
5379 code_to_optab[(int) code] = op;
5382 /* Same, but fill in its code as CODE, and do _not_ write it into
5383 the code_to_optab table. */
5385 init_optabv (optab op, enum rtx_code code)
5390 /* Conversion optabs never go in the code_to_optab table. */
5392 init_convert_optab (convert_optab op, enum rtx_code code)
5397 /* Initialize the libfunc fields of an entire group of entries in some
5398 optab. Each entry is set equal to a string consisting of a leading
5399 pair of underscores followed by a generic operation name followed by
5400 a mode name (downshifted to lowercase) followed by a single character
5401 representing the number of operands for the given operation (which is
5402 usually one of the characters '2', '3', or '4').
5404 OPTABLE is the table in which libfunc fields are to be initialized.
5405 OPNAME is the generic (string) name of the operation.
5406 SUFFIX is the character which specifies the number of operands for
5407 the given generic operation.
5408 MODE is the mode to generate for.
5412 gen_libfunc (optab optable, const char *opname, int suffix, enum machine_mode mode)
5414 unsigned opname_len = strlen (opname);
5415 const char *mname = GET_MODE_NAME (mode);
5416 unsigned mname_len = strlen (mname);
5417 int prefix_len = targetm.libfunc_gnu_prefix ? 6 : 2;
5418 int len = prefix_len + opname_len + mname_len + 1 + 1;
5419 char *libfunc_name = XALLOCAVEC (char, len);
5426 if (targetm.libfunc_gnu_prefix)
5433 for (q = opname; *q; )
5435 for (q = mname; *q; q++)
5436 *p++ = TOLOWER (*q);
5440 set_optab_libfunc (optable, mode,
5441 ggc_alloc_string (libfunc_name, p - libfunc_name));
5444 /* Like gen_libfunc, but verify that integer operation is involved. */
5447 gen_int_libfunc (optab optable, const char *opname, char suffix,
5448 enum machine_mode mode)
5450 int maxsize = 2 * BITS_PER_WORD;
5452 if (GET_MODE_CLASS (mode) != MODE_INT)
5454 if (maxsize < LONG_LONG_TYPE_SIZE)
5455 maxsize = LONG_LONG_TYPE_SIZE;
5456 if (GET_MODE_CLASS (mode) != MODE_INT
5457 || mode < word_mode || GET_MODE_BITSIZE (mode) > maxsize)
5459 gen_libfunc (optable, opname, suffix, mode);
5462 /* Like gen_libfunc, but verify that FP and set decimal prefix if needed. */
5465 gen_fp_libfunc (optab optable, const char *opname, char suffix,
5466 enum machine_mode mode)
5470 if (GET_MODE_CLASS (mode) == MODE_FLOAT)
5471 gen_libfunc (optable, opname, suffix, mode);
5472 if (DECIMAL_FLOAT_MODE_P (mode))
5474 dec_opname = XALLOCAVEC (char, sizeof (DECIMAL_PREFIX) + strlen (opname));
5475 /* For BID support, change the name to have either a bid_ or dpd_ prefix
5476 depending on the low level floating format used. */
5477 memcpy (dec_opname, DECIMAL_PREFIX, sizeof (DECIMAL_PREFIX) - 1);
5478 strcpy (dec_opname + sizeof (DECIMAL_PREFIX) - 1, opname);
5479 gen_libfunc (optable, dec_opname, suffix, mode);
5483 /* Like gen_libfunc, but verify that fixed-point operation is involved. */
5486 gen_fixed_libfunc (optab optable, const char *opname, char suffix,
5487 enum machine_mode mode)
5489 if (!ALL_FIXED_POINT_MODE_P (mode))
5491 gen_libfunc (optable, opname, suffix, mode);
5494 /* Like gen_libfunc, but verify that signed fixed-point operation is
5498 gen_signed_fixed_libfunc (optab optable, const char *opname, char suffix,
5499 enum machine_mode mode)
5501 if (!SIGNED_FIXED_POINT_MODE_P (mode))
5503 gen_libfunc (optable, opname, suffix, mode);
5506 /* Like gen_libfunc, but verify that unsigned fixed-point operation is
5510 gen_unsigned_fixed_libfunc (optab optable, const char *opname, char suffix,
5511 enum machine_mode mode)
5513 if (!UNSIGNED_FIXED_POINT_MODE_P (mode))
5515 gen_libfunc (optable, opname, suffix, mode);
5518 /* Like gen_libfunc, but verify that FP or INT operation is involved. */
5521 gen_int_fp_libfunc (optab optable, const char *name, char suffix,
5522 enum machine_mode mode)
5524 if (DECIMAL_FLOAT_MODE_P (mode) || GET_MODE_CLASS (mode) == MODE_FLOAT)
5525 gen_fp_libfunc (optable, name, suffix, mode);
5526 if (INTEGRAL_MODE_P (mode))
5527 gen_int_libfunc (optable, name, suffix, mode);
5530 /* Like gen_libfunc, but verify that FP or INT operation is involved
5531 and add 'v' suffix for integer operation. */
5534 gen_intv_fp_libfunc (optab optable, const char *name, char suffix,
5535 enum machine_mode mode)
5537 if (DECIMAL_FLOAT_MODE_P (mode) || GET_MODE_CLASS (mode) == MODE_FLOAT)
5538 gen_fp_libfunc (optable, name, suffix, mode);
5539 if (GET_MODE_CLASS (mode) == MODE_INT)
5541 int len = strlen (name);
5542 char *v_name = XALLOCAVEC (char, len + 2);
5543 strcpy (v_name, name);
5545 v_name[len + 1] = 0;
5546 gen_int_libfunc (optable, v_name, suffix, mode);
5550 /* Like gen_libfunc, but verify that FP or INT or FIXED operation is
5554 gen_int_fp_fixed_libfunc (optab optable, const char *name, char suffix,
5555 enum machine_mode mode)
5557 if (DECIMAL_FLOAT_MODE_P (mode) || GET_MODE_CLASS (mode) == MODE_FLOAT)
5558 gen_fp_libfunc (optable, name, suffix, mode);
5559 if (INTEGRAL_MODE_P (mode))
5560 gen_int_libfunc (optable, name, suffix, mode);
5561 if (ALL_FIXED_POINT_MODE_P (mode))
5562 gen_fixed_libfunc (optable, name, suffix, mode);
5565 /* Like gen_libfunc, but verify that FP or INT or signed FIXED operation is
5569 gen_int_fp_signed_fixed_libfunc (optab optable, const char *name, char suffix,
5570 enum machine_mode mode)
5572 if (DECIMAL_FLOAT_MODE_P (mode) || GET_MODE_CLASS (mode) == MODE_FLOAT)
5573 gen_fp_libfunc (optable, name, suffix, mode);
5574 if (INTEGRAL_MODE_P (mode))
5575 gen_int_libfunc (optable, name, suffix, mode);
5576 if (SIGNED_FIXED_POINT_MODE_P (mode))
5577 gen_signed_fixed_libfunc (optable, name, suffix, mode);
5580 /* Like gen_libfunc, but verify that INT or FIXED operation is
5584 gen_int_fixed_libfunc (optab optable, const char *name, char suffix,
5585 enum machine_mode mode)
5587 if (INTEGRAL_MODE_P (mode))
5588 gen_int_libfunc (optable, name, suffix, mode);
5589 if (ALL_FIXED_POINT_MODE_P (mode))
5590 gen_fixed_libfunc (optable, name, suffix, mode);
5593 /* Like gen_libfunc, but verify that INT or signed FIXED operation is
5597 gen_int_signed_fixed_libfunc (optab optable, const char *name, char suffix,
5598 enum machine_mode mode)
5600 if (INTEGRAL_MODE_P (mode))
5601 gen_int_libfunc (optable, name, suffix, mode);
5602 if (SIGNED_FIXED_POINT_MODE_P (mode))
5603 gen_signed_fixed_libfunc (optable, name, suffix, mode);
5606 /* Like gen_libfunc, but verify that INT or unsigned FIXED operation is
5610 gen_int_unsigned_fixed_libfunc (optab optable, const char *name, char suffix,
5611 enum machine_mode mode)
5613 if (INTEGRAL_MODE_P (mode))
5614 gen_int_libfunc (optable, name, suffix, mode);
5615 if (UNSIGNED_FIXED_POINT_MODE_P (mode))
5616 gen_unsigned_fixed_libfunc (optable, name, suffix, mode);
5619 /* Initialize the libfunc fields of an entire group of entries of an
5620 inter-mode-class conversion optab. The string formation rules are
5621 similar to the ones for init_libfuncs, above, but instead of having
5622 a mode name and an operand count these functions have two mode names
5623 and no operand count. */
5626 gen_interclass_conv_libfunc (convert_optab tab,
5628 enum machine_mode tmode,
5629 enum machine_mode fmode)
5631 size_t opname_len = strlen (opname);
5632 size_t mname_len = 0;
5634 const char *fname, *tname;
5636 int prefix_len = targetm.libfunc_gnu_prefix ? 6 : 2;
5637 char *libfunc_name, *suffix;
5638 char *nondec_name, *dec_name, *nondec_suffix, *dec_suffix;
5641 /* If this is a decimal conversion, add the current BID vs. DPD prefix that
5642 depends on which underlying decimal floating point format is used. */
5643 const size_t dec_len = sizeof (DECIMAL_PREFIX) - 1;
5645 mname_len = strlen (GET_MODE_NAME (tmode)) + strlen (GET_MODE_NAME (fmode));
5647 nondec_name = XALLOCAVEC (char, prefix_len + opname_len + mname_len + 1 + 1);
5648 nondec_name[0] = '_';
5649 nondec_name[1] = '_';
5650 if (targetm.libfunc_gnu_prefix)
5652 nondec_name[2] = 'g';
5653 nondec_name[3] = 'n';
5654 nondec_name[4] = 'u';
5655 nondec_name[5] = '_';
5658 memcpy (&nondec_name[prefix_len], opname, opname_len);
5659 nondec_suffix = nondec_name + opname_len + prefix_len;
5661 dec_name = XALLOCAVEC (char, 2 + dec_len + opname_len + mname_len + 1 + 1);
5664 memcpy (&dec_name[2], DECIMAL_PREFIX, dec_len);
5665 memcpy (&dec_name[2+dec_len], opname, opname_len);
5666 dec_suffix = dec_name + dec_len + opname_len + 2;
5668 fname = GET_MODE_NAME (fmode);
5669 tname = GET_MODE_NAME (tmode);
5671 if (DECIMAL_FLOAT_MODE_P(fmode) || DECIMAL_FLOAT_MODE_P(tmode))
5673 libfunc_name = dec_name;
5674 suffix = dec_suffix;
5678 libfunc_name = nondec_name;
5679 suffix = nondec_suffix;
5683 for (q = fname; *q; p++, q++)
5685 for (q = tname; *q; p++, q++)
5690 set_conv_libfunc (tab, tmode, fmode,
5691 ggc_alloc_string (libfunc_name, p - libfunc_name));
5694 /* Same as gen_interclass_conv_libfunc but verify that we are producing
5695 int->fp conversion. */
5698 gen_int_to_fp_conv_libfunc (convert_optab tab,
5700 enum machine_mode tmode,
5701 enum machine_mode fmode)
5703 if (GET_MODE_CLASS (fmode) != MODE_INT)
5705 if (GET_MODE_CLASS (tmode) != MODE_FLOAT && !DECIMAL_FLOAT_MODE_P (tmode))
5707 gen_interclass_conv_libfunc (tab, opname, tmode, fmode);
5710 /* ufloat_optab is special by using floatun for FP and floatuns decimal fp
5714 gen_ufloat_conv_libfunc (convert_optab tab,
5715 const char *opname ATTRIBUTE_UNUSED,
5716 enum machine_mode tmode,
5717 enum machine_mode fmode)
5719 if (DECIMAL_FLOAT_MODE_P (tmode))
5720 gen_int_to_fp_conv_libfunc (tab, "floatuns", tmode, fmode);
5722 gen_int_to_fp_conv_libfunc (tab, "floatun", tmode, fmode);
5725 /* Same as gen_interclass_conv_libfunc but verify that we are producing
5726 fp->int conversion. */
5729 gen_int_to_fp_nondecimal_conv_libfunc (convert_optab tab,
5731 enum machine_mode tmode,
5732 enum machine_mode fmode)
5734 if (GET_MODE_CLASS (fmode) != MODE_INT)
5736 if (GET_MODE_CLASS (tmode) != MODE_FLOAT)
5738 gen_interclass_conv_libfunc (tab, opname, tmode, fmode);
5741 /* Same as gen_interclass_conv_libfunc but verify that we are producing
5742 fp->int conversion with no decimal floating point involved. */
5745 gen_fp_to_int_conv_libfunc (convert_optab tab,
5747 enum machine_mode tmode,
5748 enum machine_mode fmode)
5750 if (GET_MODE_CLASS (fmode) != MODE_FLOAT && !DECIMAL_FLOAT_MODE_P (fmode))
5752 if (GET_MODE_CLASS (tmode) != MODE_INT)
5754 gen_interclass_conv_libfunc (tab, opname, tmode, fmode);
5757 /* Initialize the libfunc fields of an of an intra-mode-class conversion optab.
5758 The string formation rules are
5759 similar to the ones for init_libfunc, above. */
5762 gen_intraclass_conv_libfunc (convert_optab tab, const char *opname,
5763 enum machine_mode tmode, enum machine_mode fmode)
5765 size_t opname_len = strlen (opname);
5766 size_t mname_len = 0;
5768 const char *fname, *tname;
5770 int prefix_len = targetm.libfunc_gnu_prefix ? 6 : 2;
5771 char *nondec_name, *dec_name, *nondec_suffix, *dec_suffix;
5772 char *libfunc_name, *suffix;
5775 /* If this is a decimal conversion, add the current BID vs. DPD prefix that
5776 depends on which underlying decimal floating point format is used. */
5777 const size_t dec_len = sizeof (DECIMAL_PREFIX) - 1;
5779 mname_len = strlen (GET_MODE_NAME (tmode)) + strlen (GET_MODE_NAME (fmode));
5781 nondec_name = XALLOCAVEC (char, 2 + opname_len + mname_len + 1 + 1);
5782 nondec_name[0] = '_';
5783 nondec_name[1] = '_';
5784 if (targetm.libfunc_gnu_prefix)
5786 nondec_name[2] = 'g';
5787 nondec_name[3] = 'n';
5788 nondec_name[4] = 'u';
5789 nondec_name[5] = '_';
5791 memcpy (&nondec_name[prefix_len], opname, opname_len);
5792 nondec_suffix = nondec_name + opname_len + prefix_len;
5794 dec_name = XALLOCAVEC (char, 2 + dec_len + opname_len + mname_len + 1 + 1);
5797 memcpy (&dec_name[2], DECIMAL_PREFIX, dec_len);
5798 memcpy (&dec_name[2 + dec_len], opname, opname_len);
5799 dec_suffix = dec_name + dec_len + opname_len + 2;
5801 fname = GET_MODE_NAME (fmode);
5802 tname = GET_MODE_NAME (tmode);
5804 if (DECIMAL_FLOAT_MODE_P(fmode) || DECIMAL_FLOAT_MODE_P(tmode))
5806 libfunc_name = dec_name;
5807 suffix = dec_suffix;
5811 libfunc_name = nondec_name;
5812 suffix = nondec_suffix;
5816 for (q = fname; *q; p++, q++)
5818 for (q = tname; *q; p++, q++)
5824 set_conv_libfunc (tab, tmode, fmode,
5825 ggc_alloc_string (libfunc_name, p - libfunc_name));
5828 /* Pick proper libcall for trunc_optab. We need to chose if we do
5829 truncation or extension and interclass or intraclass. */
5832 gen_trunc_conv_libfunc (convert_optab tab,
5834 enum machine_mode tmode,
5835 enum machine_mode fmode)
5837 if (GET_MODE_CLASS (tmode) != MODE_FLOAT && !DECIMAL_FLOAT_MODE_P (tmode))
5839 if (GET_MODE_CLASS (fmode) != MODE_FLOAT && !DECIMAL_FLOAT_MODE_P (fmode))
5844 if ((GET_MODE_CLASS (tmode) == MODE_FLOAT && DECIMAL_FLOAT_MODE_P (fmode))
5845 || (GET_MODE_CLASS (fmode) == MODE_FLOAT && DECIMAL_FLOAT_MODE_P (tmode)))
5846 gen_interclass_conv_libfunc (tab, opname, tmode, fmode);
5848 if (GET_MODE_PRECISION (fmode) <= GET_MODE_PRECISION (tmode))
5851 if ((GET_MODE_CLASS (tmode) == MODE_FLOAT
5852 && GET_MODE_CLASS (fmode) == MODE_FLOAT)
5853 || (DECIMAL_FLOAT_MODE_P (fmode) && DECIMAL_FLOAT_MODE_P (tmode)))
5854 gen_intraclass_conv_libfunc (tab, opname, tmode, fmode);
5857 /* Pick proper libcall for extend_optab. We need to chose if we do
5858 truncation or extension and interclass or intraclass. */
5861 gen_extend_conv_libfunc (convert_optab tab,
5862 const char *opname ATTRIBUTE_UNUSED,
5863 enum machine_mode tmode,
5864 enum machine_mode fmode)
5866 if (GET_MODE_CLASS (tmode) != MODE_FLOAT && !DECIMAL_FLOAT_MODE_P (tmode))
5868 if (GET_MODE_CLASS (fmode) != MODE_FLOAT && !DECIMAL_FLOAT_MODE_P (fmode))
5873 if ((GET_MODE_CLASS (tmode) == MODE_FLOAT && DECIMAL_FLOAT_MODE_P (fmode))
5874 || (GET_MODE_CLASS (fmode) == MODE_FLOAT && DECIMAL_FLOAT_MODE_P (tmode)))
5875 gen_interclass_conv_libfunc (tab, opname, tmode, fmode);
5877 if (GET_MODE_PRECISION (fmode) > GET_MODE_PRECISION (tmode))
5880 if ((GET_MODE_CLASS (tmode) == MODE_FLOAT
5881 && GET_MODE_CLASS (fmode) == MODE_FLOAT)
5882 || (DECIMAL_FLOAT_MODE_P (fmode) && DECIMAL_FLOAT_MODE_P (tmode)))
5883 gen_intraclass_conv_libfunc (tab, opname, tmode, fmode);
5886 /* Pick proper libcall for fract_optab. We need to chose if we do
5887 interclass or intraclass. */
5890 gen_fract_conv_libfunc (convert_optab tab,
5892 enum machine_mode tmode,
5893 enum machine_mode fmode)
5897 if (!(ALL_FIXED_POINT_MODE_P (tmode) || ALL_FIXED_POINT_MODE_P (fmode)))
5900 if (GET_MODE_CLASS (tmode) == GET_MODE_CLASS (fmode))
5901 gen_intraclass_conv_libfunc (tab, opname, tmode, fmode);
5903 gen_interclass_conv_libfunc (tab, opname, tmode, fmode);
5906 /* Pick proper libcall for fractuns_optab. */
5909 gen_fractuns_conv_libfunc (convert_optab tab,
5911 enum machine_mode tmode,
5912 enum machine_mode fmode)
5916 /* One mode must be a fixed-point mode, and the other must be an integer
5918 if (!((ALL_FIXED_POINT_MODE_P (tmode) && GET_MODE_CLASS (fmode) == MODE_INT)
5919 || (ALL_FIXED_POINT_MODE_P (fmode)
5920 && GET_MODE_CLASS (tmode) == MODE_INT)))
5923 gen_interclass_conv_libfunc (tab, opname, tmode, fmode);
5926 /* Pick proper libcall for satfract_optab. We need to chose if we do
5927 interclass or intraclass. */
5930 gen_satfract_conv_libfunc (convert_optab tab,
5932 enum machine_mode tmode,
5933 enum machine_mode fmode)
5937 /* TMODE must be a fixed-point mode. */
5938 if (!ALL_FIXED_POINT_MODE_P (tmode))
5941 if (GET_MODE_CLASS (tmode) == GET_MODE_CLASS (fmode))
5942 gen_intraclass_conv_libfunc (tab, opname, tmode, fmode);
5944 gen_interclass_conv_libfunc (tab, opname, tmode, fmode);
5947 /* Pick proper libcall for satfractuns_optab. */
5950 gen_satfractuns_conv_libfunc (convert_optab tab,
5952 enum machine_mode tmode,
5953 enum machine_mode fmode)
5957 /* TMODE must be a fixed-point mode, and FMODE must be an integer mode. */
5958 if (!(ALL_FIXED_POINT_MODE_P (tmode) && GET_MODE_CLASS (fmode) == MODE_INT))
5961 gen_interclass_conv_libfunc (tab, opname, tmode, fmode);
5964 /* A table of previously-created libfuncs, hashed by name. */
5965 static GTY ((param_is (union tree_node))) htab_t libfunc_decls;
5967 /* Hashtable callbacks for libfunc_decls. */
5970 libfunc_decl_hash (const void *entry)
5972 return IDENTIFIER_HASH_VALUE (DECL_NAME ((const_tree) entry));
5976 libfunc_decl_eq (const void *entry1, const void *entry2)
5978 return DECL_NAME ((const_tree) entry1) == (const_tree) entry2;
5981 /* Build a decl for a libfunc named NAME. */
5984 build_libfunc_function (const char *name)
5986 tree decl = build_decl (UNKNOWN_LOCATION, FUNCTION_DECL,
5987 get_identifier (name),
5988 build_function_type (integer_type_node, NULL_TREE));
5989 /* ??? We don't have any type information except for this is
5990 a function. Pretend this is "int foo()". */
5991 DECL_ARTIFICIAL (decl) = 1;
5992 DECL_EXTERNAL (decl) = 1;
5993 TREE_PUBLIC (decl) = 1;
5994 gcc_assert (DECL_ASSEMBLER_NAME (decl));
5996 /* Zap the nonsensical SYMBOL_REF_DECL for this. What we're left with
5997 are the flags assigned by targetm.encode_section_info. */
5998 SET_SYMBOL_REF_DECL (XEXP (DECL_RTL (decl), 0), NULL);
6004 init_one_libfunc (const char *name)
6010 if (libfunc_decls == NULL)
6011 libfunc_decls = htab_create_ggc (37, libfunc_decl_hash,
6012 libfunc_decl_eq, NULL);
6014 /* See if we have already created a libfunc decl for this function. */
6015 id = get_identifier (name);
6016 hash = IDENTIFIER_HASH_VALUE (id);
6017 slot = htab_find_slot_with_hash (libfunc_decls, id, hash, INSERT);
6018 decl = (tree) *slot;
6021 /* Create a new decl, so that it can be passed to
6022 targetm.encode_section_info. */
6023 decl = build_libfunc_function (name);
6026 return XEXP (DECL_RTL (decl), 0);
6029 /* Adjust the assembler name of libfunc NAME to ASMSPEC. */
6032 set_user_assembler_libfunc (const char *name, const char *asmspec)
6038 id = get_identifier (name);
6039 hash = IDENTIFIER_HASH_VALUE (id);
6040 slot = htab_find_slot_with_hash (libfunc_decls, id, hash, NO_INSERT);
6042 decl = (tree) *slot;
6043 set_user_assembler_name (decl, asmspec);
6044 return XEXP (DECL_RTL (decl), 0);
6047 /* Call this to reset the function entry for one optab (OPTABLE) in mode
6048 MODE to NAME, which should be either 0 or a string constant. */
6050 set_optab_libfunc (optab optable, enum machine_mode mode, const char *name)
6053 struct libfunc_entry e;
6054 struct libfunc_entry **slot;
6055 e.optab = (size_t) (optable - &optab_table[0]);
6060 val = init_one_libfunc (name);
6063 slot = (struct libfunc_entry **) htab_find_slot (libfunc_hash, &e, INSERT);
6065 *slot = ggc_alloc_libfunc_entry ();
6066 (*slot)->optab = (size_t) (optable - &optab_table[0]);
6067 (*slot)->mode1 = mode;
6068 (*slot)->mode2 = VOIDmode;
6069 (*slot)->libfunc = val;
6072 /* Call this to reset the function entry for one conversion optab
6073 (OPTABLE) from mode FMODE to mode TMODE to NAME, which should be
6074 either 0 or a string constant. */
6076 set_conv_libfunc (convert_optab optable, enum machine_mode tmode,
6077 enum machine_mode fmode, const char *name)
6080 struct libfunc_entry e;
6081 struct libfunc_entry **slot;
6082 e.optab = (size_t) (optable - &convert_optab_table[0]);
6087 val = init_one_libfunc (name);
6090 slot = (struct libfunc_entry **) htab_find_slot (libfunc_hash, &e, INSERT);
6092 *slot = ggc_alloc_libfunc_entry ();
6093 (*slot)->optab = (size_t) (optable - &convert_optab_table[0]);
6094 (*slot)->mode1 = tmode;
6095 (*slot)->mode2 = fmode;
6096 (*slot)->libfunc = val;
6099 /* Call this to initialize the contents of the optabs
6100 appropriately for the current target machine. */
6107 htab_empty (libfunc_hash);
6108 /* We statically initialize the insn_codes with the equivalent of
6109 CODE_FOR_nothing. Repeat the process if reinitialising. */
6113 libfunc_hash = htab_create_ggc (10, hash_libfunc, eq_libfunc, NULL);
6115 init_optab (add_optab, PLUS);
6116 init_optabv (addv_optab, PLUS);
6117 init_optab (sub_optab, MINUS);
6118 init_optabv (subv_optab, MINUS);
6119 init_optab (ssadd_optab, SS_PLUS);
6120 init_optab (usadd_optab, US_PLUS);
6121 init_optab (sssub_optab, SS_MINUS);
6122 init_optab (ussub_optab, US_MINUS);
6123 init_optab (smul_optab, MULT);
6124 init_optab (ssmul_optab, SS_MULT);
6125 init_optab (usmul_optab, US_MULT);
6126 init_optabv (smulv_optab, MULT);
6127 init_optab (smul_highpart_optab, UNKNOWN);
6128 init_optab (umul_highpart_optab, UNKNOWN);
6129 init_optab (smul_widen_optab, UNKNOWN);
6130 init_optab (umul_widen_optab, UNKNOWN);
6131 init_optab (usmul_widen_optab, UNKNOWN);
6132 init_optab (smadd_widen_optab, UNKNOWN);
6133 init_optab (umadd_widen_optab, UNKNOWN);
6134 init_optab (ssmadd_widen_optab, UNKNOWN);
6135 init_optab (usmadd_widen_optab, UNKNOWN);
6136 init_optab (smsub_widen_optab, UNKNOWN);
6137 init_optab (umsub_widen_optab, UNKNOWN);
6138 init_optab (ssmsub_widen_optab, UNKNOWN);
6139 init_optab (usmsub_widen_optab, UNKNOWN);
6140 init_optab (sdiv_optab, DIV);
6141 init_optab (ssdiv_optab, SS_DIV);
6142 init_optab (usdiv_optab, US_DIV);
6143 init_optabv (sdivv_optab, DIV);
6144 init_optab (sdivmod_optab, UNKNOWN);
6145 init_optab (udiv_optab, UDIV);
6146 init_optab (udivmod_optab, UNKNOWN);
6147 init_optab (smod_optab, MOD);
6148 init_optab (umod_optab, UMOD);
6149 init_optab (fmod_optab, UNKNOWN);
6150 init_optab (remainder_optab, UNKNOWN);
6151 init_optab (ftrunc_optab, UNKNOWN);
6152 init_optab (and_optab, AND);
6153 init_optab (ior_optab, IOR);
6154 init_optab (xor_optab, XOR);
6155 init_optab (ashl_optab, ASHIFT);
6156 init_optab (ssashl_optab, SS_ASHIFT);
6157 init_optab (usashl_optab, US_ASHIFT);
6158 init_optab (ashr_optab, ASHIFTRT);
6159 init_optab (lshr_optab, LSHIFTRT);
6160 init_optabv (vashl_optab, ASHIFT);
6161 init_optabv (vashr_optab, ASHIFTRT);
6162 init_optabv (vlshr_optab, LSHIFTRT);
6163 init_optab (rotl_optab, ROTATE);
6164 init_optab (rotr_optab, ROTATERT);
6165 init_optab (smin_optab, SMIN);
6166 init_optab (smax_optab, SMAX);
6167 init_optab (umin_optab, UMIN);
6168 init_optab (umax_optab, UMAX);
6169 init_optab (pow_optab, UNKNOWN);
6170 init_optab (atan2_optab, UNKNOWN);
6171 init_optab (fma_optab, FMA);
6172 init_optab (fms_optab, UNKNOWN);
6173 init_optab (fnma_optab, UNKNOWN);
6174 init_optab (fnms_optab, UNKNOWN);
6176 /* These three have codes assigned exclusively for the sake of
6178 init_optab (mov_optab, SET);
6179 init_optab (movstrict_optab, STRICT_LOW_PART);
6180 init_optab (cbranch_optab, COMPARE);
6182 init_optab (cmov_optab, UNKNOWN);
6183 init_optab (cstore_optab, UNKNOWN);
6184 init_optab (ctrap_optab, UNKNOWN);
6186 init_optab (storent_optab, UNKNOWN);
6188 init_optab (cmp_optab, UNKNOWN);
6189 init_optab (ucmp_optab, UNKNOWN);
6191 init_optab (eq_optab, EQ);
6192 init_optab (ne_optab, NE);
6193 init_optab (gt_optab, GT);
6194 init_optab (ge_optab, GE);
6195 init_optab (lt_optab, LT);
6196 init_optab (le_optab, LE);
6197 init_optab (unord_optab, UNORDERED);
6199 init_optab (neg_optab, NEG);
6200 init_optab (ssneg_optab, SS_NEG);
6201 init_optab (usneg_optab, US_NEG);
6202 init_optabv (negv_optab, NEG);
6203 init_optab (abs_optab, ABS);
6204 init_optabv (absv_optab, ABS);
6205 init_optab (addcc_optab, UNKNOWN);
6206 init_optab (one_cmpl_optab, NOT);
6207 init_optab (bswap_optab, BSWAP);
6208 init_optab (ffs_optab, FFS);
6209 init_optab (clz_optab, CLZ);
6210 init_optab (ctz_optab, CTZ);
6211 init_optab (clrsb_optab, CLRSB);
6212 init_optab (popcount_optab, POPCOUNT);
6213 init_optab (parity_optab, PARITY);
6214 init_optab (sqrt_optab, SQRT);
6215 init_optab (floor_optab, UNKNOWN);
6216 init_optab (ceil_optab, UNKNOWN);
6217 init_optab (round_optab, UNKNOWN);
6218 init_optab (btrunc_optab, UNKNOWN);
6219 init_optab (nearbyint_optab, UNKNOWN);
6220 init_optab (rint_optab, UNKNOWN);
6221 init_optab (sincos_optab, UNKNOWN);
6222 init_optab (sin_optab, UNKNOWN);
6223 init_optab (asin_optab, UNKNOWN);
6224 init_optab (cos_optab, UNKNOWN);
6225 init_optab (acos_optab, UNKNOWN);
6226 init_optab (exp_optab, UNKNOWN);
6227 init_optab (exp10_optab, UNKNOWN);
6228 init_optab (exp2_optab, UNKNOWN);
6229 init_optab (expm1_optab, UNKNOWN);
6230 init_optab (ldexp_optab, UNKNOWN);
6231 init_optab (scalb_optab, UNKNOWN);
6232 init_optab (significand_optab, UNKNOWN);
6233 init_optab (logb_optab, UNKNOWN);
6234 init_optab (ilogb_optab, UNKNOWN);
6235 init_optab (log_optab, UNKNOWN);
6236 init_optab (log10_optab, UNKNOWN);
6237 init_optab (log2_optab, UNKNOWN);
6238 init_optab (log1p_optab, UNKNOWN);
6239 init_optab (tan_optab, UNKNOWN);
6240 init_optab (atan_optab, UNKNOWN);
6241 init_optab (copysign_optab, UNKNOWN);
6242 init_optab (signbit_optab, UNKNOWN);
6244 init_optab (isinf_optab, UNKNOWN);
6246 init_optab (strlen_optab, UNKNOWN);
6247 init_optab (push_optab, UNKNOWN);
6249 init_optab (reduc_smax_optab, UNKNOWN);
6250 init_optab (reduc_umax_optab, UNKNOWN);
6251 init_optab (reduc_smin_optab, UNKNOWN);
6252 init_optab (reduc_umin_optab, UNKNOWN);
6253 init_optab (reduc_splus_optab, UNKNOWN);
6254 init_optab (reduc_uplus_optab, UNKNOWN);
6256 init_optab (ssum_widen_optab, UNKNOWN);
6257 init_optab (usum_widen_optab, UNKNOWN);
6258 init_optab (sdot_prod_optab, UNKNOWN);
6259 init_optab (udot_prod_optab, UNKNOWN);
6261 init_optab (vec_extract_optab, UNKNOWN);
6262 init_optab (vec_extract_even_optab, UNKNOWN);
6263 init_optab (vec_extract_odd_optab, UNKNOWN);
6264 init_optab (vec_set_optab, UNKNOWN);
6265 init_optab (vec_init_optab, UNKNOWN);
6266 init_optab (vec_shl_optab, UNKNOWN);
6267 init_optab (vec_shr_optab, UNKNOWN);
6268 init_optab (vec_realign_load_optab, UNKNOWN);
6269 init_optab (movmisalign_optab, UNKNOWN);
6270 init_optab (vec_widen_umult_hi_optab, UNKNOWN);
6271 init_optab (vec_widen_umult_lo_optab, UNKNOWN);
6272 init_optab (vec_widen_smult_hi_optab, UNKNOWN);
6273 init_optab (vec_widen_smult_lo_optab, UNKNOWN);
6274 init_optab (vec_widen_ushiftl_hi_optab, UNKNOWN);
6275 init_optab (vec_widen_ushiftl_lo_optab, UNKNOWN);
6276 init_optab (vec_widen_sshiftl_hi_optab, UNKNOWN);
6277 init_optab (vec_widen_sshiftl_lo_optab, UNKNOWN);
6278 init_optab (vec_unpacks_hi_optab, UNKNOWN);
6279 init_optab (vec_unpacks_lo_optab, UNKNOWN);
6280 init_optab (vec_unpacku_hi_optab, UNKNOWN);
6281 init_optab (vec_unpacku_lo_optab, UNKNOWN);
6282 init_optab (vec_unpacks_float_hi_optab, UNKNOWN);
6283 init_optab (vec_unpacks_float_lo_optab, UNKNOWN);
6284 init_optab (vec_unpacku_float_hi_optab, UNKNOWN);
6285 init_optab (vec_unpacku_float_lo_optab, UNKNOWN);
6286 init_optab (vec_pack_trunc_optab, UNKNOWN);
6287 init_optab (vec_pack_usat_optab, UNKNOWN);
6288 init_optab (vec_pack_ssat_optab, UNKNOWN);
6289 init_optab (vec_pack_ufix_trunc_optab, UNKNOWN);
6290 init_optab (vec_pack_sfix_trunc_optab, UNKNOWN);
6292 init_optab (powi_optab, UNKNOWN);
6295 init_convert_optab (sext_optab, SIGN_EXTEND);
6296 init_convert_optab (zext_optab, ZERO_EXTEND);
6297 init_convert_optab (trunc_optab, TRUNCATE);
6298 init_convert_optab (sfix_optab, FIX);
6299 init_convert_optab (ufix_optab, UNSIGNED_FIX);
6300 init_convert_optab (sfixtrunc_optab, UNKNOWN);
6301 init_convert_optab (ufixtrunc_optab, UNKNOWN);
6302 init_convert_optab (sfloat_optab, FLOAT);
6303 init_convert_optab (ufloat_optab, UNSIGNED_FLOAT);
6304 init_convert_optab (lrint_optab, UNKNOWN);
6305 init_convert_optab (lround_optab, UNKNOWN);
6306 init_convert_optab (lfloor_optab, UNKNOWN);
6307 init_convert_optab (lceil_optab, UNKNOWN);
6309 init_convert_optab (fract_optab, FRACT_CONVERT);
6310 init_convert_optab (fractuns_optab, UNSIGNED_FRACT_CONVERT);
6311 init_convert_optab (satfract_optab, SAT_FRACT);
6312 init_convert_optab (satfractuns_optab, UNSIGNED_SAT_FRACT);
6314 /* Fill in the optabs with the insns we support. */
6317 /* Initialize the optabs with the names of the library functions. */
6318 add_optab->libcall_basename = "add";
6319 add_optab->libcall_suffix = '3';
6320 add_optab->libcall_gen = gen_int_fp_fixed_libfunc;
6321 addv_optab->libcall_basename = "add";
6322 addv_optab->libcall_suffix = '3';
6323 addv_optab->libcall_gen = gen_intv_fp_libfunc;
6324 ssadd_optab->libcall_basename = "ssadd";
6325 ssadd_optab->libcall_suffix = '3';
6326 ssadd_optab->libcall_gen = gen_signed_fixed_libfunc;
6327 usadd_optab->libcall_basename = "usadd";
6328 usadd_optab->libcall_suffix = '3';
6329 usadd_optab->libcall_gen = gen_unsigned_fixed_libfunc;
6330 sub_optab->libcall_basename = "sub";
6331 sub_optab->libcall_suffix = '3';
6332 sub_optab->libcall_gen = gen_int_fp_fixed_libfunc;
6333 subv_optab->libcall_basename = "sub";
6334 subv_optab->libcall_suffix = '3';
6335 subv_optab->libcall_gen = gen_intv_fp_libfunc;
6336 sssub_optab->libcall_basename = "sssub";
6337 sssub_optab->libcall_suffix = '3';
6338 sssub_optab->libcall_gen = gen_signed_fixed_libfunc;
6339 ussub_optab->libcall_basename = "ussub";
6340 ussub_optab->libcall_suffix = '3';
6341 ussub_optab->libcall_gen = gen_unsigned_fixed_libfunc;
6342 smul_optab->libcall_basename = "mul";
6343 smul_optab->libcall_suffix = '3';
6344 smul_optab->libcall_gen = gen_int_fp_fixed_libfunc;
6345 smulv_optab->libcall_basename = "mul";
6346 smulv_optab->libcall_suffix = '3';
6347 smulv_optab->libcall_gen = gen_intv_fp_libfunc;
6348 ssmul_optab->libcall_basename = "ssmul";
6349 ssmul_optab->libcall_suffix = '3';
6350 ssmul_optab->libcall_gen = gen_signed_fixed_libfunc;
6351 usmul_optab->libcall_basename = "usmul";
6352 usmul_optab->libcall_suffix = '3';
6353 usmul_optab->libcall_gen = gen_unsigned_fixed_libfunc;
6354 sdiv_optab->libcall_basename = "div";
6355 sdiv_optab->libcall_suffix = '3';
6356 sdiv_optab->libcall_gen = gen_int_fp_signed_fixed_libfunc;
6357 sdivv_optab->libcall_basename = "divv";
6358 sdivv_optab->libcall_suffix = '3';
6359 sdivv_optab->libcall_gen = gen_int_libfunc;
6360 ssdiv_optab->libcall_basename = "ssdiv";
6361 ssdiv_optab->libcall_suffix = '3';
6362 ssdiv_optab->libcall_gen = gen_signed_fixed_libfunc;
6363 udiv_optab->libcall_basename = "udiv";
6364 udiv_optab->libcall_suffix = '3';
6365 udiv_optab->libcall_gen = gen_int_unsigned_fixed_libfunc;
6366 usdiv_optab->libcall_basename = "usdiv";
6367 usdiv_optab->libcall_suffix = '3';
6368 usdiv_optab->libcall_gen = gen_unsigned_fixed_libfunc;
6369 sdivmod_optab->libcall_basename = "divmod";
6370 sdivmod_optab->libcall_suffix = '4';
6371 sdivmod_optab->libcall_gen = gen_int_libfunc;
6372 udivmod_optab->libcall_basename = "udivmod";
6373 udivmod_optab->libcall_suffix = '4';
6374 udivmod_optab->libcall_gen = gen_int_libfunc;
6375 smod_optab->libcall_basename = "mod";
6376 smod_optab->libcall_suffix = '3';
6377 smod_optab->libcall_gen = gen_int_libfunc;
6378 umod_optab->libcall_basename = "umod";
6379 umod_optab->libcall_suffix = '3';
6380 umod_optab->libcall_gen = gen_int_libfunc;
6381 ftrunc_optab->libcall_basename = "ftrunc";
6382 ftrunc_optab->libcall_suffix = '2';
6383 ftrunc_optab->libcall_gen = gen_fp_libfunc;
6384 and_optab->libcall_basename = "and";
6385 and_optab->libcall_suffix = '3';
6386 and_optab->libcall_gen = gen_int_libfunc;
6387 ior_optab->libcall_basename = "ior";
6388 ior_optab->libcall_suffix = '3';
6389 ior_optab->libcall_gen = gen_int_libfunc;
6390 xor_optab->libcall_basename = "xor";
6391 xor_optab->libcall_suffix = '3';
6392 xor_optab->libcall_gen = gen_int_libfunc;
6393 ashl_optab->libcall_basename = "ashl";
6394 ashl_optab->libcall_suffix = '3';
6395 ashl_optab->libcall_gen = gen_int_fixed_libfunc;
6396 ssashl_optab->libcall_basename = "ssashl";
6397 ssashl_optab->libcall_suffix = '3';
6398 ssashl_optab->libcall_gen = gen_signed_fixed_libfunc;
6399 usashl_optab->libcall_basename = "usashl";
6400 usashl_optab->libcall_suffix = '3';
6401 usashl_optab->libcall_gen = gen_unsigned_fixed_libfunc;
6402 ashr_optab->libcall_basename = "ashr";
6403 ashr_optab->libcall_suffix = '3';
6404 ashr_optab->libcall_gen = gen_int_signed_fixed_libfunc;
6405 lshr_optab->libcall_basename = "lshr";
6406 lshr_optab->libcall_suffix = '3';
6407 lshr_optab->libcall_gen = gen_int_unsigned_fixed_libfunc;
6408 smin_optab->libcall_basename = "min";
6409 smin_optab->libcall_suffix = '3';
6410 smin_optab->libcall_gen = gen_int_fp_libfunc;
6411 smax_optab->libcall_basename = "max";
6412 smax_optab->libcall_suffix = '3';
6413 smax_optab->libcall_gen = gen_int_fp_libfunc;
6414 umin_optab->libcall_basename = "umin";
6415 umin_optab->libcall_suffix = '3';
6416 umin_optab->libcall_gen = gen_int_libfunc;
6417 umax_optab->libcall_basename = "umax";
6418 umax_optab->libcall_suffix = '3';
6419 umax_optab->libcall_gen = gen_int_libfunc;
6420 neg_optab->libcall_basename = "neg";
6421 neg_optab->libcall_suffix = '2';
6422 neg_optab->libcall_gen = gen_int_fp_fixed_libfunc;
6423 ssneg_optab->libcall_basename = "ssneg";
6424 ssneg_optab->libcall_suffix = '2';
6425 ssneg_optab->libcall_gen = gen_signed_fixed_libfunc;
6426 usneg_optab->libcall_basename = "usneg";
6427 usneg_optab->libcall_suffix = '2';
6428 usneg_optab->libcall_gen = gen_unsigned_fixed_libfunc;
6429 negv_optab->libcall_basename = "neg";
6430 negv_optab->libcall_suffix = '2';
6431 negv_optab->libcall_gen = gen_intv_fp_libfunc;
6432 one_cmpl_optab->libcall_basename = "one_cmpl";
6433 one_cmpl_optab->libcall_suffix = '2';
6434 one_cmpl_optab->libcall_gen = gen_int_libfunc;
6435 ffs_optab->libcall_basename = "ffs";
6436 ffs_optab->libcall_suffix = '2';
6437 ffs_optab->libcall_gen = gen_int_libfunc;
6438 clz_optab->libcall_basename = "clz";
6439 clz_optab->libcall_suffix = '2';
6440 clz_optab->libcall_gen = gen_int_libfunc;
6441 ctz_optab->libcall_basename = "ctz";
6442 ctz_optab->libcall_suffix = '2';
6443 ctz_optab->libcall_gen = gen_int_libfunc;
6444 clrsb_optab->libcall_basename = "clrsb";
6445 clrsb_optab->libcall_suffix = '2';
6446 clrsb_optab->libcall_gen = gen_int_libfunc;
6447 popcount_optab->libcall_basename = "popcount";
6448 popcount_optab->libcall_suffix = '2';
6449 popcount_optab->libcall_gen = gen_int_libfunc;
6450 parity_optab->libcall_basename = "parity";
6451 parity_optab->libcall_suffix = '2';
6452 parity_optab->libcall_gen = gen_int_libfunc;
6454 /* Comparison libcalls for integers MUST come in pairs,
6456 cmp_optab->libcall_basename = "cmp";
6457 cmp_optab->libcall_suffix = '2';
6458 cmp_optab->libcall_gen = gen_int_fp_fixed_libfunc;
6459 ucmp_optab->libcall_basename = "ucmp";
6460 ucmp_optab->libcall_suffix = '2';
6461 ucmp_optab->libcall_gen = gen_int_libfunc;
6463 /* EQ etc are floating point only. */
6464 eq_optab->libcall_basename = "eq";
6465 eq_optab->libcall_suffix = '2';
6466 eq_optab->libcall_gen = gen_fp_libfunc;
6467 ne_optab->libcall_basename = "ne";
6468 ne_optab->libcall_suffix = '2';
6469 ne_optab->libcall_gen = gen_fp_libfunc;
6470 gt_optab->libcall_basename = "gt";
6471 gt_optab->libcall_suffix = '2';
6472 gt_optab->libcall_gen = gen_fp_libfunc;
6473 ge_optab->libcall_basename = "ge";
6474 ge_optab->libcall_suffix = '2';
6475 ge_optab->libcall_gen = gen_fp_libfunc;
6476 lt_optab->libcall_basename = "lt";
6477 lt_optab->libcall_suffix = '2';
6478 lt_optab->libcall_gen = gen_fp_libfunc;
6479 le_optab->libcall_basename = "le";
6480 le_optab->libcall_suffix = '2';
6481 le_optab->libcall_gen = gen_fp_libfunc;
6482 unord_optab->libcall_basename = "unord";
6483 unord_optab->libcall_suffix = '2';
6484 unord_optab->libcall_gen = gen_fp_libfunc;
6486 powi_optab->libcall_basename = "powi";
6487 powi_optab->libcall_suffix = '2';
6488 powi_optab->libcall_gen = gen_fp_libfunc;
6491 sfloat_optab->libcall_basename = "float";
6492 sfloat_optab->libcall_gen = gen_int_to_fp_conv_libfunc;
6493 ufloat_optab->libcall_gen = gen_ufloat_conv_libfunc;
6494 sfix_optab->libcall_basename = "fix";
6495 sfix_optab->libcall_gen = gen_fp_to_int_conv_libfunc;
6496 ufix_optab->libcall_basename = "fixuns";
6497 ufix_optab->libcall_gen = gen_fp_to_int_conv_libfunc;
6498 lrint_optab->libcall_basename = "lrint";
6499 lrint_optab->libcall_gen = gen_int_to_fp_nondecimal_conv_libfunc;
6500 lround_optab->libcall_basename = "lround";
6501 lround_optab->libcall_gen = gen_int_to_fp_nondecimal_conv_libfunc;
6502 lfloor_optab->libcall_basename = "lfloor";
6503 lfloor_optab->libcall_gen = gen_int_to_fp_nondecimal_conv_libfunc;
6504 lceil_optab->libcall_basename = "lceil";
6505 lceil_optab->libcall_gen = gen_int_to_fp_nondecimal_conv_libfunc;
6507 /* trunc_optab is also used for FLOAT_EXTEND. */
6508 sext_optab->libcall_basename = "extend";
6509 sext_optab->libcall_gen = gen_extend_conv_libfunc;
6510 trunc_optab->libcall_basename = "trunc";
6511 trunc_optab->libcall_gen = gen_trunc_conv_libfunc;
6513 /* Conversions for fixed-point modes and other modes. */
6514 fract_optab->libcall_basename = "fract";
6515 fract_optab->libcall_gen = gen_fract_conv_libfunc;
6516 satfract_optab->libcall_basename = "satfract";
6517 satfract_optab->libcall_gen = gen_satfract_conv_libfunc;
6518 fractuns_optab->libcall_basename = "fractuns";
6519 fractuns_optab->libcall_gen = gen_fractuns_conv_libfunc;
6520 satfractuns_optab->libcall_basename = "satfractuns";
6521 satfractuns_optab->libcall_gen = gen_satfractuns_conv_libfunc;
6523 /* The ffs function operates on `int'. Fall back on it if we do not
6524 have a libgcc2 function for that width. */
6525 if (INT_TYPE_SIZE < BITS_PER_WORD)
6526 set_optab_libfunc (ffs_optab, mode_for_size (INT_TYPE_SIZE, MODE_INT, 0),
6529 /* Explicitly initialize the bswap libfuncs since we need them to be
6530 valid for things other than word_mode. */
6531 if (targetm.libfunc_gnu_prefix)
6533 set_optab_libfunc (bswap_optab, SImode, "__gnu_bswapsi2");
6534 set_optab_libfunc (bswap_optab, DImode, "__gnu_bswapdi2");
6538 set_optab_libfunc (bswap_optab, SImode, "__bswapsi2");
6539 set_optab_libfunc (bswap_optab, DImode, "__bswapdi2");
6542 /* Use cabs for double complex abs, since systems generally have cabs.
6543 Don't define any libcall for float complex, so that cabs will be used. */
6544 if (complex_double_type_node)
6545 set_optab_libfunc (abs_optab, TYPE_MODE (complex_double_type_node), "cabs");
6547 abort_libfunc = init_one_libfunc ("abort");
6548 memcpy_libfunc = init_one_libfunc ("memcpy");
6549 memmove_libfunc = init_one_libfunc ("memmove");
6550 memcmp_libfunc = init_one_libfunc ("memcmp");
6551 memset_libfunc = init_one_libfunc ("memset");
6552 setbits_libfunc = init_one_libfunc ("__setbits");
6554 #ifndef DONT_USE_BUILTIN_SETJMP
6555 setjmp_libfunc = init_one_libfunc ("__builtin_setjmp");
6556 longjmp_libfunc = init_one_libfunc ("__builtin_longjmp");
6558 setjmp_libfunc = init_one_libfunc ("setjmp");
6559 longjmp_libfunc = init_one_libfunc ("longjmp");
6561 unwind_sjlj_register_libfunc = init_one_libfunc ("_Unwind_SjLj_Register");
6562 unwind_sjlj_unregister_libfunc
6563 = init_one_libfunc ("_Unwind_SjLj_Unregister");
6565 /* For function entry/exit instrumentation. */
6566 profile_function_entry_libfunc
6567 = init_one_libfunc ("__cyg_profile_func_enter");
6568 profile_function_exit_libfunc
6569 = init_one_libfunc ("__cyg_profile_func_exit");
6571 gcov_flush_libfunc = init_one_libfunc ("__gcov_flush");
6573 /* Allow the target to add more libcalls or rename some, etc. */
6574 targetm.init_libfuncs ();
6577 /* A helper function for init_sync_libfuncs. Using the basename BASE,
6578 install libfuncs into TAB for BASE_N for 1 <= N <= MAX. */
6581 init_sync_libfuncs_1 (optab tab, const char *base, int max)
6583 enum machine_mode mode;
6585 size_t len = strlen (base);
6588 gcc_assert (max <= 8);
6589 gcc_assert (len + 3 < sizeof (buf));
6591 memcpy (buf, base, len);
6594 buf[len + 2] = '\0';
6597 for (i = 1; i <= max; i *= 2)
6599 buf[len + 1] = '0' + i;
6600 set_optab_libfunc (tab, mode, buf);
6601 mode = GET_MODE_2XWIDER_MODE (mode);
6606 init_sync_libfuncs (int max)
6608 init_sync_libfuncs_1 (sync_compare_and_swap_optab,
6609 "__sync_val_compare_and_swap", max);
6610 init_sync_libfuncs_1 (sync_lock_test_and_set_optab,
6611 "__sync_lock_test_and_set", max);
6613 init_sync_libfuncs_1 (sync_old_add_optab, "__sync_fetch_and_add", max);
6614 init_sync_libfuncs_1 (sync_old_sub_optab, "__sync_fetch_and_sub", max);
6615 init_sync_libfuncs_1 (sync_old_ior_optab, "__sync_fetch_and_or", max);
6616 init_sync_libfuncs_1 (sync_old_and_optab, "__sync_fetch_and_and", max);
6617 init_sync_libfuncs_1 (sync_old_xor_optab, "__sync_fetch_and_xor", max);
6618 init_sync_libfuncs_1 (sync_old_nand_optab, "__sync_fetch_and_nand", max);
6620 init_sync_libfuncs_1 (sync_new_add_optab, "__sync_add_and_fetch", max);
6621 init_sync_libfuncs_1 (sync_new_sub_optab, "__sync_sub_and_fetch", max);
6622 init_sync_libfuncs_1 (sync_new_ior_optab, "__sync_or_and_fetch", max);
6623 init_sync_libfuncs_1 (sync_new_and_optab, "__sync_and_and_fetch", max);
6624 init_sync_libfuncs_1 (sync_new_xor_optab, "__sync_xor_and_fetch", max);
6625 init_sync_libfuncs_1 (sync_new_nand_optab, "__sync_nand_and_fetch", max);
6628 /* Print information about the current contents of the optabs on
6632 debug_optab_libfuncs (void)
6638 /* Dump the arithmetic optabs. */
6639 for (i = 0; i != (int) OTI_MAX; i++)
6640 for (j = 0; j < NUM_MACHINE_MODES; ++j)
6645 o = &optab_table[i];
6646 l = optab_libfunc (o, (enum machine_mode) j);
6649 gcc_assert (GET_CODE (l) == SYMBOL_REF);
6650 fprintf (stderr, "%s\t%s:\t%s\n",
6651 GET_RTX_NAME (o->code),
6657 /* Dump the conversion optabs. */
6658 for (i = 0; i < (int) COI_MAX; ++i)
6659 for (j = 0; j < NUM_MACHINE_MODES; ++j)
6660 for (k = 0; k < NUM_MACHINE_MODES; ++k)
6665 o = &convert_optab_table[i];
6666 l = convert_optab_libfunc (o, (enum machine_mode) j,
6667 (enum machine_mode) k);
6670 gcc_assert (GET_CODE (l) == SYMBOL_REF);
6671 fprintf (stderr, "%s\t%s\t%s:\t%s\n",
6672 GET_RTX_NAME (o->code),
6681 /* Generate insns to trap with code TCODE if OP1 and OP2 satisfy condition
6682 CODE. Return 0 on failure. */
6685 gen_cond_trap (enum rtx_code code, rtx op1, rtx op2, rtx tcode)
6687 enum machine_mode mode = GET_MODE (op1);
6688 enum insn_code icode;
6692 if (mode == VOIDmode)
6695 icode = optab_handler (ctrap_optab, mode);
6696 if (icode == CODE_FOR_nothing)
6699 /* Some targets only accept a zero trap code. */
6700 if (!insn_operand_matches (icode, 3, tcode))
6703 do_pending_stack_adjust ();
6705 prepare_cmp_insn (op1, op2, code, NULL_RTX, false, OPTAB_DIRECT,
6710 insn = GEN_FCN (icode) (trap_rtx, XEXP (trap_rtx, 0), XEXP (trap_rtx, 1),
6713 /* If that failed, then give up. */
6721 insn = get_insns ();
6726 /* Return rtx code for TCODE. Use UNSIGNEDP to select signed
6727 or unsigned operation code. */
6729 static enum rtx_code
6730 get_rtx_code (enum tree_code tcode, bool unsignedp)
6742 code = unsignedp ? LTU : LT;
6745 code = unsignedp ? LEU : LE;
6748 code = unsignedp ? GTU : GT;
6751 code = unsignedp ? GEU : GE;
6754 case UNORDERED_EXPR:
6785 /* Return comparison rtx for COND. Use UNSIGNEDP to select signed or
6786 unsigned operators. Do not generate compare instruction. */
6789 vector_compare_rtx (tree cond, bool unsignedp, enum insn_code icode)
6791 struct expand_operand ops[2];
6792 enum rtx_code rcode;
6794 rtx rtx_op0, rtx_op1;
6796 /* This is unlikely. While generating VEC_COND_EXPR, auto vectorizer
6797 ensures that condition is a relational operation. */
6798 gcc_assert (COMPARISON_CLASS_P (cond));
6800 rcode = get_rtx_code (TREE_CODE (cond), unsignedp);
6801 t_op0 = TREE_OPERAND (cond, 0);
6802 t_op1 = TREE_OPERAND (cond, 1);
6804 /* Expand operands. */
6805 rtx_op0 = expand_expr (t_op0, NULL_RTX, TYPE_MODE (TREE_TYPE (t_op0)),
6807 rtx_op1 = expand_expr (t_op1, NULL_RTX, TYPE_MODE (TREE_TYPE (t_op1)),
6810 create_input_operand (&ops[0], rtx_op0, GET_MODE (rtx_op0));
6811 create_input_operand (&ops[1], rtx_op1, GET_MODE (rtx_op1));
6812 if (!maybe_legitimize_operands (icode, 4, 2, ops))
6814 return gen_rtx_fmt_ee (rcode, VOIDmode, ops[0].value, ops[1].value);
6817 /* Return true if VEC_PERM_EXPR can be expanded using SIMD extensions
6818 of the CPU. SEL may be NULL, which stands for an unknown constant. */
6821 can_vec_perm_p (enum machine_mode mode, bool variable,
6822 const unsigned char *sel)
6824 enum machine_mode qimode;
6826 /* If the target doesn't implement a vector mode for the vector type,
6827 then no operations are supported. */
6828 if (!VECTOR_MODE_P (mode))
6833 if (direct_optab_handler (vec_perm_const_optab, mode) != CODE_FOR_nothing
6835 || targetm.vectorize.vec_perm_const_ok == NULL
6836 || targetm.vectorize.vec_perm_const_ok (mode, sel)))
6840 if (direct_optab_handler (vec_perm_optab, mode) != CODE_FOR_nothing)
6843 /* We allow fallback to a QI vector mode, and adjust the mask. */
6844 if (GET_MODE_INNER (mode) == QImode)
6846 qimode = mode_for_vector (QImode, GET_MODE_SIZE (mode));
6847 if (!VECTOR_MODE_P (qimode))
6850 /* ??? For completeness, we ought to check the QImode version of
6851 vec_perm_const_optab. But all users of this implicit lowering
6852 feature implement the variable vec_perm_optab. */
6853 if (direct_optab_handler (vec_perm_optab, qimode) == CODE_FOR_nothing)
6856 /* In order to support the lowering of variable permutations,
6857 we need to support shifts and adds. */
6860 if (GET_MODE_UNIT_SIZE (mode) > 2
6861 && optab_handler (ashl_optab, mode) == CODE_FOR_nothing
6862 && optab_handler (vashl_optab, mode) == CODE_FOR_nothing)
6864 if (optab_handler (add_optab, qimode) == CODE_FOR_nothing)
6871 /* Return true if we can implement with VEC_PERM_EXPR for this target.
6872 If PSEL is non-null, return the selector for the permutation. */
6875 can_vec_perm_for_code_p (enum tree_code code, enum machine_mode mode,
6878 bool need_sel_test = false;
6879 enum insn_code icode;
6881 /* If the target doesn't implement a vector mode for the vector type,
6882 then no operations are supported. */
6883 if (!VECTOR_MODE_P (mode))
6886 /* Do as many tests as possible without reqiring the selector. */
6887 icode = direct_optab_handler (vec_perm_optab, mode);
6888 if (icode == CODE_FOR_nothing && GET_MODE_INNER (mode) != QImode)
6890 enum machine_mode qimode
6891 = mode_for_vector (QImode, GET_MODE_SIZE (mode));
6892 if (VECTOR_MODE_P (qimode))
6893 icode = direct_optab_handler (vec_perm_optab, qimode);
6895 if (icode == CODE_FOR_nothing)
6897 icode = direct_optab_handler (vec_perm_const_optab, mode);
6898 if (icode != CODE_FOR_nothing
6899 && targetm.vectorize.vec_perm_const_ok != NULL)
6900 need_sel_test = true;
6902 if (icode == CODE_FOR_nothing)
6905 /* If the selector is required, or if we need to test it, build it. */
6906 if (psel || need_sel_test)
6908 int i, nelt = GET_MODE_NUNITS (mode), alt = 0;
6909 unsigned char *data = XALLOCAVEC (unsigned char, nelt);
6913 case VEC_EXTRACT_ODD_EXPR:
6916 case VEC_EXTRACT_EVEN_EXPR:
6917 for (i = 0; i < nelt; ++i)
6918 data[i] = i * 2 + alt;
6926 && !targetm.vectorize.vec_perm_const_ok (mode, data))
6931 rtvec vec = rtvec_alloc (nelt);
6932 enum machine_mode imode = mode;
6934 for (i = 0; i < nelt; ++i)
6935 RTVEC_ELT (vec, i) = GEN_INT (data[i]);
6937 if (GET_MODE_CLASS (mode) != MODE_VECTOR_INT)
6939 imode = int_mode_for_mode (GET_MODE_INNER (mode));
6940 imode = mode_for_vector (imode, nelt);
6941 gcc_assert (GET_MODE_CLASS (imode) == MODE_VECTOR_INT);
6944 *psel = gen_rtx_CONST_VECTOR (imode, vec);
6951 /* A subroutine of expand_vec_perm for expanding one vec_perm insn. */
6954 expand_vec_perm_1 (enum insn_code icode, rtx target,
6955 rtx v0, rtx v1, rtx sel)
6957 enum machine_mode tmode = GET_MODE (target);
6958 enum machine_mode smode = GET_MODE (sel);
6959 struct expand_operand ops[4];
6961 create_output_operand (&ops[0], target, tmode);
6962 create_input_operand (&ops[3], sel, smode);
6964 /* Make an effort to preserve v0 == v1. The target expander is able to
6965 rely on this to determine if we're permuting a single input operand. */
6966 if (rtx_equal_p (v0, v1))
6968 if (!insn_operand_matches (icode, 1, v0))
6969 v0 = force_reg (tmode, v0);
6970 gcc_checking_assert (insn_operand_matches (icode, 1, v0));
6971 gcc_checking_assert (insn_operand_matches (icode, 2, v0));
6973 create_fixed_operand (&ops[1], v0);
6974 create_fixed_operand (&ops[2], v0);
6978 create_input_operand (&ops[1], v0, tmode);
6979 create_input_operand (&ops[2], v1, tmode);
6982 if (maybe_expand_insn (icode, 4, ops))
6983 return ops[0].value;
6987 /* Generate instructions for vec_perm optab given its mode
6988 and three operands. */
6991 expand_vec_perm (enum machine_mode mode, rtx v0, rtx v1, rtx sel, rtx target)
6993 enum insn_code icode;
6994 enum machine_mode qimode;
6995 unsigned int i, w, e, u;
6996 rtx tmp, sel_qi = NULL;
6999 if (!target || GET_MODE (target) != mode)
7000 target = gen_reg_rtx (mode);
7002 w = GET_MODE_SIZE (mode);
7003 e = GET_MODE_NUNITS (mode);
7004 u = GET_MODE_UNIT_SIZE (mode);
7006 /* Set QIMODE to a different vector mode with byte elements.
7007 If no such mode, or if MODE already has byte elements, use VOIDmode. */
7009 if (GET_MODE_INNER (mode) != QImode)
7011 qimode = mode_for_vector (QImode, w);
7012 if (!VECTOR_MODE_P (qimode))
7016 /* If the input is a constant, expand it specially. */
7017 gcc_assert (GET_MODE_CLASS (GET_MODE (sel)) == MODE_VECTOR_INT);
7018 if (GET_CODE (sel) == CONST_VECTOR)
7020 icode = direct_optab_handler (vec_perm_const_optab, mode);
7021 if (icode != CODE_FOR_nothing)
7023 tmp = expand_vec_perm_1 (icode, target, v0, v1, sel);
7028 /* Fall back to a constant byte-based permutation. */
7029 if (qimode != VOIDmode)
7031 vec = rtvec_alloc (w);
7032 for (i = 0; i < e; ++i)
7034 unsigned int j, this_e;
7036 this_e = INTVAL (CONST_VECTOR_ELT (sel, i));
7037 this_e &= 2 * e - 1;
7040 for (j = 0; j < u; ++j)
7041 RTVEC_ELT (vec, i * u + j) = GEN_INT (this_e + j);
7043 sel_qi = gen_rtx_CONST_VECTOR (qimode, vec);
7045 icode = direct_optab_handler (vec_perm_const_optab, qimode);
7046 if (icode != CODE_FOR_nothing)
7048 tmp = expand_vec_perm_1 (icode, gen_lowpart (qimode, target),
7049 gen_lowpart (qimode, v0),
7050 gen_lowpart (qimode, v1), sel_qi);
7052 return gen_lowpart (mode, tmp);
7057 /* Otherwise expand as a fully variable permuation. */
7058 icode = direct_optab_handler (vec_perm_optab, mode);
7059 if (icode != CODE_FOR_nothing)
7061 tmp = expand_vec_perm_1 (icode, target, v0, v1, sel);
7066 /* As a special case to aid several targets, lower the element-based
7067 permutation to a byte-based permutation and try again. */
7068 if (qimode == VOIDmode)
7070 icode = direct_optab_handler (vec_perm_optab, qimode);
7071 if (icode == CODE_FOR_nothing)
7076 /* Multiply each element by its byte size. */
7077 enum machine_mode selmode = GET_MODE (sel);
7079 sel = expand_simple_binop (selmode, PLUS, sel, sel,
7080 sel, 0, OPTAB_DIRECT);
7082 sel = expand_simple_binop (selmode, ASHIFT, sel,
7083 GEN_INT (exact_log2 (u)),
7084 sel, 0, OPTAB_DIRECT);
7085 gcc_assert (sel != NULL);
7087 /* Broadcast the low byte each element into each of its bytes. */
7088 vec = rtvec_alloc (w);
7089 for (i = 0; i < w; ++i)
7091 int this_e = i / u * u;
7092 if (BYTES_BIG_ENDIAN)
7094 RTVEC_ELT (vec, i) = GEN_INT (this_e);
7096 tmp = gen_rtx_CONST_VECTOR (qimode, vec);
7097 sel = gen_lowpart (qimode, sel);
7098 sel = expand_vec_perm (qimode, sel, sel, tmp, NULL);
7099 gcc_assert (sel != NULL);
7101 /* Add the byte offset to each byte element. */
7102 /* Note that the definition of the indicies here is memory ordering,
7103 so there should be no difference between big and little endian. */
7104 vec = rtvec_alloc (w);
7105 for (i = 0; i < w; ++i)
7106 RTVEC_ELT (vec, i) = GEN_INT (i % u);
7107 tmp = gen_rtx_CONST_VECTOR (qimode, vec);
7108 sel_qi = expand_simple_binop (qimode, PLUS, sel, tmp,
7109 sel, 0, OPTAB_DIRECT);
7110 gcc_assert (sel_qi != NULL);
7113 tmp = expand_vec_perm_1 (icode, gen_lowpart (qimode, target),
7114 gen_lowpart (qimode, v0),
7115 gen_lowpart (qimode, v1), sel_qi);
7117 tmp = gen_lowpart (mode, tmp);
7121 /* Return insn code for a conditional operator with a comparison in
7122 mode CMODE, unsigned if UNS is true, resulting in a value of mode VMODE. */
7124 static inline enum insn_code
7125 get_vcond_icode (enum machine_mode vmode, enum machine_mode cmode, bool uns)
7127 enum insn_code icode = CODE_FOR_nothing;
7129 icode = convert_optab_handler (vcondu_optab, vmode, cmode);
7131 icode = convert_optab_handler (vcond_optab, vmode, cmode);
7135 /* Return TRUE iff, appropriate vector insns are available
7136 for vector cond expr with vector type VALUE_TYPE and a comparison
7137 with operand vector types in CMP_OP_TYPE. */
7140 expand_vec_cond_expr_p (tree value_type, tree cmp_op_type)
7142 enum machine_mode value_mode = TYPE_MODE (value_type);
7143 enum machine_mode cmp_op_mode = TYPE_MODE (cmp_op_type);
7144 if (GET_MODE_SIZE (value_mode) != GET_MODE_SIZE (cmp_op_mode)
7145 || GET_MODE_NUNITS (value_mode) != GET_MODE_NUNITS (cmp_op_mode)
7146 || get_vcond_icode (TYPE_MODE (value_type), TYPE_MODE (cmp_op_type),
7147 TYPE_UNSIGNED (cmp_op_type)) == CODE_FOR_nothing)
7152 /* Generate insns for a VEC_COND_EXPR, given its TYPE and its
7156 expand_vec_cond_expr (tree vec_cond_type, tree op0, tree op1, tree op2,
7159 struct expand_operand ops[6];
7160 enum insn_code icode;
7161 rtx comparison, rtx_op1, rtx_op2;
7162 enum machine_mode mode = TYPE_MODE (vec_cond_type);
7163 enum machine_mode cmp_op_mode;
7166 gcc_assert (COMPARISON_CLASS_P (op0));
7168 unsignedp = TYPE_UNSIGNED (TREE_TYPE (TREE_OPERAND (op0, 0)));
7169 cmp_op_mode = TYPE_MODE (TREE_TYPE (TREE_OPERAND (op0, 0)));
7171 gcc_assert (GET_MODE_SIZE (mode) == GET_MODE_SIZE (cmp_op_mode)
7172 && GET_MODE_NUNITS (mode) == GET_MODE_NUNITS (cmp_op_mode));
7174 icode = get_vcond_icode (mode, cmp_op_mode, unsignedp);
7175 if (icode == CODE_FOR_nothing)
7178 comparison = vector_compare_rtx (op0, unsignedp, icode);
7179 rtx_op1 = expand_normal (op1);
7180 rtx_op2 = expand_normal (op2);
7182 create_output_operand (&ops[0], target, mode);
7183 create_input_operand (&ops[1], rtx_op1, mode);
7184 create_input_operand (&ops[2], rtx_op2, mode);
7185 create_fixed_operand (&ops[3], comparison);
7186 create_fixed_operand (&ops[4], XEXP (comparison, 0));
7187 create_fixed_operand (&ops[5], XEXP (comparison, 1));
7188 expand_insn (icode, 6, ops);
7189 return ops[0].value;
7193 /* Return true if there is a compare_and_swap pattern. */
7196 can_compare_and_swap_p (enum machine_mode mode, bool allow_libcall)
7198 enum insn_code icode;
7200 /* Check for __atomic_compare_and_swap. */
7201 icode = direct_optab_handler (atomic_compare_and_swap_optab, mode);
7202 if (icode != CODE_FOR_nothing)
7205 /* Check for __sync_compare_and_swap. */
7206 icode = optab_handler (sync_compare_and_swap_optab, mode);
7207 if (icode != CODE_FOR_nothing)
7209 if (allow_libcall && optab_libfunc (sync_compare_and_swap_optab, mode))
7212 /* No inline compare and swap. */
7216 /* Return true if an atomic exchange can be performed. */
7219 can_atomic_exchange_p (enum machine_mode mode, bool allow_libcall)
7221 enum insn_code icode;
7223 /* Check for __atomic_exchange. */
7224 icode = direct_optab_handler (atomic_exchange_optab, mode);
7225 if (icode != CODE_FOR_nothing)
7228 /* Don't check __sync_test_and_set, as on some platforms that
7229 has reduced functionality. Targets that really do support
7230 a proper exchange should simply be updated to the __atomics. */
7232 return can_compare_and_swap_p (mode, allow_libcall);
7236 /* Helper function to find the MODE_CC set in a sync_compare_and_swap
7240 find_cc_set (rtx x, const_rtx pat, void *data)
7242 if (REG_P (x) && GET_MODE_CLASS (GET_MODE (x)) == MODE_CC
7243 && GET_CODE (pat) == SET)
7245 rtx *p_cc_reg = (rtx *) data;
7246 gcc_assert (!*p_cc_reg);
7251 /* This is a helper function for the other atomic operations. This function
7252 emits a loop that contains SEQ that iterates until a compare-and-swap
7253 operation at the end succeeds. MEM is the memory to be modified. SEQ is
7254 a set of instructions that takes a value from OLD_REG as an input and
7255 produces a value in NEW_REG as an output. Before SEQ, OLD_REG will be
7256 set to the current contents of MEM. After SEQ, a compare-and-swap will
7257 attempt to update MEM with NEW_REG. The function returns true when the
7258 loop was generated successfully. */
7261 expand_compare_and_swap_loop (rtx mem, rtx old_reg, rtx new_reg, rtx seq)
7263 enum machine_mode mode = GET_MODE (mem);
7264 rtx label, cmp_reg, success, oldval;
7266 /* The loop we want to generate looks like
7272 (success, cmp_reg) = compare-and-swap(mem, old_reg, new_reg)
7276 Note that we only do the plain load from memory once. Subsequent
7277 iterations use the value loaded by the compare-and-swap pattern. */
7279 label = gen_label_rtx ();
7280 cmp_reg = gen_reg_rtx (mode);
7282 emit_move_insn (cmp_reg, mem);
7284 emit_move_insn (old_reg, cmp_reg);
7290 if (!expand_atomic_compare_and_swap (&success, &oldval, mem, old_reg,
7291 new_reg, false, MEMMODEL_SEQ_CST,
7295 if (oldval != cmp_reg)
7296 emit_move_insn (cmp_reg, oldval);
7298 /* ??? Mark this jump predicted not taken? */
7299 emit_cmp_and_jump_insns (success, const0_rtx, EQ, const0_rtx,
7300 GET_MODE (success), 1, label);
7305 /* This function tries to emit an atomic_exchange intruction. VAL is written
7306 to *MEM using memory model MODEL. The previous contents of *MEM are returned,
7307 using TARGET if possible. */
7310 maybe_emit_atomic_exchange (rtx target, rtx mem, rtx val, enum memmodel model)
7312 enum machine_mode mode = GET_MODE (mem);
7313 enum insn_code icode;
7315 /* If the target supports the exchange directly, great. */
7316 icode = direct_optab_handler (atomic_exchange_optab, mode);
7317 if (icode != CODE_FOR_nothing)
7319 struct expand_operand ops[4];
7321 create_output_operand (&ops[0], target, mode);
7322 create_fixed_operand (&ops[1], mem);
7323 /* VAL may have been promoted to a wider mode. Shrink it if so. */
7324 create_convert_operand_to (&ops[2], val, mode, true);
7325 create_integer_operand (&ops[3], model);
7326 if (maybe_expand_insn (icode, 4, ops))
7327 return ops[0].value;
7333 /* This function tries to implement an atomic exchange operation using
7334 __sync_lock_test_and_set. VAL is written to *MEM using memory model MODEL.
7335 The previous contents of *MEM are returned, using TARGET if possible.
7336 Since this instructionn is an acquire barrier only, stronger memory
7337 models may require additional barriers to be emitted. */
7340 maybe_emit_sync_lock_test_and_set (rtx target, rtx mem, rtx val,
7341 enum memmodel model)
7343 enum machine_mode mode = GET_MODE (mem);
7344 enum insn_code icode;
7345 rtx last_insn = get_last_insn ();
7347 icode = optab_handler (sync_lock_test_and_set_optab, mode);
7349 /* Legacy sync_lock_test_and_set is an acquire barrier. If the pattern
7350 exists, and the memory model is stronger than acquire, add a release
7351 barrier before the instruction. */
7353 if (model == MEMMODEL_SEQ_CST
7354 || model == MEMMODEL_RELEASE
7355 || model == MEMMODEL_ACQ_REL)
7356 expand_mem_thread_fence (model);
7358 if (icode != CODE_FOR_nothing)
7360 struct expand_operand ops[3];
7361 create_output_operand (&ops[0], target, mode);
7362 create_fixed_operand (&ops[1], mem);
7363 /* VAL may have been promoted to a wider mode. Shrink it if so. */
7364 create_convert_operand_to (&ops[2], val, mode, true);
7365 if (maybe_expand_insn (icode, 3, ops))
7366 return ops[0].value;
7369 /* If an external test-and-set libcall is provided, use that instead of
7370 any external compare-and-swap that we might get from the compare-and-
7371 swap-loop expansion later. */
7372 if (!can_compare_and_swap_p (mode, false))
7374 rtx libfunc = optab_libfunc (sync_lock_test_and_set_optab, mode);
7375 if (libfunc != NULL)
7379 addr = convert_memory_address (ptr_mode, XEXP (mem, 0));
7380 return emit_library_call_value (libfunc, NULL_RTX, LCT_NORMAL,
7381 mode, 2, addr, ptr_mode,
7386 /* If the test_and_set can't be emitted, eliminate any barrier that might
7387 have been emitted. */
7388 delete_insns_since (last_insn);
7392 /* This function tries to implement an atomic exchange operation using a
7393 compare_and_swap loop. VAL is written to *MEM. The previous contents of
7394 *MEM are returned, using TARGET if possible. No memory model is required
7395 since a compare_and_swap loop is seq-cst. */
7398 maybe_emit_compare_and_swap_exchange_loop (rtx target, rtx mem, rtx val)
7400 enum machine_mode mode = GET_MODE (mem);
7402 if (can_compare_and_swap_p (mode, true))
7404 if (!target || !register_operand (target, mode))
7405 target = gen_reg_rtx (mode);
7406 if (GET_MODE (val) != VOIDmode && GET_MODE (val) != mode)
7407 val = convert_modes (mode, GET_MODE (val), val, 1);
7408 if (expand_compare_and_swap_loop (mem, target, val, NULL_RTX))
7415 #ifndef HAVE_atomic_test_and_set
7416 #define HAVE_atomic_test_and_set 0
7417 #define gen_atomic_test_and_set(x,y,z) (gcc_unreachable (), NULL_RTX)
7420 /* This function expands the legacy _sync_lock test_and_set operation which is
7421 generally an atomic exchange. Some limited targets only allow the
7422 constant 1 to be stored. This is an ACQUIRE operation.
7424 TARGET is an optional place to stick the return value.
7425 MEM is where VAL is stored. */
7428 expand_sync_lock_test_and_set (rtx target, rtx mem, rtx val)
7432 /* Try an atomic_exchange first. */
7433 ret = maybe_emit_atomic_exchange (target, mem, val, MEMMODEL_ACQUIRE);
7436 ret = maybe_emit_sync_lock_test_and_set (target, mem, val,
7439 ret = maybe_emit_compare_and_swap_exchange_loop (target, mem, val);
7441 /* If there are no other options, try atomic_test_and_set if the value
7442 being stored is 1. */
7443 if (!ret && val == const1_rtx && HAVE_atomic_test_and_set)
7445 ret = gen_atomic_test_and_set (target, mem, GEN_INT (MEMMODEL_ACQUIRE));
7452 /* This function expands the atomic test_and_set operation:
7453 atomically store a boolean TRUE into MEM and return the previous value.
7455 MEMMODEL is the memory model variant to use.
7456 TARGET is an optional place to stick the return value. */
7459 expand_atomic_test_and_set (rtx target, rtx mem, enum memmodel model)
7461 enum machine_mode mode = GET_MODE (mem);
7464 if (target == NULL_RTX)
7465 target = gen_reg_rtx (mode);
7467 if (HAVE_atomic_test_and_set)
7469 ret = gen_atomic_test_and_set (target, mem, GEN_INT (MEMMODEL_ACQUIRE));
7474 /* If there is no test and set, try exchange, then a compare_and_swap loop,
7475 then __sync_test_and_set. */
7476 ret = maybe_emit_atomic_exchange (target, mem, const1_rtx, model);
7479 ret = maybe_emit_compare_and_swap_exchange_loop (target, mem, const1_rtx);
7482 ret = maybe_emit_sync_lock_test_and_set (target, mem, const1_rtx, model);
7487 /* Failing all else, assume a single threaded environment and simply perform
7489 emit_move_insn (target, mem);
7490 emit_move_insn (mem, const1_rtx);
7494 /* This function expands the atomic exchange operation:
7495 atomically store VAL in MEM and return the previous value in MEM.
7497 MEMMODEL is the memory model variant to use.
7498 TARGET is an optional place to stick the return value. */
7501 expand_atomic_exchange (rtx target, rtx mem, rtx val, enum memmodel model)
7505 ret = maybe_emit_atomic_exchange (target, mem, val, model);
7507 /* Next try a compare-and-swap loop for the exchange. */
7509 ret = maybe_emit_compare_and_swap_exchange_loop (target, mem, val);
7514 /* This function expands the atomic compare exchange operation:
7516 *PTARGET_BOOL is an optional place to store the boolean success/failure.
7517 *PTARGET_OVAL is an optional place to store the old value from memory.
7518 Both target parameters may be NULL to indicate that we do not care about
7519 that return value. Both target parameters are updated on success to
7520 the actual location of the corresponding result.
7522 MEMMODEL is the memory model variant to use.
7524 The return value of the function is true for success. */
7527 expand_atomic_compare_and_swap (rtx *ptarget_bool, rtx *ptarget_oval,
7528 rtx mem, rtx expected, rtx desired,
7529 bool is_weak, enum memmodel succ_model,
7530 enum memmodel fail_model)
7532 enum machine_mode mode = GET_MODE (mem);
7533 struct expand_operand ops[8];
7534 enum insn_code icode;
7535 rtx target_oval, target_bool = NULL_RTX;
7538 /* Load expected into a register for the compare and swap. */
7539 if (MEM_P (expected))
7540 expected = copy_to_reg (expected);
7542 /* Make sure we always have some place to put the return oldval.
7543 Further, make sure that place is distinct from the input expected,
7544 just in case we need that path down below. */
7545 if (ptarget_oval == NULL
7546 || (target_oval = *ptarget_oval) == NULL
7547 || reg_overlap_mentioned_p (expected, target_oval))
7548 target_oval = gen_reg_rtx (mode);
7550 icode = direct_optab_handler (atomic_compare_and_swap_optab, mode);
7551 if (icode != CODE_FOR_nothing)
7553 enum machine_mode bool_mode = insn_data[icode].operand[0].mode;
7555 /* Make sure we always have a place for the bool operand. */
7556 if (ptarget_bool == NULL
7557 || (target_bool = *ptarget_bool) == NULL
7558 || GET_MODE (target_bool) != bool_mode)
7559 target_bool = gen_reg_rtx (bool_mode);
7561 /* Emit the compare_and_swap. */
7562 create_output_operand (&ops[0], target_bool, bool_mode);
7563 create_output_operand (&ops[1], target_oval, mode);
7564 create_fixed_operand (&ops[2], mem);
7565 create_convert_operand_to (&ops[3], expected, mode, true);
7566 create_convert_operand_to (&ops[4], desired, mode, true);
7567 create_integer_operand (&ops[5], is_weak);
7568 create_integer_operand (&ops[6], succ_model);
7569 create_integer_operand (&ops[7], fail_model);
7570 expand_insn (icode, 8, ops);
7572 /* Return success/failure. */
7573 target_bool = ops[0].value;
7574 target_oval = ops[1].value;
7578 /* Otherwise fall back to the original __sync_val_compare_and_swap
7579 which is always seq-cst. */
7580 icode = optab_handler (sync_compare_and_swap_optab, mode);
7581 if (icode != CODE_FOR_nothing)
7585 create_output_operand (&ops[0], target_oval, mode);
7586 create_fixed_operand (&ops[1], mem);
7587 create_convert_operand_to (&ops[2], expected, mode, true);
7588 create_convert_operand_to (&ops[3], desired, mode, true);
7589 if (!maybe_expand_insn (icode, 4, ops))
7592 target_oval = ops[0].value;
7594 /* If the caller isn't interested in the boolean return value,
7595 skip the computation of it. */
7596 if (ptarget_bool == NULL)
7599 /* Otherwise, work out if the compare-and-swap succeeded. */
7601 if (have_insn_for (COMPARE, CCmode))
7602 note_stores (PATTERN (get_last_insn ()), find_cc_set, &cc_reg);
7605 target_bool = emit_store_flag_force (target_bool, EQ, cc_reg,
7606 const0_rtx, VOIDmode, 0, 1);
7609 goto success_bool_from_val;
7612 /* Also check for library support for __sync_val_compare_and_swap. */
7613 libfunc = optab_libfunc (sync_compare_and_swap_optab, mode);
7614 if (libfunc != NULL)
7616 rtx addr = convert_memory_address (ptr_mode, XEXP (mem, 0));
7617 target_oval = emit_library_call_value (libfunc, NULL_RTX, LCT_NORMAL,
7618 mode, 3, addr, ptr_mode,
7619 expected, mode, desired, mode);
7621 /* Compute the boolean return value only if requested. */
7623 goto success_bool_from_val;
7631 success_bool_from_val:
7632 target_bool = emit_store_flag_force (target_bool, EQ, target_oval,
7633 expected, VOIDmode, 1, 1);
7635 /* Make sure that the oval output winds up where the caller asked. */
7637 *ptarget_oval = target_oval;
7639 *ptarget_bool = target_bool;
7643 /* Generate asm volatile("" : : : "memory") as the memory barrier. */
7646 expand_asm_memory_barrier (void)
7650 asm_op = gen_rtx_ASM_OPERANDS (VOIDmode, empty_string, empty_string, 0,
7651 rtvec_alloc (0), rtvec_alloc (0),
7652 rtvec_alloc (0), UNKNOWN_LOCATION);
7653 MEM_VOLATILE_P (asm_op) = 1;
7655 clob = gen_rtx_SCRATCH (VOIDmode);
7656 clob = gen_rtx_MEM (BLKmode, clob);
7657 clob = gen_rtx_CLOBBER (VOIDmode, clob);
7659 emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, asm_op, clob)));
7662 /* This routine will either emit the mem_thread_fence pattern or issue a
7663 sync_synchronize to generate a fence for memory model MEMMODEL. */
7665 #ifndef HAVE_mem_thread_fence
7666 # define HAVE_mem_thread_fence 0
7667 # define gen_mem_thread_fence(x) (gcc_unreachable (), NULL_RTX)
7669 #ifndef HAVE_memory_barrier
7670 # define HAVE_memory_barrier 0
7671 # define gen_memory_barrier() (gcc_unreachable (), NULL_RTX)
7675 expand_mem_thread_fence (enum memmodel model)
7677 if (HAVE_mem_thread_fence)
7678 emit_insn (gen_mem_thread_fence (GEN_INT (model)));
7679 else if (model != MEMMODEL_RELAXED)
7681 if (HAVE_memory_barrier)
7682 emit_insn (gen_memory_barrier ());
7683 else if (synchronize_libfunc != NULL_RTX)
7684 emit_library_call (synchronize_libfunc, LCT_NORMAL, VOIDmode, 0);
7686 expand_asm_memory_barrier ();
7690 /* This routine will either emit the mem_signal_fence pattern or issue a
7691 sync_synchronize to generate a fence for memory model MEMMODEL. */
7693 #ifndef HAVE_mem_signal_fence
7694 # define HAVE_mem_signal_fence 0
7695 # define gen_mem_signal_fence(x) (gcc_unreachable (), NULL_RTX)
7699 expand_mem_signal_fence (enum memmodel model)
7701 if (HAVE_mem_signal_fence)
7702 emit_insn (gen_mem_signal_fence (GEN_INT (model)));
7703 else if (model != MEMMODEL_RELAXED)
7705 /* By default targets are coherent between a thread and the signal
7706 handler running on the same thread. Thus this really becomes a
7707 compiler barrier, in that stores must not be sunk past
7708 (or raised above) a given point. */
7709 expand_asm_memory_barrier ();
7713 /* This function expands the atomic load operation:
7714 return the atomically loaded value in MEM.
7716 MEMMODEL is the memory model variant to use.
7717 TARGET is an option place to stick the return value. */
7720 expand_atomic_load (rtx target, rtx mem, enum memmodel model)
7722 enum machine_mode mode = GET_MODE (mem);
7723 enum insn_code icode;
7725 /* If the target supports the load directly, great. */
7726 icode = direct_optab_handler (atomic_load_optab, mode);
7727 if (icode != CODE_FOR_nothing)
7729 struct expand_operand ops[3];
7731 create_output_operand (&ops[0], target, mode);
7732 create_fixed_operand (&ops[1], mem);
7733 create_integer_operand (&ops[2], model);
7734 if (maybe_expand_insn (icode, 3, ops))
7735 return ops[0].value;
7738 /* If the size of the object is greater than word size on this target,
7739 then we assume that a load will not be atomic. */
7740 if (GET_MODE_PRECISION (mode) > BITS_PER_WORD)
7742 /* Issue val = compare_and_swap (mem, 0, 0).
7743 This may cause the occasional harmless store of 0 when the value is
7744 already 0, but it seems to be OK according to the standards guys. */
7745 expand_atomic_compare_and_swap (NULL, &target, mem, const0_rtx,
7746 const0_rtx, false, model, model);
7750 /* Otherwise assume loads are atomic, and emit the proper barriers. */
7751 if (!target || target == const0_rtx)
7752 target = gen_reg_rtx (mode);
7754 /* Emit the appropriate barrier before the load. */
7755 expand_mem_thread_fence (model);
7757 emit_move_insn (target, mem);
7759 /* For SEQ_CST, also emit a barrier after the load. */
7760 if (model == MEMMODEL_SEQ_CST)
7761 expand_mem_thread_fence (model);
7766 /* This function expands the atomic store operation:
7767 Atomically store VAL in MEM.
7768 MEMMODEL is the memory model variant to use.
7769 USE_RELEASE is true if __sync_lock_release can be used as a fall back.
7770 function returns const0_rtx if a pattern was emitted. */
7773 expand_atomic_store (rtx mem, rtx val, enum memmodel model, bool use_release)
7775 enum machine_mode mode = GET_MODE (mem);
7776 enum insn_code icode;
7777 struct expand_operand ops[3];
7779 /* If the target supports the store directly, great. */
7780 icode = direct_optab_handler (atomic_store_optab, mode);
7781 if (icode != CODE_FOR_nothing)
7783 create_fixed_operand (&ops[0], mem);
7784 create_input_operand (&ops[1], val, mode);
7785 create_integer_operand (&ops[2], model);
7786 if (maybe_expand_insn (icode, 3, ops))
7790 /* If using __sync_lock_release is a viable alternative, try it. */
7793 icode = direct_optab_handler (sync_lock_release_optab, mode);
7794 if (icode != CODE_FOR_nothing)
7796 create_fixed_operand (&ops[0], mem);
7797 create_input_operand (&ops[1], const0_rtx, mode);
7798 if (maybe_expand_insn (icode, 2, ops))
7800 /* lock_release is only a release barrier. */
7801 if (model == MEMMODEL_SEQ_CST)
7802 expand_mem_thread_fence (model);
7808 /* If the size of the object is greater than word size on this target,
7809 a default store will not be atomic, Try a mem_exchange and throw away
7810 the result. If that doesn't work, don't do anything. */
7811 if (GET_MODE_PRECISION(mode) > BITS_PER_WORD)
7813 rtx target = maybe_emit_atomic_exchange (NULL_RTX, mem, val, model);
7815 target = maybe_emit_compare_and_swap_exchange_loop (NULL_RTX, mem, val);
7822 /* If there is no mem_store, default to a move with barriers */
7823 if (model == MEMMODEL_SEQ_CST || model == MEMMODEL_RELEASE)
7824 expand_mem_thread_fence (model);
7826 emit_move_insn (mem, val);
7828 /* For SEQ_CST, also emit a barrier after the load. */
7829 if (model == MEMMODEL_SEQ_CST)
7830 expand_mem_thread_fence (model);
7836 /* Structure containing the pointers and values required to process the
7837 various forms of the atomic_fetch_op and atomic_op_fetch builtins. */
7839 struct atomic_op_functions
7841 direct_optab mem_fetch_before;
7842 direct_optab mem_fetch_after;
7843 direct_optab mem_no_result;
7846 direct_optab no_result;
7847 enum rtx_code reverse_code;
7851 /* Fill in structure pointed to by OP with the various optab entries for an
7852 operation of type CODE. */
7855 get_atomic_op_for_code (struct atomic_op_functions *op, enum rtx_code code)
7857 gcc_assert (op!= NULL);
7859 /* If SWITCHABLE_TARGET is defined, then subtargets can be switched
7860 in the source code during compilation, and the optab entries are not
7861 computable until runtime. Fill in the values at runtime. */
7865 op->mem_fetch_before = atomic_fetch_add_optab;
7866 op->mem_fetch_after = atomic_add_fetch_optab;
7867 op->mem_no_result = atomic_add_optab;
7868 op->fetch_before = sync_old_add_optab;
7869 op->fetch_after = sync_new_add_optab;
7870 op->no_result = sync_add_optab;
7871 op->reverse_code = MINUS;
7874 op->mem_fetch_before = atomic_fetch_sub_optab;
7875 op->mem_fetch_after = atomic_sub_fetch_optab;
7876 op->mem_no_result = atomic_sub_optab;
7877 op->fetch_before = sync_old_sub_optab;
7878 op->fetch_after = sync_new_sub_optab;
7879 op->no_result = sync_sub_optab;
7880 op->reverse_code = PLUS;
7883 op->mem_fetch_before = atomic_fetch_xor_optab;
7884 op->mem_fetch_after = atomic_xor_fetch_optab;
7885 op->mem_no_result = atomic_xor_optab;
7886 op->fetch_before = sync_old_xor_optab;
7887 op->fetch_after = sync_new_xor_optab;
7888 op->no_result = sync_xor_optab;
7889 op->reverse_code = XOR;
7892 op->mem_fetch_before = atomic_fetch_and_optab;
7893 op->mem_fetch_after = atomic_and_fetch_optab;
7894 op->mem_no_result = atomic_and_optab;
7895 op->fetch_before = sync_old_and_optab;
7896 op->fetch_after = sync_new_and_optab;
7897 op->no_result = sync_and_optab;
7898 op->reverse_code = UNKNOWN;
7901 op->mem_fetch_before = atomic_fetch_or_optab;
7902 op->mem_fetch_after = atomic_or_fetch_optab;
7903 op->mem_no_result = atomic_or_optab;
7904 op->fetch_before = sync_old_ior_optab;
7905 op->fetch_after = sync_new_ior_optab;
7906 op->no_result = sync_ior_optab;
7907 op->reverse_code = UNKNOWN;
7910 op->mem_fetch_before = atomic_fetch_nand_optab;
7911 op->mem_fetch_after = atomic_nand_fetch_optab;
7912 op->mem_no_result = atomic_nand_optab;
7913 op->fetch_before = sync_old_nand_optab;
7914 op->fetch_after = sync_new_nand_optab;
7915 op->no_result = sync_nand_optab;
7916 op->reverse_code = UNKNOWN;
7923 /* See if there is a more optimal way to implement the operation "*MEM CODE VAL"
7924 using memory order MODEL. If AFTER is true the operation needs to return
7925 the value of *MEM after the operation, otherwise the previous value.
7926 TARGET is an optional place to place the result. The result is unused if
7928 Return the result if there is a better sequence, otherwise NULL_RTX. */
7931 maybe_optimize_fetch_op (rtx target, rtx mem, rtx val, enum rtx_code code,
7932 enum memmodel model, bool after)
7934 /* If the value is prefetched, or not used, it may be possible to replace
7935 the sequence with a native exchange operation. */
7936 if (!after || target == const0_rtx)
7938 /* fetch_and (&x, 0, m) can be replaced with exchange (&x, 0, m). */
7939 if (code == AND && val == const0_rtx)
7941 if (target == const0_rtx)
7942 target = gen_reg_rtx (GET_MODE (mem));
7943 return maybe_emit_atomic_exchange (target, mem, val, model);
7946 /* fetch_or (&x, -1, m) can be replaced with exchange (&x, -1, m). */
7947 if (code == IOR && val == constm1_rtx)
7949 if (target == const0_rtx)
7950 target = gen_reg_rtx (GET_MODE (mem));
7951 return maybe_emit_atomic_exchange (target, mem, val, model);
7958 /* Try to emit an instruction for a specific operation varaition.
7959 OPTAB contains the OP functions.
7960 TARGET is an optional place to return the result. const0_rtx means unused.
7961 MEM is the memory location to operate on.
7962 VAL is the value to use in the operation.
7963 USE_MEMMODEL is TRUE if the variation with a memory model should be tried.
7964 MODEL is the memory model, if used.
7965 AFTER is true if the returned result is the value after the operation. */
7968 maybe_emit_op (const struct atomic_op_functions *optab, rtx target, rtx mem,
7969 rtx val, bool use_memmodel, enum memmodel model, bool after)
7971 enum machine_mode mode = GET_MODE (mem);
7972 struct expand_operand ops[4];
7973 enum insn_code icode;
7977 /* Check to see if there is a result returned. */
7978 if (target == const0_rtx)
7982 icode = direct_optab_handler (optab->mem_no_result, mode);
7983 create_integer_operand (&ops[2], model);
7988 icode = direct_optab_handler (optab->no_result, mode);
7992 /* Otherwise, we need to generate a result. */
7997 icode = direct_optab_handler (after ? optab->mem_fetch_after
7998 : optab->mem_fetch_before, mode);
7999 create_integer_operand (&ops[3], model);
8004 icode = optab_handler (after ? optab->fetch_after
8005 : optab->fetch_before, mode);
8008 create_output_operand (&ops[op_counter++], target, mode);
8010 if (icode == CODE_FOR_nothing)
8013 create_fixed_operand (&ops[op_counter++], mem);
8014 /* VAL may have been promoted to a wider mode. Shrink it if so. */
8015 create_convert_operand_to (&ops[op_counter++], val, mode, true);
8017 if (maybe_expand_insn (icode, num_ops, ops))
8018 return (target == const0_rtx ? const0_rtx : ops[0].value);
8024 /* This function expands an atomic fetch_OP or OP_fetch operation:
8025 TARGET is an option place to stick the return value. const0_rtx indicates
8026 the result is unused.
8027 atomically fetch MEM, perform the operation with VAL and return it to MEM.
8028 CODE is the operation being performed (OP)
8029 MEMMODEL is the memory model variant to use.
8030 AFTER is true to return the result of the operation (OP_fetch).
8031 AFTER is false to return the value before the operation (fetch_OP). */
8033 expand_atomic_fetch_op (rtx target, rtx mem, rtx val, enum rtx_code code,
8034 enum memmodel model, bool after)
8036 enum machine_mode mode = GET_MODE (mem);
8037 struct atomic_op_functions optab;
8039 bool unused_result = (target == const0_rtx);
8041 get_atomic_op_for_code (&optab, code);
8043 /* Check to see if there are any better instructions. */
8044 result = maybe_optimize_fetch_op (target, mem, val, code, model, after);
8048 /* Check for the case where the result isn't used and try those patterns. */
8051 /* Try the memory model variant first. */
8052 result = maybe_emit_op (&optab, target, mem, val, true, model, true);
8056 /* Next try the old style withuot a memory model. */
8057 result = maybe_emit_op (&optab, target, mem, val, false, model, true);
8061 /* There is no no-result pattern, so try patterns with a result. */
8065 /* Try the __atomic version. */
8066 result = maybe_emit_op (&optab, target, mem, val, true, model, after);
8070 /* Try the older __sync version. */
8071 result = maybe_emit_op (&optab, target, mem, val, false, model, after);
8075 /* If the fetch value can be calculated from the other variation of fetch,
8076 try that operation. */
8077 if (after || unused_result || optab.reverse_code != UNKNOWN)
8079 /* Try the __atomic version, then the older __sync version. */
8080 result = maybe_emit_op (&optab, target, mem, val, true, model, !after);
8082 result = maybe_emit_op (&optab, target, mem, val, false, model, !after);
8086 /* If the result isn't used, no need to do compensation code. */
8090 /* Issue compensation code. Fetch_after == fetch_before OP val.
8091 Fetch_before == after REVERSE_OP val. */
8093 code = optab.reverse_code;
8096 result = expand_simple_binop (mode, AND, result, val, NULL_RTX,
8097 true, OPTAB_LIB_WIDEN);
8098 result = expand_simple_unop (mode, NOT, result, target, true);
8101 result = expand_simple_binop (mode, code, result, val, target,
8102 true, OPTAB_LIB_WIDEN);
8107 /* Try the __sync libcalls only if we can't do compare-and-swap inline. */
8108 if (!can_compare_and_swap_p (mode, false))
8113 libfunc = optab_libfunc (after ? optab.fetch_after
8114 : optab.fetch_before, mode);
8116 && (after || unused_result || optab.reverse_code != UNKNOWN))
8120 code = optab.reverse_code;
8121 libfunc = optab_libfunc (after ? optab.fetch_before
8122 : optab.fetch_after, mode);
8124 if (libfunc != NULL)
8126 rtx addr = convert_memory_address (ptr_mode, XEXP (mem, 0));
8127 result = emit_library_call_value (libfunc, NULL, LCT_NORMAL, mode,
8128 2, addr, ptr_mode, val, mode);
8130 if (!unused_result && fixup)
8131 result = expand_simple_binop (mode, code, result, val, target,
8132 true, OPTAB_LIB_WIDEN);
8137 /* If nothing else has succeeded, default to a compare and swap loop. */
8138 if (can_compare_and_swap_p (mode, true))
8141 rtx t0 = gen_reg_rtx (mode), t1;
8145 /* If the result is used, get a register for it. */
8148 if (!target || !register_operand (target, mode))
8149 target = gen_reg_rtx (mode);
8150 /* If fetch_before, copy the value now. */
8152 emit_move_insn (target, t0);
8155 target = const0_rtx;
8160 t1 = expand_simple_binop (mode, AND, t1, val, NULL_RTX,
8161 true, OPTAB_LIB_WIDEN);
8162 t1 = expand_simple_unop (mode, code, t1, NULL_RTX, true);
8165 t1 = expand_simple_binop (mode, code, t1, val, NULL_RTX, true,
8168 /* For after, copy the value now. */
8169 if (!unused_result && after)
8170 emit_move_insn (target, t1);
8171 insn = get_insns ();
8174 if (t1 != NULL && expand_compare_and_swap_loop (mem, t0, t1, insn))
8181 /* Return true if OPERAND is suitable for operand number OPNO of
8182 instruction ICODE. */
8185 insn_operand_matches (enum insn_code icode, unsigned int opno, rtx operand)
8187 return (!insn_data[(int) icode].operand[opno].predicate
8188 || (insn_data[(int) icode].operand[opno].predicate
8189 (operand, insn_data[(int) icode].operand[opno].mode)));
8192 /* TARGET is a target of a multiword operation that we are going to
8193 implement as a series of word-mode operations. Return true if
8194 TARGET is suitable for this purpose. */
8197 valid_multiword_target_p (rtx target)
8199 enum machine_mode mode;
8202 mode = GET_MODE (target);
8203 for (i = 0; i < GET_MODE_SIZE (mode); i += UNITS_PER_WORD)
8204 if (!validate_subreg (word_mode, mode, target, i))
8209 /* Like maybe_legitimize_operand, but do not change the code of the
8210 current rtx value. */
8213 maybe_legitimize_operand_same_code (enum insn_code icode, unsigned int opno,
8214 struct expand_operand *op)
8216 /* See if the operand matches in its current form. */
8217 if (insn_operand_matches (icode, opno, op->value))
8220 /* If the operand is a memory whose address has no side effects,
8221 try forcing the address into a non-virtual pseudo register.
8222 The check for side effects is important because copy_to_mode_reg
8223 cannot handle things like auto-modified addresses. */
8224 if (insn_data[(int) icode].operand[opno].allows_mem && MEM_P (op->value))
8229 addr = XEXP (mem, 0);
8230 if (!(REG_P (addr) && REGNO (addr) > LAST_VIRTUAL_REGISTER)
8231 && !side_effects_p (addr))
8234 enum machine_mode mode;
8236 last = get_last_insn ();
8237 mode = targetm.addr_space.address_mode (MEM_ADDR_SPACE (mem));
8238 mem = replace_equiv_address (mem, copy_to_mode_reg (mode, addr));
8239 if (insn_operand_matches (icode, opno, mem))
8244 delete_insns_since (last);
8251 /* Try to make OP match operand OPNO of instruction ICODE. Return true
8252 on success, storing the new operand value back in OP. */
8255 maybe_legitimize_operand (enum insn_code icode, unsigned int opno,
8256 struct expand_operand *op)
8258 enum machine_mode mode, imode;
8259 bool old_volatile_ok, result;
8265 old_volatile_ok = volatile_ok;
8267 result = maybe_legitimize_operand_same_code (icode, opno, op);
8268 volatile_ok = old_volatile_ok;
8272 gcc_assert (mode != VOIDmode);
8274 && op->value != const0_rtx
8275 && GET_MODE (op->value) == mode
8276 && maybe_legitimize_operand_same_code (icode, opno, op))
8279 op->value = gen_reg_rtx (mode);
8284 gcc_assert (mode != VOIDmode);
8285 gcc_assert (GET_MODE (op->value) == VOIDmode
8286 || GET_MODE (op->value) == mode);
8287 if (maybe_legitimize_operand_same_code (icode, opno, op))
8290 op->value = copy_to_mode_reg (mode, op->value);
8293 case EXPAND_CONVERT_TO:
8294 gcc_assert (mode != VOIDmode);
8295 op->value = convert_to_mode (mode, op->value, op->unsigned_p);
8298 case EXPAND_CONVERT_FROM:
8299 if (GET_MODE (op->value) != VOIDmode)
8300 mode = GET_MODE (op->value);
8302 /* The caller must tell us what mode this value has. */
8303 gcc_assert (mode != VOIDmode);
8305 imode = insn_data[(int) icode].operand[opno].mode;
8306 if (imode != VOIDmode && imode != mode)
8308 op->value = convert_modes (imode, mode, op->value, op->unsigned_p);
8313 case EXPAND_ADDRESS:
8314 gcc_assert (mode != VOIDmode);
8315 op->value = convert_memory_address (mode, op->value);
8318 case EXPAND_INTEGER:
8319 mode = insn_data[(int) icode].operand[opno].mode;
8320 if (mode != VOIDmode && const_int_operand (op->value, mode))
8324 return insn_operand_matches (icode, opno, op->value);
8327 /* Make OP describe an input operand that should have the same value
8328 as VALUE, after any mode conversion that the target might request.
8329 TYPE is the type of VALUE. */
8332 create_convert_operand_from_type (struct expand_operand *op,
8333 rtx value, tree type)
8335 create_convert_operand_from (op, value, TYPE_MODE (type),
8336 TYPE_UNSIGNED (type));
8339 /* Try to make operands [OPS, OPS + NOPS) match operands [OPNO, OPNO + NOPS)
8340 of instruction ICODE. Return true on success, leaving the new operand
8341 values in the OPS themselves. Emit no code on failure. */
8344 maybe_legitimize_operands (enum insn_code icode, unsigned int opno,
8345 unsigned int nops, struct expand_operand *ops)
8350 last = get_last_insn ();
8351 for (i = 0; i < nops; i++)
8352 if (!maybe_legitimize_operand (icode, opno + i, &ops[i]))
8354 delete_insns_since (last);
8360 /* Try to generate instruction ICODE, using operands [OPS, OPS + NOPS)
8361 as its operands. Return the instruction pattern on success,
8362 and emit any necessary set-up code. Return null and emit no
8366 maybe_gen_insn (enum insn_code icode, unsigned int nops,
8367 struct expand_operand *ops)
8369 gcc_assert (nops == (unsigned int) insn_data[(int) icode].n_generator_args);
8370 if (!maybe_legitimize_operands (icode, 0, nops, ops))
8376 return GEN_FCN (icode) (ops[0].value);
8378 return GEN_FCN (icode) (ops[0].value, ops[1].value);
8380 return GEN_FCN (icode) (ops[0].value, ops[1].value, ops[2].value);
8382 return GEN_FCN (icode) (ops[0].value, ops[1].value, ops[2].value,
8385 return GEN_FCN (icode) (ops[0].value, ops[1].value, ops[2].value,
8386 ops[3].value, ops[4].value);
8388 return GEN_FCN (icode) (ops[0].value, ops[1].value, ops[2].value,
8389 ops[3].value, ops[4].value, ops[5].value);
8391 return GEN_FCN (icode) (ops[0].value, ops[1].value, ops[2].value,
8392 ops[3].value, ops[4].value, ops[5].value,
8395 return GEN_FCN (icode) (ops[0].value, ops[1].value, ops[2].value,
8396 ops[3].value, ops[4].value, ops[5].value,
8397 ops[6].value, ops[7].value);
8402 /* Try to emit instruction ICODE, using operands [OPS, OPS + NOPS)
8403 as its operands. Return true on success and emit no code on failure. */
8406 maybe_expand_insn (enum insn_code icode, unsigned int nops,
8407 struct expand_operand *ops)
8409 rtx pat = maybe_gen_insn (icode, nops, ops);
8418 /* Like maybe_expand_insn, but for jumps. */
8421 maybe_expand_jump_insn (enum insn_code icode, unsigned int nops,
8422 struct expand_operand *ops)
8424 rtx pat = maybe_gen_insn (icode, nops, ops);
8427 emit_jump_insn (pat);
8433 /* Emit instruction ICODE, using operands [OPS, OPS + NOPS)
8437 expand_insn (enum insn_code icode, unsigned int nops,
8438 struct expand_operand *ops)
8440 if (!maybe_expand_insn (icode, nops, ops))
8444 /* Like expand_insn, but for jumps. */
8447 expand_jump_insn (enum insn_code icode, unsigned int nops,
8448 struct expand_operand *ops)
8450 if (!maybe_expand_jump_insn (icode, nops, ops))
8454 #include "gt-optabs.h"