X-Git-Url: http://git.sourceforge.jp/view?a=blobdiff_plain;f=gcc%2Foptabs.c;h=7a4bd0f9e9dce47e5ac31f1c70ff661a58cba43e;hb=658fc6b4e6c918222cc8ec22e1f396e0cda0fe28;hp=bde10d4cde4e8a3ee84d9fcd45e124cf6eaac019;hpb=7f3be425ec91d3479f6669c125476f578c497524;p=pf3gnuchains%2Fgcc-fork.git diff --git a/gcc/optabs.c b/gcc/optabs.c index bde10d4cde4..7a4bd0f9e9d 100644 --- a/gcc/optabs.c +++ b/gcc/optabs.c @@ -1,44 +1,48 @@ /* Expand the basic unary and binary arithmetic operations, for GNU compiler. Copyright (C) 1987, 1988, 1992, 1993, 1994, 1995, 1996, 1997, 1998, - 1999, 2000, 2001 Free Software Foundation, Inc. + 1999, 2000, 2001, 2002, 2003 Free Software Foundation, Inc. -This file is part of GNU CC. +This file is part of GCC. -GNU CC is free software; you can redistribute it and/or modify -it under the terms of the GNU General Public License as published by -the Free Software Foundation; either version 2, or (at your option) -any later version. +GCC is free software; you can redistribute it and/or modify it under +the terms of the GNU General Public License as published by the Free +Software Foundation; either version 2, or (at your option) any later +version. -GNU CC is distributed in the hope that it will be useful, -but WITHOUT ANY WARRANTY; without even the implied warranty of -MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -GNU General Public License for more details. +GCC is distributed in the hope that it will be useful, but WITHOUT ANY +WARRANTY; without even the implied warranty of MERCHANTABILITY or +FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +for more details. You should have received a copy of the GNU General Public License -along with GNU CC; see the file COPYING. If not, write to -the Free Software Foundation, 59 Temple Place - Suite 330, -Boston, MA 02111-1307, USA. */ +along with GCC; see the file COPYING. If not, write to the Free +Software Foundation, 59 Temple Place - Suite 330, Boston, MA +02111-1307, USA. */ #include "config.h" #include "system.h" +#include "coretypes.h" +#include "tm.h" #include "toplev.h" /* Include insn-config.h before expr.h so that HAVE_conditional_move - is properly defined. */ + is properly defined. 
*/ #include "insn-config.h" #include "rtl.h" #include "tree.h" #include "tm_p.h" #include "flags.h" -#include "insn-flags.h" -#include "insn-codes.h" #include "function.h" +#include "except.h" #include "expr.h" +#include "optabs.h" +#include "libfuncs.h" #include "recog.h" #include "reload.h" #include "ggc.h" #include "real.h" +#include "basic-block.h" /* Each optab contains info on how this target machine can perform a particular operation @@ -84,35 +88,50 @@ enum insn_code setcc_gen_code[NUM_RTX_CODE]; enum insn_code movcc_gen_code[NUM_MACHINE_MODES]; #endif -static int add_equal_note PARAMS ((rtx, rtx, enum rtx_code, rtx, rtx)); -static rtx widen_operand PARAMS ((rtx, enum machine_mode, - enum machine_mode, int, int)); -static int expand_cmplxdiv_straight PARAMS ((rtx, rtx, rtx, rtx, - rtx, rtx, enum machine_mode, - int, enum optab_methods, - enum mode_class, optab)); -static int expand_cmplxdiv_wide PARAMS ((rtx, rtx, rtx, rtx, - rtx, rtx, enum machine_mode, - int, enum optab_methods, - enum mode_class, optab)); -static enum insn_code can_fix_p PARAMS ((enum machine_mode, enum machine_mode, - int, int *)); -static enum insn_code can_float_p PARAMS ((enum machine_mode, enum machine_mode, - int)); -static rtx ftruncify PARAMS ((rtx)); -static optab init_optab PARAMS ((enum rtx_code)); -static void init_libfuncs PARAMS ((optab, int, int, const char *, int)); -static void init_integral_libfuncs PARAMS ((optab, const char *, int)); -static void init_floating_libfuncs PARAMS ((optab, const char *, int)); -#ifdef HAVE_conditional_trap -static void init_traps PARAMS ((void)); +/* The insn generating function can not take an rtx_code argument. + TRAP_RTX is used as an rtx argument. Its code is replaced with + the code to be used in the trap insn and all other fields are ignored. 
*/ +static GTY(()) rtx trap_rtx; + +static int add_equal_note (rtx, rtx, enum rtx_code, rtx, rtx); +static rtx widen_operand (rtx, enum machine_mode, enum machine_mode, int, + int); +static int expand_cmplxdiv_straight (rtx, rtx, rtx, rtx, rtx, rtx, + enum machine_mode, int, + enum optab_methods, enum mode_class, + optab); +static int expand_cmplxdiv_wide (rtx, rtx, rtx, rtx, rtx, rtx, + enum machine_mode, int, enum optab_methods, + enum mode_class, optab); +static void prepare_cmp_insn (rtx *, rtx *, enum rtx_code *, rtx, + enum machine_mode *, int *, + enum can_compare_purpose); +static enum insn_code can_fix_p (enum machine_mode, enum machine_mode, int, + int *); +static enum insn_code can_float_p (enum machine_mode, enum machine_mode, int); +static rtx ftruncify (rtx); +static optab new_optab (void); +static inline optab init_optab (enum rtx_code); +static inline optab init_optabv (enum rtx_code); +static void init_libfuncs (optab, int, int, const char *, int); +static void init_integral_libfuncs (optab, const char *, int); +static void init_floating_libfuncs (optab, const char *, int); +static void emit_cmp_and_jump_insn_1 (rtx, rtx, enum machine_mode, + enum rtx_code, int, rtx); +static void prepare_float_lib_cmp (rtx *, rtx *, enum rtx_code *, + enum machine_mode *, int *); +static rtx expand_vector_binop (enum machine_mode, optab, rtx, rtx, rtx, int, + enum optab_methods); +static rtx expand_vector_unop (enum machine_mode, optab, rtx, rtx, int); +static rtx widen_clz (enum machine_mode, rtx, rtx); +static rtx expand_parity (enum machine_mode, rtx, rtx); + +#ifndef HAVE_conditional_trap +#define HAVE_conditional_trap 0 +#define gen_conditional_trap(a,b) (abort (), NULL_RTX) #endif -static void emit_cmp_and_jump_insn_1 PARAMS ((rtx, rtx, enum machine_mode, - enum rtx_code, int, rtx)); -static void prepare_float_lib_cmp PARAMS ((rtx *, rtx *, enum rtx_code *, - enum machine_mode *, int *)); -/* Add a REG_EQUAL note to the last insn in SEQ. TARGET is being set to +/* Add a REG_EQUAL note to the last insn in INSNS. TARGET is being set to the result of operation CODE applied to OP0 (and OP1 if it is a binary operation). @@ -123,68 +142,85 @@ static void prepare_float_lib_cmp PARAMS ((rtx *, rtx *, enum rtx_code *, again, ensuring that TARGET is not one of the operands. */ static int -add_equal_note (seq, target, code, op0, op1) - rtx seq; - rtx target; - enum rtx_code code; - rtx op0, op1; +add_equal_note (rtx insns, rtx target, enum rtx_code code, rtx op0, rtx op1) { - rtx set; - int i; + rtx last_insn, insn, set; rtx note; - if ((GET_RTX_CLASS (code) != '1' && GET_RTX_CLASS (code) != '2' - && GET_RTX_CLASS (code) != 'c' && GET_RTX_CLASS (code) != '<') - || GET_CODE (seq) != SEQUENCE - || (set = single_set (XVECEXP (seq, 0, XVECLEN (seq, 0) - 1))) == 0 - || GET_CODE (target) == ZERO_EXTRACT - || (! rtx_equal_p (SET_DEST (set), target) - /* For a STRICT_LOW_PART, the REG_NOTE applies to what is inside the - SUBREG. */ - && (GET_CODE (SET_DEST (set)) != STRICT_LOW_PART - || ! rtx_equal_p (SUBREG_REG (XEXP (SET_DEST (set), 0)), - target)))) + if (! insns + || ! 
INSN_P (insns) + || NEXT_INSN (insns) == NULL_RTX) + abort (); + + if (GET_RTX_CLASS (code) != '1' && GET_RTX_CLASS (code) != '2' + && GET_RTX_CLASS (code) != 'c' && GET_RTX_CLASS (code) != '<') + return 1; + + if (GET_CODE (target) == ZERO_EXTRACT) + return 1; + + for (last_insn = insns; + NEXT_INSN (last_insn) != NULL_RTX; + last_insn = NEXT_INSN (last_insn)) + ; + + set = single_set (last_insn); + if (set == NULL_RTX) + return 1; + + if (! rtx_equal_p (SET_DEST (set), target) + /* For a STRICT_LOW_PART, the REG_NOTE applies to what is inside it. */ + && (GET_CODE (SET_DEST (set)) != STRICT_LOW_PART + || ! rtx_equal_p (XEXP (SET_DEST (set), 0), target))) return 1; /* If TARGET is in OP0 or OP1, check if anything in SEQ sets TARGET besides the last insn. */ if (reg_overlap_mentioned_p (target, op0) || (op1 && reg_overlap_mentioned_p (target, op1))) - for (i = XVECLEN (seq, 0) - 2; i >= 0; i--) - if (reg_set_p (target, XVECEXP (seq, 0, i))) - return 0; + { + insn = PREV_INSN (last_insn); + while (insn != NULL_RTX) + { + if (reg_set_p (target, insn)) + return 0; + + insn = PREV_INSN (insn); + } + } if (GET_RTX_CLASS (code) == '1') note = gen_rtx_fmt_e (code, GET_MODE (target), copy_rtx (op0)); else note = gen_rtx_fmt_ee (code, GET_MODE (target), copy_rtx (op0), copy_rtx (op1)); - set_unique_reg_note (XVECEXP (seq, 0, XVECLEN (seq, 0) - 1), REG_EQUAL, note); + set_unique_reg_note (last_insn, REG_EQUAL, note); return 1; } /* Widen OP to MODE and return the rtx for the widened operand. UNSIGNEDP says whether OP is signed or unsigned. NO_EXTEND is nonzero if we need - not actually do a sign-extend or zero-extend, but can leave the + not actually do a sign-extend or zero-extend, but can leave the higher-order bits of the result rtx undefined, for example, in the case of logical operations, but not right shifts. */ static rtx -widen_operand (op, mode, oldmode, unsignedp, no_extend) - rtx op; - enum machine_mode mode, oldmode; - int unsignedp; - int no_extend; +widen_operand (rtx op, enum machine_mode mode, enum machine_mode oldmode, + int unsignedp, int no_extend) { rtx result; - /* If we must extend do so. If OP is either a constant or a SUBREG - for a promoted object, also extend since it will be more efficient to - do so. */ + /* If we don't have to extend and this is a constant, return it. */ + if (no_extend && GET_MODE (op) == VOIDmode) + return op; + + /* If we must extend do so. If OP is a SUBREG for a promoted object, also + extend since it will be more efficient to do so unless the signedness of + a promoted object differs from our extension. */ if (! no_extend - || GET_MODE (op) == VOIDmode - || (GET_CODE (op) == SUBREG && SUBREG_PROMOTED_VAR_P (op))) + || (GET_CODE (op) == SUBREG && SUBREG_PROMOTED_VAR_P (op) + && SUBREG_PROMOTED_UNSIGNED_P (op) == unsignedp)) return convert_modes (mode, oldmode, op, unsignedp); /* If MODE is no wider than a single word, we return a paradoxical @@ -204,14 +240,10 @@ widen_operand (op, mode, oldmode, unsignedp, no_extend) /* Generate code to perform a straightforward complex divide. 
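   That is, for operands (a + ib) and (c + id) compute

	(a + ib) / (c + id) = ((a*c + b*d) + i(b*c - a*d)) / (c*c + d*d),

   dividing both the real and the imaginary part by c*c + d*d directly.
   This is the cheap method; expand_cmplxdiv_wide below is the variant
   that tolerates a wider input range.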
*/ static int -expand_cmplxdiv_straight (real0, real1, imag0, imag1, realr, imagr, submode, - unsignedp, methods, class, binoptab) - rtx real0, real1, imag0, imag1, realr, imagr; - enum machine_mode submode; - int unsignedp; - enum optab_methods methods; - enum mode_class class; - optab binoptab; +expand_cmplxdiv_straight (rtx real0, rtx real1, rtx imag0, rtx imag1, + rtx realr, rtx imagr, enum machine_mode submode, + int unsignedp, enum optab_methods methods, + enum mode_class class, optab binoptab) { rtx divisor; rtx real_t, imag_t; @@ -221,7 +253,7 @@ expand_cmplxdiv_straight (real0, real1, imag0, imag1, realr, imagr, submode, optab this_sub_optab = sub_optab; optab this_neg_optab = neg_optab; optab this_mul_optab = smul_optab; - + if (binoptab == sdivv_optab) { this_add_optab = addv_optab; @@ -262,7 +294,7 @@ expand_cmplxdiv_straight (real0, real1, imag0, imag1, realr, imagr, submode, /* Calculate the dividend. */ real_t = expand_binop (submode, this_mul_optab, real0, real1, NULL_RTX, unsignedp, methods); - + imag_t = expand_binop (submode, this_mul_optab, real0, imag1, NULL_RTX, unsignedp, methods); @@ -287,7 +319,7 @@ expand_cmplxdiv_straight (real0, real1, imag0, imag1, realr, imagr, submode, real_t = expand_binop (submode, this_add_optab, temp1, temp2, NULL_RTX, unsignedp, methods); - + temp1 = expand_binop (submode, this_mul_optab, imag0, real1, NULL_RTX, unsignedp, methods); @@ -336,20 +368,15 @@ expand_cmplxdiv_straight (real0, real1, imag0, imag1, realr, imagr, submode, /* Generate code to perform a wide-input-range-acceptable complex divide. */ static int -expand_cmplxdiv_wide (real0, real1, imag0, imag1, realr, imagr, submode, - unsignedp, methods, class, binoptab) - rtx real0, real1, imag0, imag1, realr, imagr; - enum machine_mode submode; - int unsignedp; - enum optab_methods methods; - enum mode_class class; - optab binoptab; +expand_cmplxdiv_wide (rtx real0, rtx real1, rtx imag0, rtx imag1, rtx realr, + rtx imagr, enum machine_mode submode, int unsignedp, + enum optab_methods methods, enum mode_class class, + optab binoptab) { rtx ratio, divisor; rtx real_t, imag_t; rtx temp1, temp2, lab1, lab2; enum machine_mode mode; - int align; rtx res; optab this_add_optab = add_optab; optab this_sub_optab = sub_optab; @@ -363,7 +390,7 @@ expand_cmplxdiv_wide (real0, real1, imag0, imag1, realr, imagr, submode, this_neg_optab = negv_optab; this_mul_optab = smulv_optab; } - + /* Don't fetch these from memory more than once. */ real0 = force_reg (submode, real0); real1 = force_reg (submode, real1); @@ -389,10 +416,9 @@ expand_cmplxdiv_wide (real0, real1, imag0, imag1, realr, imagr, submode, return 0; mode = GET_MODE (temp1); - align = GET_MODE_ALIGNMENT (mode); lab1 = gen_label_rtx (); emit_cmp_and_jump_insns (temp1, temp2, LT, NULL_RTX, - mode, unsignedp, align, lab1); + mode, unsignedp, lab1); /* |c| >= |d|; use ratio d/c to scale dividend and divisor. */ @@ -598,6 +624,21 @@ expand_cmplxdiv_wide (real0, real1, imag0, imag1, realr, imagr, submode, return 1; } +/* Wrapper around expand_binop which takes an rtx code to specify + the operation to perform, not an optab pointer. All other + arguments are the same. 
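   So, for example, expand_simple_binop (SImode, PLUS, x, y, target, 1,
   OPTAB_LIB_WIDEN) is simply expand_binop (SImode, add_optab, x, y,
   target, 1, OPTAB_LIB_WIDEN), since code_to_optab maps PLUS back to
   add_optab.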
*/ +rtx +expand_simple_binop (enum machine_mode mode, enum rtx_code code, rtx op0, + rtx op1, rtx target, int unsignedp, + enum optab_methods methods) +{ + optab binop = code_to_optab[(int) code]; + if (binop == 0) + abort (); + + return expand_binop (mode, binop, op0, op1, target, unsignedp, methods); +} + /* Generate code to perform an operation specified by BINOPTAB on operands OP0 and OP1, with result having machine-mode MODE. @@ -610,22 +651,17 @@ expand_cmplxdiv_wide (real0, real1, imag0, imag1, realr, imagr, submode, this may or may not be TARGET. */ rtx -expand_binop (mode, binoptab, op0, op1, target, unsignedp, methods) - enum machine_mode mode; - optab binoptab; - rtx op0, op1; - rtx target; - int unsignedp; - enum optab_methods methods; +expand_binop (enum machine_mode mode, optab binoptab, rtx op0, rtx op1, + rtx target, int unsignedp, enum optab_methods methods) { enum optab_methods next_methods = (methods == OPTAB_LIB || methods == OPTAB_LIB_WIDEN ? OPTAB_WIDEN : methods); enum mode_class class; enum machine_mode wider_mode; - register rtx temp; + rtx temp; int commutative_op = 0; - int shift_op = (binoptab->code == ASHIFT + int shift_op = (binoptab->code == ASHIFT || binoptab->code == ASHIFTRT || binoptab->code == LSHIFTRT || binoptab->code == ROTATE @@ -642,8 +678,17 @@ expand_binop (mode, binoptab, op0, op1, target, unsignedp, methods) if (flag_force_mem) { - op0 = force_not_mem (op0); - op1 = force_not_mem (op1); + /* Load duplicate non-volatile operands once. */ + if (rtx_equal_p (op0, op1) && ! volatile_refs_p (op0)) + { + op0 = force_not_mem (op0); + op1 = op0; + } + else + { + op0 = force_not_mem (op0); + op1 = force_not_mem (op1); + } } /* If subtracting an integer constant, convert this into an addition of @@ -716,7 +761,7 @@ expand_binop (mode, binoptab, op0, op1, target, unsignedp, methods) if (GET_MODE (op0) != mode0 && GET_MODE (op1) != mode1 && GET_MODE (op0) == mode1 && GET_MODE (op1) == mode0) { - register rtx tmp; + rtx tmp; tmp = op0; op0 = op1; op1 = tmp; tmp = xop0; xop0 = xop1; xop1 = tmp; @@ -724,17 +769,24 @@ expand_binop (mode, binoptab, op0, op1, target, unsignedp, methods) } /* In case the insn wants input operands in modes different from - the result, convert the operands. */ - - if (GET_MODE (op0) != VOIDmode - && GET_MODE (op0) != mode0 - && mode0 != VOIDmode) - xop0 = convert_to_mode (mode0, xop0, unsignedp); - - if (GET_MODE (xop1) != VOIDmode - && GET_MODE (xop1) != mode1 - && mode1 != VOIDmode) - xop1 = convert_to_mode (mode1, xop1, unsignedp); + those of the actual operands, convert the operands. It would + seem that we don't need to convert CONST_INTs, but we do, so + that they're properly zero-extended, sign-extended or truncated + for their mode. */ + + if (GET_MODE (op0) != mode0 && mode0 != VOIDmode) + xop0 = convert_modes (mode0, + GET_MODE (op0) != VOIDmode + ? GET_MODE (op0) + : mode, + xop0, unsignedp); + + if (GET_MODE (op1) != mode1 && mode1 != VOIDmode) + xop1 = convert_modes (mode1, + GET_MODE (op1) != VOIDmode + ? GET_MODE (op1) + : mode, + xop1, unsignedp); /* Now, if insn's predicates don't allow our operands, put them into pseudo regs. */ @@ -753,10 +805,10 @@ expand_binop (mode, binoptab, op0, op1, target, unsignedp, methods) pat = GEN_FCN (icode) (temp, xop0, xop1); if (pat) { - /* If PAT is a multi-insn sequence, try to add an appropriate + /* If PAT is composed of more than one insn, try to add an appropriate REG_EQUAL note to it. 
If we can't because TEMP conflicts with an operand, call ourselves again, this time without a target. */ - if (GET_CODE (pat) == SEQUENCE + if (INSN_P (pat) && NEXT_INSN (pat) != NULL_RTX && ! add_equal_note (pat, temp, binoptab->code, xop0, xop1)) { delete_insns_since (last); @@ -813,7 +865,7 @@ expand_binop (mode, binoptab, op0, op1, target, unsignedp, methods) /* For certain integer operations, we need not actually extend the narrow operands, as long as we will truncate - the results to the same narrowness. */ + the results to the same narrowness. */ if ((binoptab == ior_optab || binoptab == and_optab || binoptab == xor_optab @@ -853,7 +905,7 @@ expand_binop (mode, binoptab, op0, op1, target, unsignedp, methods) && GET_MODE_SIZE (mode) > UNITS_PER_WORD && binoptab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing) { - unsigned int i; + int i; rtx insns; rtx equiv_value; @@ -996,7 +1048,7 @@ expand_binop (mode, binoptab, op0, op1, target, unsignedp, methods) if (inter != 0) inter = expand_binop (word_mode, binoptab, outof_input, op1, outof_target, unsignedp, next_methods); - + if (inter != 0 && inter != outof_target) emit_move_insn (outof_target, inter); } @@ -1075,7 +1127,7 @@ expand_binop (mode, binoptab, op0, op1, target, unsignedp, methods) if (shift_count > BITS_PER_WORD) { first_shift_count = GEN_INT (shift_count - BITS_PER_WORD); - second_shift_count = GEN_INT (2*BITS_PER_WORD - shift_count); + second_shift_count = GEN_INT (2 * BITS_PER_WORD - shift_count); } else { @@ -1088,7 +1140,7 @@ expand_binop (mode, binoptab, op0, op1, target, unsignedp, methods) NULL_RTX, unsignedp, next_methods); into_temp2 = expand_binop (word_mode, reverse_unsigned_shift, into_input, second_shift_count, - into_target, unsignedp, next_methods); + NULL_RTX, unsignedp, next_methods); if (into_temp1 != 0 && into_temp2 != 0) inter = expand_binop (word_mode, ior_optab, into_temp1, into_temp2, @@ -1104,7 +1156,7 @@ expand_binop (mode, binoptab, op0, op1, target, unsignedp, methods) NULL_RTX, unsignedp, next_methods); outof_temp2 = expand_binop (word_mode, reverse_unsigned_shift, outof_input, second_shift_count, - outof_target, unsignedp, next_methods); + NULL_RTX, unsignedp, next_methods); if (inter != 0 && outof_temp1 != 0 && outof_temp2 != 0) inter = expand_binop (word_mode, ior_optab, @@ -1131,7 +1183,7 @@ expand_binop (mode, binoptab, op0, op1, target, unsignedp, methods) if (shift_count != BITS_PER_WORD) emit_no_conflict_block (insns, target, op0, op1, equiv_value); else - emit_insns (insns); + emit_insn (insns); return target; @@ -1145,11 +1197,10 @@ expand_binop (mode, binoptab, op0, op1, target, unsignedp, methods) && binoptab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing) { unsigned int i; - rtx carry_tmp = gen_reg_rtx (word_mode); optab otheroptab = binoptab == add_optab ? sub_optab : add_optab; - unsigned int nwords = GET_MODE_BITSIZE (mode) / BITS_PER_WORD; + const unsigned int nwords = GET_MODE_BITSIZE (mode) / BITS_PER_WORD; rtx carry_in = NULL_RTX, carry_out = NULL_RTX; - rtx xop0, xop1; + rtx xop0, xop1, xtarget; /* We can handle either a 1 or -1 value for the carry. If STORE_FLAG value is one of those, use it. 
Otherwise, use 1 since it is the @@ -1164,19 +1215,20 @@ expand_binop (mode, binoptab, op0, op1, target, unsignedp, methods) xop0 = force_reg (mode, op0); xop1 = force_reg (mode, op1); - if (target == 0 || GET_CODE (target) != REG - || target == xop0 || target == xop1) - target = gen_reg_rtx (mode); + xtarget = gen_reg_rtx (mode); + + if (target == 0 || GET_CODE (target) != REG) + target = xtarget; /* Indicate for flow that the entire target reg is being set. */ if (GET_CODE (target) == REG) - emit_insn (gen_rtx_CLOBBER (VOIDmode, target)); + emit_insn (gen_rtx_CLOBBER (VOIDmode, xtarget)); /* Do the actual arithmetic. */ for (i = 0; i < nwords; i++) { int index = (WORDS_BIG_ENDIAN ? nwords - i - 1 : i); - rtx target_piece = operand_subword (target, index, 1, mode); + rtx target_piece = operand_subword (xtarget, index, 1, mode); rtx op0_piece = operand_subword_force (xop0, index, mode); rtx op1_piece = operand_subword_force (xop1, index, mode); rtx x; @@ -1201,24 +1253,22 @@ expand_binop (mode, binoptab, op0, op1, target, unsignedp, methods) if (i > 0) { + rtx newx; + /* Add/subtract previous carry to main result. */ - x = expand_binop (word_mode, - normalizep == 1 ? binoptab : otheroptab, - x, carry_in, - target_piece, 1, next_methods); - if (x == 0) - break; - else if (target_piece != x) - emit_move_insn (target_piece, x); + newx = expand_binop (word_mode, + normalizep == 1 ? binoptab : otheroptab, + x, carry_in, + NULL_RTX, 1, next_methods); if (i + 1 < nwords) { - /* THIS CODE HAS NOT BEEN TESTED. */ /* Get out carry from adding/subtracting carry in. */ + rtx carry_tmp = gen_reg_rtx (word_mode); carry_tmp = emit_store_flag_force (carry_tmp, - binoptab == add_optab - ? LT : GT, - x, carry_in, + (binoptab == add_optab + ? LT : GT), + newx, x, word_mode, 1, normalizep); /* Logical-ior the two poss. carry together. */ @@ -1228,23 +1278,27 @@ expand_binop (mode, binoptab, op0, op1, target, unsignedp, methods) if (carry_out == 0) break; } + emit_move_insn (target_piece, newx); } carry_in = carry_out; - } + } - if (i == GET_MODE_BITSIZE (mode) / BITS_PER_WORD) + if (i == GET_MODE_BITSIZE (mode) / (unsigned) BITS_PER_WORD) { - if (mov_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing) + if (mov_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing + || ! rtx_equal_p (target, xtarget)) { - rtx temp = emit_move_insn (target, target); + rtx temp = emit_move_insn (target, xtarget); set_unique_reg_note (temp, - REG_EQUAL, + REG_EQUAL, gen_rtx_fmt_ee (binoptab->code, mode, copy_rtx (xop0), copy_rtx (xop1))); } + else + target = xtarget; return target; } @@ -1256,7 +1310,7 @@ expand_binop (mode, binoptab, op0, op1, target, unsignedp, methods) /* If we want to multiply two two-word values and have normal and widening multiplies of single-word values, we can do this with three smaller multiplications. Note that we do not make a REG_NO_CONFLICT block here - because we are not operating on one word at a time. + because we are not operating on one word at a time. 
The multiplication proceeds as follows: _______________________ @@ -1399,6 +1453,9 @@ expand_binop (mode, binoptab, op0, op1, target, unsignedp, methods) rtx temp = expand_binop (word_mode, binoptab, op0_low, op1_xhigh, NULL_RTX, 0, OPTAB_DIRECT); + if (!REG_P (product_high)) + product_high = force_reg (word_mode, product_high); + if (temp != 0) temp = expand_binop (word_mode, add_optab, temp, product_high, product_high, 0, next_methods); @@ -1407,7 +1464,7 @@ expand_binop (mode, binoptab, op0, op1, target, unsignedp, methods) emit_move_insn (product_high, temp); if (temp != 0) - temp = expand_binop (word_mode, binoptab, op1_low, op0_xhigh, + temp = expand_binop (word_mode, binoptab, op1_low, op0_xhigh, NULL_RTX, 0, OPTAB_DIRECT); if (temp != 0) @@ -1418,13 +1475,15 @@ expand_binop (mode, binoptab, op0, op1, target, unsignedp, methods) if (temp != 0 && temp != product_high) emit_move_insn (product_high, temp); + emit_move_insn (operand_subword (product, high, 1, mode), product_high); + if (temp != 0) { if (mov_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing) { temp = emit_move_insn (product, product); set_unique_reg_note (temp, - REG_EQUAL, + REG_EQUAL, gen_rtx_fmt_ee (MULT, mode, copy_rtx (op0), copy_rtx (op1))); @@ -1441,6 +1500,12 @@ expand_binop (mode, binoptab, op0, op1, target, unsignedp, methods) delete_insns_since (last); } + /* Open-code the vector operations if we have no hardware support + for them. */ + if (class == MODE_VECTOR_INT || class == MODE_VECTOR_FLOAT) + return expand_vector_binop (mode, binoptab, op0, op1, target, + unsignedp, methods); + /* We need to open-code the complex type operations: '+, -, * and /' */ /* At this point we allow operations between two similar complex @@ -1458,11 +1523,8 @@ expand_binop (mode, binoptab, op0, op1, target, unsignedp, methods) rtx equiv_value; int ok = 0; - /* Find the correct mode for the real and imaginary parts */ - enum machine_mode submode - = mode_for_size (GET_MODE_UNIT_SIZE (mode) * BITS_PER_UNIT, - class == MODE_COMPLEX_INT ? MODE_INT : MODE_FLOAT, - 0); + /* Find the correct mode for the real and imaginary parts. */ + enum machine_mode submode = GET_MODE_INNER(mode); if (submode == BLKmode) abort (); @@ -1491,7 +1553,7 @@ expand_binop (mode, binoptab, op0, op1, target, unsignedp, methods) else real1 = op1; - if (real0 == 0 || real1 == 0 || ! (imag0 != 0|| imag1 != 0)) + if (real0 == 0 || real1 == 0 || ! (imag0 != 0 || imag1 != 0)) abort (); switch (binoptab->code) @@ -1508,10 +1570,10 @@ expand_binop (mode, binoptab, op0, op1, target, unsignedp, methods) else if (res != realr) emit_move_insn (realr, res); - if (imag0 && imag1) + if (imag0 != 0 && imag1 != 0) res = expand_binop (submode, binoptab, imag0, imag1, imagr, unsignedp, methods); - else if (imag0) + else if (imag0 != 0) res = imag0; else if (binoptab->code == MINUS) res = expand_unop (submode, @@ -1531,7 +1593,7 @@ expand_binop (mode, binoptab, op0, op1, target, unsignedp, methods) case MULT: /* (a+ib) * (c+id) = (ac-bd) + i(ad+cb) */ - if (imag0 && imag1) + if (imag0 != 0 && imag1 != 0) { rtx temp1, temp2; @@ -1563,11 +1625,16 @@ expand_binop (mode, binoptab, op0, op1, target, unsignedp, methods) temp1 = expand_binop (submode, binoptab, real0, imag1, NULL_RTX, unsignedp, methods); - temp2 = expand_binop (submode, binoptab, real1, imag0, - NULL_RTX, unsignedp, methods); + /* Avoid expanding redundant multiplication for the common + case of squaring a complex number. 
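   When (a + ib) is multiplied by itself the two cross products a*b and
   b*a are the same value, so the second multiplication can reuse the
   first: the result is (a*a - b*b) + i(a*b + a*b).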
*/ + if (rtx_equal_p (real0, real1) && rtx_equal_p (imag0, imag1)) + temp2 = temp1; + else + temp2 = expand_binop (submode, binoptab, real1, imag0, + NULL_RTX, unsignedp, methods); if (temp1 == 0 || temp2 == 0) - break; + break; res = (expand_binop (submode, @@ -1612,7 +1679,7 @@ expand_binop (mode, binoptab, op0, op1, target, unsignedp, methods) case DIV: /* (a+ib) / (c+id) = ((ac+bd)/(cc+dd)) + i((bc-ad)/(cc+dd)) */ - + if (imag1 == 0) { /* (a+ib) / (c+i0) = (a/c) + i(b/c) */ @@ -1670,7 +1737,7 @@ expand_binop (mode, binoptab, op0, op1, target, unsignedp, methods) } } break; - + default: abort (); } @@ -1686,9 +1753,9 @@ expand_binop (mode, binoptab, op0, op1, target, unsignedp, methods) copy_rtx (op0), copy_rtx (op1)); else equiv_value = 0; - + emit_no_conflict_block (seq, target, op0, op1, equiv_value); - + return target; } } @@ -1808,6 +1875,224 @@ expand_binop (mode, binoptab, op0, op1, target, unsignedp, methods) delete_insns_since (entry_last); return 0; } + +/* Like expand_binop, but for open-coding vectors binops. */ + +static rtx +expand_vector_binop (enum machine_mode mode, optab binoptab, rtx op0, + rtx op1, rtx target, int unsignedp, + enum optab_methods methods) +{ + enum machine_mode submode, tmode; + int size, elts, subsize, subbitsize, i; + rtx t, a, b, res, seq; + enum mode_class class; + + class = GET_MODE_CLASS (mode); + + size = GET_MODE_SIZE (mode); + submode = GET_MODE_INNER (mode); + + /* Search for the widest vector mode with the same inner mode that is + still narrower than MODE and that allows to open-code this operator. + Note, if we find such a mode and the handler later decides it can't + do the expansion, we'll be called recursively with the narrower mode. */ + for (tmode = GET_CLASS_NARROWEST_MODE (class); + GET_MODE_SIZE (tmode) < GET_MODE_SIZE (mode); + tmode = GET_MODE_WIDER_MODE (tmode)) + { + if (GET_MODE_INNER (tmode) == GET_MODE_INNER (mode) + && binoptab->handlers[(int) tmode].insn_code != CODE_FOR_nothing) + submode = tmode; + } + + switch (binoptab->code) + { + case AND: + case IOR: + case XOR: + tmode = int_mode_for_mode (mode); + if (tmode != BLKmode) + submode = tmode; + case PLUS: + case MINUS: + case MULT: + case DIV: + subsize = GET_MODE_SIZE (submode); + subbitsize = GET_MODE_BITSIZE (submode); + elts = size / subsize; + + /* If METHODS is OPTAB_DIRECT, we don't insist on the exact mode, + but that we operate on more than one element at a time. */ + if (subsize == GET_MODE_UNIT_SIZE (mode) && methods == OPTAB_DIRECT) + return 0; + + start_sequence (); + + /* Errors can leave us with a const0_rtx as operand. */ + if (GET_MODE (op0) != mode) + op0 = copy_to_mode_reg (mode, op0); + if (GET_MODE (op1) != mode) + op1 = copy_to_mode_reg (mode, op1); + + if (!target) + target = gen_reg_rtx (mode); + + for (i = 0; i < elts; ++i) + { + /* If this is part of a register, and not the first item in the + word, we can't store using a SUBREG - that would clobber + previous results. + And storing with a SUBREG is only possible for the least + significant part, hence we can't do it for big endian + (unless we want to permute the evaluation order. */ + if (GET_CODE (target) == REG + && (BYTES_BIG_ENDIAN + ? 
subsize < UNITS_PER_WORD + : ((i * subsize) % UNITS_PER_WORD) != 0)) + t = NULL_RTX; + else + t = simplify_gen_subreg (submode, target, mode, i * subsize); + if (CONSTANT_P (op0)) + a = simplify_gen_subreg (submode, op0, mode, i * subsize); + else + a = extract_bit_field (op0, subbitsize, i * subbitsize, unsignedp, + NULL_RTX, submode, submode, size); + if (CONSTANT_P (op1)) + b = simplify_gen_subreg (submode, op1, mode, i * subsize); + else + b = extract_bit_field (op1, subbitsize, i * subbitsize, unsignedp, + NULL_RTX, submode, submode, size); + + if (binoptab->code == DIV) + { + if (class == MODE_VECTOR_FLOAT) + res = expand_binop (submode, binoptab, a, b, t, + unsignedp, methods); + else + res = expand_divmod (0, TRUNC_DIV_EXPR, submode, + a, b, t, unsignedp); + } + else + res = expand_binop (submode, binoptab, a, b, t, + unsignedp, methods); + + if (res == 0) + break; + + if (t) + emit_move_insn (t, res); + else + store_bit_field (target, subbitsize, i * subbitsize, submode, res, + size); + } + break; + + default: + abort (); + } + + seq = get_insns (); + end_sequence (); + emit_insn (seq); + + return target; +} + +/* Like expand_unop but for open-coding vector unops. */ + +static rtx +expand_vector_unop (enum machine_mode mode, optab unoptab, rtx op0, + rtx target, int unsignedp) +{ + enum machine_mode submode, tmode; + int size, elts, subsize, subbitsize, i; + rtx t, a, res, seq; + + size = GET_MODE_SIZE (mode); + submode = GET_MODE_INNER (mode); + + /* Search for the widest vector mode with the same inner mode that is + still narrower than MODE and that allows to open-code this operator. + Note, if we find such a mode and the handler later decides it can't + do the expansion, we'll be called recursively with the narrower mode. */ + for (tmode = GET_CLASS_NARROWEST_MODE (GET_MODE_CLASS (mode)); + GET_MODE_SIZE (tmode) < GET_MODE_SIZE (mode); + tmode = GET_MODE_WIDER_MODE (tmode)) + { + if (GET_MODE_INNER (tmode) == GET_MODE_INNER (mode) + && unoptab->handlers[(int) tmode].insn_code != CODE_FOR_nothing) + submode = tmode; + } + /* If there is no negate operation, try doing a subtract from zero. */ + if (unoptab == neg_optab && GET_MODE_CLASS (submode) == MODE_INT + /* Avoid infinite recursion when an + error has left us with the wrong mode. */ + && GET_MODE (op0) == mode) + { + rtx temp; + temp = expand_binop (mode, sub_optab, CONST0_RTX (mode), op0, + target, unsignedp, OPTAB_DIRECT); + if (temp) + return temp; + } + + if (unoptab == one_cmpl_optab) + { + tmode = int_mode_for_mode (mode); + if (tmode != BLKmode) + submode = tmode; + } + + subsize = GET_MODE_SIZE (submode); + subbitsize = GET_MODE_BITSIZE (submode); + elts = size / subsize; + + /* Errors can leave us with a const0_rtx as operand. */ + if (GET_MODE (op0) != mode) + op0 = copy_to_mode_reg (mode, op0); + + if (!target) + target = gen_reg_rtx (mode); + + start_sequence (); + + for (i = 0; i < elts; ++i) + { + /* If this is part of a register, and not the first item in the + word, we can't store using a SUBREG - that would clobber + previous results. + And storing with a SUBREG is only possible for the least + significant part, hence we can't do it for big endian + (unless we want to permute the evaluation order. */ + if (GET_CODE (target) == REG + && (BYTES_BIG_ENDIAN + ? 
subsize < UNITS_PER_WORD + : ((i * subsize) % UNITS_PER_WORD) != 0)) + t = NULL_RTX; + else + t = simplify_gen_subreg (submode, target, mode, i * subsize); + if (CONSTANT_P (op0)) + a = simplify_gen_subreg (submode, op0, mode, i * subsize); + else + a = extract_bit_field (op0, subbitsize, i * subbitsize, unsignedp, + t, submode, submode, size); + + res = expand_unop (submode, unoptab, a, t, unsignedp); + + if (t) + emit_move_insn (t, res); + else + store_bit_field (target, subbitsize, i * subbitsize, submode, res, + size); + } + + seq = get_insns (); + end_sequence (); + emit_insn (seq); + + return target; +} /* Expand a binary operator which has both signed and unsigned forms. UOPTAB is the optab for unsigned operations, and SOPTAB is for @@ -1817,14 +2102,11 @@ expand_binop (mode, binoptab, op0, op1, target, unsignedp, methods) of an unsigned wider operation, since the result would be the same. */ rtx -sign_expand_binop (mode, uoptab, soptab, op0, op1, target, unsignedp, methods) - enum machine_mode mode; - optab uoptab, soptab; - rtx op0, op1, target; - int unsignedp; - enum optab_methods methods; +sign_expand_binop (enum machine_mode mode, optab uoptab, optab soptab, + rtx op0, rtx op1, rtx target, int unsignedp, + enum optab_methods methods) { - register rtx temp; + rtx temp; optab direct_optab = unsignedp ? uoptab : soptab; struct optab wide_soptab; @@ -1879,11 +2161,8 @@ sign_expand_binop (mode, uoptab, soptab, op0, op1, target, unsignedp, methods) Returns 1 if this operation can be performed; 0 if not. */ int -expand_twoval_binop (binoptab, op0, op1, targ0, targ1, unsignedp) - optab binoptab; - rtx op0, op1; - rtx targ0, targ1; - int unsignedp; +expand_twoval_binop (optab binoptab, rtx op0, rtx op1, rtx targ0, rtx targ1, + int unsignedp) { enum machine_mode mode = GET_MODE (targ0 ? targ0 : targ1); enum mode_class class; @@ -1932,13 +2211,25 @@ expand_twoval_binop (binoptab, op0, op1, targ0, targ1, unsignedp) rtx pat; rtx xop0 = op0, xop1 = op1; - /* In case this insn wants input operands in modes different from the - result, convert the operands. */ - if (GET_MODE (op0) != VOIDmode && GET_MODE (op0) != mode0) - xop0 = convert_to_mode (mode0, xop0, unsignedp); - - if (GET_MODE (op1) != VOIDmode && GET_MODE (op1) != mode1) - xop1 = convert_to_mode (mode1, xop1, unsignedp); + /* In case the insn wants input operands in modes different from + those of the actual operands, convert the operands. It would + seem that we don't need to convert CONST_INTs, but we do, so + that they're properly zero-extended, sign-extended or truncated + for their mode. */ + + if (GET_MODE (op0) != mode0 && mode0 != VOIDmode) + xop0 = convert_modes (mode0, + GET_MODE (op0) != VOIDmode + ? GET_MODE (op0) + : mode, + xop0, unsignedp); + + if (GET_MODE (op1) != mode1 && mode1 != VOIDmode) + xop1 = convert_modes (mode1, + GET_MODE (op1) != VOIDmode + ? GET_MODE (op1) + : mode, + xop1, unsignedp); /* Now, if insn doesn't accept these operands, put them into pseudos. */ if (! (*insn_data[icode].operand[1].predicate) (xop0, mode0)) @@ -1952,7 +2243,7 @@ expand_twoval_binop (binoptab, op0, op1, targ0, targ1, unsignedp) if (! (*insn_data[icode].operand[0].predicate) (targ0, mode) || ! 
(*insn_data[icode].operand[3].predicate) (targ1, mode)) abort (); - + pat = GEN_FCN (icode) (targ0, xop0, xop1, targ1); if (pat) { @@ -1973,14 +2264,12 @@ expand_twoval_binop (binoptab, op0, op1, targ0, targ1, unsignedp) if (binoptab->handlers[(int) wider_mode].insn_code != CODE_FOR_nothing) { - register rtx t0 = gen_reg_rtx (wider_mode); - register rtx t1 = gen_reg_rtx (wider_mode); - - if (expand_twoval_binop (binoptab, - convert_modes (wider_mode, mode, op0, - unsignedp), - convert_modes (wider_mode, mode, op1, - unsignedp), + rtx t0 = gen_reg_rtx (wider_mode); + rtx t1 = gen_reg_rtx (wider_mode); + rtx cop0 = convert_modes (wider_mode, mode, op0, unsignedp); + rtx cop1 = convert_modes (wider_mode, mode, op1, unsignedp); + + if (expand_twoval_binop (binoptab, cop0, cop1, t0, t1, unsignedp)) { convert_move (targ0, t0, unsignedp); @@ -1997,6 +2286,97 @@ expand_twoval_binop (binoptab, op0, op1, targ0, targ1, unsignedp) return 0; } +/* Wrapper around expand_unop which takes an rtx code to specify + the operation to perform, not an optab pointer. All other + arguments are the same. */ +rtx +expand_simple_unop (enum machine_mode mode, enum rtx_code code, rtx op0, + rtx target, int unsignedp) +{ + optab unop = code_to_optab[(int) code]; + if (unop == 0) + abort (); + + return expand_unop (mode, unop, op0, target, unsignedp); +} + +/* Try calculating + (clz:narrow x) + as + (clz:wide (zero_extend:wide x)) - ((width wide) - (width narrow)). */ +static rtx +widen_clz (enum machine_mode mode, rtx op0, rtx target) +{ + enum mode_class class = GET_MODE_CLASS (mode); + if (class == MODE_INT || class == MODE_FLOAT || class == MODE_COMPLEX_FLOAT) + { + enum machine_mode wider_mode; + for (wider_mode = GET_MODE_WIDER_MODE (mode); wider_mode != VOIDmode; + wider_mode = GET_MODE_WIDER_MODE (wider_mode)) + { + if (clz_optab->handlers[(int) wider_mode].insn_code + != CODE_FOR_nothing) + { + rtx xop0, temp, last; + + last = get_last_insn (); + + if (target == 0) + target = gen_reg_rtx (mode); + xop0 = widen_operand (op0, wider_mode, mode, true, false); + temp = expand_unop (wider_mode, clz_optab, xop0, NULL_RTX, true); + if (temp != 0) + temp = expand_binop (wider_mode, sub_optab, temp, + GEN_INT (GET_MODE_BITSIZE (wider_mode) + - GET_MODE_BITSIZE (mode)), + target, true, OPTAB_DIRECT); + if (temp == 0) + delete_insns_since (last); + + return temp; + } + } + } + return 0; +} + +/* Try calculating (parity x) as (and (popcount x) 1), where + popcount can also be done in a wider mode. */ +static rtx +expand_parity (enum machine_mode mode, rtx op0, rtx target) +{ + enum mode_class class = GET_MODE_CLASS (mode); + if (class == MODE_INT || class == MODE_FLOAT || class == MODE_COMPLEX_FLOAT) + { + enum machine_mode wider_mode; + for (wider_mode = mode; wider_mode != VOIDmode; + wider_mode = GET_MODE_WIDER_MODE (wider_mode)) + { + if (popcount_optab->handlers[(int) wider_mode].insn_code + != CODE_FOR_nothing) + { + rtx xop0, temp, last; + + last = get_last_insn (); + + if (target == 0) + target = gen_reg_rtx (mode); + xop0 = widen_operand (op0, wider_mode, mode, true, false); + temp = expand_unop (wider_mode, popcount_optab, xop0, NULL_RTX, + true); + if (temp != 0) + temp = expand_binop (wider_mode, and_optab, temp, GEN_INT (1), + target, true, OPTAB_DIRECT); + if (temp == 0) + delete_insns_since (last); + + return temp; + } + } + } + return 0; +} + /* Generate code to perform an operation specified by UNOPTAB on operand OP0, with result having machine-mode MODE. 
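The two helpers added above, widen_clz and expand_parity, rely on simple
arithmetic identities.  A minimal standalone illustration of both follows;
it is not part of this patch, assumes a 32-bit unsigned int, and uses the
GCC builtins only as stand-ins for the wider-mode instructions the
expanders would emit.

#include <assert.h>

/* clz of an 8-bit value, computed the way widen_clz does it: count in
   the wider 32-bit mode on the zero-extended value, then subtract the
   difference in bit widths (32 - 8).  */
static int
narrow_clz (unsigned char x)
{
  return __builtin_clz ((unsigned int) x) - (32 - 8);
}

/* Parity computed the way expand_parity does it: popcount, then mask
   with 1.  */
static int
parity_of (unsigned int x)
{
  return __builtin_popcount (x) & 1;
}

int
main (void)
{
  assert (narrow_clz (0x10) == 3);   /* 00010000 has three leading zeros.  */
  assert (parity_of (0x7) == 1);     /* Three bits set, so odd parity.  */
  return 0;
}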
@@ -2009,16 +2389,12 @@ expand_twoval_binop (binoptab, op0, op1, targ0, targ1, unsignedp) this may or may not be TARGET. */ rtx -expand_unop (mode, unoptab, op0, target, unsignedp) - enum machine_mode mode; - optab unoptab; - rtx op0; - rtx target; - int unsignedp; +expand_unop (enum machine_mode mode, optab unoptab, rtx op0, rtx target, + int unsignedp) { enum mode_class class; enum machine_mode wider_mode; - register rtx temp; + rtx temp; rtx last = get_last_insn (); rtx pat; @@ -2060,7 +2436,7 @@ expand_unop (mode, unoptab, op0, target, unsignedp) pat = GEN_FCN (icode) (temp, xop0); if (pat) { - if (GET_CODE (pat) == SEQUENCE + if (INSN_P (pat) && NEXT_INSN (pat) != NULL_RTX && ! add_equal_note (pat, temp, unoptab->code, xop0, NULL_RTX)) { delete_insns_since (last); @@ -2068,7 +2444,7 @@ expand_unop (mode, unoptab, op0, target, unsignedp) } emit_insn (pat); - + return temp; } else @@ -2077,6 +2453,16 @@ expand_unop (mode, unoptab, op0, target, unsignedp) /* It can't be done in this mode. Can we open-code it in a wider mode? */ + /* Widening clz needs special treatment. */ + if (unoptab == clz_optab) + { + temp = widen_clz (mode, op0, target); + if (temp) + return temp; + else + goto try_libcall; + } + if (class == MODE_INT || class == MODE_FLOAT || class == MODE_COMPLEX_FLOAT) for (wider_mode = GET_MODE_WIDER_MODE (mode); wider_mode != VOIDmode; wider_mode = GET_MODE_WIDER_MODE (wider_mode)) @@ -2093,7 +2479,7 @@ expand_unop (mode, unoptab, op0, target, unsignedp) (unoptab == neg_optab || unoptab == one_cmpl_optab) && class == MODE_INT); - + temp = expand_unop (wider_mode, unoptab, xop0, NULL_RTX, unsignedp); @@ -2120,7 +2506,7 @@ expand_unop (mode, unoptab, op0, target, unsignedp) && GET_MODE_SIZE (mode) > UNITS_PER_WORD && unoptab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing) { - unsigned int i; + int i; rtx insns; if (target == 0 || target == op0) @@ -2135,6 +2521,7 @@ expand_unop (mode, unoptab, op0, target, unsignedp) rtx x = expand_unop (word_mode, unoptab, operand_subword_force (op0, i, mode), target_piece, unsignedp); + if (target_piece != x) emit_move_insn (target_piece, x); } @@ -2156,18 +2543,15 @@ expand_unop (mode, unoptab, op0, target, unsignedp) rtx x; rtx seq; - /* Find the correct mode for the real and imaginary parts */ - enum machine_mode submode - = mode_for_size (GET_MODE_UNIT_SIZE (mode) * BITS_PER_UNIT, - class == MODE_COMPLEX_INT ? MODE_INT : MODE_FLOAT, - 0); + /* Find the correct mode for the real and imaginary parts. */ + enum machine_mode submode = GET_MODE_INNER (mode); if (submode == BLKmode) abort (); if (target == 0) target = gen_reg_rtx (mode); - + start_sequence (); target_piece = gen_imagpart (submode, target); @@ -2193,28 +2577,89 @@ expand_unop (mode, unoptab, op0, target, unsignedp) return target; } + /* Try negating floating point values by flipping the sign bit. */ + if (unoptab->code == NEG && class == MODE_FLOAT + && GET_MODE_BITSIZE (mode) <= 2 * HOST_BITS_PER_WIDE_INT) + { + const struct real_format *fmt = real_format_for_mode[mode - QFmode]; + enum machine_mode imode = int_mode_for_mode (mode); + int bitpos = (fmt != 0) ? fmt->signbit : -1; + + if (imode != BLKmode && bitpos >= 0 && fmt->has_signed_zero) + { + HOST_WIDE_INT hi, lo; + rtx last = get_last_insn (); + + /* Handle targets with different FP word orders. 
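   For example, with BITS_PER_WORD == 32 and a 64-bit floating point mode
   whose words are stored in the opposite order, a sign bit at logical
   position 63 is remapped to position 31 of the integer image.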
*/ + if (FLOAT_WORDS_BIG_ENDIAN != WORDS_BIG_ENDIAN) + { + int nwords = GET_MODE_BITSIZE (mode) / BITS_PER_WORD; + int word = nwords - (bitpos / BITS_PER_WORD) - 1; + bitpos = word * BITS_PER_WORD + bitpos % BITS_PER_WORD; + } + + if (bitpos < HOST_BITS_PER_WIDE_INT) + { + hi = 0; + lo = (HOST_WIDE_INT) 1 << bitpos; + } + else + { + hi = (HOST_WIDE_INT) 1 << (bitpos - HOST_BITS_PER_WIDE_INT); + lo = 0; + } + temp = expand_binop (imode, xor_optab, + gen_lowpart (imode, op0), + immed_double_const (lo, hi, imode), + NULL_RTX, 1, OPTAB_LIB_WIDEN); + if (temp != 0) + return gen_lowpart (mode, temp); + delete_insns_since (last); + } + } + + /* Try calculating parity (x) as popcount (x) % 2. */ + if (unoptab == parity_optab) + { + temp = expand_parity (mode, op0, target); + if (temp) + return temp; + } + + try_libcall: /* Now try a library call in this mode. */ if (unoptab->handlers[(int) mode].libfunc) { rtx insns; rtx value; + enum machine_mode outmode = mode; + + /* All of these functions return small values. Thus we choose to + have them return something that isn't a double-word. */ + if (unoptab == ffs_optab || unoptab == clz_optab || unoptab == ctz_optab + || unoptab == popcount_optab || unoptab == parity_optab) + outmode = TYPE_MODE (integer_type_node); start_sequence (); /* Pass 1 for NO_QUEUE so we don't lose any increments if the libcall is cse'd or moved. */ value = emit_library_call_value (unoptab->handlers[(int) mode].libfunc, - NULL_RTX, LCT_CONST, mode, 1, op0, mode); + NULL_RTX, LCT_CONST, outmode, + 1, op0, mode); insns = get_insns (); end_sequence (); - target = gen_reg_rtx (mode); + target = gen_reg_rtx (outmode); emit_libcall_block (insns, target, value, gen_rtx_fmt_e (unoptab->code, mode, op0)); return target; } + if (class == MODE_VECTOR_FLOAT || class == MODE_VECTOR_INT) + return expand_vector_unop (mode, unoptab, op0, target, unsignedp); + /* It can't be done in this mode. Can we do it in a wider mode? */ if (class == MODE_INT || class == MODE_FLOAT || class == MODE_COMPLEX_FLOAT) @@ -2236,10 +2681,18 @@ expand_unop (mode, unoptab, op0, target, unsignedp) (unoptab == neg_optab || unoptab == one_cmpl_optab) && class == MODE_INT); - + temp = expand_unop (wider_mode, unoptab, xop0, NULL_RTX, unsignedp); + /* If we are generating clz using wider mode, adjust the + result. */ + if (unoptab == clz_optab && temp != 0) + temp = expand_binop (wider_mode, sub_optab, temp, + GEN_INT (GET_MODE_BITSIZE (wider_mode) + - GET_MODE_BITSIZE (mode)), + target, true, OPTAB_DIRECT); + if (temp) { if (class != MODE_INT) @@ -2261,7 +2714,7 @@ expand_unop (mode, unoptab, op0, target, unsignedp) /* If there is no negate operation, try doing a subtract from zero. The US Software GOFAST library needs this. */ if (unoptab->code == NEG) - { + { rtx temp; temp = expand_binop (mode, unoptab == negv_optab ? subv_optab : sub_optab, @@ -2270,7 +2723,7 @@ expand_unop (mode, unoptab, op0, target, unsignedp) if (temp) return temp; } - + return 0; } @@ -2284,14 +2737,10 @@ expand_unop (mode, unoptab, op0, target, unsignedp) */ rtx -expand_abs (mode, op0, target, result_unsignedp, safe) - enum machine_mode mode; - rtx op0; - rtx target; - int result_unsignedp; - int safe; +expand_abs_nojump (enum machine_mode mode, rtx op0, rtx target, + int result_unsignedp) { - rtx temp, op1; + rtx temp; if (! flag_trapv) result_unsignedp = 1; @@ -2302,6 +2751,47 @@ expand_abs (mode, op0, target, result_unsignedp, safe) if (temp != 0) return temp; + /* For floating point modes, try clearing the sign bit. 
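   For IEEE single precision, for instance, this amounts to ANDing the
   SImode image of the value with 0x7fffffff, just as the negation case
   above XORs it with 0x80000000.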
*/ + if (GET_MODE_CLASS (mode) == MODE_FLOAT + && GET_MODE_BITSIZE (mode) <= 2 * HOST_BITS_PER_WIDE_INT) + { + const struct real_format *fmt = real_format_for_mode[mode - QFmode]; + enum machine_mode imode = int_mode_for_mode (mode); + int bitpos = (fmt != 0) ? fmt->signbit : -1; + + if (imode != BLKmode && bitpos >= 0) + { + HOST_WIDE_INT hi, lo; + rtx last = get_last_insn (); + + /* Handle targets with different FP word orders. */ + if (FLOAT_WORDS_BIG_ENDIAN != WORDS_BIG_ENDIAN) + { + int nwords = GET_MODE_BITSIZE (mode) / BITS_PER_WORD; + int word = nwords - (bitpos / BITS_PER_WORD) - 1; + bitpos = word * BITS_PER_WORD + bitpos % BITS_PER_WORD; + } + + if (bitpos < HOST_BITS_PER_WIDE_INT) + { + hi = 0; + lo = (HOST_WIDE_INT) 1 << bitpos; + } + else + { + hi = (HOST_WIDE_INT) 1 << (bitpos - HOST_BITS_PER_WIDE_INT); + lo = 0; + } + temp = expand_binop (imode, and_optab, + gen_lowpart (imode, op0), + immed_double_const (~lo, ~hi, imode), + NULL_RTX, 1, OPTAB_LIB_WIDEN); + if (temp != 0) + return gen_lowpart (mode, temp); + delete_insns_since (last); + } + } + /* If we have a MAX insn, we can do this as MAX (x, -x). */ if (smax_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing) { @@ -2338,10 +2828,26 @@ expand_abs (mode, op0, target, result_unsignedp, safe) return temp; } - /* If that does not win, use conditional jump and negate. */ + return NULL_RTX; +} - /* It is safe to use the target if it is the same - as the source if this is also a pseudo register */ +rtx +expand_abs (enum machine_mode mode, rtx op0, rtx target, + int result_unsignedp, int safe) +{ + rtx temp, op1; + + if (! flag_trapv) + result_unsignedp = 1; + + temp = expand_abs_nojump (mode, op0, target, result_unsignedp); + if (temp != 0) + return temp; + + /* If that does not win, use conditional jump and negate. */ + + /* It is safe to use the target if it is the same + as the source if this is also a pseudo register */ if (op0 == target && GET_CODE (op0) == REG && REGNO (op0) >= FIRST_PSEUDO_REGISTER) safe = 1; @@ -2361,11 +2867,11 @@ expand_abs (mode, op0, target, result_unsignedp, safe) compare word by word. Rely on CSE to optimize constant cases. */ if (GET_MODE_CLASS (mode) == MODE_INT && ! can_compare_p (GE, mode, ccp_jump)) - do_jump_by_parts_greater_rtx (mode, 0, target, const0_rtx, + do_jump_by_parts_greater_rtx (mode, 0, target, const0_rtx, NULL_RTX, op1); else do_compare_rtx_and_jump (target, CONST0_RTX (mode), GE, 0, mode, - NULL_RTX, 0, NULL_RTX, op1); + NULL_RTX, NULL_RTX, op1); op0 = expand_unop (mode, result_unsignedp ? neg_optab : negv_optab, target, target, 0); @@ -2386,25 +2892,19 @@ expand_abs (mode, op0, target, result_unsignedp, safe) UNSIGNEDP is relevant for complex integer modes. */ rtx -expand_complex_abs (mode, op0, target, unsignedp) - enum machine_mode mode; - rtx op0; - rtx target; - int unsignedp; +expand_complex_abs (enum machine_mode mode, rtx op0, rtx target, + int unsignedp) { enum mode_class class = GET_MODE_CLASS (mode); enum machine_mode wider_mode; - register rtx temp; + rtx temp; rtx entry_last = get_last_insn (); rtx last; rtx pat; optab this_abs_optab; /* Find the correct mode for the real and imaginary parts. */ - enum machine_mode submode - = mode_for_size (GET_MODE_UNIT_SIZE (mode) * BITS_PER_UNIT, - class == MODE_COMPLEX_INT ? 
MODE_INT : MODE_FLOAT, - 0); + enum machine_mode submode = GET_MODE_INNER (mode); if (submode == BLKmode) abort (); @@ -2451,17 +2951,17 @@ expand_complex_abs (mode, op0, target, unsignedp) pat = GEN_FCN (icode) (temp, xop0); if (pat) { - if (GET_CODE (pat) == SEQUENCE - && ! add_equal_note (pat, temp, this_abs_optab->code, xop0, + if (INSN_P (pat) && NEXT_INSN (pat) != NULL_RTX + && ! add_equal_note (pat, temp, this_abs_optab->code, xop0, NULL_RTX)) { delete_insns_since (last); - return expand_unop (mode, this_abs_optab, op0, NULL_RTX, + return expand_unop (mode, this_abs_optab, op0, NULL_RTX, unsignedp); } emit_insn (pat); - + return temp; } else @@ -2473,7 +2973,7 @@ expand_complex_abs (mode, op0, target, unsignedp) for (wider_mode = GET_MODE_WIDER_MODE (mode); wider_mode != VOIDmode; wider_mode = GET_MODE_WIDER_MODE (wider_mode)) { - if (this_abs_optab->handlers[(int) wider_mode].insn_code + if (this_abs_optab->handlers[(int) wider_mode].insn_code != CODE_FOR_nothing) { rtx xop0 = op0; @@ -2589,13 +3089,9 @@ expand_complex_abs (mode, op0, target, unsignedp) the value that is stored into TARGET. */ void -emit_unop_insn (icode, target, op0, code) - int icode; - rtx target; - rtx op0; - enum rtx_code code; +emit_unop_insn (int icode, rtx target, rtx op0, enum rtx_code code) { - register rtx temp; + rtx temp; enum machine_mode mode0 = insn_data[icode].operand[1].mode; rtx pat; @@ -2620,9 +3116,9 @@ emit_unop_insn (icode, target, op0, code) pat = GEN_FCN (icode) (temp, op0); - if (GET_CODE (pat) == SEQUENCE && code != UNKNOWN) + if (INSN_P (pat) && NEXT_INSN (pat) != NULL_RTX && code != UNKNOWN) add_equal_note (pat, temp, code, op0, NULL_RTX); - + emit_insn (pat); if (temp != target) @@ -2643,12 +3139,12 @@ emit_unop_insn (icode, target, op0, code) INSNS is a block of code generated to perform the operation, not including the CLOBBER and final copy. All insns that compute intermediate values - are first emitted, followed by the block as described above. + are first emitted, followed by the block as described above. TARGET, OP0, and OP1 are the output and inputs of the operations, respectively. OP1 may be zero for a unary operation. - EQUIV, if non-zero, is an expression to be placed into a REG_EQUAL note + EQUIV, if nonzero, is an expression to be placed into a REG_EQUAL note on the last insn. If TARGET is not a register, INSNS is simply emitted with no special @@ -2658,31 +3154,34 @@ emit_unop_insn (icode, target, op0, code) The final insn emitted is returned. */ rtx -emit_no_conflict_block (insns, target, op0, op1, equiv) - rtx insns; - rtx target; - rtx op0, op1; - rtx equiv; +emit_no_conflict_block (rtx insns, rtx target, rtx op0, rtx op1, rtx equiv) { rtx prev, next, first, last, insn; if (GET_CODE (target) != REG || reload_in_progress) - return emit_insns (insns); + return emit_insn (insns); else for (insn = insns; insn; insn = NEXT_INSN (insn)) if (GET_CODE (insn) != INSN || find_reg_note (insn, REG_LIBCALL, NULL_RTX)) - return emit_insns (insns); + return emit_insn (insns); /* First emit all insns that do not store into words of the output and remove these from the list. */ for (insn = insns; insn; insn = next) { - rtx set = 0; + rtx set = 0, note; int i; next = NEXT_INSN (insn); + /* Some ports (cris) create an libcall regions at their own. We must + avoid any potential nesting of LIBCALLs. 
*/ + if ((note = find_reg_note (insn, REG_LIBCALL, NULL)) != NULL) + remove_note (insn, note); + if ((note = find_reg_note (insn, REG_RETVAL, NULL)) != NULL) + remove_note (insn, note); + if (GET_CODE (PATTERN (insn)) == SET || GET_CODE (PATTERN (insn)) == USE || GET_CODE (PATTERN (insn)) == CLOBBER) set = PATTERN (insn); @@ -2792,11 +3291,7 @@ emit_no_conflict_block (insns, target, op0, op1, equiv) block is delimited by REG_RETVAL and REG_LIBCALL notes. */ void -emit_libcall_block (insns, target, result, equiv) - rtx insns; - rtx target; - rtx result; - rtx equiv; +emit_libcall_block (rtx insns, rtx target, rtx result, rtx equiv) { rtx final_dest = target; rtx prev, next, first, last, insn; @@ -2806,23 +3301,35 @@ emit_libcall_block (insns, target, result, equiv) if (! REG_P (target) || REG_USERVAR_P (target)) target = gen_reg_rtx (GET_MODE (target)); + /* If we're using non-call exceptions, a libcall corresponding to an + operation that may trap may also trap. */ + if (flag_non_call_exceptions && may_trap_p (equiv)) + { + for (insn = insns; insn; insn = NEXT_INSN (insn)) + if (GET_CODE (insn) == CALL_INSN) + { + rtx note = find_reg_note (insn, REG_EH_REGION, NULL_RTX); + + if (note != 0 && INTVAL (XEXP (note, 0)) <= 0) + remove_note (insn, note); + } + } + else /* look for any CALL_INSNs in this sequence, and attach a REG_EH_REGION reg note to indicate that this call cannot throw or execute a nonlocal goto (unless there is already a REG_EH_REGION note, in which case - we update it). Also set the CONST_CALL_P flag. */ + we update it). */ + for (insn = insns; insn; insn = NEXT_INSN (insn)) + if (GET_CODE (insn) == CALL_INSN) + { + rtx note = find_reg_note (insn, REG_EH_REGION, NULL_RTX); - for (insn = insns; insn; insn = NEXT_INSN (insn)) - if (GET_CODE (insn) == CALL_INSN) - { - rtx note = find_reg_note (insn, REG_EH_REGION, NULL_RTX); - - CONST_CALL_P (insn) = 1; - if (note != 0) - XEXP (note, 0) = GEN_INT (-1); - else - REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_EH_REGION, GEN_INT (-1), - REG_NOTES (insn)); - } + if (note != 0) + XEXP (note, 0) = GEN_INT (-1); + else + REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_EH_REGION, GEN_INT (-1), + REG_NOTES (insn)); + } /* First emit all insns that set pseudos. Remove them from the list as we go. Avoid insns that set pseudos which were referenced in previous @@ -2833,6 +3340,14 @@ emit_libcall_block (insns, target, result, equiv) for (insn = insns; insn; insn = next) { rtx set = single_set (insn); + rtx note; + + /* Some ports (cris) create an libcall regions at their own. We must + avoid any potential nesting of LIBCALLs. */ + if ((note = find_reg_note (insn, REG_LIBCALL, NULL)) != NULL) + remove_note (insn, note); + if ((note = find_reg_note (insn, REG_RETVAL, NULL)) != NULL) + remove_note (insn, note); next = NEXT_INSN (insn); @@ -2855,6 +3370,11 @@ emit_libcall_block (insns, target, result, equiv) add_insn (insn); } + + /* Some ports use a loop to copy large arguments onto the stack. + Don't move anything outside such a loop. */ + if (GET_CODE (insn) == CODE_LABEL) + break; } prev = get_last_insn (); @@ -2891,16 +3411,35 @@ emit_libcall_block (insns, target, result, equiv) first = NEXT_INSN (prev); /* Encapsulate the block so it gets manipulated as a unit. 
*/ - REG_NOTES (first) = gen_rtx_INSN_LIST (REG_LIBCALL, last, - REG_NOTES (first)); - REG_NOTES (last) = gen_rtx_INSN_LIST (REG_RETVAL, first, REG_NOTES (last)); + if (!flag_non_call_exceptions || !may_trap_p (equiv)) + { + /* We can't attach the REG_LIBCALL and REG_RETVAL notes + when the encapsulated region would not be in one basic block, + i.e. when there is a control_flow_insn_p insn between FIRST and LAST. + */ + bool attach_libcall_retval_notes = true; + next = NEXT_INSN (last); + for (insn = first; insn != next; insn = NEXT_INSN (insn)) + if (control_flow_insn_p (insn)) + { + attach_libcall_retval_notes = false; + break; + } + + if (attach_libcall_retval_notes) + { + REG_NOTES (first) = gen_rtx_INSN_LIST (REG_LIBCALL, last, + REG_NOTES (first)); + REG_NOTES (last) = gen_rtx_INSN_LIST (REG_RETVAL, first, + REG_NOTES (last)); + } + } } /* Generate code to store zero in X. */ void -emit_clr_insn (x) - rtx x; +emit_clr_insn (rtx x) { emit_move_insn (x, const0_rtx); } @@ -2909,8 +3448,7 @@ emit_clr_insn (x) assuming it contains zero beforehand. */ void -emit_0_to_1_insn (x) - rtx x; +emit_0_to_1_insn (rtx x) { emit_move_insn (x, const1_rtx); } @@ -2920,35 +3458,33 @@ emit_0_to_1_insn (x) comparison code we will be using. ??? Actually, CODE is slightly weaker than that. A target is still - required to implement all of the normal bcc operations, but not + required to implement all of the normal bcc operations, but not required to implement all (or any) of the unordered bcc operations. */ - + int -can_compare_p (code, mode, purpose) - enum rtx_code code; - enum machine_mode mode; - enum can_compare_purpose purpose; +can_compare_p (enum rtx_code code, enum machine_mode mode, + enum can_compare_purpose purpose) { do { - if (cmp_optab->handlers[(int)mode].insn_code != CODE_FOR_nothing) + if (cmp_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing) { if (purpose == ccp_jump) - return bcc_gen_fctn[(int)code] != NULL; + return bcc_gen_fctn[(int) code] != NULL; else if (purpose == ccp_store_flag) - return setcc_gen_code[(int)code] != CODE_FOR_nothing; + return setcc_gen_code[(int) code] != CODE_FOR_nothing; else /* There's only one cmov entry point, and it's allowed to fail. */ return 1; } if (purpose == ccp_jump - && cbranch_optab->handlers[(int)mode].insn_code != CODE_FOR_nothing) + && cbranch_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing) return 1; if (purpose == ccp_cmov - && cmov_optab->handlers[(int)mode].insn_code != CODE_FOR_nothing) + && cmov_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing) return 1; if (purpose == ccp_store_flag - && cstore_optab->handlers[(int)mode].insn_code != CODE_FOR_nothing) + && cstore_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing) return 1; mode = GET_MODE_WIDER_MODE (mode); @@ -2965,8 +3501,7 @@ can_compare_p (code, mode, purpose) *PUNSIGNEDP nonzero says that the operands are unsigned; this matters if they need to be widened. - If they have mode BLKmode, then SIZE specifies the size of both operands, - and ALIGN specifies the known shared alignment of the operands. + If they have mode BLKmode, then SIZE specifies the size of both operands. This function performs all the setup necessary so that the caller only has to emit a single comparison insn. This setup can involve doing a BLKmode @@ -2975,22 +3510,15 @@ can_compare_p (code, mode, purpose) The values which are passed in through pointers can be modified; the caller should perform the comparison on the modified values. 
*/ -void -prepare_cmp_insn (px, py, pcomparison, size, pmode, punsignedp, align, - purpose) - rtx *px, *py; - enum rtx_code *pcomparison; - rtx size; - enum machine_mode *pmode; - int *punsignedp; - int align ATTRIBUTE_UNUSED; - enum can_compare_purpose purpose; +static void +prepare_cmp_insn (rtx *px, rtx *py, enum rtx_code *pcomparison, rtx size, + enum machine_mode *pmode, int *punsignedp, + enum can_compare_purpose purpose) { enum machine_mode mode = *pmode; rtx x = *px, y = *py; int unsignedp = *punsignedp; enum mode_class class; - rtx opalign ATTRIBUTE_UNUSED = GEN_INT (align / BITS_PER_UNIT);; class = GET_MODE_CLASS (mode); @@ -3001,8 +3529,17 @@ prepare_cmp_insn (px, py, pcomparison, size, pmode, punsignedp, align, if (mode != BLKmode && flag_force_mem) { - x = force_not_mem (x); - y = force_not_mem (y); + /* Load duplicate non-volatile operands once. */ + if (rtx_equal_p (x, y) && ! volatile_refs_p (x)) + { + x = force_not_mem (x); + y = x; + } + else + { + x = force_not_mem (x); + y = force_not_mem (y); + } } /* If we are inside an appropriately-short loop and one operand is an @@ -3020,7 +3557,7 @@ prepare_cmp_insn (px, py, pcomparison, size, pmode, punsignedp, align, states that canonical comparisons are required only for targets which have cc0. */ if (CONSTANT_P (x) && ! CONSTANT_P (y)) - abort(); + abort (); #endif /* Don't let both operands fail to indicate the mode. */ @@ -3033,6 +3570,8 @@ prepare_cmp_insn (px, py, pcomparison, size, pmode, punsignedp, align, { rtx result; enum machine_mode result_mode; + rtx opalign ATTRIBUTE_UNUSED + = GEN_INT (MIN (MEM_ALIGN (x), MEM_ALIGN (y)) / BITS_PER_UNIT); emit_queue (); x = protect_from_queue (x, 0); @@ -3040,6 +3579,40 @@ prepare_cmp_insn (px, py, pcomparison, size, pmode, punsignedp, align, if (size == 0) abort (); +#ifdef HAVE_cmpmemqi + if (HAVE_cmpmemqi + && GET_CODE (size) == CONST_INT + && INTVAL (size) < (1 << GET_MODE_BITSIZE (QImode))) + { + result_mode = insn_data[(int) CODE_FOR_cmpmemqi].operand[0].mode; + result = gen_reg_rtx (result_mode); + emit_insn (gen_cmpmemqi (result, x, y, size, opalign)); + } + else +#endif +#ifdef HAVE_cmpmemhi + if (HAVE_cmpmemhi + && GET_CODE (size) == CONST_INT + && INTVAL (size) < (1 << GET_MODE_BITSIZE (HImode))) + { + result_mode = insn_data[(int) CODE_FOR_cmpmemhi].operand[0].mode; + result = gen_reg_rtx (result_mode); + emit_insn (gen_cmpmemhi (result, x, y, size, opalign)); + } + else +#endif +#ifdef HAVE_cmpmemsi + if (HAVE_cmpmemsi) + { + result_mode = insn_data[(int) CODE_FOR_cmpmemsi].operand[0].mode; + result = gen_reg_rtx (result_mode); + size = protect_from_queue (size, 0); + emit_insn (gen_cmpmemsi (result, x, y, + convert_to_mode (SImode, size, 1), + opalign)); + } + else +#endif #ifdef HAVE_cmpstrqi if (HAVE_cmpstrqi && GET_CODE (size) == CONST_INT @@ -3076,29 +3649,23 @@ prepare_cmp_insn (px, py, pcomparison, size, pmode, punsignedp, align, #endif { #ifdef TARGET_MEM_FUNCTIONS - emit_library_call (memcmp_libfunc, LCT_PURE_MAKE_BLOCK, - TYPE_MODE (integer_type_node), 3, - XEXP (x, 0), Pmode, XEXP (y, 0), Pmode, - convert_to_mode (TYPE_MODE (sizetype), size, - TREE_UNSIGNED (sizetype)), - TYPE_MODE (sizetype)); + result = emit_library_call_value (memcmp_libfunc, NULL_RTX, LCT_PURE_MAKE_BLOCK, + TYPE_MODE (integer_type_node), 3, + XEXP (x, 0), Pmode, XEXP (y, 0), Pmode, + convert_to_mode (TYPE_MODE (sizetype), size, + TREE_UNSIGNED (sizetype)), + TYPE_MODE (sizetype)); #else - emit_library_call (bcmp_libfunc, LCT_PURE_MAKE_BLOCK, - TYPE_MODE (integer_type_node), 3, - XEXP 
(x, 0), Pmode, XEXP (y, 0), Pmode, - convert_to_mode (TYPE_MODE (integer_type_node), - size, - TREE_UNSIGNED (integer_type_node)), - TYPE_MODE (integer_type_node)); + result = emit_library_call_value (bcmp_libfunc, NULL_RTX, LCT_PURE_MAKE_BLOCK, + TYPE_MODE (integer_type_node), 3, + XEXP (x, 0), Pmode, XEXP (y, 0), Pmode, + convert_to_mode (TYPE_MODE (integer_type_node), + size, + TREE_UNSIGNED (integer_type_node)), + TYPE_MODE (integer_type_node)); #endif - /* Immediately move the result of the libcall into a pseudo - register so reload doesn't clobber the value if it needs - the return register for a spill reg. */ - result = gen_reg_rtx (TYPE_MODE (integer_type_node)); result_mode = TYPE_MODE (integer_type_node); - emit_move_insn (result, - hard_libcall_value (result_mode)); } *px = result; *py = const0_rtx; @@ -3123,14 +3690,8 @@ prepare_cmp_insn (px, py, pcomparison, size, pmode, punsignedp, align, if (unsignedp && ucmp_optab->handlers[(int) mode].libfunc) libfunc = ucmp_optab->handlers[(int) mode].libfunc; - emit_library_call (libfunc, 1, - word_mode, 2, x, mode, y, mode); - - /* Immediately move the result of the libcall into a pseudo - register so reload doesn't clobber the value if it needs - the return register for a spill reg. */ - result = gen_reg_rtx (word_mode); - emit_move_insn (result, hard_libcall_value (word_mode)); + result = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST_MAKE_BLOCK, + word_mode, 2, x, mode, y, mode); /* Integer comparison returns a result that must be compared against 1, so that even if we do an unsigned compare afterward, @@ -3150,16 +3711,12 @@ prepare_cmp_insn (px, py, pcomparison, size, pmode, punsignedp, align, /* Before emitting an insn with code ICODE, make sure that X, which is going to be used for operand OPNUM of the insn, is converted from mode MODE to - WIDER_MODE (UNSIGNEDP determines whether it is a unsigned conversion), and + WIDER_MODE (UNSIGNEDP determines whether it is an unsigned conversion), and that it is accepted by the operand predicate. Return the new value. */ rtx -prepare_operand (icode, x, opnum, mode, wider_mode, unsignedp) - int icode; - rtx x; - int opnum; - enum machine_mode mode, wider_mode; - int unsignedp; +prepare_operand (int icode, rtx x, int opnum, enum machine_mode mode, + enum machine_mode wider_mode, int unsignedp) { x = protect_from_queue (x, 0); @@ -3168,7 +3725,12 @@ prepare_operand (icode, x, opnum, mode, wider_mode, unsignedp) if (! (*insn_data[icode].operand[opnum].predicate) (x, insn_data[icode].operand[opnum].mode)) - x = copy_to_mode_reg (insn_data[icode].operand[opnum].mode, x); + { + if (no_new_pseudos) + return NULL_RTX; + x = copy_to_mode_reg (insn_data[icode].operand[opnum].mode, x); + } + return x; } @@ -3178,12 +3740,8 @@ prepare_operand (icode, x, opnum, mode, wider_mode, unsignedp) be NULL_RTX which indicates that only a comparison is to be generated. 
*/ static void -emit_cmp_and_jump_insn_1 (x, y, mode, comparison, unsignedp, label) - rtx x, y; - enum machine_mode mode; - enum rtx_code comparison; - int unsignedp; - rtx label; +emit_cmp_and_jump_insn_1 (rtx x, rtx y, enum machine_mode mode, + enum rtx_code comparison, int unsignedp, rtx label) { rtx test = gen_rtx_fmt_ee (comparison, mode, x, y); enum mode_class class = GET_MODE_CLASS (mode); @@ -3196,9 +3754,9 @@ emit_cmp_and_jump_insn_1 (x, y, mode, comparison, unsignedp, label) PUT_MODE (test, wider_mode); if (label) - { - icode = cbranch_optab->handlers[(int)wider_mode].insn_code; - + { + icode = cbranch_optab->handlers[(int) wider_mode].insn_code; + if (icode != CODE_FOR_nothing && (*insn_data[icode].operand[0].predicate) (test, wider_mode)) { @@ -3238,7 +3796,8 @@ emit_cmp_and_jump_insn_1 (x, y, mode, comparison, unsignedp, label) break; wider_mode = GET_MODE_WIDER_MODE (wider_mode); - } while (wider_mode != VOIDmode); + } + while (wider_mode != VOIDmode); abort (); } @@ -3252,8 +3811,7 @@ emit_cmp_and_jump_insn_1 (x, y, mode, comparison, unsignedp, label) need to be widened by emit_cmp_insn. UNSIGNEDP is also used to select the proper branch condition code. - If X and Y have mode BLKmode, then SIZE specifies the size of both X and Y, - and ALIGN specifies the known shared alignment of X and Y. + If X and Y have mode BLKmode, then SIZE specifies the size of both X and Y. MODE is the mode of the inputs (in case they are const_int). @@ -3262,31 +3820,22 @@ emit_cmp_and_jump_insn_1 (x, y, mode, comparison, unsignedp, label) unsigned variant based on UNSIGNEDP to select a proper jump instruction. */ void -emit_cmp_and_jump_insns (x, y, comparison, size, mode, unsignedp, align, label) - rtx x, y; - enum rtx_code comparison; - rtx size; - enum machine_mode mode; - int unsignedp; - unsigned int align; - rtx label; +emit_cmp_and_jump_insns (rtx x, rtx y, enum rtx_code comparison, rtx size, + enum machine_mode mode, int unsignedp, rtx label) { - rtx op0; - rtx op1; - - if ((CONSTANT_P (x) && ! CONSTANT_P (y)) - || (GET_CODE (x) == CONST_INT && GET_CODE (y) != CONST_INT)) + rtx op0 = x, op1 = y; + + /* Swap operands and condition to ensure canonical RTL. */ + if (swap_commutative_operands_p (x, y)) { - /* Swap operands and condition to ensure canonical RTL. */ - op0 = y; - op1 = x; + /* If we're not emitting a branch, this means some caller + is out of sync. */ + if (! label) + abort (); + + op0 = y, op1 = x; comparison = swap_condition (comparison); } - else - { - op0 = x; - op1 = y; - } #ifdef HAVE_cc0 /* If OP0 is still a constant, then both X and Y must be constants. Force @@ -3299,7 +3848,8 @@ emit_cmp_and_jump_insns (x, y, comparison, size, mode, unsignedp, align, label) emit_queue (); if (unsignedp) comparison = unsigned_condition (comparison); - prepare_cmp_insn (&op0, &op1, &comparison, size, &mode, &unsignedp, align, + + prepare_cmp_insn (&op0, &op1, &comparison, size, &mode, &unsignedp, ccp_jump); emit_cmp_and_jump_insn_1 (op0, op1, mode, comparison, unsignedp, label); } @@ -3307,28 +3857,21 @@ emit_cmp_and_jump_insns (x, y, comparison, size, mode, unsignedp, align, label) /* Like emit_cmp_and_jump_insns, but generate only the comparison. 
*/ void -emit_cmp_insn (x, y, comparison, size, mode, unsignedp, align) - rtx x, y; - enum rtx_code comparison; - rtx size; - enum machine_mode mode; - int unsignedp; - unsigned int align; +emit_cmp_insn (rtx x, rtx y, enum rtx_code comparison, rtx size, + enum machine_mode mode, int unsignedp) { - emit_cmp_and_jump_insns (x, y, comparison, size, mode, unsignedp, align, 0); + emit_cmp_and_jump_insns (x, y, comparison, size, mode, unsignedp, 0); } /* Emit a library call comparison between floating point X and Y. COMPARISON is the rtl operator to compare with (EQ, NE, GT, etc.). */ static void -prepare_float_lib_cmp (px, py, pcomparison, pmode, punsignedp) - rtx *px, *py; - enum rtx_code *pcomparison; - enum machine_mode *pmode; - int *punsignedp; +prepare_float_lib_cmp (rtx *px, rtx *py, enum rtx_code *pcomparison, + enum machine_mode *pmode, int *punsignedp) { enum rtx_code comparison = *pcomparison; + rtx tmp; rtx x = *px = protect_from_queue (*px, 0); rtx y = *py = protect_from_queue (*py, 0); enum machine_mode mode = GET_MODE (x); @@ -3348,18 +3891,42 @@ prepare_float_lib_cmp (px, py, pcomparison, pmode, punsignedp) case GT: libfunc = gthf2_libfunc; + if (libfunc == NULL_RTX) + { + tmp = x; x = y; y = tmp; + *pcomparison = LT; + libfunc = lthf2_libfunc; + } break; case GE: libfunc = gehf2_libfunc; + if (libfunc == NULL_RTX) + { + tmp = x; x = y; y = tmp; + *pcomparison = LE; + libfunc = lehf2_libfunc; + } break; case LT: libfunc = lthf2_libfunc; + if (libfunc == NULL_RTX) + { + tmp = x; x = y; y = tmp; + *pcomparison = GT; + libfunc = gthf2_libfunc; + } break; case LE: libfunc = lehf2_libfunc; + if (libfunc == NULL_RTX) + { + tmp = x; x = y; y = tmp; + *pcomparison = GE; + libfunc = gehf2_libfunc; + } break; case UNORDERED: @@ -3382,18 +3949,42 @@ prepare_float_lib_cmp (px, py, pcomparison, pmode, punsignedp) case GT: libfunc = gtsf2_libfunc; + if (libfunc == NULL_RTX) + { + tmp = x; x = y; y = tmp; + *pcomparison = LT; + libfunc = ltsf2_libfunc; + } break; case GE: libfunc = gesf2_libfunc; + if (libfunc == NULL_RTX) + { + tmp = x; x = y; y = tmp; + *pcomparison = LE; + libfunc = lesf2_libfunc; + } break; case LT: libfunc = ltsf2_libfunc; + if (libfunc == NULL_RTX) + { + tmp = x; x = y; y = tmp; + *pcomparison = GT; + libfunc = gtsf2_libfunc; + } break; case LE: libfunc = lesf2_libfunc; + if (libfunc == NULL_RTX) + { + tmp = x; x = y; y = tmp; + *pcomparison = GE; + libfunc = gesf2_libfunc; + } break; case UNORDERED: @@ -3416,18 +4007,42 @@ prepare_float_lib_cmp (px, py, pcomparison, pmode, punsignedp) case GT: libfunc = gtdf2_libfunc; + if (libfunc == NULL_RTX) + { + tmp = x; x = y; y = tmp; + *pcomparison = LT; + libfunc = ltdf2_libfunc; + } break; case GE: libfunc = gedf2_libfunc; + if (libfunc == NULL_RTX) + { + tmp = x; x = y; y = tmp; + *pcomparison = LE; + libfunc = ledf2_libfunc; + } break; case LT: libfunc = ltdf2_libfunc; + if (libfunc == NULL_RTX) + { + tmp = x; x = y; y = tmp; + *pcomparison = GT; + libfunc = gtdf2_libfunc; + } break; case LE: libfunc = ledf2_libfunc; + if (libfunc == NULL_RTX) + { + tmp = x; x = y; y = tmp; + *pcomparison = GE; + libfunc = gedf2_libfunc; + } break; case UNORDERED: @@ -3450,18 +4065,42 @@ prepare_float_lib_cmp (px, py, pcomparison, pmode, punsignedp) case GT: libfunc = gtxf2_libfunc; + if (libfunc == NULL_RTX) + { + tmp = x; x = y; y = tmp; + *pcomparison = LT; + libfunc = ltxf2_libfunc; + } break; case GE: libfunc = gexf2_libfunc; + if (libfunc == NULL_RTX) + { + tmp = x; x = y; y = tmp; + *pcomparison = LE; + libfunc = lexf2_libfunc; + } 
break; case LT: libfunc = ltxf2_libfunc; + if (libfunc == NULL_RTX) + { + tmp = x; x = y; y = tmp; + *pcomparison = GT; + libfunc = gtxf2_libfunc; + } break; case LE: libfunc = lexf2_libfunc; + if (libfunc == NULL_RTX) + { + tmp = x; x = y; y = tmp; + *pcomparison = GE; + libfunc = gexf2_libfunc; + } break; case UNORDERED: @@ -3484,18 +4123,42 @@ prepare_float_lib_cmp (px, py, pcomparison, pmode, punsignedp) case GT: libfunc = gttf2_libfunc; + if (libfunc == NULL_RTX) + { + tmp = x; x = y; y = tmp; + *pcomparison = LT; + libfunc = lttf2_libfunc; + } break; case GE: libfunc = getf2_libfunc; + if (libfunc == NULL_RTX) + { + tmp = x; x = y; y = tmp; + *pcomparison = LE; + libfunc = letf2_libfunc; + } break; case LT: libfunc = lttf2_libfunc; + if (libfunc == NULL_RTX) + { + tmp = x; x = y; y = tmp; + *pcomparison = GT; + libfunc = gttf2_libfunc; + } break; case LE: libfunc = letf2_libfunc; + if (libfunc == NULL_RTX) + { + tmp = x; x = y; y = tmp; + *pcomparison = GE; + libfunc = getf2_libfunc; + } break; case UNORDERED: @@ -3530,14 +4193,8 @@ prepare_float_lib_cmp (px, py, pcomparison, pmode, punsignedp) if (libfunc == 0) abort (); - emit_library_call (libfunc, LCT_CONST_MAKE_BLOCK, word_mode, 2, x, mode, y, - mode); - - /* Immediately move the result of the libcall into a pseudo - register so reload doesn't clobber the value if it needs - the return register for a spill reg. */ - result = gen_reg_rtx (word_mode); - emit_move_insn (result, hard_libcall_value (word_mode)); + result = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST_MAKE_BLOCK, + word_mode, 2, x, mode, y, mode); *px = result; *py = const0_rtx; *pmode = word_mode; @@ -3553,10 +4210,9 @@ prepare_float_lib_cmp (px, py, pcomparison, pmode, punsignedp) /* Generate code to indirectly jump to a location given in the rtx LOC. */ void -emit_indirect_jump (loc) - rtx loc; +emit_indirect_jump (rtx loc) { - if (! ((*insn_data[(int)CODE_FOR_indirect_jump].operand[0].predicate) + if (! ((*insn_data[(int) CODE_FOR_indirect_jump].operand[0].predicate) (loc, Pmode))) loc = copy_to_mode_reg (Pmode, loc); @@ -3581,24 +4237,18 @@ emit_indirect_jump (loc) is not supported. */ rtx -emit_conditional_move (target, code, op0, op1, cmode, op2, op3, mode, - unsignedp) - rtx target; - enum rtx_code code; - rtx op0, op1; - enum machine_mode cmode; - rtx op2, op3; - enum machine_mode mode; - int unsignedp; +emit_conditional_move (rtx target, enum rtx_code code, rtx op0, rtx op1, + enum machine_mode cmode, rtx op2, rtx op3, + enum machine_mode mode, int unsignedp) { rtx tem, subtarget, comparison, insn; enum insn_code icode; + enum rtx_code reversed; /* If one operand is constant, make it the second one. Only do this if the other operand is not constant as well. */ - if ((CONSTANT_P (op0) && ! CONSTANT_P (op1)) - || (GET_CODE (op0) == CONST_INT && GET_CODE (op1) != CONST_INT)) + if (swap_commutative_operands_p (op0, op1)) { tem = op0; op0 = op1; @@ -3609,24 +4259,22 @@ emit_conditional_move (target, code, op0, op1, cmode, op2, op3, mode, /* get_condition will prefer to generate LT and GT even if the old comparison was against zero, so undo that canonicalization here since comparisons against zero are cheaper. 
*/ - if (code == LT && GET_CODE (op1) == CONST_INT && INTVAL (op1) == 1) + if (code == LT && op1 == const1_rtx) code = LE, op1 = const0_rtx; - else if (code == GT && GET_CODE (op1) == CONST_INT && INTVAL (op1) == -1) + else if (code == GT && op1 == constm1_rtx) code = GE, op1 = const0_rtx; if (cmode == VOIDmode) cmode = GET_MODE (op0); - if (((CONSTANT_P (op2) && ! CONSTANT_P (op3)) - || (GET_CODE (op2) == CONST_INT && GET_CODE (op3) != CONST_INT)) - && (GET_MODE_CLASS (GET_MODE (op1)) != MODE_FLOAT - || TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT - || flag_unsafe_math_optimizations)) + if (swap_commutative_operands_p (op2, op3) + && ((reversed = reversed_comparison_code_parts (code, op0, op1, NULL)) + != UNKNOWN)) { tem = op2; op2 = op3; op3 = tem; - code = reverse_condition (code); + code = reversed; } if (mode == VOIDmode) @@ -3672,8 +4320,8 @@ emit_conditional_move (target, code, op0, op1, cmode, op2, op3, mode, /* Everything should now be in the suitable form, so emit the compare insn and then the conditional move. */ - comparison - = compare_from_rtx (op0, op1, code, unsignedp, cmode, NULL_RTX, 0); + comparison + = compare_from_rtx (op0, op1, code, unsignedp, cmode, NULL_RTX); /* ??? Watch for const0_rtx (nop) and const_true_rtx (unconditional)? */ /* We can get const0_rtx or const_true_rtx in some circumstances. Just @@ -3681,7 +4329,7 @@ emit_conditional_move (target, code, op0, op1, cmode, op2, op3, mode, situation. */ if (GET_CODE (comparison) != code) return NULL_RTX; - + insn = GEN_FCN (icode) (subtarget, comparison, op2, op3); /* If that failed, then give up. */ @@ -3696,7 +4344,7 @@ emit_conditional_move (target, code, op0, op1, cmode, op2, op3, mode, return target; } -/* Return non-zero if a conditional move of mode MODE is supported. +/* Return nonzero if a conditional move of mode MODE is supported. This function is for combine so it can tell whether an insn that looks like a conditional move is actually supported by the hardware. If we @@ -3705,8 +4353,7 @@ emit_conditional_move (target, code, op0, op1, cmode, op2, op3, mode, comparisons, and vice versa. How do we handle them? */ int -can_conditionally_move_p (mode) - enum machine_mode mode; +can_conditionally_move_p (enum machine_mode mode) { if (movcc_gen_code[mode] != CODE_FOR_nothing) return 1; @@ -3715,9 +4362,132 @@ can_conditionally_move_p (mode) } #endif /* HAVE_conditional_move */ + +/* Emit a conditional addition instruction if the machine supports one for that + condition and machine mode. + + OP0 and OP1 are the operands that should be compared using CODE. CMODE is + the mode to use should they be constants. If it is VOIDmode, they cannot + both be constants. + + OP2 should be stored in TARGET if the comparison is true, otherwise OP2+OP3 + should be stored there. MODE is the mode to use should they be constants. + If it is VOIDmode, they cannot both be constants. + + The result is either TARGET (perhaps modified) or NULL_RTX if the operation + is not supported. */ + +rtx +emit_conditional_add (rtx target, enum rtx_code code, rtx op0, rtx op1, + enum machine_mode cmode, rtx op2, rtx op3, + enum machine_mode mode, int unsignedp) +{ + rtx tem, subtarget, comparison, insn; + enum insn_code icode; + enum rtx_code reversed; + + /* If one operand is constant, make it the second one. Only do this + if the other operand is not constant as well. 
*/ + + if (swap_commutative_operands_p (op0, op1)) + { + tem = op0; + op0 = op1; + op1 = tem; + code = swap_condition (code); + } + + /* get_condition will prefer to generate LT and GT even if the old + comparison was against zero, so undo that canonicalization here since + comparisons against zero are cheaper. */ + if (code == LT && op1 == const1_rtx) + code = LE, op1 = const0_rtx; + else if (code == GT && op1 == constm1_rtx) + code = GE, op1 = const0_rtx; + + if (cmode == VOIDmode) + cmode = GET_MODE (op0); + + if (swap_commutative_operands_p (op2, op3) + && ((reversed = reversed_comparison_code_parts (code, op0, op1, NULL)) + != UNKNOWN)) + { + tem = op2; + op2 = op3; + op3 = tem; + code = reversed; + } + + if (mode == VOIDmode) + mode = GET_MODE (op2); + + icode = addcc_optab->handlers[(int) mode].insn_code; + + if (icode == CODE_FOR_nothing) + return 0; + + if (flag_force_mem) + { + op2 = force_not_mem (op2); + op3 = force_not_mem (op3); + } + + if (target) + target = protect_from_queue (target, 1); + else + target = gen_reg_rtx (mode); + + subtarget = target; + + emit_queue (); + + op2 = protect_from_queue (op2, 0); + op3 = protect_from_queue (op3, 0); + + /* If the insn doesn't accept these operands, put them in pseudos. */ + + if (! (*insn_data[icode].operand[0].predicate) + (subtarget, insn_data[icode].operand[0].mode)) + subtarget = gen_reg_rtx (insn_data[icode].operand[0].mode); + + if (! (*insn_data[icode].operand[2].predicate) + (op2, insn_data[icode].operand[2].mode)) + op2 = copy_to_mode_reg (insn_data[icode].operand[2].mode, op2); + + if (! (*insn_data[icode].operand[3].predicate) + (op3, insn_data[icode].operand[3].mode)) + op3 = copy_to_mode_reg (insn_data[icode].operand[3].mode, op3); + + /* Everything should now be in the suitable form, so emit the compare insn + and then the conditional move. */ + + comparison + = compare_from_rtx (op0, op1, code, unsignedp, cmode, NULL_RTX); + + /* ??? Watch for const0_rtx (nop) and const_true_rtx (unconditional)? */ + /* We can get const0_rtx or const_true_rtx in some circumstances. Just + return NULL and let the caller figure out how best to deal with this + situation. */ + if (GET_CODE (comparison) != code) + return NULL_RTX; + + insn = GEN_FCN (icode) (subtarget, comparison, op2, op3); + + /* If that failed, then give up. */ + if (insn == 0) + return 0; + + emit_insn (insn); + + if (subtarget != target) + convert_move (target, subtarget, 0); + + return target; +} -/* These three functions generate an insn body and return it - rather than emitting the insn. +/* These functions attempt to generate an insn body, rather than + emitting the insn, but if the gen function already emits them, we + make no attempt to turn them back into naked patterns. They do not protect from queued increments, because they may be used 1) in protect_from_queue itself @@ -3726,10 +4496,9 @@ can_conditionally_move_p (mode) /* Generate and return an insn body to add Y to X. */ rtx -gen_add2_insn (x, y) - rtx x, y; +gen_add2_insn (rtx x, rtx y) { - int icode = (int) add_optab->handlers[(int) GET_MODE (x)].insn_code; + int icode = (int) add_optab->handlers[(int) GET_MODE (x)].insn_code; if (! ((*insn_data[icode].operand[0].predicate) (x, insn_data[icode].operand[0].mode)) @@ -3742,20 +4511,55 @@ gen_add2_insn (x, y) return (GEN_FCN (icode) (x, x, y)); } +/* Generate and return an insn body to add r1 and c, + storing the result in r0. 
*/ +rtx +gen_add3_insn (rtx r0, rtx r1, rtx c) +{ + int icode = (int) add_optab->handlers[(int) GET_MODE (r0)].insn_code; + + if (icode == CODE_FOR_nothing + || ! ((*insn_data[icode].operand[0].predicate) + (r0, insn_data[icode].operand[0].mode)) + || ! ((*insn_data[icode].operand[1].predicate) + (r1, insn_data[icode].operand[1].mode)) + || ! ((*insn_data[icode].operand[2].predicate) + (c, insn_data[icode].operand[2].mode))) + return NULL_RTX; + + return (GEN_FCN (icode) (r0, r1, c)); +} + int -have_add2_insn (mode) - enum machine_mode mode; +have_add2_insn (rtx x, rtx y) { - return add_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing; + int icode; + + if (GET_MODE (x) == VOIDmode) + abort (); + + icode = (int) add_optab->handlers[(int) GET_MODE (x)].insn_code; + + if (icode == CODE_FOR_nothing) + return 0; + + if (! ((*insn_data[icode].operand[0].predicate) + (x, insn_data[icode].operand[0].mode)) + || ! ((*insn_data[icode].operand[1].predicate) + (x, insn_data[icode].operand[1].mode)) + || ! ((*insn_data[icode].operand[2].predicate) + (y, insn_data[icode].operand[2].mode))) + return 0; + + return 1; } /* Generate and return an insn body to subtract Y from X. */ rtx -gen_sub2_insn (x, y) - rtx x, y; +gen_sub2_insn (rtx x, rtx y) { - int icode = (int) sub_optab->handlers[(int) GET_MODE (x)].insn_code; + int icode = (int) sub_optab->handlers[(int) GET_MODE (x)].insn_code; if (! ((*insn_data[icode].operand[0].predicate) (x, insn_data[icode].operand[0].mode)) @@ -3768,88 +4572,60 @@ gen_sub2_insn (x, y) return (GEN_FCN (icode) (x, x, y)); } -int -have_sub2_insn (mode) - enum machine_mode mode; -{ - return sub_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing; -} - -/* Generate the body of an instruction to copy Y into X. - It may be a SEQUENCE, if one insn isn't enough. */ - +/* Generate and return an insn body to subtract r1 and c, + storing the result in r0. */ rtx -gen_move_insn (x, y) - rtx x, y; +gen_sub3_insn (rtx r0, rtx r1, rtx c) { - register enum machine_mode mode = GET_MODE (x); - enum insn_code insn_code; - rtx seq; + int icode = (int) sub_optab->handlers[(int) GET_MODE (r0)].insn_code; - if (mode == VOIDmode) - mode = GET_MODE (y); + if (icode == CODE_FOR_nothing + || ! ((*insn_data[icode].operand[0].predicate) + (r0, insn_data[icode].operand[0].mode)) + || ! ((*insn_data[icode].operand[1].predicate) + (r1, insn_data[icode].operand[1].mode)) + || ! ((*insn_data[icode].operand[2].predicate) + (c, insn_data[icode].operand[2].mode))) + return NULL_RTX; - insn_code = mov_optab->handlers[(int) mode].insn_code; + return (GEN_FCN (icode) (r0, r1, c)); +} - /* Handle MODE_CC modes: If we don't have a special move insn for this mode, - find a mode to do it in. If we have a movcc, use it. Otherwise, - find the MODE_INT mode of the same width. */ +int +have_sub2_insn (rtx x, rtx y) +{ + int icode; - if (GET_MODE_CLASS (mode) == MODE_CC && insn_code == CODE_FOR_nothing) - { - enum machine_mode tmode = VOIDmode; - rtx x1 = x, y1 = y; + if (GET_MODE (x) == VOIDmode) + abort (); - if (mode != CCmode - && mov_optab->handlers[(int) CCmode].insn_code != CODE_FOR_nothing) - tmode = CCmode; - else - for (tmode = QImode; tmode != VOIDmode; - tmode = GET_MODE_WIDER_MODE (tmode)) - if (GET_MODE_SIZE (tmode) == GET_MODE_SIZE (mode)) - break; + icode = (int) sub_optab->handlers[(int) GET_MODE (x)].insn_code; - if (tmode == VOIDmode) - abort (); + if (icode == CODE_FOR_nothing) + return 0; - /* Get X and Y in TMODE. 
We can't use gen_lowpart here because it - may call change_address which is not appropriate if we were - called when a reload was in progress. We don't have to worry - about changing the address since the size in bytes is supposed to - be the same. Copy the MEM to change the mode and move any - substitutions from the old MEM to the new one. */ + if (! ((*insn_data[icode].operand[0].predicate) + (x, insn_data[icode].operand[0].mode)) + || ! ((*insn_data[icode].operand[1].predicate) + (x, insn_data[icode].operand[1].mode)) + || ! ((*insn_data[icode].operand[2].predicate) + (y, insn_data[icode].operand[2].mode))) + return 0; - if (reload_in_progress) - { - x = gen_lowpart_common (tmode, x1); - if (x == 0 && GET_CODE (x1) == MEM) - { - x = gen_rtx_MEM (tmode, XEXP (x1, 0)); - MEM_COPY_ATTRIBUTES (x, x1); - copy_replacements (x1, x); - } + return 1; +} - y = gen_lowpart_common (tmode, y1); - if (y == 0 && GET_CODE (y1) == MEM) - { - y = gen_rtx_MEM (tmode, XEXP (y1, 0)); - MEM_COPY_ATTRIBUTES (y, y1); - copy_replacements (y1, y); - } - } - else - { - x = gen_lowpart (tmode, x); - y = gen_lowpart (tmode, y); - } - - insn_code = mov_optab->handlers[(int) tmode].insn_code; - return (GEN_FCN (insn_code) (x, y)); - } +/* Generate the body of an instruction to copy Y into X. + It may be a list of insns, if one insn isn't enough. */ + +rtx +gen_move_insn (rtx x, rtx y) +{ + rtx seq; start_sequence (); emit_move_insn_1 (x, y); - seq = gen_sequence (); + seq = get_insns (); end_sequence (); return seq; } @@ -3859,21 +4635,23 @@ gen_move_insn (x, y) no such operation exists, CODE_FOR_nothing will be returned. */ enum insn_code -can_extend_p (to_mode, from_mode, unsignedp) - enum machine_mode to_mode, from_mode; - int unsignedp; +can_extend_p (enum machine_mode to_mode, enum machine_mode from_mode, + int unsignedp) { - return extendtab[(int) to_mode][(int) from_mode][unsignedp != 0]; +#ifdef HAVE_ptr_extend + if (unsignedp < 0) + return CODE_FOR_ptr_extend; + else +#endif + return extendtab[(int) to_mode][(int) from_mode][unsignedp != 0]; } /* Generate the body of an insn to extend Y (with mode MFROM) into X (with mode MTO). Do zero-extension if UNSIGNEDP is nonzero. */ rtx -gen_extend_insn (x, y, mto, mfrom, unsignedp) - rtx x, y; - enum machine_mode mto, mfrom; - int unsignedp; +gen_extend_insn (rtx x, rtx y, enum machine_mode mto, + enum machine_mode mfrom, int unsignedp) { return (GEN_FCN (extendtab[(int) mto][(int) mfrom][unsignedp != 0]) (x, y)); } @@ -3888,10 +4666,8 @@ gen_extend_insn (x, y, mto, mfrom, unsignedp) an explicit FTRUNC insn before the fix insn; otherwise 0. */ static enum insn_code -can_fix_p (fixmode, fltmode, unsignedp, truncp_ptr) - enum machine_mode fltmode, fixmode; - int unsignedp; - int *truncp_ptr; +can_fix_p (enum machine_mode fixmode, enum machine_mode fltmode, + int unsignedp, int *truncp_ptr) { *truncp_ptr = 0; if (fixtrunctab[(int) fltmode][(int) fixmode][unsignedp != 0] @@ -3907,9 +4683,8 @@ can_fix_p (fixmode, fltmode, unsignedp, truncp_ptr) } static enum insn_code -can_float_p (fltmode, fixmode, unsignedp) - enum machine_mode fixmode, fltmode; - int unsignedp; +can_float_p (enum machine_mode fltmode, enum machine_mode fixmode, + int unsignedp) { return floattab[(int) fltmode][(int) fixmode][unsignedp != 0]; } @@ -3921,12 +4696,10 @@ can_float_p (fltmode, fixmode, unsignedp) if it is negative. 
*/ void -expand_float (to, from, unsignedp) - rtx to, from; - int unsignedp; +expand_float (rtx to, rtx from, int unsignedp) { enum insn_code icode; - register rtx target = to; + rtx target = to; enum machine_mode fmode, imode; /* Crash now, because we won't be able to decide which mode to use. */ @@ -3938,10 +4711,10 @@ expand_float (to, from, unsignedp) wider mode. If the integer mode is wider than the mode of FROM, we can do the conversion signed even if the input is unsigned. */ - for (imode = GET_MODE (from); imode != VOIDmode; - imode = GET_MODE_WIDER_MODE (imode)) - for (fmode = GET_MODE (to); fmode != VOIDmode; - fmode = GET_MODE_WIDER_MODE (fmode)) + for (fmode = GET_MODE (to); fmode != VOIDmode; + fmode = GET_MODE_WIDER_MODE (fmode)) + for (imode = GET_MODE (from); imode != VOIDmode; + imode = GET_MODE_WIDER_MODE (imode)) { int doing_unsigned = unsignedp; @@ -3971,9 +4744,7 @@ expand_float (to, from, unsignedp) convert_move (to, target, 0); return; } - } - -#if !defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC) + } /* Unsigned integer, and no way to convert directly. Convert as signed, then conditionally adjust the result. */ @@ -4013,7 +4784,7 @@ expand_float (to, from, unsignedp) rtx temp1; rtx neglabel = gen_label_rtx (); - /* Don't use TARGET if it isn't a register, is a hard register, + /* Don't use TARGET if it isn't a register, is a hard register, or is the wrong mode. */ if (GET_CODE (target) != REG || REGNO (target) < FIRST_PSEUDO_REGISTER @@ -4025,7 +4796,7 @@ expand_float (to, from, unsignedp) /* Test whether the sign bit is set. */ emit_cmp_and_jump_insns (from, const0_rtx, LT, NULL_RTX, imode, - 0, 0, neglabel); + 0, neglabel); /* The sign bit is not set. Convert as signed. */ expand_float (target, from, 0); @@ -4042,13 +4813,13 @@ expand_float (to, from, unsignedp) NULL_RTX, 1, OPTAB_LIB_WIDEN); temp1 = expand_shift (RSHIFT_EXPR, imode, from, integer_one_node, NULL_RTX, 1); - temp = expand_binop (imode, ior_optab, temp, temp1, temp, 1, + temp = expand_binop (imode, ior_optab, temp, temp1, temp, 1, OPTAB_LIB_WIDEN); expand_float (target, temp, 0); /* Multiply by 2 to undo the shift above. */ temp = expand_binop (fmode, add_optab, target, target, - target, 0, OPTAB_LIB_WIDEN); + target, 0, OPTAB_LIB_WIDEN); if (temp != target) emit_move_insn (target, temp); @@ -4073,12 +4844,10 @@ expand_float (to, from, unsignedp) do_pending_stack_adjust (); emit_cmp_and_jump_insns (from, const0_rtx, GE, NULL_RTX, GET_MODE (from), - 0, 0, label); + 0, label); + - /* On SCO 3.2.1, ldexp rejects values outside [0.5, 1). - Rather than setting up a dconst_dot_5, let's hope SCO - fixes the bug. */ - offset = REAL_VALUE_LDEXP (dconst1, GET_MODE_BITSIZE (GET_MODE (from))); + real_2expN (&offset, GET_MODE_BITSIZE (GET_MODE (from))); temp = expand_binop (fmode, add_optab, target, CONST_DOUBLE_FROM_REAL_VALUE (offset, fmode), target, 0, OPTAB_LIB_WIDEN); @@ -4089,7 +4858,6 @@ expand_float (to, from, unsignedp) emit_label (label); goto done; } -#endif /* No hardware instruction available; call a library routine to convert from SImode, DImode, or TImode into SFmode, DFmode, XFmode, or TFmode. */ @@ -4184,20 +4952,17 @@ expand_float (to, from, unsignedp) and store in TO. FROM must be floating point. 
 */

 static rtx
-ftruncify (x)
-     rtx x;
+ftruncify (rtx x)
 {
   rtx temp = gen_reg_rtx (GET_MODE (x));
   return expand_unop (GET_MODE (x), ftrunc_optab, x, temp, 0);
 }

 void
-expand_fix (to, from, unsignedp)
-     register rtx to, from;
-     int unsignedp;
+expand_fix (rtx to, rtx from, int unsignedp)
 {
   enum insn_code icode;
-  register rtx target = to;
+  rtx target = to;
   enum machine_mode fmode, imode;
   int must_trunc = 0;
   rtx libfcn = 0;
@@ -4207,10 +4972,10 @@ expand_fix (to, from, unsignedp)
      this conversion.  If the integer mode is wider than the mode of TO,
      we can do the conversion either signed or unsigned.  */

-  for (imode = GET_MODE (to); imode != VOIDmode;
-       imode = GET_MODE_WIDER_MODE (imode))
-    for (fmode = GET_MODE (from); fmode != VOIDmode;
-         fmode = GET_MODE_WIDER_MODE (fmode))
+  for (fmode = GET_MODE (from); fmode != VOIDmode;
+       fmode = GET_MODE_WIDER_MODE (fmode))
+    for (imode = GET_MODE (to); imode != VOIDmode;
+         imode = GET_MODE_WIDER_MODE (imode))
       {
         int doing_unsigned = unsignedp;

@@ -4240,7 +5005,6 @@ expand_fix (to, from, unsignedp)
           }
       }

-#if !defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
   /* For an unsigned conversion, there is one more way to do it.
      If we have a signed conversion, we generate code that compares
      the real value to the largest representable positive number.  If it
      exceeds it, we clear out the bit corresponding to one plus the
      highest signed number, convert, and add it back.

      We only need to check all real modes, since we know we didn't find
-     anything with a wider integer mode.  */
+     anything with a wider integer mode.
+
+     This code used to extend the FP value into a mode wider than the
+     destination.  This is not needed.  Consider, for instance, conversion
+     from SFmode into DImode.
+
+     The hot path through the code deals with inputs smaller than 2^63
+     and does just the conversion, so there are no bits to lose.
+
+     In the other path we know the value is positive in the range
+     2^63..2^64-1 inclusive.  (For other inputs, overflow happens and the
+     result is undefined.)  So we know that the most significant bit set
+     in the mantissa corresponds to 2^63.  The subtraction of 2^63 should
+     not generate any rounding as it simply clears out that bit.  The rest
+     is trivial.  */

   if (unsignedp && GET_MODE_BITSIZE (GET_MODE (to)) <= HOST_BITS_PER_WIDE_INT)
     for (fmode = GET_MODE (from); fmode != VOIDmode;
          fmode = GET_MODE_WIDER_MODE (fmode))
-      /* Make sure we won't lose significant bits doing this.  */
-      if (GET_MODE_BITSIZE (fmode) > GET_MODE_BITSIZE (GET_MODE (to))
-          && CODE_FOR_nothing != can_fix_p (GET_MODE (to), fmode, 0,
-                                            &must_trunc))
+      if (CODE_FOR_nothing != can_fix_p (GET_MODE (to), fmode, 0,
+                                         &must_trunc))
         {
           int bitsize;
           REAL_VALUE_TYPE offset;
           rtx limit, lab1, lab2, insn;

           bitsize = GET_MODE_BITSIZE (GET_MODE (to));
-          offset = REAL_VALUE_LDEXP (dconst1, bitsize - 1);
+          real_2expN (&offset, bitsize - 1);
           limit = CONST_DOUBLE_FROM_REAL_VALUE (offset, fmode);
           lab1 = gen_label_rtx ();
           lab2 = gen_label_rtx ();
@@ -4281,7 +5056,7 @@ expand_fix (to, from, unsignedp)
           /* See if we need to do the subtraction.  */
           do_pending_stack_adjust ();
           emit_cmp_and_jump_insns (from, limit, GE, NULL_RTX, GET_MODE (from),
-                                   0, 0, lab1);
+                                   0, lab1);

           /* If not, do the signed "fix" and branch around fixup code.
*/ expand_fix (to, from, 0); @@ -4296,7 +5071,9 @@ expand_fix (to, from, unsignedp) NULL_RTX, 0, OPTAB_LIB_WIDEN); expand_fix (to, target, 0); target = expand_binop (GET_MODE (to), xor_optab, to, - GEN_INT ((HOST_WIDE_INT) 1 << (bitsize - 1)), + gen_int_mode + ((HOST_WIDE_INT) 1 << (bitsize - 1), + GET_MODE (to)), to, 1, OPTAB_LIB_WIDEN); if (target != to) @@ -4318,7 +5095,6 @@ expand_fix (to, from, unsignedp) return; } -#endif /* We can't do it with an insn, so use a library call. But first ensure that the mode of TO is at least as wide as SImode, since those are the @@ -4400,7 +5176,7 @@ expand_fix (to, from, unsignedp) gen_rtx_fmt_e (unsignedp ? UNSIGNED_FIX : FIX, GET_MODE (to), from)); } - + if (target != to) { if (GET_MODE (to) == GET_MODE (target)) @@ -4410,22 +5186,49 @@ expand_fix (to, from, unsignedp) } } +/* Report whether we have an instruction to perform the operation + specified by CODE on operands of mode MODE. */ +int +have_insn_for (enum rtx_code code, enum machine_mode mode) +{ + return (code_to_optab[(int) code] != 0 + && (code_to_optab[(int) code]->handlers[(int) mode].insn_code + != CODE_FOR_nothing)); +} + +/* Create a blank optab. */ static optab -init_optab (code) - enum rtx_code code; +new_optab (void) { int i; - optab op = (optab) xmalloc (sizeof (struct optab)); - op->code = code; + optab op = ggc_alloc (sizeof (struct optab)); for (i = 0; i < NUM_MACHINE_MODES; i++) { op->handlers[i].insn_code = CODE_FOR_nothing; op->handlers[i].libfunc = 0; } - if (code != UNKNOWN) - code_to_optab[(int) code] = op; + return op; +} + +/* Same, but fill in its code as CODE, and write it into the + code_to_optab table. */ +static inline optab +init_optab (enum rtx_code code) +{ + optab op = new_optab (); + op->code = code; + code_to_optab[(int) code] = op; + return op; +} +/* Same, but fill in its code as CODE, and do _not_ write it into + the code_to_optab table. */ +static inline optab +init_optabv (enum rtx_code code) +{ + optab op = new_optab (); + op->code = code; return op; } @@ -4447,24 +5250,20 @@ init_optab (code) */ static void -init_libfuncs (optable, first_mode, last_mode, opname, suffix) - register optab optable; - register int first_mode; - register int last_mode; - register const char *opname; - register int suffix; +init_libfuncs (optab optable, int first_mode, int last_mode, + const char *opname, int suffix) { - register int mode; - register unsigned opname_len = strlen (opname); + int mode; + unsigned opname_len = strlen (opname); for (mode = first_mode; (int) mode <= (int) last_mode; mode = (enum machine_mode) ((int) mode + 1)) { - register const char *mname = GET_MODE_NAME(mode); - register unsigned mname_len = strlen (mname); - register char *libfunc_name = alloca (2 + opname_len + mname_len + 1 + 1); - register char *p; - register const char *q; + const char *mname = GET_MODE_NAME (mode); + unsigned mname_len = strlen (mname); + char *libfunc_name = alloca (2 + opname_len + mname_len + 1 + 1); + char *p; + const char *q; p = libfunc_name; *p++ = '_'; @@ -4477,8 +5276,7 @@ init_libfuncs (optable, first_mode, last_mode, opname, suffix) *p = '\0'; optable->handlers[(int) mode].libfunc - = gen_rtx_SYMBOL_REF (Pmode, ggc_alloc_string (libfunc_name, - p - libfunc_name)); + = init_one_libfunc (ggc_alloc_string (libfunc_name, p - libfunc_name)); } } @@ -4488,12 +5286,14 @@ init_libfuncs (optable, first_mode, last_mode, opname, suffix) routine. (See above). 
*/ static void -init_integral_libfuncs (optable, opname, suffix) - register optab optable; - register const char *opname; - register int suffix; +init_integral_libfuncs (optab optable, const char *opname, int suffix) { - init_libfuncs (optable, SImode, TImode, opname, suffix); + int maxsize = 2*BITS_PER_WORD; + if (maxsize < LONG_LONG_TYPE_SIZE) + maxsize = LONG_LONG_TYPE_SIZE; + init_libfuncs (optable, word_mode, + mode_for_size (maxsize, MODE_INT, 0), + opname, suffix); } /* Initialize the libfunc fields of an entire group of entries in some @@ -4502,41 +5302,51 @@ init_integral_libfuncs (optable, opname, suffix) routine. (See above). */ static void -init_floating_libfuncs (optable, opname, suffix) - register optab optable; - register const char *opname; - register int suffix; +init_floating_libfuncs (optab optable, const char *opname, int suffix) { - init_libfuncs (optable, SFmode, TFmode, opname, suffix); + enum machine_mode fmode, dmode, lmode; + + fmode = float_type_node ? TYPE_MODE (float_type_node) : VOIDmode; + dmode = double_type_node ? TYPE_MODE (double_type_node) : VOIDmode; + lmode = long_double_type_node ? TYPE_MODE (long_double_type_node) : VOIDmode; + + if (fmode != VOIDmode) + init_libfuncs (optable, fmode, fmode, opname, suffix); + if (dmode != fmode && dmode != VOIDmode) + init_libfuncs (optable, dmode, dmode, opname, suffix); + if (lmode != dmode && lmode != VOIDmode) + init_libfuncs (optable, lmode, lmode, opname, suffix); } rtx -init_one_libfunc (name) - register const char *name; +init_one_libfunc (const char *name) { - name = ggc_strdup (name); + rtx symbol; - return gen_rtx_SYMBOL_REF (Pmode, name); -} + /* Create a FUNCTION_DECL that can be passed to + targetm.encode_section_info. */ + /* ??? We don't have any type information except for this is + a function. Pretend this is "int foo()". */ + tree decl = build_decl (FUNCTION_DECL, get_identifier (name), + build_function_type (integer_type_node, NULL_TREE)); + DECL_ARTIFICIAL (decl) = 1; + DECL_EXTERNAL (decl) = 1; + TREE_PUBLIC (decl) = 1; -/* Mark ARG (which is really an OPTAB *) for GC. */ + symbol = XEXP (DECL_RTL (decl), 0); -void -mark_optab (arg) - void *arg; -{ - optab o = *(optab *) arg; - int i; + /* Zap the nonsensical SYMBOL_REF_DECL for this. What we're left with + are the flags assigned by targetm.encode_section_info. */ + SYMBOL_REF_DECL (symbol) = 0; - for (i = 0; i < NUM_MACHINE_MODES; ++i) - ggc_mark_rtx (o->handlers[i].libfunc); + return symbol; } /* Call this once to initialize the contents of the optabs appropriately for the current target machine. 
*/ void -init_optabs () +init_optabs (void) { unsigned int i, j, k; @@ -4571,23 +5381,22 @@ init_optabs () #endif add_optab = init_optab (PLUS); - addv_optab = init_optab (PLUS); + addv_optab = init_optabv (PLUS); sub_optab = init_optab (MINUS); - subv_optab = init_optab (MINUS); + subv_optab = init_optabv (MINUS); smul_optab = init_optab (MULT); - smulv_optab = init_optab (MULT); + smulv_optab = init_optabv (MULT); smul_highpart_optab = init_optab (UNKNOWN); umul_highpart_optab = init_optab (UNKNOWN); smul_widen_optab = init_optab (UNKNOWN); umul_widen_optab = init_optab (UNKNOWN); sdiv_optab = init_optab (DIV); - sdivv_optab = init_optab (DIV); + sdivv_optab = init_optabv (DIV); sdivmod_optab = init_optab (UNKNOWN); udiv_optab = init_optab (UDIV); udivmod_optab = init_optab (UNKNOWN); smod_optab = init_optab (MOD); umod_optab = init_optab (UMOD); - flodiv_optab = init_optab (DIV); ftrunc_optab = init_optab (UNKNOWN); and_optab = init_optab (AND); ior_optab = init_optab (IOR); @@ -4601,24 +5410,45 @@ init_optabs () smax_optab = init_optab (SMAX); umin_optab = init_optab (UMIN); umax_optab = init_optab (UMAX); - mov_optab = init_optab (UNKNOWN); - movstrict_optab = init_optab (UNKNOWN); - cmp_optab = init_optab (UNKNOWN); + pow_optab = init_optab (UNKNOWN); + atan2_optab = init_optab (UNKNOWN); + + /* These three have codes assigned exclusively for the sake of + have_insn_for. */ + mov_optab = init_optab (SET); + movstrict_optab = init_optab (STRICT_LOW_PART); + cmp_optab = init_optab (COMPARE); + ucmp_optab = init_optab (UNKNOWN); tst_optab = init_optab (UNKNOWN); neg_optab = init_optab (NEG); - negv_optab = init_optab (NEG); + negv_optab = init_optabv (NEG); abs_optab = init_optab (ABS); - absv_optab = init_optab (ABS); + absv_optab = init_optabv (ABS); + addcc_optab = init_optab (UNKNOWN); one_cmpl_optab = init_optab (NOT); ffs_optab = init_optab (FFS); + clz_optab = init_optab (CLZ); + ctz_optab = init_optab (CTZ); + popcount_optab = init_optab (POPCOUNT); + parity_optab = init_optab (PARITY); sqrt_optab = init_optab (SQRT); + floor_optab = init_optab (UNKNOWN); + ceil_optab = init_optab (UNKNOWN); + round_optab = init_optab (UNKNOWN); + trunc_optab = init_optab (UNKNOWN); + nearbyint_optab = init_optab (UNKNOWN); sin_optab = init_optab (UNKNOWN); cos_optab = init_optab (UNKNOWN); + exp_optab = init_optab (UNKNOWN); + log_optab = init_optab (UNKNOWN); + tan_optab = init_optab (UNKNOWN); + atan_optab = init_optab (UNKNOWN); strlen_optab = init_optab (UNKNOWN); cbranch_optab = init_optab (UNKNOWN); cmov_optab = init_optab (UNKNOWN); cstore_optab = init_optab (UNKNOWN); + push_optab = init_optab (UNKNOWN); for (i = 0; i < NUM_MACHINE_MODES; i++) { @@ -4655,13 +5485,13 @@ init_optabs () init_integral_libfuncs (smulv_optab, "mulv", '3'); init_floating_libfuncs (smulv_optab, "mul", '3'); init_integral_libfuncs (sdiv_optab, "div", '3'); + init_floating_libfuncs (sdiv_optab, "div", '3'); init_integral_libfuncs (sdivv_optab, "divv", '3'); init_integral_libfuncs (udiv_optab, "udiv", '3'); init_integral_libfuncs (sdivmod_optab, "divmod", '4'); init_integral_libfuncs (udivmod_optab, "udivmod", '4'); init_integral_libfuncs (smod_optab, "mod", '3'); init_integral_libfuncs (umod_optab, "umod", '3'); - init_floating_libfuncs (flodiv_optab, "div", '3'); init_floating_libfuncs (ftrunc_optab, "ftrunc", '2'); init_integral_libfuncs (and_optab, "and", '3'); init_integral_libfuncs (ior_optab, "ior", '3'); @@ -4681,6 +5511,10 @@ init_optabs () init_floating_libfuncs (negv_optab, "neg", '2'); 
init_integral_libfuncs (one_cmpl_optab, "one_cmpl", '2'); init_integral_libfuncs (ffs_optab, "ffs", '2'); + init_integral_libfuncs (clz_optab, "clz", '2'); + init_integral_libfuncs (ctz_optab, "ctz", '2'); + init_integral_libfuncs (popcount_optab, "popcount", '2'); + init_integral_libfuncs (parity_optab, "parity", '2'); /* Comparison libcalls for integers MUST come in pairs, signed/unsigned. */ init_integral_libfuncs (cmp_optab, "cmp", '2'); @@ -4753,19 +5587,19 @@ init_optabs () truncxfdf2_libfunc = init_one_libfunc ("__truncxfdf2"); trunctfdf2_libfunc = init_one_libfunc ("__trunctfdf2"); + abort_libfunc = init_one_libfunc ("abort"); memcpy_libfunc = init_one_libfunc ("memcpy"); + memmove_libfunc = init_one_libfunc ("memmove"); bcopy_libfunc = init_one_libfunc ("bcopy"); memcmp_libfunc = init_one_libfunc ("memcmp"); bcmp_libfunc = init_one_libfunc ("__gcc_bcmp"); memset_libfunc = init_one_libfunc ("memset"); bzero_libfunc = init_one_libfunc ("bzero"); + setbits_libfunc = init_one_libfunc ("__setbits"); - throw_libfunc = init_one_libfunc ("__throw"); - rethrow_libfunc = init_one_libfunc ("__rethrow"); - sjthrow_libfunc = init_one_libfunc ("__sjthrow"); - sjpopnthrow_libfunc = init_one_libfunc ("__sjpopnthrow"); - terminate_libfunc = init_one_libfunc ("__terminate"); - eh_rtime_match_libfunc = init_one_libfunc ("__eh_rtime_match"); + unwind_resume_libfunc = init_one_libfunc (USING_SJLJ_EXCEPTIONS + ? "_Unwind_SjLj_Resume" + : "_Unwind_Resume"); #ifndef DONT_USE_BUILTIN_SETJMP setjmp_libfunc = init_one_libfunc ("__builtin_setjmp"); longjmp_libfunc = init_one_libfunc ("__builtin_longjmp"); @@ -4773,6 +5607,9 @@ init_optabs () setjmp_libfunc = init_one_libfunc ("setjmp"); longjmp_libfunc = init_one_libfunc ("longjmp"); #endif + unwind_sjlj_register_libfunc = init_one_libfunc ("_Unwind_SjLj_Register"); + unwind_sjlj_unregister_libfunc + = init_one_libfunc ("_Unwind_SjLj_Unregister"); eqhf2_libfunc = init_one_libfunc ("__eqhf2"); nehf2_libfunc = init_one_libfunc ("__nehf2"); @@ -4862,99 +5699,65 @@ init_optabs () fixunstfdi_libfunc = init_one_libfunc ("__fixunstfdi"); fixunstfti_libfunc = init_one_libfunc ("__fixunstfti"); - /* For check-memory-usage. */ - chkr_check_addr_libfunc = init_one_libfunc ("chkr_check_addr"); - chkr_set_right_libfunc = init_one_libfunc ("chkr_set_right"); - chkr_copy_bitmap_libfunc = init_one_libfunc ("chkr_copy_bitmap"); - chkr_check_exec_libfunc = init_one_libfunc ("chkr_check_exec"); - chkr_check_str_libfunc = init_one_libfunc ("chkr_check_str"); - /* For function entry/exit instrumentation. */ profile_function_entry_libfunc = init_one_libfunc ("__cyg_profile_func_enter"); profile_function_exit_libfunc = init_one_libfunc ("__cyg_profile_func_exit"); -#ifdef HAVE_conditional_trap - init_traps (); -#endif + gcov_flush_libfunc = init_one_libfunc ("__gcov_flush"); + gcov_init_libfunc = init_one_libfunc ("__gcov_init"); + + if (HAVE_conditional_trap) + trap_rtx = gen_rtx_fmt_ee (EQ, VOIDmode, NULL_RTX, NULL_RTX); #ifdef INIT_TARGET_OPTABS /* Allow the target to add more libcalls or rename some, etc. */ INIT_TARGET_OPTABS; #endif - - /* Add these GC roots. */ - ggc_add_root (optab_table, OTI_MAX, sizeof(optab), mark_optab); - ggc_add_rtx_root (libfunc_table, LTI_MAX); -} - -#ifdef BROKEN_LDEXP - -/* SCO 3.2 apparently has a broken ldexp. */ - -double -ldexp(x,n) - double x; - int n; -{ - if (n > 0) - while (n--) - x *= 2; - - return x; } -#endif /* BROKEN_LDEXP */ -#ifdef HAVE_conditional_trap -/* The insn generating function can not take an rtx_code argument. 
- TRAP_RTX is used as an rtx argument. Its code is replaced with - the code to be used in the trap insn and all other fields are - ignored. */ -static rtx trap_rtx; - -static void -init_traps () -{ - if (HAVE_conditional_trap) - { - trap_rtx = gen_rtx_fmt_ee (EQ, VOIDmode, NULL_RTX, NULL_RTX); - ggc_add_rtx_root (&trap_rtx, 1); - } -} -#endif - /* Generate insns to trap with code TCODE if OP1 and OP2 satisfy condition CODE. Return 0 on failure. */ rtx -gen_cond_trap (code, op1, op2, tcode) - enum rtx_code code ATTRIBUTE_UNUSED; - rtx op1, op2 ATTRIBUTE_UNUSED, tcode ATTRIBUTE_UNUSED; +gen_cond_trap (enum rtx_code code ATTRIBUTE_UNUSED, rtx op1, + rtx op2 ATTRIBUTE_UNUSED, rtx tcode ATTRIBUTE_UNUSED) { enum machine_mode mode = GET_MODE (op1); + enum insn_code icode; + rtx insn; + + if (!HAVE_conditional_trap) + return 0; if (mode == VOIDmode) return 0; -#ifdef HAVE_conditional_trap - if (HAVE_conditional_trap - && cmp_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing) + icode = cmp_optab->handlers[(int) mode].insn_code; + if (icode == CODE_FOR_nothing) + return 0; + + start_sequence (); + op1 = prepare_operand (icode, op1, 0, mode, mode, 0); + op2 = prepare_operand (icode, op2, 1, mode, mode, 0); + if (!op1 || !op2) { - rtx insn; - start_sequence(); - emit_insn (GEN_FCN (cmp_optab->handlers[(int) mode].insn_code) (op1, op2)); - PUT_CODE (trap_rtx, code); - insn = gen_conditional_trap (trap_rtx, tcode); - if (insn) - { - emit_insn (insn); - insn = gen_sequence (); - } - end_sequence(); - return insn; + end_sequence (); + return 0; } -#endif + emit_insn (GEN_FCN (icode) (op1, op2)); - return 0; + PUT_CODE (trap_rtx, code); + insn = gen_conditional_trap (trap_rtx, tcode); + if (insn) + { + emit_insn (insn); + insn = get_insns (); + } + end_sequence (); + + return insn; } + +#include "gt-optabs.h"
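
The comment added above expand_fix describes how an unsigned fix-trunc is synthesized from a signed one: values below 2^63 go through the signed conversion unchanged, and larger values have 2^63 subtracted, are converted as signed, and then have bit 63 restored. The following stand-alone C sketch mirrors that sequence; it is an illustration only, not part of the patch, and the function name fix_truncdfdi_like and the test values are assumptions made for the example.

#include <stdint.h>
#include <stdio.h>

/* Plain-C mirror of the RTL sequence expand_fix emits for an unsigned
   DFmode -> DImode conversion when only a signed fix instruction exists.
   The input is assumed to lie in [0, 2^64); anything else is undefined,
   exactly as the comment in the patch states.  */
static uint64_t
fix_truncdfdi_like (double x)
{
  const double limit = 9223372036854775808.0;  /* 2^63, the emitted "limit".  */

  if (x < limit)
    /* Hot path: the value fits the signed range, so the plain signed
       conversion loses no bits.  */
    return (uint64_t) (int64_t) x;

  /* Fixup path: subtracting 2^63 only clears the leading bit (no rounding),
     the remainder is converted as signed, and bit 63 is put back with XOR,
     corresponding to the xor_optab step in the patch.  */
  return (uint64_t) (int64_t) (x - limit) ^ (UINT64_C (1) << 63);
}

int
main (void)
{
  printf ("%llu\n", (unsigned long long) fix_truncdfdi_like (3.0));
  printf ("%llu\n", (unsigned long long) fix_truncdfdi_like (18446744073709549568.0));
  return 0;
}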
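
The unsigned branch of expand_float shown earlier uses a related trick in the opposite direction: when the sign bit is set, the value is halved (keeping the low bit so the final rounding is unchanged), converted along the signed path, and the result is doubled. A rough stand-alone illustration, with an assumed function name, of what that emitted code computes:

#include <stdint.h>
#include <stdio.h>

/* Approximation in plain C of the shift/ior/convert/double sequence that
   expand_float emits for an unsigned DImode -> DFmode conversion when only
   a signed float instruction is available.  */
static double
float_unsigned_di_like (uint64_t u)
{
  if ((int64_t) u >= 0)
    /* Sign bit clear: the signed conversion already gives the right value.  */
    return (double) (int64_t) u;

  /* Sign bit set: fold the low bit into the halved value (the ior step in
     the patch), convert as signed, then multiply by 2 to undo the shift.  */
  uint64_t half = (u >> 1) | (u & 1);
  double d = (double) (int64_t) half;
  return d + d;
}

int
main (void)
{
  printf ("%.1f\n", float_unsigned_di_like (18446744073709551615ULL));
  return 0;
}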