X-Git-Url: http://git.sourceforge.jp/view?a=blobdiff_plain;f=gcc%2Fexpmed.c;h=27ff0c063cf0b605b3051fd0145f147ac24dbd79;hb=052f0ad8a01ea8bb07d283f6f11d8e56e677dad2;hp=ff8c278ced3537ad0a3f30097bd3feb2e713fe0b;hpb=0663398065295b8b03266c3f4a1df130d6bdf712;p=pf3gnuchains%2Fgcc-fork.git diff --git a/gcc/expmed.c b/gcc/expmed.c index ff8c278ced3..27ff0c063cf 100644 --- a/gcc/expmed.c +++ b/gcc/expmed.c @@ -1,13 +1,14 @@ /* Medium-level subroutines: convert bit-field store and extract and shifts, multiplies and divides to rtl instructions. Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998, - 1999, 2000, 2001, 2002, 2003, 2004, 2005 Free Software Foundation, Inc. + 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007 + Free Software Foundation, Inc. This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free -Software Foundation; either version 2, or (at your option) any later +Software Foundation; either version 3, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY @@ -16,9 +17,8 @@ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License -along with GCC; see the file COPYING. If not, write to the Free -Software Foundation, 59 Temple Place - Suite 330, Boston, MA -02111-1307, USA. */ +along with GCC; see the file COPYING3. If not see +. */ #include "config.h" @@ -36,6 +36,8 @@ Software Foundation, 59 Temple Place - Suite 330, Boston, MA #include "real.h" #include "recog.h" #include "langhooks.h" +#include "df.h" +#include "target.h" static void store_fixed_bit_field (rtx, unsigned HOST_WIDE_INT, unsigned HOST_WIDE_INT, @@ -103,7 +105,8 @@ static int shift_cost[NUM_MACHINE_MODES][MAX_BITS_PER_WORD]; static int shiftadd_cost[NUM_MACHINE_MODES][MAX_BITS_PER_WORD]; static int shiftsub_cost[NUM_MACHINE_MODES][MAX_BITS_PER_WORD]; static int mul_cost[NUM_MACHINE_MODES]; -static int div_cost[NUM_MACHINE_MODES]; +static int sdiv_cost[NUM_MACHINE_MODES]; +static int udiv_cost[NUM_MACHINE_MODES]; static int mul_widen_cost[NUM_MACHINE_MODES]; static int mul_highpart_cost[NUM_MACHINE_MODES]; @@ -115,11 +118,12 @@ init_expmed (void) struct rtx_def reg; rtunion reg_fld[2]; struct rtx_def plus; rtunion plus_fld1; struct rtx_def neg; - struct rtx_def udiv; rtunion udiv_fld1; struct rtx_def mult; rtunion mult_fld1; - struct rtx_def div; rtunion div_fld1; - struct rtx_def mod; rtunion mod_fld1; + struct rtx_def sdiv; rtunion sdiv_fld1; + struct rtx_def udiv; rtunion udiv_fld1; struct rtx_def zext; + struct rtx_def sdiv_32; rtunion sdiv_32_fld1; + struct rtx_def smod_32; rtunion smod_32_fld1; struct rtx_def wide_mult; rtunion wide_mult_fld1; struct rtx_def wide_lshr; rtunion wide_lshr_fld1; struct rtx_def wide_trunc; @@ -146,7 +150,7 @@ init_expmed (void) PUT_CODE (&all.reg, REG); /* Avoid using hard regs in ways which may be unsupported. 
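Using the first register number past LAST_VIRTUAL_REGISTER gives a plain pseudo, so the rtx_cost probes below measure the generic cost of each operation rather than anything specific to a hard register.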
*/ - REGNO (&all.reg) = LAST_VIRTUAL_REGISTER + 1; + SET_REGNO (&all.reg, LAST_VIRTUAL_REGISTER + 1); PUT_CODE (&all.plus, PLUS); XEXP (&all.plus, 0) = &all.reg; @@ -155,21 +159,25 @@ init_expmed (void) PUT_CODE (&all.neg, NEG); XEXP (&all.neg, 0) = &all.reg; - PUT_CODE (&all.udiv, UDIV); - XEXP (&all.udiv, 0) = &all.reg; - XEXP (&all.udiv, 1) = &all.reg; - PUT_CODE (&all.mult, MULT); XEXP (&all.mult, 0) = &all.reg; XEXP (&all.mult, 1) = &all.reg; - PUT_CODE (&all.div, DIV); - XEXP (&all.div, 0) = &all.reg; - XEXP (&all.div, 1) = 32 < MAX_BITS_PER_WORD ? cint[32] : GEN_INT (32); + PUT_CODE (&all.sdiv, DIV); + XEXP (&all.sdiv, 0) = &all.reg; + XEXP (&all.sdiv, 1) = &all.reg; - PUT_CODE (&all.mod, MOD); - XEXP (&all.mod, 0) = &all.reg; - XEXP (&all.mod, 1) = XEXP (&all.div, 1); + PUT_CODE (&all.udiv, UDIV); + XEXP (&all.udiv, 0) = &all.reg; + XEXP (&all.udiv, 1) = &all.reg; + + PUT_CODE (&all.sdiv_32, DIV); + XEXP (&all.sdiv_32, 0) = &all.reg; + XEXP (&all.sdiv_32, 1) = 32 < MAX_BITS_PER_WORD ? cint[32] : GEN_INT (32); + + PUT_CODE (&all.smod_32, MOD); + XEXP (&all.smod_32, 0) = &all.reg; + XEXP (&all.smod_32, 1) = XEXP (&all.sdiv_32, 1); PUT_CODE (&all.zext, ZERO_EXTEND); XEXP (&all.zext, 0) = &all.reg; @@ -205,10 +213,11 @@ init_expmed (void) PUT_MODE (&all.reg, mode); PUT_MODE (&all.plus, mode); PUT_MODE (&all.neg, mode); - PUT_MODE (&all.udiv, mode); PUT_MODE (&all.mult, mode); - PUT_MODE (&all.div, mode); - PUT_MODE (&all.mod, mode); + PUT_MODE (&all.sdiv, mode); + PUT_MODE (&all.udiv, mode); + PUT_MODE (&all.sdiv_32, mode); + PUT_MODE (&all.smod_32, mode); PUT_MODE (&all.wide_trunc, mode); PUT_MODE (&all.shift, mode); PUT_MODE (&all.shift_mult, mode); @@ -217,11 +226,14 @@ init_expmed (void) add_cost[mode] = rtx_cost (&all.plus, SET); neg_cost[mode] = rtx_cost (&all.neg, SET); - div_cost[mode] = rtx_cost (&all.udiv, SET); mul_cost[mode] = rtx_cost (&all.mult, SET); + sdiv_cost[mode] = rtx_cost (&all.sdiv, SET); + udiv_cost[mode] = rtx_cost (&all.udiv, SET); - sdiv_pow2_cheap[mode] = (rtx_cost (&all.div, SET) <= 2 * add_cost[mode]); - smod_pow2_cheap[mode] = (rtx_cost (&all.mod, SET) <= 4 * add_cost[mode]); + sdiv_pow2_cheap[mode] = (rtx_cost (&all.sdiv_32, SET) + <= 2 * add_cost[mode]); + smod_pow2_cheap[mode] = (rtx_cost (&all.smod_32, SET) + <= 4 * add_cost[mode]); wider_mode = GET_MODE_WIDER_MODE (mode); if (wider_mode != VOIDmode) @@ -315,26 +327,33 @@ mode_for_extraction (enum extraction_pattern pattern, int opno) return data->operand[opno].mode; } - -/* Generate code to store value from rtx VALUE - into a bit-field within structure STR_RTX - containing BITSIZE bits starting at bit BITNUM. - FIELDMODE is the machine-mode of the FIELD_DECL node for this field. - ALIGN is the alignment that STR_RTX is known to have. - TOTAL_SIZE is the size of the structure in bytes, or -1 if varying. */ +/* Return true if X, of mode MODE, matches the predicate for operand + OPNO of instruction ICODE. Allow volatile memories, regardless of + the ambient volatile_ok setting. */ -/* ??? Note that there are two different ideas here for how - to determine the size to count bits within, for a register. - One is BITS_PER_WORD, and the other is the size of operand 3 - of the insv pattern. +static bool +check_predicate_volatile_ok (enum insn_code icode, int opno, + rtx x, enum machine_mode mode) +{ + bool save_volatile_ok, result; - If operand 3 of the insv pattern is VOIDmode, then we will use BITS_PER_WORD - else, we use the mode of operand 3. 
*/ + save_volatile_ok = volatile_ok; + result = insn_data[(int) icode].operand[opno].predicate (x, mode); + volatile_ok = save_volatile_ok; + return result; +} + +/* A subroutine of store_bit_field, with the same arguments. Return true + if the operation could be implemented. -rtx -store_bit_field (rtx str_rtx, unsigned HOST_WIDE_INT bitsize, - unsigned HOST_WIDE_INT bitnum, enum machine_mode fieldmode, - rtx value) + If FALLBACK_P is true, fall back to store_fixed_bit_field if we have + no other way of implementing the operation. If FALLBACK_P is false, + return false instead. */ + +static bool +store_bit_field_1 (rtx str_rtx, unsigned HOST_WIDE_INT bitsize, + unsigned HOST_WIDE_INT bitnum, enum machine_mode fieldmode, + rtx value, bool fallback_p) { unsigned int unit = (MEM_P (str_rtx)) ? BITS_PER_UNIT : BITS_PER_WORD; @@ -352,7 +371,25 @@ store_bit_field (rtx str_rtx, unsigned HOST_WIDE_INT bitsize, meaningful at a much higher level; when structures are copied between memory and regs, the higher-numbered regs always get higher addresses. */ - bitnum += SUBREG_BYTE (op0) * BITS_PER_UNIT; + int inner_mode_size = GET_MODE_SIZE (GET_MODE (SUBREG_REG (op0))); + int outer_mode_size = GET_MODE_SIZE (GET_MODE (op0)); + + byte_offset = 0; + + /* Paradoxical subregs need special handling on big endian machines. */ + if (SUBREG_BYTE (op0) == 0 && inner_mode_size < outer_mode_size) + { + int difference = inner_mode_size - outer_mode_size; + + if (WORDS_BIG_ENDIAN) + byte_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD; + if (BYTES_BIG_ENDIAN) + byte_offset += difference % UNITS_PER_WORD; + } + else + byte_offset = SUBREG_BYTE (op0); + + bitnum += byte_offset * BITS_PER_UNIT; op0 = SUBREG_REG (op0); } @@ -360,13 +397,13 @@ store_bit_field (rtx str_rtx, unsigned HOST_WIDE_INT bitsize, lies completely outside that register. This can occur if the source code contains an out-of-bounds access to a small array. */ if (REG_P (op0) && bitnum >= GET_MODE_BITSIZE (GET_MODE (op0))) - return value; + return true; /* Use vec_set patterns for inserting parts of vectors whenever available. */ if (VECTOR_MODE_P (GET_MODE (op0)) && !MEM_P (op0) - && (vec_set_optab->handlers[GET_MODE (op0)].insn_code + && (optab_handler (vec_set_optab, GET_MODE (op0))->insn_code != CODE_FOR_nothing) && fieldmode == GET_MODE_INNER (GET_MODE (op0)) && bitsize == GET_MODE_BITSIZE (GET_MODE_INNER (GET_MODE (op0))) @@ -374,7 +411,7 @@ store_bit_field (rtx str_rtx, unsigned HOST_WIDE_INT bitsize, { enum machine_mode outermode = GET_MODE (op0); enum machine_mode innermode = GET_MODE_INNER (outermode); - int icode = (int) vec_set_optab->handlers[outermode].insn_code; + int icode = (int) optab_handler (vec_set_optab, outermode)->insn_code; int pos = bitnum / GET_MODE_BITSIZE (innermode); rtx rtxpos = GEN_INT (pos); rtx src = value; @@ -404,18 +441,10 @@ store_bit_field (rtx str_rtx, unsigned HOST_WIDE_INT bitsize, { emit_insn (seq); emit_insn (pat); - return dest; + return true; } } - if (flag_force_mem) - { - int old_generating_concat_p = generating_concat_p; - generating_concat_p = 0; - value = force_not_mem (value); - generating_concat_p = old_generating_concat_p; - } - /* If the target is a register, overwriting the entire object, or storing a full-word or multi-word field can be done with just a SUBREG. 
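In those cases no masking or shifting is required; the store degenerates to a plain move of VALUE in FIELDMODE, via adjust_address for a MEM or a subreg for a register.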
@@ -438,16 +467,13 @@ store_bit_field (rtx str_rtx, unsigned HOST_WIDE_INT bitsize, || (offset * BITS_PER_UNIT % bitsize == 0 && MEM_ALIGN (op0) % GET_MODE_BITSIZE (fieldmode) == 0)))) { - if (GET_MODE (op0) != fieldmode) - { - if (MEM_P (op0)) - op0 = adjust_address (op0, fieldmode, offset); - else - op0 = simplify_gen_subreg (fieldmode, op0, GET_MODE (op0), - byte_offset); - } + if (MEM_P (op0)) + op0 = adjust_address (op0, fieldmode, offset); + else if (GET_MODE (op0) != fieldmode) + op0 = simplify_gen_subreg (fieldmode, op0, GET_MODE (op0), + byte_offset); emit_move_insn (op0, value); - return value; + return true; } /* Make sure we are playing with integral modes. Pun with subregs @@ -491,10 +517,10 @@ store_bit_field (rtx str_rtx, unsigned HOST_WIDE_INT bitsize, if (!MEM_P (op0) && (BYTES_BIG_ENDIAN ? bitpos + bitsize == unit : bitpos == 0) && bitsize == GET_MODE_BITSIZE (fieldmode) - && (movstrict_optab->handlers[fieldmode].insn_code + && (optab_handler (movstrict_optab, fieldmode)->insn_code != CODE_FOR_nothing)) { - int icode = movstrict_optab->handlers[fieldmode].insn_code; + int icode = optab_handler (movstrict_optab, fieldmode)->insn_code; /* Get appropriate low part of the value being stored. */ if (GET_CODE (value) == CONST_INT || REG_P (value)) @@ -524,7 +550,7 @@ store_bit_field (rtx str_rtx, unsigned HOST_WIDE_INT bitsize, + (offset * UNITS_PER_WORD)), value)); - return value; + return true; } /* Handle fields bigger than a word. */ @@ -540,6 +566,7 @@ store_bit_field (rtx str_rtx, unsigned HOST_WIDE_INT bitsize, unsigned int backwards = WORDS_BIG_ENDIAN && fieldmode != BLKmode; unsigned int nwords = (bitsize + (BITS_PER_WORD - 1)) / BITS_PER_WORD; unsigned int i; + rtx last; /* This is the mode we must force value to, so that there will be enough subwords to extract. Note that fieldmode will often (always?) be @@ -550,6 +577,7 @@ store_bit_field (rtx str_rtx, unsigned HOST_WIDE_INT bitsize, if (fieldmode == VOIDmode) fieldmode = smallest_mode_for_size (nwords * BITS_PER_WORD, MODE_INT); + last = get_last_insn (); for (i = 0; i < nwords; i++) { /* If I is 0, use the low-order word in both field and target; @@ -560,13 +588,18 @@ store_bit_field (rtx str_rtx, unsigned HOST_WIDE_INT bitsize, * BITS_PER_WORD, 0) : (int) i * BITS_PER_WORD); + rtx value_word = operand_subword_force (value, wordnum, fieldmode); - store_bit_field (op0, MIN (BITS_PER_WORD, - bitsize - i * BITS_PER_WORD), - bitnum + bit_offset, word_mode, - operand_subword_force (value, wordnum, fieldmode)); + if (!store_bit_field_1 (op0, MIN (BITS_PER_WORD, + bitsize - i * BITS_PER_WORD), + bitnum + bit_offset, word_mode, + value_word, fallback_p)) + { + delete_insns_since (last); + return false; + } } - return value; + return true; } /* From here on we can assume that the field to be stored in is @@ -615,79 +648,32 @@ store_bit_field (rtx str_rtx, unsigned HOST_WIDE_INT bitsize, if (HAVE_insv && GET_MODE (value) != BLKmode - && !(bitsize == 1 && GET_CODE (value) == CONST_INT) - /* Ensure insv's size is wide enough for this field. */ - && (GET_MODE_BITSIZE (op_mode) >= bitsize) + && bitsize > 0 + && GET_MODE_BITSIZE (op_mode) >= bitsize && ! 
((REG_P (op0) || GET_CODE (op0) == SUBREG) - && (bitsize + bitpos > GET_MODE_BITSIZE (op_mode)))) + && (bitsize + bitpos > GET_MODE_BITSIZE (op_mode))) + && insn_data[CODE_FOR_insv].operand[1].predicate (GEN_INT (bitsize), + VOIDmode) + && check_predicate_volatile_ok (CODE_FOR_insv, 0, op0, VOIDmode)) { int xbitpos = bitpos; rtx value1; rtx xop0 = op0; rtx last = get_last_insn (); rtx pat; - enum machine_mode maxmode = mode_for_extraction (EP_insv, 3); - int save_volatile_ok = volatile_ok; - - volatile_ok = 1; - - /* If this machine's insv can only insert into a register, copy OP0 - into a register and save it back later. */ - /* This used to check flag_force_mem, but that was a serious - de-optimization now that flag_force_mem is enabled by -O2. */ - if (MEM_P (op0) - && ! ((*insn_data[(int) CODE_FOR_insv].operand[0].predicate) - (op0, VOIDmode))) - { - rtx tempreg; - enum machine_mode bestmode; - - /* Get the mode to use for inserting into this field. If OP0 is - BLKmode, get the smallest mode consistent with the alignment. If - OP0 is a non-BLKmode object that is no wider than MAXMODE, use its - mode. Otherwise, use the smallest mode containing the field. */ - - if (GET_MODE (op0) == BLKmode - || GET_MODE_SIZE (GET_MODE (op0)) > GET_MODE_SIZE (maxmode)) - bestmode - = get_best_mode (bitsize, bitnum, MEM_ALIGN (op0), maxmode, - MEM_VOLATILE_P (op0)); - else - bestmode = GET_MODE (op0); - - if (bestmode == VOIDmode - || (SLOW_UNALIGNED_ACCESS (bestmode, MEM_ALIGN (op0)) - && GET_MODE_BITSIZE (bestmode) > MEM_ALIGN (op0))) - goto insv_loses; - - /* Adjust address to point to the containing unit of that mode. - Compute offset as multiple of this unit, counting in bytes. */ - unit = GET_MODE_BITSIZE (bestmode); - offset = (bitnum / unit) * GET_MODE_SIZE (bestmode); - bitpos = bitnum % unit; - op0 = adjust_address (op0, bestmode, offset); - - /* Fetch that unit, store the bitfield in it, then store - the unit. */ - tempreg = copy_to_reg (op0); - store_bit_field (tempreg, bitsize, bitpos, fieldmode, orig_value); - emit_move_insn (op0, tempreg); - return value; - } - volatile_ok = save_volatile_ok; /* Add OFFSET into OP0's address. */ if (MEM_P (xop0)) xop0 = adjust_address (xop0, byte_mode, offset); - /* If xop0 is a register, we need it in MAXMODE + /* If xop0 is a register, we need it in OP_MODE to make it acceptable to the format of insv. */ if (GET_CODE (xop0) == SUBREG) /* We can't just change the mode, because this might clobber op0, and we will need the original value of op0 if insv fails. */ - xop0 = gen_rtx_SUBREG (maxmode, SUBREG_REG (xop0), SUBREG_BYTE (xop0)); - if (REG_P (xop0) && GET_MODE (xop0) != maxmode) - xop0 = gen_rtx_SUBREG (maxmode, xop0, 0); + xop0 = gen_rtx_SUBREG (op_mode, SUBREG_REG (xop0), SUBREG_BYTE (xop0)); + if (REG_P (xop0) && GET_MODE (xop0) != op_mode) + xop0 = gen_rtx_SUBREG (op_mode, xop0, 0); /* On big-endian machines, we count bits from the most significant. If the bit field insn does not, we must invert. */ @@ -698,13 +684,13 @@ store_bit_field (rtx str_rtx, unsigned HOST_WIDE_INT bitsize, /* We have been counting XBITPOS within UNIT. Count instead within the size of the register. */ if (BITS_BIG_ENDIAN && !MEM_P (xop0)) - xbitpos += GET_MODE_BITSIZE (maxmode) - unit; + xbitpos += GET_MODE_BITSIZE (op_mode) - unit; - unit = GET_MODE_BITSIZE (maxmode); + unit = GET_MODE_BITSIZE (op_mode); - /* Convert VALUE to maxmode (which insv insn wants) in VALUE1. */ + /* Convert VALUE to op_mode (which insv insn wants) in VALUE1. 
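Only the low BITSIZE bits of VALUE matter here, so VALUE1 can normally be formed with a subreg: gen_lowpart when VALUE is at least as wide as OP_MODE, or a (possibly paradoxical) subreg when it is narrower, with force_reg as the fallback when no valid subreg can be formed.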
*/ value1 = value; - if (GET_MODE (value) != maxmode) + if (GET_MODE (value) != op_mode) { if (GET_MODE_BITSIZE (GET_MODE (value)) >= bitsize) { @@ -712,23 +698,23 @@ store_bit_field (rtx str_rtx, unsigned HOST_WIDE_INT bitsize, if it has all the bits we will actually use. However, if we must narrow it, be sure we do it correctly. */ - if (GET_MODE_SIZE (GET_MODE (value)) < GET_MODE_SIZE (maxmode)) + if (GET_MODE_SIZE (GET_MODE (value)) < GET_MODE_SIZE (op_mode)) { rtx tmp; - tmp = simplify_subreg (maxmode, value1, GET_MODE (value), 0); + tmp = simplify_subreg (op_mode, value1, GET_MODE (value), 0); if (! tmp) - tmp = simplify_gen_subreg (maxmode, + tmp = simplify_gen_subreg (op_mode, force_reg (GET_MODE (value), value1), GET_MODE (value), 0); value1 = tmp; } else - value1 = gen_lowpart (maxmode, value1); + value1 = gen_lowpart (op_mode, value1); } else if (GET_CODE (value) == CONST_INT) - value1 = gen_int_mode (INTVAL (value), maxmode); + value1 = gen_int_mode (INTVAL (value), op_mode); else /* Parse phase is supposed to make VALUE's data type match that of the component reference, which is a type @@ -740,23 +726,89 @@ store_bit_field (rtx str_rtx, unsigned HOST_WIDE_INT bitsize, /* If this machine's insv insists on a register, get VALUE1 into a register. */ if (! ((*insn_data[(int) CODE_FOR_insv].operand[3].predicate) - (value1, maxmode))) - value1 = force_reg (maxmode, value1); + (value1, op_mode))) + value1 = force_reg (op_mode, value1); pat = gen_insv (xop0, GEN_INT (bitsize), GEN_INT (xbitpos), value1); if (pat) - emit_insn (pat); + { + emit_insn (pat); + return true; + } + delete_insns_since (last); + } + + /* If OP0 is a memory, try copying it to a register and seeing if a + cheap register alternative is available. */ + if (HAVE_insv && MEM_P (op0)) + { + enum machine_mode bestmode; + + /* Get the mode to use for inserting into this field. If OP0 is + BLKmode, get the smallest mode consistent with the alignment. If + OP0 is a non-BLKmode object that is no wider than OP_MODE, use its + mode. Otherwise, use the smallest mode containing the field. */ + + if (GET_MODE (op0) == BLKmode + || (op_mode != MAX_MACHINE_MODE + && GET_MODE_SIZE (GET_MODE (op0)) > GET_MODE_SIZE (op_mode))) + bestmode = get_best_mode (bitsize, bitnum, MEM_ALIGN (op0), + (op_mode == MAX_MACHINE_MODE + ? VOIDmode : op_mode), + MEM_VOLATILE_P (op0)); else + bestmode = GET_MODE (op0); + + if (bestmode != VOIDmode + && GET_MODE_SIZE (bestmode) >= GET_MODE_SIZE (fieldmode) + && !(SLOW_UNALIGNED_ACCESS (bestmode, MEM_ALIGN (op0)) + && GET_MODE_BITSIZE (bestmode) > MEM_ALIGN (op0))) { + rtx last, tempreg, xop0; + unsigned HOST_WIDE_INT xoffset, xbitpos; + + last = get_last_insn (); + + /* Adjust address to point to the containing unit of + that mode. Compute the offset as a multiple of this unit, + counting in bytes. */ + unit = GET_MODE_BITSIZE (bestmode); + xoffset = (bitnum / unit) * GET_MODE_SIZE (bestmode); + xbitpos = bitnum % unit; + xop0 = adjust_address (op0, bestmode, xoffset); + + /* Fetch that unit, store the bitfield in it, then store + the unit. */ + tempreg = copy_to_reg (xop0); + if (store_bit_field_1 (tempreg, bitsize, xbitpos, + fieldmode, orig_value, false)) + { + emit_move_insn (xop0, tempreg); + return true; + } delete_insns_since (last); - store_fixed_bit_field (op0, offset, bitsize, bitpos, value); } } - else - insv_loses: - /* Insv is not available; store using shifts and boolean ops. 
*/ - store_fixed_bit_field (op0, offset, bitsize, bitpos, value); - return value; + + if (!fallback_p) + return false; + + store_fixed_bit_field (op0, offset, bitsize, bitpos, value); + return true; +} + +/* Generate code to store value from rtx VALUE + into a bit-field within structure STR_RTX + containing BITSIZE bits starting at bit BITNUM. + FIELDMODE is the machine-mode of the FIELD_DECL node for this field. */ + +void +store_bit_field (rtx str_rtx, unsigned HOST_WIDE_INT bitsize, + unsigned HOST_WIDE_INT bitnum, enum machine_mode fieldmode, + rtx value) +{ + if (!store_bit_field_1 (str_rtx, bitsize, bitnum, fieldmode, value, true)) + gcc_unreachable (); } /* Use shifts and boolean operations to store VALUE @@ -775,7 +827,7 @@ store_fixed_bit_field (rtx op0, unsigned HOST_WIDE_INT offset, { enum machine_mode mode; unsigned int total_bits = BITS_PER_WORD; - rtx subtarget, temp; + rtx temp; int all_zero = 0; int all_one = 0; @@ -901,26 +953,33 @@ store_fixed_bit_field (rtx op0, unsigned HOST_WIDE_INT offset, /* Now clear the chosen bits in OP0, except that if VALUE is -1 we need not bother. */ + /* We keep the intermediates in registers to allow CSE to combine + consecutive bitfield assignments. */ - subtarget = (REG_P (op0) || ! flag_force_mem) ? op0 : 0; + temp = force_reg (mode, op0); if (! all_one) { - temp = expand_binop (mode, and_optab, op0, + temp = expand_binop (mode, and_optab, temp, mask_rtx (mode, bitpos, bitsize, 1), - subtarget, 1, OPTAB_LIB_WIDEN); - subtarget = temp; + NULL_RTX, 1, OPTAB_LIB_WIDEN); + temp = force_reg (mode, temp); } - else - temp = op0; /* Now logical-or VALUE into OP0, unless it is zero. */ if (! all_zero) - temp = expand_binop (mode, ior_optab, temp, value, - subtarget, 1, OPTAB_LIB_WIDEN); + { + temp = expand_binop (mode, ior_optab, temp, value, + NULL_RTX, 1, OPTAB_LIB_WIDEN); + temp = force_reg (mode, temp); + } + if (op0 != temp) - emit_move_insn (op0, temp); + { + op0 = copy_rtx (op0); + emit_move_insn (op0, temp); + } } /* Store a bit field that is split across multiple accessible memory objects. @@ -1044,40 +1103,52 @@ store_split_bit_field (rtx op0, unsigned HOST_WIDE_INT bitsize, } } -/* Generate code to extract a byte-field from STR_RTX - containing BITSIZE bits, starting at BITNUM, - and put it in TARGET if possible (if TARGET is nonzero). - Regardless of TARGET, we return the rtx for where the value is placed. +/* A subroutine of extract_bit_field_1 that converts return value X + to either MODE or TMODE. MODE, TMODE and UNSIGNEDP are arguments + to extract_bit_field. */ - STR_RTX is the structure containing the byte (a REG or MEM). - UNSIGNEDP is nonzero if this is an unsigned bit field. - MODE is the natural mode of the field value once extracted. - TMODE is the mode the caller would like the value to have; - but the value may be returned with type MODE instead. +static rtx +convert_extracted_bit_field (rtx x, enum machine_mode mode, + enum machine_mode tmode, bool unsignedp) +{ + if (GET_MODE (x) == tmode || GET_MODE (x) == mode) + return x; - TOTAL_SIZE is the size in bytes of the containing structure, - or -1 if varying. + /* If the x mode is not a scalar integral, first convert to the + integer mode of that size and then access it as a floating-point + value via a SUBREG. */ + if (!SCALAR_INT_MODE_P (tmode)) + { + enum machine_mode smode; - If a TARGET is specified and we can store in it at no extra cost, - we do so, and return TARGET. - Otherwise, we return a REG of mode TMODE or MODE, with TMODE preferred - if they are equally easy. 
*/ + smode = mode_for_size (GET_MODE_BITSIZE (tmode), MODE_INT, 0); + x = convert_to_mode (smode, x, unsignedp); + x = force_reg (smode, x); + return gen_lowpart (tmode, x); + } -rtx -extract_bit_field (rtx str_rtx, unsigned HOST_WIDE_INT bitsize, - unsigned HOST_WIDE_INT bitnum, int unsignedp, rtx target, - enum machine_mode mode, enum machine_mode tmode) + return convert_to_mode (tmode, x, unsignedp); +} + +/* A subroutine of extract_bit_field, with the same arguments. + If FALLBACK_P is true, fall back to extract_fixed_bit_field + if we can find no other means of implementing the operation. + if FALLBACK_P is false, return NULL instead. */ + +static rtx +extract_bit_field_1 (rtx str_rtx, unsigned HOST_WIDE_INT bitsize, + unsigned HOST_WIDE_INT bitnum, int unsignedp, rtx target, + enum machine_mode mode, enum machine_mode tmode, + bool fallback_p) { unsigned int unit = (MEM_P (str_rtx)) ? BITS_PER_UNIT : BITS_PER_WORD; unsigned HOST_WIDE_INT offset, bitpos; rtx op0 = str_rtx; - rtx spec_target = target; - rtx spec_target_subreg = 0; enum machine_mode int_mode; - enum machine_mode extv_mode = mode_for_extraction (EP_extv, 0); - enum machine_mode extzv_mode = mode_for_extraction (EP_extzv, 0); + enum machine_mode ext_mode; enum machine_mode mode1; + enum insn_code icode; int byte_offset; if (tmode == VOIDmode) @@ -1104,18 +1175,48 @@ extract_bit_field (rtx str_rtx, unsigned HOST_WIDE_INT bitsize, return op0; } + /* See if we can get a better vector mode before extracting. */ + if (VECTOR_MODE_P (GET_MODE (op0)) + && !MEM_P (op0) + && GET_MODE_INNER (GET_MODE (op0)) != tmode) + { + enum machine_mode new_mode; + int nunits = GET_MODE_NUNITS (GET_MODE (op0)); + + if (GET_MODE_CLASS (tmode) == MODE_FLOAT) + new_mode = MIN_MODE_VECTOR_FLOAT; + else if (GET_MODE_CLASS (tmode) == MODE_FRACT) + new_mode = MIN_MODE_VECTOR_FRACT; + else if (GET_MODE_CLASS (tmode) == MODE_UFRACT) + new_mode = MIN_MODE_VECTOR_UFRACT; + else if (GET_MODE_CLASS (tmode) == MODE_ACCUM) + new_mode = MIN_MODE_VECTOR_ACCUM; + else if (GET_MODE_CLASS (tmode) == MODE_UACCUM) + new_mode = MIN_MODE_VECTOR_UACCUM; + else + new_mode = MIN_MODE_VECTOR_INT; + + for (; new_mode != VOIDmode ; new_mode = GET_MODE_WIDER_MODE (new_mode)) + if (GET_MODE_NUNITS (new_mode) == nunits + && GET_MODE_INNER (new_mode) == tmode + && targetm.vector_mode_supported_p (new_mode)) + break; + if (new_mode != VOIDmode) + op0 = gen_lowpart (new_mode, op0); + } + /* Use vec_extract patterns for extracting parts of vectors whenever available. 
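Besides the optab check, the condition below verifies that the first and last bits of the field fall in the same vector element, since vec_extract can only read out one whole element.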
*/ if (VECTOR_MODE_P (GET_MODE (op0)) && !MEM_P (op0) - && (vec_extract_optab->handlers[GET_MODE (op0)].insn_code + && (optab_handler (vec_extract_optab, GET_MODE (op0))->insn_code != CODE_FOR_nothing) && ((bitnum + bitsize - 1) / GET_MODE_BITSIZE (GET_MODE_INNER (GET_MODE (op0))) == bitnum / GET_MODE_BITSIZE (GET_MODE_INNER (GET_MODE (op0))))) { enum machine_mode outermode = GET_MODE (op0); enum machine_mode innermode = GET_MODE_INNER (outermode); - int icode = (int) vec_extract_optab->handlers[outermode].insn_code; + int icode = (int) optab_handler (vec_extract_optab, outermode)->insn_code; unsigned HOST_WIDE_INT pos = bitnum / GET_MODE_BITSIZE (innermode); rtx rtxpos = GEN_INT (pos); rtx src = op0; @@ -1154,6 +1255,8 @@ extract_bit_field (rtx str_rtx, unsigned HOST_WIDE_INT bitsize, { emit_insn (seq); emit_insn (pat); + if (mode0 != mode) + return gen_lowpart (tmode, dest); return dest; } } @@ -1239,18 +1342,15 @@ extract_bit_field (rtx str_rtx, unsigned HOST_WIDE_INT bitsize, || (offset * BITS_PER_UNIT % bitsize == 0 && MEM_ALIGN (op0) % bitsize == 0))))) { - if (mode1 != GET_MODE (op0)) + if (MEM_P (op0)) + op0 = adjust_address (op0, mode1, offset); + else if (mode1 != GET_MODE (op0)) { - if (MEM_P (op0)) - op0 = adjust_address (op0, mode1, offset); - else - { - rtx sub = simplify_gen_subreg (mode1, op0, GET_MODE (op0), - byte_offset); - if (sub == NULL) - goto no_subreg_mode_swap; - op0 = sub; - } + rtx sub = simplify_gen_subreg (mode1, op0, GET_MODE (op0), + byte_offset); + if (sub == NULL) + goto no_subreg_mode_swap; + op0 = sub; } if (mode1 != mode) return convert_to_mode (tmode, op0, unsignedp); @@ -1274,7 +1374,7 @@ extract_bit_field (rtx str_rtx, unsigned HOST_WIDE_INT bitsize, target = gen_reg_rtx (mode); /* Indicate for flow that the entire target reg is being set. */ - emit_insn (gen_rtx_CLOBBER (VOIDmode, target)); + emit_clobber (target); for (i = 0; i < nwords; i++) { @@ -1362,289 +1462,172 @@ extract_bit_field (rtx str_rtx, unsigned HOST_WIDE_INT bitsize, } /* Now OFFSET is nonzero only for memory operands. */ - - if (unsignedp) + ext_mode = mode_for_extraction (unsignedp ? EP_extzv : EP_extv, 0); + icode = unsignedp ? CODE_FOR_extzv : CODE_FOR_extv; + if (ext_mode != MAX_MACHINE_MODE + && bitsize > 0 + && GET_MODE_BITSIZE (ext_mode) >= bitsize + /* If op0 is a register, we need it in EXT_MODE to make it + acceptable to the format of ext(z)v. */ + && !(GET_CODE (op0) == SUBREG && GET_MODE (op0) != ext_mode) + && !((REG_P (op0) || GET_CODE (op0) == SUBREG) + && (bitsize + bitpos > GET_MODE_BITSIZE (ext_mode))) + && check_predicate_volatile_ok (icode, 1, op0, GET_MODE (op0))) { - if (HAVE_extzv - && (GET_MODE_BITSIZE (extzv_mode) >= bitsize) - && ! ((REG_P (op0) || GET_CODE (op0) == SUBREG) - && (bitsize + bitpos > GET_MODE_BITSIZE (extzv_mode)))) - { - unsigned HOST_WIDE_INT xbitpos = bitpos, xoffset = offset; - rtx bitsize_rtx, bitpos_rtx; - rtx last = get_last_insn (); - rtx xop0 = op0; - rtx xtarget = target; - rtx xspec_target = spec_target; - rtx xspec_target_subreg = spec_target_subreg; - rtx pat; - enum machine_mode maxmode = mode_for_extraction (EP_extzv, 0); - - if (MEM_P (xop0)) - { - int save_volatile_ok = volatile_ok; - volatile_ok = 1; - - /* Is the memory operand acceptable? */ - if (! ((*insn_data[(int) CODE_FOR_extzv].operand[1].predicate) - (xop0, GET_MODE (xop0)))) - { - /* No, load into a reg and extract from there. */ - enum machine_mode bestmode; - - /* Get the mode to use for inserting into this field. 
If - OP0 is BLKmode, get the smallest mode consistent with the - alignment. If OP0 is a non-BLKmode object that is no - wider than MAXMODE, use its mode. Otherwise, use the - smallest mode containing the field. */ - - if (GET_MODE (xop0) == BLKmode - || (GET_MODE_SIZE (GET_MODE (op0)) - > GET_MODE_SIZE (maxmode))) - bestmode = get_best_mode (bitsize, bitnum, - MEM_ALIGN (xop0), maxmode, - MEM_VOLATILE_P (xop0)); - else - bestmode = GET_MODE (xop0); - - if (bestmode == VOIDmode - || (SLOW_UNALIGNED_ACCESS (bestmode, MEM_ALIGN (xop0)) - && GET_MODE_BITSIZE (bestmode) > MEM_ALIGN (xop0))) - goto extzv_loses; - - /* Compute offset as multiple of this unit, - counting in bytes. */ - unit = GET_MODE_BITSIZE (bestmode); - xoffset = (bitnum / unit) * GET_MODE_SIZE (bestmode); - xbitpos = bitnum % unit; - xop0 = adjust_address (xop0, bestmode, xoffset); - - /* Fetch it to a register in that size. */ - xop0 = force_reg (bestmode, xop0); - - /* XBITPOS counts within UNIT, which is what is expected. */ - } - else - /* Get ref to first byte containing part of the field. */ - xop0 = adjust_address (xop0, byte_mode, xoffset); - - volatile_ok = save_volatile_ok; - } + unsigned HOST_WIDE_INT xbitpos = bitpos, xoffset = offset; + rtx bitsize_rtx, bitpos_rtx; + rtx last = get_last_insn (); + rtx xop0 = op0; + rtx xtarget = target; + rtx xspec_target = target; + rtx xspec_target_subreg = 0; + rtx pat; - /* If op0 is a register, we need it in MAXMODE (which is usually - SImode). to make it acceptable to the format of extzv. */ - if (GET_CODE (xop0) == SUBREG && GET_MODE (xop0) != maxmode) - goto extzv_loses; - if (REG_P (xop0) && GET_MODE (xop0) != maxmode) - xop0 = gen_rtx_SUBREG (maxmode, xop0, 0); + /* If op0 is a register, we need it in EXT_MODE to make it + acceptable to the format of ext(z)v. */ + if (REG_P (xop0) && GET_MODE (xop0) != ext_mode) + xop0 = gen_rtx_SUBREG (ext_mode, xop0, 0); + if (MEM_P (xop0)) + /* Get ref to first byte containing part of the field. */ + xop0 = adjust_address (xop0, byte_mode, xoffset); - /* On big-endian machines, we count bits from the most significant. - If the bit field insn does not, we must invert. */ - if (BITS_BIG_ENDIAN != BYTES_BIG_ENDIAN) - xbitpos = unit - bitsize - xbitpos; + /* On big-endian machines, we count bits from the most significant. + If the bit field insn does not, we must invert. */ + if (BITS_BIG_ENDIAN != BYTES_BIG_ENDIAN) + xbitpos = unit - bitsize - xbitpos; - /* Now convert from counting within UNIT to counting in MAXMODE. */ - if (BITS_BIG_ENDIAN && !MEM_P (xop0)) - xbitpos += GET_MODE_BITSIZE (maxmode) - unit; + /* Now convert from counting within UNIT to counting in EXT_MODE. 
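For a register operand, the extraction pattern numbers bits within the whole EXT_MODE value, so with BITS_BIG_ENDIAN the position computed within UNIT must be biased by the width difference.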
*/ + if (BITS_BIG_ENDIAN && !MEM_P (xop0)) + xbitpos += GET_MODE_BITSIZE (ext_mode) - unit; - unit = GET_MODE_BITSIZE (maxmode); + unit = GET_MODE_BITSIZE (ext_mode); - if (xtarget == 0 - || (flag_force_mem && MEM_P (xtarget))) - xtarget = xspec_target = gen_reg_rtx (tmode); + if (xtarget == 0) + xtarget = xspec_target = gen_reg_rtx (tmode); - if (GET_MODE (xtarget) != maxmode) + if (GET_MODE (xtarget) != ext_mode) + { + if (REG_P (xtarget)) { - if (REG_P (xtarget)) - { - int wider = (GET_MODE_SIZE (maxmode) - > GET_MODE_SIZE (GET_MODE (xtarget))); - xtarget = gen_lowpart (maxmode, xtarget); - if (wider) - xspec_target_subreg = xtarget; - } - else - xtarget = gen_reg_rtx (maxmode); + xtarget = gen_lowpart (ext_mode, xtarget); + if (GET_MODE_SIZE (ext_mode) + > GET_MODE_SIZE (GET_MODE (xspec_target))) + xspec_target_subreg = xtarget; } + else + xtarget = gen_reg_rtx (ext_mode); + } - /* If this machine's extzv insists on a register target, - make sure we have one. */ - if (! ((*insn_data[(int) CODE_FOR_extzv].operand[0].predicate) - (xtarget, maxmode))) - xtarget = gen_reg_rtx (maxmode); + /* If this machine's ext(z)v insists on a register target, + make sure we have one. */ + if (!insn_data[(int) icode].operand[0].predicate (xtarget, ext_mode)) + xtarget = gen_reg_rtx (ext_mode); - bitsize_rtx = GEN_INT (bitsize); - bitpos_rtx = GEN_INT (xbitpos); + bitsize_rtx = GEN_INT (bitsize); + bitpos_rtx = GEN_INT (xbitpos); - pat = gen_extzv (xtarget, xop0, bitsize_rtx, bitpos_rtx); - if (pat) - { - emit_insn (pat); - target = xtarget; - spec_target = xspec_target; - spec_target_subreg = xspec_target_subreg; - } - else - { - delete_insns_since (last); - target = extract_fixed_bit_field (int_mode, op0, offset, bitsize, - bitpos, target, 1); - } + pat = (unsignedp + ? gen_extzv (xtarget, xop0, bitsize_rtx, bitpos_rtx) + : gen_extv (xtarget, xop0, bitsize_rtx, bitpos_rtx)); + if (pat) + { + emit_insn (pat); + if (xtarget == xspec_target) + return xtarget; + if (xtarget == xspec_target_subreg) + return xspec_target; + return convert_extracted_bit_field (xtarget, mode, tmode, unsignedp); } - else - extzv_loses: - target = extract_fixed_bit_field (int_mode, op0, offset, bitsize, - bitpos, target, 1); + delete_insns_since (last); } - else - { - if (HAVE_extv - && (GET_MODE_BITSIZE (extv_mode) >= bitsize) - && ! ((REG_P (op0) || GET_CODE (op0) == SUBREG) - && (bitsize + bitpos > GET_MODE_BITSIZE (extv_mode)))) - { - int xbitpos = bitpos, xoffset = offset; - rtx bitsize_rtx, bitpos_rtx; - rtx last = get_last_insn (); - rtx xop0 = op0, xtarget = target; - rtx xspec_target = spec_target; - rtx xspec_target_subreg = spec_target_subreg; - rtx pat; - enum machine_mode maxmode = mode_for_extraction (EP_extv, 0); - - if (MEM_P (xop0)) - { - /* Is the memory operand acceptable? */ - if (! ((*insn_data[(int) CODE_FOR_extv].operand[1].predicate) - (xop0, GET_MODE (xop0)))) - { - /* No, load into a reg and extract from there. */ - enum machine_mode bestmode; - - /* Get the mode to use for inserting into this field. If - OP0 is BLKmode, get the smallest mode consistent with the - alignment. If OP0 is a non-BLKmode object that is no - wider than MAXMODE, use its mode. Otherwise, use the - smallest mode containing the field. 
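Even then the copy is only worthwhile if BESTMODE is not both slow to access and under-aligned, and if a single BESTMODE unit actually covers the whole field; otherwise the shift-and-mask fallback is used.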
*/ - - if (GET_MODE (xop0) == BLKmode - || (GET_MODE_SIZE (GET_MODE (op0)) - > GET_MODE_SIZE (maxmode))) - bestmode = get_best_mode (bitsize, bitnum, - MEM_ALIGN (xop0), maxmode, - MEM_VOLATILE_P (xop0)); - else - bestmode = GET_MODE (xop0); - - if (bestmode == VOIDmode - || (SLOW_UNALIGNED_ACCESS (bestmode, MEM_ALIGN (xop0)) - && GET_MODE_BITSIZE (bestmode) > MEM_ALIGN (xop0))) - goto extv_loses; - - /* Compute offset as multiple of this unit, - counting in bytes. */ - unit = GET_MODE_BITSIZE (bestmode); - xoffset = (bitnum / unit) * GET_MODE_SIZE (bestmode); - xbitpos = bitnum % unit; - xop0 = adjust_address (xop0, bestmode, xoffset); - - /* Fetch it to a register in that size. */ - xop0 = force_reg (bestmode, xop0); - - /* XBITPOS counts within UNIT, which is what is expected. */ - } - else - /* Get ref to first byte containing part of the field. */ - xop0 = adjust_address (xop0, byte_mode, xoffset); - } - - /* If op0 is a register, we need it in MAXMODE (which is usually - SImode) to make it acceptable to the format of extv. */ - if (GET_CODE (xop0) == SUBREG && GET_MODE (xop0) != maxmode) - goto extv_loses; - if (REG_P (xop0) && GET_MODE (xop0) != maxmode) - xop0 = gen_rtx_SUBREG (maxmode, xop0, 0); - /* On big-endian machines, we count bits from the most significant. - If the bit field insn does not, we must invert. */ - if (BITS_BIG_ENDIAN != BYTES_BIG_ENDIAN) - xbitpos = unit - bitsize - xbitpos; - - /* XBITPOS counts within a size of UNIT. - Adjust to count within a size of MAXMODE. */ - if (BITS_BIG_ENDIAN && !MEM_P (xop0)) - xbitpos += (GET_MODE_BITSIZE (maxmode) - unit); + /* If OP0 is a memory, try copying it to a register and seeing if a + cheap register alternative is available. */ + if (ext_mode != MAX_MACHINE_MODE && MEM_P (op0)) + { + enum machine_mode bestmode; + + /* Get the mode to use for inserting into this field. If + OP0 is BLKmode, get the smallest mode consistent with the + alignment. If OP0 is a non-BLKmode object that is no + wider than EXT_MODE, use its mode. Otherwise, use the + smallest mode containing the field. */ + + if (GET_MODE (op0) == BLKmode + || (ext_mode != MAX_MACHINE_MODE + && GET_MODE_SIZE (GET_MODE (op0)) > GET_MODE_SIZE (ext_mode))) + bestmode = get_best_mode (bitsize, bitnum, MEM_ALIGN (op0), + (ext_mode == MAX_MACHINE_MODE + ? VOIDmode : ext_mode), + MEM_VOLATILE_P (op0)); + else + bestmode = GET_MODE (op0); - unit = GET_MODE_BITSIZE (maxmode); + if (bestmode != VOIDmode + && !(SLOW_UNALIGNED_ACCESS (bestmode, MEM_ALIGN (op0)) + && GET_MODE_BITSIZE (bestmode) > MEM_ALIGN (op0))) + { + unsigned HOST_WIDE_INT xoffset, xbitpos; - if (xtarget == 0 - || (flag_force_mem && MEM_P (xtarget))) - xtarget = xspec_target = gen_reg_rtx (tmode); + /* Compute the offset as a multiple of this unit, + counting in bytes. */ + unit = GET_MODE_BITSIZE (bestmode); + xoffset = (bitnum / unit) * GET_MODE_SIZE (bestmode); + xbitpos = bitnum % unit; - if (GET_MODE (xtarget) != maxmode) + /* Make sure the register is big enough for the whole field. */ + if (xoffset * BITS_PER_UNIT + unit + >= offset * BITS_PER_UNIT + bitsize) { - if (REG_P (xtarget)) - { - int wider = (GET_MODE_SIZE (maxmode) - > GET_MODE_SIZE (GET_MODE (xtarget))); - xtarget = gen_lowpart (maxmode, xtarget); - if (wider) - xspec_target_subreg = xtarget; - } - else - xtarget = gen_reg_rtx (maxmode); - } + rtx last, result, xop0; - /* If this machine's extv insists on a register target, - make sure we have one. */ - if (! 
((*insn_data[(int) CODE_FOR_extv].operand[0].predicate) - (xtarget, maxmode))) - xtarget = gen_reg_rtx (maxmode); + last = get_last_insn (); - bitsize_rtx = GEN_INT (bitsize); - bitpos_rtx = GEN_INT (xbitpos); + /* Fetch it to a register in that size. */ + xop0 = adjust_address (op0, bestmode, xoffset); + xop0 = force_reg (bestmode, xop0); + result = extract_bit_field_1 (xop0, bitsize, xbitpos, + unsignedp, target, + mode, tmode, false); + if (result) + return result; - pat = gen_extv (xtarget, xop0, bitsize_rtx, bitpos_rtx); - if (pat) - { - emit_insn (pat); - target = xtarget; - spec_target = xspec_target; - spec_target_subreg = xspec_target_subreg; - } - else - { delete_insns_since (last); - target = extract_fixed_bit_field (int_mode, op0, offset, bitsize, - bitpos, target, 0); } } - else - extv_loses: - target = extract_fixed_bit_field (int_mode, op0, offset, bitsize, - bitpos, target, 0); } - if (target == spec_target) - return target; - if (target == spec_target_subreg) - return spec_target; - if (GET_MODE (target) != tmode && GET_MODE (target) != mode) - { - /* If the target mode is not a scalar integral, first convert to the - integer mode of that size and then access it as a floating-point - value via a SUBREG. */ - if (!SCALAR_INT_MODE_P (tmode)) - { - enum machine_mode smode - = mode_for_size (GET_MODE_BITSIZE (tmode), MODE_INT, 0); - target = convert_to_mode (smode, target, unsignedp); - target = force_reg (smode, target); - return gen_lowpart (tmode, target); - } - return convert_to_mode (tmode, target, unsignedp); - } - return target; + if (!fallback_p) + return NULL; + + target = extract_fixed_bit_field (int_mode, op0, offset, bitsize, + bitpos, target, unsignedp); + return convert_extracted_bit_field (target, mode, tmode, unsignedp); +} + +/* Generate code to extract a byte-field from STR_RTX + containing BITSIZE bits, starting at BITNUM, + and put it in TARGET if possible (if TARGET is nonzero). + Regardless of TARGET, we return the rtx for where the value is placed. + + STR_RTX is the structure containing the byte (a REG or MEM). + UNSIGNEDP is nonzero if this is an unsigned bit field. + MODE is the natural mode of the field value once extracted. + TMODE is the mode the caller would like the value to have; + but the value may be returned with type MODE instead. + + If a TARGET is specified and we can store in it at no extra cost, + we do so, and return TARGET. + Otherwise, we return a REG of mode TMODE or MODE, with TMODE preferred + if they are equally easy. */ + +rtx +extract_bit_field (rtx str_rtx, unsigned HOST_WIDE_INT bitsize, + unsigned HOST_WIDE_INT bitnum, int unsignedp, rtx target, + enum machine_mode mode, enum machine_mode tmode) +{ + return extract_bit_field_1 (str_rtx, bitsize, bitnum, unsignedp, + target, mode, tmode, true); } /* Extract a bit field using shifts and boolean operations @@ -1855,152 +1838,6 @@ lshift_value (enum machine_mode mode, rtx value, int bitpos, int bitsize) return immed_double_const (low, high, mode); } -/* Extract a bit field from a memory by forcing the alignment of the - memory. This efficient only if the field spans at least 4 boundaries. - - OP0 is the MEM. - BITSIZE is the field width; BITPOS is the position of the first bit. - UNSIGNEDP is true if the result should be zero-extended. 
*/ - -static rtx -extract_force_align_mem_bit_field (rtx op0, unsigned HOST_WIDE_INT bitsize, - unsigned HOST_WIDE_INT bitpos, - int unsignedp) -{ - enum machine_mode mode, dmode; - unsigned int m_bitsize, m_size; - unsigned int sign_shift_up, sign_shift_dn; - rtx base, a1, a2, v1, v2, comb, shift, result, start; - - /* Choose a mode that will fit BITSIZE. */ - mode = smallest_mode_for_size (bitsize, MODE_INT); - m_size = GET_MODE_SIZE (mode); - m_bitsize = GET_MODE_BITSIZE (mode); - - /* Choose a mode twice as wide. Fail if no such mode exists. */ - dmode = mode_for_size (m_bitsize * 2, MODE_INT, false); - if (dmode == BLKmode) - return NULL; - - do_pending_stack_adjust (); - start = get_last_insn (); - - /* At the end, we'll need an additional shift to deal with sign/zero - extension. By default this will be a left+right shift of the - appropriate size. But we may be able to eliminate one of them. */ - sign_shift_up = sign_shift_dn = m_bitsize - bitsize; - - if (STRICT_ALIGNMENT) - { - base = plus_constant (XEXP (op0, 0), bitpos / BITS_PER_UNIT); - bitpos %= BITS_PER_UNIT; - - /* We load two values to be concatenate. There's an edge condition - that bears notice -- an aligned value at the end of a page can - only load one value lest we segfault. So the two values we load - are at "base & -size" and "(base + size - 1) & -size". If base - is unaligned, the addresses will be aligned and sequential; if - base is aligned, the addresses will both be equal to base. */ - - a1 = expand_simple_binop (Pmode, AND, force_operand (base, NULL), - GEN_INT (-(HOST_WIDE_INT)m_size), - NULL, true, OPTAB_LIB_WIDEN); - mark_reg_pointer (a1, m_bitsize); - v1 = gen_rtx_MEM (mode, a1); - set_mem_align (v1, m_bitsize); - v1 = force_reg (mode, validize_mem (v1)); - - a2 = plus_constant (base, GET_MODE_SIZE (mode) - 1); - a2 = expand_simple_binop (Pmode, AND, force_operand (a2, NULL), - GEN_INT (-(HOST_WIDE_INT)m_size), - NULL, true, OPTAB_LIB_WIDEN); - v2 = gen_rtx_MEM (mode, a2); - set_mem_align (v2, m_bitsize); - v2 = force_reg (mode, validize_mem (v2)); - - /* Combine these two values into a double-word value. */ - if (m_bitsize == BITS_PER_WORD) - { - comb = gen_reg_rtx (dmode); - emit_insn (gen_rtx_CLOBBER (VOIDmode, comb)); - emit_move_insn (gen_rtx_SUBREG (mode, comb, 0), v1); - emit_move_insn (gen_rtx_SUBREG (mode, comb, m_size), v2); - } - else - { - if (BYTES_BIG_ENDIAN) - comb = v1, v1 = v2, v2 = comb; - v1 = convert_modes (dmode, mode, v1, true); - if (v1 == NULL) - goto fail; - v2 = convert_modes (dmode, mode, v2, true); - v2 = expand_simple_binop (dmode, ASHIFT, v2, GEN_INT (m_bitsize), - NULL, true, OPTAB_LIB_WIDEN); - if (v2 == NULL) - goto fail; - comb = expand_simple_binop (dmode, IOR, v1, v2, NULL, - true, OPTAB_LIB_WIDEN); - if (comb == NULL) - goto fail; - } - - shift = expand_simple_binop (Pmode, AND, base, GEN_INT (m_size - 1), - NULL, true, OPTAB_LIB_WIDEN); - shift = expand_mult (Pmode, shift, GEN_INT (BITS_PER_UNIT), NULL, 1); - - if (bitpos != 0) - { - if (sign_shift_up <= bitpos) - bitpos -= sign_shift_up, sign_shift_up = 0; - shift = expand_simple_binop (Pmode, PLUS, shift, GEN_INT (bitpos), - NULL, true, OPTAB_LIB_WIDEN); - } - } - else - { - unsigned HOST_WIDE_INT offset = bitpos / BITS_PER_UNIT; - bitpos %= BITS_PER_UNIT; - - /* When strict alignment is not required, we can just load directly - from memory without masking. If the remaining BITPOS offset is - small enough, we may be able to do all operations in MODE as - opposed to DMODE. 
*/ - if (bitpos + bitsize <= m_bitsize) - dmode = mode; - comb = adjust_address (op0, dmode, offset); - - if (sign_shift_up <= bitpos) - bitpos -= sign_shift_up, sign_shift_up = 0; - shift = GEN_INT (bitpos); - } - - /* Shift down the double-word such that the requested value is at bit 0. */ - if (shift != const0_rtx) - comb = expand_simple_binop (dmode, unsignedp ? LSHIFTRT : ASHIFTRT, - comb, shift, NULL, unsignedp, OPTAB_LIB_WIDEN); - if (comb == NULL) - goto fail; - - /* If the field exactly matches MODE, then all we need to do is return the - lowpart. Otherwise, shift to get the sign bits set properly. */ - result = force_reg (mode, gen_lowpart (mode, comb)); - - if (sign_shift_up) - result = expand_simple_binop (mode, ASHIFT, result, - GEN_INT (sign_shift_up), - NULL_RTX, 0, OPTAB_LIB_WIDEN); - if (sign_shift_dn) - result = expand_simple_binop (mode, unsignedp ? LSHIFTRT : ASHIFTRT, - result, GEN_INT (sign_shift_dn), - NULL_RTX, 0, OPTAB_LIB_WIDEN); - - return result; - - fail: - delete_insns_since (start); - return NULL; -} - /* Extract a bit field that is split across two words and return an RTX for the result. @@ -2022,16 +1859,7 @@ extract_split_bit_field (rtx op0, unsigned HOST_WIDE_INT bitsize, if (REG_P (op0) || GET_CODE (op0) == SUBREG) unit = BITS_PER_WORD; else - { - unit = MIN (MEM_ALIGN (op0), BITS_PER_WORD); - if (0 && bitsize / unit > 2) - { - rtx tmp = extract_force_align_mem_bit_field (op0, bitsize, bitpos, - unsignedp); - if (tmp) - return tmp; - } - } + unit = MIN (MEM_ALIGN (op0), BITS_PER_WORD); while (bitsdone < bitsize) { @@ -2118,6 +1946,66 @@ extract_split_bit_field (rtx op0, unsigned HOST_WIDE_INT bitsize, NULL_RTX, 0); } +/* Try to read the low bits of SRC as an rvalue of mode MODE, preserving + the bit pattern. SRC_MODE is the mode of SRC; if this is smaller than + MODE, fill the upper bits with zeros. Fail if the layout of either + mode is unknown (as for CC modes) or if the extraction would involve + unprofitable mode punning. Return the value on success, otherwise + return null. + + This is different from gen_lowpart* in these respects: + + - the returned value must always be considered an rvalue + + - when MODE is wider than SRC_MODE, the extraction involves + a zero extension + + - when MODE is smaller than SRC_MODE, the extraction involves + a truncation (and is thus subject to TRULY_NOOP_TRUNCATION). + + In other words, this routine performs a computation, whereas the + gen_lowpart* routines are conceptually lvalue or rvalue subreg + operations. 
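For example, on a typical target with 32-bit floats, extract_low_bits (SImode, SFmode, X) would return the bit pattern of X as an SImode rvalue, while extract_low_bits (HImode, SImode, X) would return its low 16 bits; either may fail and return null if the modes involved cannot be tied.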
*/ + +rtx +extract_low_bits (enum machine_mode mode, enum machine_mode src_mode, rtx src) +{ + enum machine_mode int_mode, src_int_mode; + + if (mode == src_mode) + return src; + + if (CONSTANT_P (src)) + return simplify_gen_subreg (mode, src, src_mode, + subreg_lowpart_offset (mode, src_mode)); + + if (GET_MODE_CLASS (mode) == MODE_CC || GET_MODE_CLASS (src_mode) == MODE_CC) + return NULL_RTX; + + if (GET_MODE_BITSIZE (mode) == GET_MODE_BITSIZE (src_mode) + && MODES_TIEABLE_P (mode, src_mode)) + { + rtx x = gen_lowpart_common (mode, src); + if (x) + return x; + } + + src_int_mode = int_mode_for_mode (src_mode); + int_mode = int_mode_for_mode (mode); + if (src_int_mode == BLKmode || int_mode == BLKmode) + return NULL_RTX; + + if (!MODES_TIEABLE_P (src_int_mode, src_mode)) + return NULL_RTX; + if (!MODES_TIEABLE_P (int_mode, mode)) + return NULL_RTX; + + src = gen_lowpart (src_int_mode, src); + src = convert_modes (int_mode, src_int_mode, src, true); + src = gen_lowpart (mode, src); + return src; +} + /* Add INC into TARGET. */ void @@ -2156,14 +2044,32 @@ expand_shift (enum tree_code code, enum machine_mode mode, rtx shifted, rtx op1, temp = 0; int left = (code == LSHIFT_EXPR || code == LROTATE_EXPR); int rotate = (code == LROTATE_EXPR || code == RROTATE_EXPR); + optab lshift_optab = ashl_optab; + optab rshift_arith_optab = ashr_optab; + optab rshift_uns_optab = lshr_optab; + optab lrotate_optab = rotl_optab; + optab rrotate_optab = rotr_optab; + enum machine_mode op1_mode; int try; + op1 = expand_normal (amount); + op1_mode = GET_MODE (op1); + + /* Determine whether the shift/rotate amount is a vector, or scalar. If the + shift amount is a vector, use the vector/vector shift patterns. */ + if (VECTOR_MODE_P (mode) && VECTOR_MODE_P (op1_mode)) + { + lshift_optab = vashl_optab; + rshift_arith_optab = vashr_optab; + rshift_uns_optab = vlshr_optab; + lrotate_optab = vrotl_optab; + rrotate_optab = vrotr_optab; + } + /* Previously detected shift-counts computed by NEGATE_EXPR and shifted in the other direction; but that does not work on all machines. */ - op1 = expand_expr (amount, NULL_RTX, VOIDmode, 0); - if (SHIFT_COUNT_TRUNCATED) { if (GET_CODE (op1) == CONST_INT @@ -2185,7 +2091,9 @@ expand_shift (enum tree_code code, enum machine_mode mode, rtx shifted, && GET_CODE (op1) == CONST_INT && INTVAL (op1) > 0 && INTVAL (op1) < GET_MODE_BITSIZE (mode) - && shift_cost[mode][INTVAL (op1)] > INTVAL (op1) * add_cost[mode]) + && INTVAL (op1) < MAX_BITS_PER_WORD + && shift_cost[mode][INTVAL (op1)] > INTVAL (op1) * add_cost[mode] + && shift_cost[mode][INTVAL (op1)] != MAX_COST) { int i; for (i = 0; i < INTVAL (op1); i++) @@ -2228,44 +2136,35 @@ expand_shift (enum tree_code code, enum machine_mode mode, rtx shifted, code below. */ rtx subtarget = target == shifted ? 0 : target; + tree new_amount, other_amount; rtx temp1; tree type = TREE_TYPE (amount); - tree new_amount = make_tree (type, op1); - tree other_amount + if (GET_MODE (op1) != TYPE_MODE (type) + && GET_MODE (op1) != VOIDmode) + op1 = convert_to_mode (TYPE_MODE (type), op1, 1); + new_amount = make_tree (type, op1); + other_amount = fold_build2 (MINUS_EXPR, type, build_int_cst (type, GET_MODE_BITSIZE (mode)), - amount); + new_amount); shifted = force_reg (mode, shifted); temp = expand_shift (left ? LSHIFT_EXPR : RSHIFT_EXPR, - mode, shifted, new_amount, subtarget, 1); + mode, shifted, new_amount, 0, 1); temp1 = expand_shift (left ? 
RSHIFT_EXPR : LSHIFT_EXPR, - mode, shifted, other_amount, 0, 1); + mode, shifted, other_amount, subtarget, 1); return expand_binop (mode, ior_optab, temp, temp1, target, unsignedp, methods); } temp = expand_binop (mode, - left ? rotl_optab : rotr_optab, + left ? lrotate_optab : rrotate_optab, shifted, op1, target, unsignedp, methods); - - /* If we don't have the rotate, but we are rotating by a constant - that is in range, try a rotate in the opposite direction. */ - - if (temp == 0 && GET_CODE (op1) == CONST_INT - && INTVAL (op1) > 0 - && (unsigned int) INTVAL (op1) < GET_MODE_BITSIZE (mode)) - temp = expand_binop (mode, - left ? rotr_optab : rotl_optab, - shifted, - GEN_INT (GET_MODE_BITSIZE (mode) - - INTVAL (op1)), - target, unsignedp, methods); } else if (unsignedp) temp = expand_binop (mode, - left ? ashl_optab : lshr_optab, + left ? lshift_optab : rshift_uns_optab, shifted, op1, target, unsignedp, methods); /* Do arithmetic shifts. @@ -2284,7 +2183,7 @@ expand_shift (enum tree_code code, enum machine_mode mode, rtx shifted, /* Arithmetic shift */ temp = expand_binop (mode, - left ? ashl_optab : ashr_optab, + left ? lshift_optab : rshift_arith_optab, shifted, op1, target, unsignedp, methods1); } @@ -2298,10 +2197,18 @@ expand_shift (enum tree_code code, enum machine_mode mode, rtx shifted, return temp; } -enum alg_code { alg_unknown, alg_zero, alg_m, alg_shift, - alg_add_t_m2, alg_sub_t_m2, - alg_add_factor, alg_sub_factor, - alg_add_t2_m, alg_sub_t2_m }; +enum alg_code { + alg_unknown, + alg_zero, + alg_m, alg_shift, + alg_add_t_m2, + alg_sub_t_m2, + alg_add_factor, + alg_sub_factor, + alg_add_t2_m, + alg_sub_t2_m, + alg_impossible +}; /* This structure holds the "cost" of a multiply sequence. The "cost" field holds the total rtx_cost of every operator in the @@ -2368,17 +2275,26 @@ struct algorithm /* The entry for our multiplication cache/hash table. */ struct alg_hash_entry { /* The number we are multiplying by. */ - unsigned int t; + unsigned HOST_WIDE_INT t; /* The mode in which we are multiplying something by T. */ enum machine_mode mode; /* The best multiplication algorithm for t. */ enum alg_code alg; + + /* The cost of multiplication if ALG_CODE is not alg_impossible. + Otherwise, the cost within which multiplication by T is + impossible. */ + struct mult_cost cost; }; /* The number of cache/hash entries. */ +#if HOST_BITS_PER_WIDE_INT == 64 +#define NUM_ALG_HASH_ENTRIES 1031 +#else #define NUM_ALG_HASH_ENTRIES 307 +#endif /* Each entry of ALG_HASH caches alg_code for some integer. This is actually a hash table. If we have a collision, that the older @@ -2477,29 +2393,57 @@ synth_mult (struct algorithm *alg_out, unsigned HOST_WIDE_INT t, && alg_hash[hash_index].mode == mode && alg_hash[hash_index].alg != alg_unknown) { - cache_hit = true; cache_alg = alg_hash[hash_index].alg; - switch (cache_alg) + + if (cache_alg == alg_impossible) { - case alg_shift: - goto do_alg_shift; + /* The cache tells us that it's impossible to synthesize + multiplication by T within alg_hash[hash_index].cost. */ + if (!CHEAPER_MULT_COST (&alg_hash[hash_index].cost, cost_limit)) + /* COST_LIMIT is at least as restrictive as the one + recorded in the hash table, in which case we have no + hope of synthesizing a multiplication. Just + return. */ + return; + + /* If we get here, COST_LIMIT is less restrictive than the + one recorded in the hash table, so we may be able to + synthesize a multiplication. Proceed as if we didn't + have the cache entry. 
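Either way the entry will be refreshed below: with a real algorithm if the search succeeds, or with a new alg_impossible record carrying this looser COST_LIMIT if it fails.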
*/ + } + else + { + if (CHEAPER_MULT_COST (cost_limit, &alg_hash[hash_index].cost)) + /* The cached algorithm shows that this multiplication + requires more cost than COST_LIMIT. Just return. This + way, we don't clobber this cache entry with + alg_impossible but retain useful information. */ + return; - case alg_add_t_m2: - case alg_sub_t_m2: - goto do_alg_addsub_t_m2; + cache_hit = true; - case alg_add_factor: - case alg_sub_factor: - goto do_alg_addsub_factor; + switch (cache_alg) + { + case alg_shift: + goto do_alg_shift; - case alg_add_t2_m: - goto do_alg_add_t2_m; + case alg_add_t_m2: + case alg_sub_t_m2: + goto do_alg_addsub_t_m2; - case alg_sub_t2_m: - goto do_alg_sub_t2_m; + case alg_add_factor: + case alg_sub_factor: + goto do_alg_addsub_factor; - default: - gcc_unreachable (); + case alg_add_t2_m: + goto do_alg_add_t2_m; + + case alg_sub_t2_m: + goto do_alg_sub_t2_m; + + default: + gcc_unreachable (); + } } } @@ -2752,7 +2696,18 @@ synth_mult (struct algorithm *alg_out, unsigned HOST_WIDE_INT t, done: /* If best_cost has not decreased, we have not found any algorithm. */ if (!CHEAPER_MULT_COST (&best_cost, cost_limit)) - return; + { + /* We failed to find an algorithm. Record alg_impossible for + this case (that is, ) so that next time + we are asked to find an algorithm for T within the same or + lower COST_LIMIT, we can immediately return to the + caller. */ + alg_hash[hash_index].t = t; + alg_hash[hash_index].mode = mode; + alg_hash[hash_index].alg = alg_impossible; + alg_hash[hash_index].cost = *cost_limit; + return; + } /* Cache the result. */ if (!cache_hit) @@ -2760,6 +2715,8 @@ synth_mult (struct algorithm *alg_out, unsigned HOST_WIDE_INT t, alg_hash[hash_index].t = t; alg_hash[hash_index].mode = mode; alg_hash[hash_index].alg = best_alg->op[best_alg->ops]; + alg_hash[hash_index].cost.cost = best_cost.cost; + alg_hash[hash_index].cost.latency = best_cost.latency; } /* If we are getting a too long sequence for `struct algorithm' @@ -2797,6 +2754,17 @@ choose_mult_variant (enum machine_mode mode, HOST_WIDE_INT val, struct mult_cost limit; int op_cost; + /* Fail quickly for impossible bounds. */ + if (mult_cost < 0) + return false; + + /* Ensure that mult_cost provides a reasonable upper bound. + Any constant multiplication can be performed with less + than 2 * bits additions. */ + op_cost = 2 * GET_MODE_BITSIZE (mode) * add_cost[mode]; + if (mult_cost > op_cost) + mult_cost = op_cost; + *variant = basic_variant; limit.cost = mult_cost; limit.latency = mult_cost; @@ -2862,10 +2830,9 @@ expand_mult_const (enum machine_mode mode, rtx op0, HOST_WIDE_INT val, int opno; enum machine_mode nmode; - /* Avoid referencing memory over and over. - For speed, but also for correctness when mem is volatile. */ - if (MEM_P (op0)) - op0 = force_reg (mode, op0); + /* Avoid referencing memory over and over and invalid sharing + on SUBREGs. */ + op0 = force_reg (mode, op0); /* ACCUM starts out either as OP0 or as a zero, depending on the first operation. 
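(alg_zero and alg_m are the only codes that can appear first; every later entry in the OP array updates ACCUM in place.)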
@@ -2976,7 +2943,8 @@ expand_mult_const (enum machine_mode mode, rtx op0, HOST_WIDE_INT val,
 
 	  insn = get_last_insn ();
 	  set_unique_reg_note (insn, REG_EQUAL,
-			       gen_rtx_MULT (nmode, tem, GEN_INT (val_so_far)));
+			       gen_rtx_MULT (nmode, tem,
+					     GEN_INT (val_so_far)));
 	}
 
   if (variant == negate_variant)
@@ -3034,6 +3002,7 @@ expand_mult (enum machine_mode mode, rtx op0, rtx op1, rtx target,
       && (unsignedp || !flag_trapv))
     {
       HOST_WIDE_INT coeff = 0;
+      rtx fake_reg = gen_raw_REG (mode, LAST_VIRTUAL_REGISTER + 1);
 
       /* synth_mult does an `unsigned int' multiply.  As long as the mode is
 	 less than or equal in size to `unsigned int' this doesn't matter.
@@ -3051,8 +3020,10 @@ expand_mult (enum machine_mode mode, rtx op0, rtx op1, rtx target,
 	      && GET_MODE_BITSIZE (mode) > HOST_BITS_PER_WIDE_INT)
 	    {
 	      /* It's safe to use -INTVAL (op1) even for INT_MIN, as the
-		 result is interpreted as an unsigned coefficient.  */
-	      max_cost = rtx_cost (gen_rtx_MULT (mode, op0, op1), SET)
+		 result is interpreted as an unsigned coefficient.
+		 Exclude cost of op0 from max_cost to match the cost
+		 calculation of synth_mult.  */
+	      max_cost = rtx_cost (gen_rtx_MULT (mode, fake_reg, op1), SET)
 			 - neg_cost[mode];
 	      if (max_cost > 0
 		  && choose_mult_variant (mode, -INTVAL (op1), &algorithm,
@@ -3095,7 +3066,9 @@ expand_mult (enum machine_mode mode, rtx op0, rtx op1, rtx target,
 				 build_int_cst (NULL_TREE, floor_log2 (coeff)),
 				 target, unsignedp);
 
-	  max_cost = rtx_cost (gen_rtx_MULT (mode, op0, op1), SET);
+	  /* Exclude cost of op0 from max_cost to match the cost
+	     calculation of synth_mult.  */
+	  max_cost = rtx_cost (gen_rtx_MULT (mode, fake_reg, op1), SET);
 	  if (choose_mult_variant (mode, coeff, &algorithm, &variant,
 				   max_cost))
 	    return expand_mult_const (mode, op0, coeff, target,
@@ -3112,7 +3085,7 @@ expand_mult (enum machine_mode mode, rtx op0, rtx op1, rtx target,
 
   /* Expand x*2.0 as x+x.  */
   if (GET_CODE (op1) == CONST_DOUBLE
-      && GET_MODE_CLASS (mode) == MODE_FLOAT)
+      && SCALAR_FLOAT_MODE_P (mode))
     {
       REAL_VALUE_TYPE d;
       REAL_VALUE_FROM_CONST_DOUBLE (d, op1);
@@ -3318,6 +3291,8 @@ extract_high_half (enum machine_mode mode, rtx op)
   if (mode == word_mode)
     return gen_highpart (mode, op);
 
+  gcc_assert (!SCALAR_FLOAT_MODE_P (mode));
+
   wider_mode = GET_MODE_WIDER_MODE (mode);
   op = expand_shift (RSHIFT_EXPR, wider_mode, op,
 		     build_int_cst (NULL_TREE, GET_MODE_BITSIZE (mode)), 0, 1);
@@ -3337,6 +3312,8 @@ expand_mult_highpart_optab (enum machine_mode mode, rtx op0, rtx op1,
   rtx tem;
   int size;
 
+  gcc_assert (!SCALAR_FLOAT_MODE_P (mode));
+
   wider_mode = GET_MODE_WIDER_MODE (mode);
   size = GET_MODE_BITSIZE (mode);
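
extract_high_half and expand_mult_highpart_optab, asserted above to see only integer modes, implement "high part of a product" by widening. The C equivalent of the widen, multiply, shift, truncate steps (standard C, not GCC internals):

    #include <stdint.h>

    /* High 32 bits of a 32x32-bit unsigned multiply: widen, multiply,
       shift right by the narrow width, truncate.  These are the same
       steps the fallback path emits as RTL.  */
    static uint32_t
    mulhi_u32 (uint32_t a, uint32_t b)
    {
      uint64_t wide = (uint64_t) a * b;  /* widening multiply */
      return (uint32_t) (wide >> 32);    /* extract_high_half */
    }
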
@@ -3368,7 +3345,7 @@ expand_mult_highpart_optab (enum machine_mode mode, rtx op0, rtx op1,
 
   /* Try widening multiplication.  */
   moptab = unsignedp ? umul_widen_optab : smul_widen_optab;
-  if (moptab->handlers[wider_mode].insn_code != CODE_FOR_nothing
+  if (optab_handler (moptab, wider_mode)->insn_code != CODE_FOR_nothing
       && mul_widen_cost[wider_mode] < max_cost)
     {
       tem = expand_binop (wider_mode, moptab, op0, narrow_op1, 0,
@@ -3378,7 +3355,7 @@ expand_mult_highpart_optab (enum machine_mode mode, rtx op0, rtx op1,
     }
 
   /* Try widening the mode and perform a non-widening multiplication.  */
-  if (smul_optab->handlers[wider_mode].insn_code != CODE_FOR_nothing
+  if (optab_handler (smul_optab, wider_mode)->insn_code != CODE_FOR_nothing
       && size - 1 < BITS_PER_WORD
       && mul_cost[wider_mode] + shift_cost[mode][size-1] < max_cost)
     {
@@ -3405,7 +3382,7 @@ expand_mult_highpart_optab (enum machine_mode mode, rtx op0, rtx op1,
 
   /* Try widening multiplication of opposite signedness, and adjust.  */
   moptab = unsignedp ? smul_widen_optab : umul_widen_optab;
-  if (moptab->handlers[wider_mode].insn_code != CODE_FOR_nothing
+  if (optab_handler (moptab, wider_mode)->insn_code != CODE_FOR_nothing
       && size - 1 < BITS_PER_WORD
       && (mul_widen_cost[wider_mode] + 2 * shift_cost[mode][size-1]
 	  + 4 * add_cost[mode] < max_cost))
@@ -3447,6 +3424,7 @@ expand_mult_highpart (enum machine_mode mode, rtx op0, rtx op1,
   struct algorithm alg;
   rtx tem;
 
+  gcc_assert (!SCALAR_FLOAT_MODE_P (mode));
   /* We can't support modes wider than HOST_BITS_PER_INT.  */
   gcc_assert (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT);
 
@@ -3525,7 +3503,7 @@ expand_smod_pow2 (enum machine_mode mode, rtx op0, HOST_WIDE_INT d)
 	 use a LSHIFTRT, 1 ADD, 1 SUB and an AND.  */
 
       temp = gen_rtx_LSHIFTRT (mode, result, shift);
-      if (lshr_optab->handlers[mode].insn_code == CODE_FOR_nothing
+      if (optab_handler (lshr_optab, mode)->insn_code == CODE_FOR_nothing
 	  || rtx_cost (temp, SET) > COSTS_N_INSNS (2))
 	{
 	  temp = expand_binop (mode, xor_optab, op0, signmask,
@@ -3824,15 +3802,15 @@ expand_divmod (int rem_flag, enum tree_code code, enum machine_mode mode,
     for (compute_mode = mode; compute_mode != VOIDmode;
 	 compute_mode = GET_MODE_WIDER_MODE (compute_mode))
-      if (optab1->handlers[compute_mode].insn_code != CODE_FOR_nothing
-	  || optab2->handlers[compute_mode].insn_code != CODE_FOR_nothing)
+      if (optab_handler (optab1, compute_mode)->insn_code != CODE_FOR_nothing
+	  || optab_handler (optab2, compute_mode)->insn_code != CODE_FOR_nothing)
 	break;
 
   if (compute_mode == VOIDmode)
     for (compute_mode = mode; compute_mode != VOIDmode;
 	 compute_mode = GET_MODE_WIDER_MODE (compute_mode))
-      if (optab1->handlers[compute_mode].libfunc
-	  || optab2->handlers[compute_mode].libfunc)
+      if (optab_libfunc (optab1, compute_mode)
+	  || optab_libfunc (optab2, compute_mode))
 	break;
 
   /* If we still couldn't find a mode, use MODE, but expand_binop will
@@ -3856,11 +3834,10 @@ expand_divmod (int rem_flag, enum tree_code code, enum machine_mode mode,
   /* Only deduct something for a REM if the last divide done was
      for a different constant.   Then set the constant of the last
      divide.  */
-  max_cost = div_cost[compute_mode]
-    - (rem_flag && ! (last_div_const != 0 && op1_is_constant
-		      && INTVAL (op1) == last_div_const)
-       ? mul_cost[compute_mode] + add_cost[compute_mode]
-       : 0);
+  max_cost = unsignedp ? udiv_cost[compute_mode] : sdiv_cost[compute_mode];
+  if (rem_flag && ! (last_div_const != 0 && op1_is_constant
+		     && INTVAL (op1) == last_div_const))
+    max_cost -= mul_cost[compute_mode] + add_cost[compute_mode];
 
   last_div_const = ! rem_flag && op1_is_constant ? INTVAL (op1) : 0;
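
For reference, the expand_smod_pow2 path touched a little above computes a signed remainder by a power of two without a division: reduce the absolute value with an AND, then restore the sign. A branch-free C sketch of the technique (assumes two's complement and an arithmetic right shift of negative values, as GCC targets provide; 0 <= k < 31):

    #include <limits.h>

    /* x % (1 << k) with C (truncating) semantics.  */
    static int
    smod_pow2 (int x, int k)
    {
      unsigned int s = (unsigned int) (x >> (CHAR_BIT * sizeof (int) - 1));
      unsigned int ax = ((unsigned int) x ^ s) - s;  /* |x|, no overflow */
      unsigned int r = ax & ((1u << k) - 1);         /* |x| mod 2^k */

      return (int) ((r ^ s) - s);                    /* reattach the sign */
    }
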
@@ -4046,7 +4023,14 @@ expand_divmod (int rem_flag, enum tree_code code, enum machine_mode mode,
 		    int lgup, post_shift;
 		    rtx mlr;
 		    HOST_WIDE_INT d = INTVAL (op1);
-		    unsigned HOST_WIDE_INT abs_d = d >= 0 ? d : -d;
+		    unsigned HOST_WIDE_INT abs_d;
+
+		    /* Since d might be INT_MIN, we have to cast to
+		       unsigned HOST_WIDE_INT before negating to avoid
+		       undefined signed overflow.  */
+		    abs_d = (d >= 0
+			     ? (unsigned HOST_WIDE_INT) d
+			     : - (unsigned HOST_WIDE_INT) d);
 
 		    /* n rem d = n rem -d */
 		    if (rem_flag && d < 0)
@@ -4073,11 +4057,13 @@ expand_divmod (int rem_flag, enum tree_code code, enum machine_mode mode,
 			 : sdiv_pow2_cheap[compute_mode])
 			/* We assume that cheap metric is true if the
 			   optab has an expander for this mode.  */
-			&& (((rem_flag ? smod_optab : sdiv_optab)
-			     ->handlers[compute_mode].insn_code
+			&& ((optab_handler ((rem_flag ? smod_optab
+					     : sdiv_optab),
+					    compute_mode)->insn_code
 			     != CODE_FOR_nothing)
-			    || (sdivmod_optab->handlers[compute_mode]
-				.insn_code != CODE_FOR_nothing)))
+			    || (optab_handler (sdivmod_optab,
+					       compute_mode)
+				->insn_code != CODE_FOR_nothing)))
 		      ;
 		    else if (EXACT_POWER_OF_2_OR_ZERO_P (abs_d))
 		      {
@@ -4089,9 +4075,9 @@ expand_divmod (int rem_flag, enum tree_code code, enum machine_mode mode,
 			}
 
 		    if (sdiv_pow2_cheap[compute_mode]
-			&& ((sdiv_optab->handlers[compute_mode].insn_code
+			&& ((optab_handler (sdiv_optab, compute_mode)->insn_code
 			     != CODE_FOR_nothing)
-			    || (sdivmod_optab->handlers[compute_mode].insn_code
+			    || (optab_handler (sdivmod_optab, compute_mode)->insn_code
 			        != CODE_FOR_nothing)))
 		      quotient = expand_divmod (0, TRUNC_DIV_EXPR,
 						compute_mode, op0,
@@ -4739,7 +4725,7 @@ expand_divmod (int rem_flag, enum tree_code code, enum machine_mode mode,
 		  = sign_expand_binop (compute_mode, umod_optab, smod_optab,
 				       op0, op1, target,
 				       unsignedp,
-				       ((optab2->handlers[compute_mode].insn_code
+				       ((optab_handler (optab2, compute_mode)->insn_code
 					 != CODE_FOR_nothing)
 					? OPTAB_DIRECT : OPTAB_WIDEN));
 	      if (remainder == 0)
@@ -4767,7 +4753,7 @@ expand_divmod (int rem_flag, enum tree_code code, enum machine_mode mode,
 	      = sign_expand_binop (compute_mode, udiv_optab, sdiv_optab,
 				   op0, op1, rem_flag ? NULL_RTX : target,
 				   unsignedp,
-				   ((optab2->handlers[compute_mode].insn_code
+				   ((optab_handler (optab2, compute_mode)->insn_code
 				     != CODE_FOR_nothing)
 				    ? OPTAB_DIRECT : OPTAB_WIDEN));
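
The abs_d hunk above exists because negating INT_MIN in signed arithmetic overflows, which is undefined behavior in C; negating in unsigned arithmetic is fully defined and produces the intended magnitude. The same idiom in isolation (a plain-C sketch):

    #include <limits.h>

    /* |d| as an unsigned value, defined for every int including INT_MIN.  */
    static unsigned int
    safe_abs (int d)
    {
      return d >= 0 ? (unsigned int) d : - (unsigned int) d;
    }

    /* With 32-bit int, safe_abs (INT_MIN) == 2147483648u, with no
       undefined behavior anywhere.  */
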
@@ -4873,17 +4859,17 @@ make_tree (tree type, rtx x)
 
     case CONST_VECTOR:
       {
-	int i, units;
-	rtx elt;
+	int units = CONST_VECTOR_NUNITS (x);
+	tree itype = TREE_TYPE (type);
 	tree t = NULL_TREE;
+	int i;
 
-	units = CONST_VECTOR_NUNITS (x);
-
 	/* Build a tree with vector elements.  */
 	for (i = units - 1; i >= 0; --i)
 	  {
-	    elt = CONST_VECTOR_ELT (x, i);
-	    t = tree_cons (NULL_TREE, make_tree (type, elt), t);
+	    rtx elt = CONST_VECTOR_ELT (x, i);
+	    t = tree_cons (NULL_TREE, make_tree (itype, elt), t);
 	  }
 
 	return build_vector (type, t);
@@ -4909,20 +4895,20 @@ make_tree (tree type, rtx x)
 			  make_tree (type, XEXP (x, 1))));
 
     case LSHIFTRT:
-      t = lang_hooks.types.unsigned_type (type);
+      t = unsigned_type_for (type);
       return fold_convert (type, build2 (RSHIFT_EXPR, t,
 					 make_tree (t, XEXP (x, 0)),
 					 make_tree (type, XEXP (x, 1))));
 
     case ASHIFTRT:
-      t = lang_hooks.types.signed_type (type);
+      t = signed_type_for (type);
      return fold_convert (type, build2 (RSHIFT_EXPR, t,
 					 make_tree (t, XEXP (x, 0)),
 					 make_tree (type, XEXP (x, 1))));
 
    case DIV:
       if (TREE_CODE (type) != REAL_TYPE)
-	t = lang_hooks.types.signed_type (type);
+	t = signed_type_for (type);
       else
 	t = type;
 
@@ -4930,7 +4916,7 @@ make_tree (tree type, rtx x)
 			  make_tree (t, XEXP (x, 0)),
 			  make_tree (t, XEXP (x, 1))));
     case UDIV:
-      t = lang_hooks.types.unsigned_type (type);
+      t = unsigned_type_for (type);
       return fold_convert (type, build2 (TRUNC_DIV_EXPR, t,
 					 make_tree (t, XEXP (x, 0)),
 					 make_tree (t, XEXP (x, 1))));
@@ -4941,6 +4927,15 @@ make_tree (tree type, rtx x)
 					  GET_CODE (x) == ZERO_EXTEND);
       return fold_convert (type, make_tree (t, XEXP (x, 0)));
 
+    case CONST:
+      return make_tree (type, XEXP (x, 0));
+
+    case SYMBOL_REF:
+      t = SYMBOL_REF_DECL (x);
+      if (t)
+	return fold_convert (type, build_fold_addr_expr (t));
+      /* else fall through.  */
+
     default:
       t = build_decl (VAR_DECL, NULL_TREE, type);
 
@@ -4951,74 +4946,11 @@ make_tree (tree type, rtx x)
       /* Note that we do *not* use SET_DECL_RTL here, because we do not
 	 want set_decl_rtl to go adjusting REG_ATTRS for this temporary.  */
-      t->decl.rtl = x;
+      t->decl_with_rtl.rtl = x;
 
       return t;
     }
 }
-
-/* Check whether the multiplication X * MULT + ADD overflows.
-   X, MULT and ADD must be CONST_*.
-   MODE is the machine mode for the computation.
-   X and MULT must have mode MODE.  ADD may have a different mode.
-   So can X (defaults to same as MODE).
-   UNSIGNEDP is nonzero to do unsigned multiplication.  */

-bool
-const_mult_add_overflow_p (rtx x, rtx mult, rtx add,
-			   enum machine_mode mode, int unsignedp)
-{
-  tree type, mult_type, add_type, result;
-
-  type = lang_hooks.types.type_for_mode (mode, unsignedp);
-
-  /* In order to get a proper overflow indication from an unsigned
-     type, we have to pretend that it's a sizetype.  */
-  mult_type = type;
-  if (unsignedp)
-    {
-      /* FIXME:It would be nice if we could step directly from this
-	 type to its sizetype equivalent.  */
-      mult_type = build_distinct_type_copy (type);
-      TYPE_IS_SIZETYPE (mult_type) = 1;
-    }
-
-  add_type = (GET_MODE (add) == VOIDmode ? mult_type
-	      : lang_hooks.types.type_for_mode (GET_MODE (add), unsignedp));
-
-  result = fold_build2 (PLUS_EXPR, mult_type,
-			fold_build2 (MULT_EXPR, mult_type,
-				     make_tree (mult_type, x),
-				     make_tree (mult_type, mult)),
-			make_tree (add_type, add));
-
-  return TREE_CONSTANT_OVERFLOW (result);
-}
-
-/* Return an rtx representing the value of X * MULT + ADD.
-   TARGET is a suggestion for where to store the result (an rtx).
-   MODE is the machine mode for the computation.
-   X and MULT must have mode MODE.  ADD may have a different mode.
-   So can X (defaults to same as MODE).
-   UNSIGNEDP is nonzero to do unsigned multiplication.
-   This may emit insns.  */
-
-rtx
-expand_mult_add (rtx x, rtx target, rtx mult, rtx add, enum machine_mode mode,
-		 int unsignedp)
-{
-  tree type = lang_hooks.types.type_for_mode (mode, unsignedp);
-  tree add_type = (GET_MODE (add) == VOIDmode
-		   ? type: lang_hooks.types.type_for_mode (GET_MODE (add),
-							   unsignedp));
-  tree result = fold_build2 (PLUS_EXPR, type,
-			     fold_build2 (MULT_EXPR, type,
-					  make_tree (type, x),
-					  make_tree (type, mult)),
-			     make_tree (add_type, add));
-
-  return expand_expr (result, target, VOIDmode, 0);
-}
 
 /* Compute the logical-and of OP0 and OP1, storing it in TARGET
    and returning TARGET.
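
In make_tree above, a single RSHIFT_EXPR serves for both right shifts; the signedness of the type picked by unsigned_type_for or signed_type_for selects logical versus arithmetic behavior. The same distinction expressed directly in C (right-shifting a negative signed value is implementation-defined in the standard, but arithmetic on GCC targets):

    /* LSHIFTRT zero-fills; ASHIFTRT copies the sign bit.  The operand
       type, not the operator, makes the difference.  */
    static void
    right_shifts (int x, int *arith, int *logical)
    {
      *arith = x >> 3;                           /* ASHIFTRT */
      *logical = (int) ((unsigned int) x >> 3);  /* LSHIFTRT */
    }
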
@@ -5042,6 +4974,77 @@ expand_and (enum machine_mode mode, rtx op0, rtx op1, rtx target)
   return target;
 }
 
+/* Helper function for emit_store_flag.  */
+static rtx
+emit_store_flag_1 (rtx target, rtx subtarget, enum machine_mode mode,
+		   int normalizep)
+{
+  rtx op0;
+  enum machine_mode target_mode = GET_MODE (target);
+
+  /* If we are converting to a wider mode, first convert to
+     TARGET_MODE, then normalize.  This produces better combining
+     opportunities on machines that have a SIGN_EXTRACT when we are
+     testing a single bit.  This mostly benefits the 68k.
+
+     If STORE_FLAG_VALUE does not have the sign bit set when
+     interpreted in MODE, we can do this conversion as unsigned, which
+     is usually more efficient.  */
+  if (GET_MODE_SIZE (target_mode) > GET_MODE_SIZE (mode))
+    {
+      convert_move (target, subtarget,
+		    (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT)
+		    && 0 == (STORE_FLAG_VALUE
+			     & ((HOST_WIDE_INT) 1
+				<< (GET_MODE_BITSIZE (mode) -1))));
+      op0 = target;
+      mode = target_mode;
+    }
+  else
+    op0 = subtarget;
+
+  /* If we want to keep subexpressions around, don't reuse our last
+     target.  */
+  if (optimize)
+    subtarget = 0;
+
+  /* Now normalize to the proper value in MODE.  Sometimes we don't
+     have to do anything.  */
+  if (normalizep == 0 || normalizep == STORE_FLAG_VALUE)
+    ;
+  /* STORE_FLAG_VALUE might be the most negative number, so write
+     the comparison this way to avoid a compiler-time warning.  */
+  else if (- normalizep == STORE_FLAG_VALUE)
+    op0 = expand_unop (mode, neg_optab, op0, subtarget, 0);
+
+  /* We don't want to use STORE_FLAG_VALUE < 0 below since this makes
+     it hard to use a value of just the sign bit due to ANSI integer
+     constant typing rules.  */
+  else if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
+	   && (STORE_FLAG_VALUE
+	       & ((HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (mode) - 1))))
+    op0 = expand_shift (RSHIFT_EXPR, mode, op0,
+			size_int (GET_MODE_BITSIZE (mode) - 1), subtarget,
+			normalizep == 1);
+  else
+    {
+      gcc_assert (STORE_FLAG_VALUE & 1);
+
+      op0 = expand_and (mode, op0, const1_rtx, subtarget);
+      if (normalizep == -1)
+	op0 = expand_unop (mode, neg_optab, op0, op0, 0);
+    }
+
+  /* If we were converting to a smaller mode, do the conversion now.  */
+  if (target_mode != mode)
+    {
+      convert_move (target, op0, 0);
+      return target;
+    }
+  else
+    return op0;
+}
+
 /* Emit a store-flags instruction for comparison CODE on OP0 and OP1
    and storing in TARGET.  Normally return TARGET.
    Return 0 if that cannot be done.
@@ -5131,12 +5134,14 @@ emit_store_flag (rtx target, enum rtx_code code, rtx op0, rtx op1,
 	{
 	  rtx op00, op01, op0both;
 
-	  /* Do a logical OR or AND of the two words and compare the result.  */
+	  /* Do a logical OR or AND of the two words and compare the
+	     result.  */
 	  op00 = simplify_gen_subreg (word_mode, op0, mode, 0);
 	  op01 = simplify_gen_subreg (word_mode, op0, mode, UNITS_PER_WORD);
 	  op0both = expand_binop (word_mode,
 				  op1 == const0_rtx ? ior_optab : and_optab,
-				  op00, op01, NULL_RTX, unsignedp, OPTAB_DIRECT);
+				  op00, op01, NULL_RTX, unsignedp,
+				  OPTAB_DIRECT);
 
 	  if (op0both != 0)
 	    return emit_store_flag (target, code, op0both, op1, word_mode,
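
The word-mode trick in the hunk above, OR the two halves to test a double-word value against zero (AND them to test against all-ones), looks like this in portable C:

    #include <stdint.h>

    /* One OR plus one compare replaces a full double-word comparison
       against zero.  */
    static int
    is_zero_2words (uint32_t lo, uint32_t hi)
    {
      return (lo | hi) == 0;
    }
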
@@ -5148,15 +5153,13 @@ emit_store_flag (rtx target, enum rtx_code code, rtx op0, rtx op1,
 
 	  /* If testing the sign bit, can just test on high word.  */
 	  op0h = simplify_gen_subreg (word_mode, op0, mode,
-				      subreg_highpart_offset (word_mode, mode));
+				      subreg_highpart_offset (word_mode,
+							      mode));
 	  return emit_store_flag (target, code, op0h, op1, word_mode,
 				  unsignedp, normalizep);
 	}
     }
 
-  /* From now on, we won't change CODE, so set ICODE now.  */
-  icode = setcc_gen_code[(int) code];
-
   /* If this is A < 0 or A >= 0, we can do this by taking the ones
      complement of A (for GE) and shifting the sign bit to the low bit.  */
   if (op1 == const0_rtx && (code == LT || code == GE)
@@ -5164,7 +5167,8 @@ emit_store_flag (rtx target, enum rtx_code code, rtx op0, rtx op1,
       && (normalizep || STORE_FLAG_VALUE == 1
 	  || (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
 	      && ((STORE_FLAG_VALUE & GET_MODE_MASK (mode))
-		  == (unsigned HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (mode) - 1)))))
+		  == ((unsigned HOST_WIDE_INT) 1
+		      << (GET_MODE_BITSIZE (mode) - 1))))))
     {
       subtarget = target;
 
@@ -5199,6 +5203,8 @@ emit_store_flag (rtx target, enum rtx_code code, rtx op0, rtx op1,
       return op0;
     }
 
+  icode = setcc_gen_code[(int) code];
+
   if (icode != CODE_FOR_nothing)
     {
       insn_operand_predicate_fn pred;
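
The "A < 0 or A >= 0" shortcut referenced above needs no comparison instruction at all: shift the sign bit down to bit 0, taking the ones' complement first for the >= 0 case. In C, assuming 32-bit int:

    /* Store-flag values for x < 0 and x >= 0 via the sign bit.  */
    static int
    is_negative (int x)
    {
      return (int) ((unsigned int) x >> 31);
    }

    static int
    is_nonnegative (int x)
    {
      return (int) (~(unsigned int) x >> 31);  /* complement, then shift */
    }
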
@@ -5256,72 +5262,65 @@ emit_store_flag (rtx target, enum rtx_code code, rtx op0, rtx op1,
       if (pattern)
 	{
 	  emit_insn (pattern);
+	  return emit_store_flag_1 (target, subtarget, compare_mode,
+				    normalizep);
+	}
+    }
+  else
+    {
+      /* We don't have an scc insn, so try a cstore insn.  */
 
-	  /* If we are converting to a wider mode, first convert to
-	     TARGET_MODE, then normalize.  This produces better combining
-	     opportunities on machines that have a SIGN_EXTRACT when we are
-	     testing a single bit.  This mostly benefits the 68k.
+      for (compare_mode = mode; compare_mode != VOIDmode;
+	   compare_mode = GET_MODE_WIDER_MODE (compare_mode))
+	{
+	  icode = optab_handler (cstore_optab, compare_mode)->insn_code;
+	  if (icode != CODE_FOR_nothing)
+	    break;
+	}
 
-	     If STORE_FLAG_VALUE does not have the sign bit set when
-	     interpreted in COMPARE_MODE, we can do this conversion as
-	     unsigned, which is usually more efficient.  */
-	  if (GET_MODE_SIZE (target_mode) > GET_MODE_SIZE (compare_mode))
+      if (icode != CODE_FOR_nothing)
+	{
+	  enum machine_mode result_mode
+	    = insn_data[(int) icode].operand[0].mode;
+	  rtx cstore_op0 = op0;
+	  rtx cstore_op1 = op1;
+
+	  do_pending_stack_adjust ();
+	  last = get_last_insn ();
+
+	  if (compare_mode != mode)
 	    {
-	      convert_move (target, subtarget,
-			    (GET_MODE_BITSIZE (compare_mode)
-			     <= HOST_BITS_PER_WIDE_INT)
-			    && 0 == (STORE_FLAG_VALUE
-				     & ((HOST_WIDE_INT) 1
-					<< (GET_MODE_BITSIZE (compare_mode) -1))));
-	      op0 = target;
-	      compare_mode = target_mode;
+	      cstore_op0 = convert_modes (compare_mode, mode, cstore_op0,
+					  unsignedp);
+	      cstore_op1 = convert_modes (compare_mode, mode, cstore_op1,
+					  unsignedp);
 	    }
-	  else
-	    op0 = subtarget;
+
+	  if (!insn_data[(int) icode].operand[2].predicate (cstore_op0,
+							    compare_mode))
+	    cstore_op0 = copy_to_mode_reg (compare_mode, cstore_op0);
 
-	  /* If we want to keep subexpressions around, don't reuse our
-	     last target.  */
+	  if (!insn_data[(int) icode].operand[3].predicate (cstore_op1,
+							    compare_mode))
+	    cstore_op1 = copy_to_mode_reg (compare_mode, cstore_op1);
 
-	  if (optimize)
-	    subtarget = 0;
+	  comparison = gen_rtx_fmt_ee (code, result_mode, cstore_op0,
+				       cstore_op1);
+	  subtarget = target;
 
-	  /* Now normalize to the proper value in COMPARE_MODE.  Sometimes
-	     we don't have to do anything.  */
-	  if (normalizep == 0 || normalizep == STORE_FLAG_VALUE)
-	    ;
-	  /* STORE_FLAG_VALUE might be the most negative number, so write
-	     the comparison this way to avoid a compiler-time warning.  */
-	  else if (- normalizep == STORE_FLAG_VALUE)
-	    op0 = expand_unop (compare_mode, neg_optab, op0, subtarget, 0);
-
-	  /* We don't want to use STORE_FLAG_VALUE < 0 below since this
-	     makes it hard to use a value of just the sign bit due to
-	     ANSI integer constant typing rules.  */
-	  else if (GET_MODE_BITSIZE (compare_mode) <= HOST_BITS_PER_WIDE_INT
-		   && (STORE_FLAG_VALUE
-		       & ((HOST_WIDE_INT) 1
-			  << (GET_MODE_BITSIZE (compare_mode) - 1))))
-	    op0 = expand_shift (RSHIFT_EXPR, compare_mode, op0,
-				size_int (GET_MODE_BITSIZE (compare_mode) - 1),
-				subtarget, normalizep == 1);
-	  else
-	    {
-	      gcc_assert (STORE_FLAG_VALUE & 1);
-
-	      op0 = expand_and (compare_mode, op0, const1_rtx, subtarget);
-	      if (normalizep == -1)
-		op0 = expand_unop (compare_mode, neg_optab, op0, op0, 0);
-	    }
+	  if (optimize || !(insn_data[(int) icode].operand[0].predicate
+			    (subtarget, result_mode)))
+	    subtarget = gen_reg_rtx (result_mode);
 
-	  /* If we were converting to a smaller mode, do the
-	     conversion now.  */
-	  if (target_mode != compare_mode)
+	  pattern = GEN_FCN (icode) (subtarget, comparison, cstore_op0,
+				     cstore_op1);
+
+	  if (pattern)
 	    {
-	      convert_move (target, op0, 0);
-	      return target;
+	      emit_insn (pattern);
+	      return emit_store_flag_1 (target, subtarget, result_mode,
+					normalizep);
 	    }
-	  else
-	    return op0;
 	}
     }
@@ -5438,9 +5437,9 @@ emit_store_flag (rtx target, enum rtx_code code, rtx op0, rtx op1,
 	 that is compensated by the subsequent overflow when subtracting
 	 one / negating.  */
 
-      if (abs_optab->handlers[mode].insn_code != CODE_FOR_nothing)
+      if (optab_handler (abs_optab, mode)->insn_code != CODE_FOR_nothing)
 	tem = expand_unop (mode, abs_optab, op0, subtarget, 1);
-      else if (ffs_optab->handlers[mode].insn_code != CODE_FOR_nothing)
+      else if (optab_handler (ffs_optab, mode)->insn_code != CODE_FOR_nothing)
 	tem = expand_unop (mode, ffs_optab, op0, subtarget, 1);
       else if (GET_MODE_SIZE (mode) < UNITS_PER_WORD)
 	{
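
The abs-based sequence above turns "x == 0" into arithmetic: abs (x) - 1 has its sign bit set only for x == 0, and the INT_MIN case where abs overflows is repaired by the wrap of the subtraction, as the retained comment notes. A C sketch of my reading of the trick (assumes 32-bit two's complement int):

    /* Branch-free x == 0 via abs (x) - 1.  */
    static int
    is_zero (int x)
    {
      unsigned int a = x < 0 ? - (unsigned int) x : (unsigned int) x;

      return (int) ((a - 1) >> 31);  /* sign bit set only when x == 0 */
    }
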
@@ -5534,66 +5533,14 @@ emit_store_flag_force (rtx target, enum rtx_code code, rtx op0, rtx op1,
 }
 
 /* Perform possibly multi-word comparison and conditional jump to LABEL
-   if ARG1 OP ARG2 true where ARG1 and ARG2 are of mode MODE
-
-   The algorithm is based on the code in expr.c:do_jump.
-
-   Note that this does not perform a general comparison.  Only
-   variants generated within expmed.c are correctly handled, others
-   could be handled if needed.  */
+   if ARG1 OP ARG2 true where ARG1 and ARG2 are of mode MODE.  This is
+   now a thin wrapper around do_compare_rtx_and_jump.  */
 
 static void
 do_cmp_and_jump (rtx arg1, rtx arg2, enum rtx_code op, enum machine_mode mode,
 		 rtx label)
 {
-  /* If this mode is an integer too wide to compare properly,
-     compare word by word.  Rely on cse to optimize constant cases.  */
-
-  if (GET_MODE_CLASS (mode) == MODE_INT
-      && ! can_compare_p (op, mode, ccp_jump))
-    {
-      rtx label2 = gen_label_rtx ();
-
-      switch (op)
-	{
-	case LTU:
-	  do_jump_by_parts_greater_rtx (mode, 1, arg2, arg1, label2, label);
-	  break;
-
-	case LEU:
-	  do_jump_by_parts_greater_rtx (mode, 1, arg1, arg2, label, label2);
-	  break;
-
-	case LT:
-	  do_jump_by_parts_greater_rtx (mode, 0, arg2, arg1, label2, label);
-	  break;
-
-	case GT:
-	  do_jump_by_parts_greater_rtx (mode, 0, arg1, arg2, label2, label);
-	  break;
-
-	case GE:
-	  do_jump_by_parts_greater_rtx (mode, 0, arg2, arg1, label, label2);
-	  break;
-
-	  /* do_jump_by_parts_equality_rtx compares with zero.  Luckily
-	     that's the only equality operation we do.  */
-	case EQ:
-	  gcc_assert (arg2 == const0_rtx && mode == GET_MODE (arg1));
-	  do_jump_by_parts_equality_rtx (arg1, label2, label);
-	  break;
-
-	case NE:
-	  gcc_assert (arg2 == const0_rtx && mode == GET_MODE (arg1));
-	  do_jump_by_parts_equality_rtx (arg1, label, label2);
-	  break;
-
-	default:
-	  gcc_unreachable ();
-	}
-
-      emit_label (label2);
-    }
-  else
-    emit_cmp_and_jump_insns (arg1, arg2, op, NULL_RTX, mode, 0, label);
+  int unsignedp = (op == LTU || op == LEU || op == GTU || op == GEU);
+  do_compare_rtx_and_jump (arg1, arg2, op, unsignedp, mode,
+			   NULL_RTX, NULL_RTX, label);
 }
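
The deleted code delegated its real work to do_jump_by_parts_greater_rtx; the underlying technique, decide on the high words and fall back to the low words only on a tie, is easy to state in C:

    #include <stdint.h>

    /* Unsigned 128-bit less-than composed from 64-bit words, the
       comparison shape the removed code emitted as branches.  */
    static int
    less_than_u128 (const uint64_t a[2], const uint64_t b[2])
    {
      if (a[1] != b[1])
        return a[1] < b[1];  /* high words decide */
      return a[0] < b[0];    /* tie: low words decide */
    }
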