X-Git-Url: http://git.sourceforge.jp/view?a=blobdiff_plain;f=gcc%2Fcombine.c;h=ad9aa38987192225ddcb54138dd8d60b3a02272f;hb=ba2f8f6bb45dad549b9ba03546ff5d1f2ff4bad7;hp=afc56b1d847388cbd6cd721fa2d1958dcddd5d49;hpb=b537bfdbb09508753dd3395a645c7900fc995c84;p=pf3gnuchains%2Fgcc-fork.git

diff --git a/gcc/combine.c b/gcc/combine.c
index afc56b1d847..ad9aa389871 100644
--- a/gcc/combine.c
+++ b/gcc/combine.c
@@ -1560,7 +1560,7 @@ set_nonzero_bits_and_sign_copies (rtx x, const_rtx set, void *data)
 	     say what its contents were.  */
 	  && ! REGNO_REG_SET_P (DF_LR_IN (ENTRY_BLOCK_PTR->next_bb),
 				REGNO (x))
-	  && GET_MODE_BITSIZE (GET_MODE (x)) <= HOST_BITS_PER_WIDE_INT)
+	  && HWI_COMPUTABLE_MODE_P (GET_MODE (x)))
 	{
 	  reg_stat_type *rsp = VEC_index (reg_stat_type, reg_stat, REGNO (x));
 
@@ -2758,14 +2758,14 @@ try_combine (rtx i3, rtx i2, rtx i1, rtx i0, int *new_direct_jump_p,
 	      offset = INTVAL (XEXP (dest, 2));
 	      dest = XEXP (dest, 0);
 	      if (BITS_BIG_ENDIAN)
-		offset = GET_MODE_BITSIZE (GET_MODE (dest)) - width - offset;
+		offset = GET_MODE_PRECISION (GET_MODE (dest)) - width - offset;
 	    }
 	}
       else
 	{
 	  if (GET_CODE (dest) == STRICT_LOW_PART)
 	    dest = XEXP (dest, 0);
-	  width = GET_MODE_BITSIZE (GET_MODE (dest));
+	  width = GET_MODE_PRECISION (GET_MODE (dest));
 	  offset = 0;
 	}
 
@@ -2775,16 +2775,16 @@ try_combine (rtx i3, rtx i2, rtx i1, rtx i0, int *new_direct_jump_p,
 	  if (subreg_lowpart_p (dest))
 	    ;
 	  /* Handle the case where inner is twice the size of outer.  */
-	  else if (GET_MODE_BITSIZE (GET_MODE (SET_DEST (temp)))
-		   == 2 * GET_MODE_BITSIZE (GET_MODE (dest)))
-	    offset += GET_MODE_BITSIZE (GET_MODE (dest));
+	  else if (GET_MODE_PRECISION (GET_MODE (SET_DEST (temp)))
+		   == 2 * GET_MODE_PRECISION (GET_MODE (dest)))
+	    offset += GET_MODE_PRECISION (GET_MODE (dest));
 	  /* Otherwise give up for now.  */
 	  else
 	    offset = -1;
 	}
 
       if (offset >= 0
-	  && (GET_MODE_BITSIZE (GET_MODE (SET_DEST (temp)))
+	  && (GET_MODE_PRECISION (GET_MODE (SET_DEST (temp)))
 	      <= HOST_BITS_PER_DOUBLE_INT))
 	{
 	  double_int m, o, i;
@@ -3745,8 +3745,8 @@ try_combine (rtx i3, rtx i2, rtx i1, rtx i0, int *new_direct_jump_p,
 	      (REG_P (temp)
 	       && VEC_index (reg_stat_type, reg_stat,
 			     REGNO (temp))->nonzero_bits != 0
-	       && GET_MODE_BITSIZE (GET_MODE (temp)) < BITS_PER_WORD
-	       && GET_MODE_BITSIZE (GET_MODE (temp)) < HOST_BITS_PER_INT
+	       && GET_MODE_PRECISION (GET_MODE (temp)) < BITS_PER_WORD
+	       && GET_MODE_PRECISION (GET_MODE (temp)) < HOST_BITS_PER_INT
 	       && (VEC_index (reg_stat_type, reg_stat,
 			      REGNO (temp))->nonzero_bits
 		   != GET_MODE_MASK (word_mode)))
@@ -3755,8 +3755,8 @@ try_combine (rtx i3, rtx i2, rtx i1, rtx i0, int *new_direct_jump_p,
 	      (REG_P (temp)
 	       && VEC_index (reg_stat_type, reg_stat,
 			     REGNO (temp))->nonzero_bits != 0
-	       && GET_MODE_BITSIZE (GET_MODE (temp)) < BITS_PER_WORD
-	       && GET_MODE_BITSIZE (GET_MODE (temp)) < HOST_BITS_PER_INT
+	       && GET_MODE_PRECISION (GET_MODE (temp)) < BITS_PER_WORD
+	       && GET_MODE_PRECISION (GET_MODE (temp)) < HOST_BITS_PER_INT
 	       && (VEC_index (reg_stat_type, reg_stat,
 			      REGNO (temp))->nonzero_bits
 		   != GET_MODE_MASK (word_mode)))))
@@ -4679,14 +4679,13 @@ find_split_point (rtx *loc, rtx insn, bool set_src)
 	  /* See if this is a bitfield assignment with everything constant.
 	     If so, this is an IOR of an AND, so split it into that.  */
 	  if (GET_CODE (SET_DEST (x)) == ZERO_EXTRACT
-	      && (GET_MODE_BITSIZE (GET_MODE (XEXP (SET_DEST (x), 0)))
-		  <= HOST_BITS_PER_WIDE_INT)
+	      && HWI_COMPUTABLE_MODE_P (GET_MODE (XEXP (SET_DEST (x), 0)))
 	      && CONST_INT_P (XEXP (SET_DEST (x), 1))
 	      && CONST_INT_P (XEXP (SET_DEST (x), 2))
 	      && CONST_INT_P (SET_SRC (x))
 	      && ((INTVAL (XEXP (SET_DEST (x), 1))
 		   + INTVAL (XEXP (SET_DEST (x), 2)))
-		  <= GET_MODE_BITSIZE (GET_MODE (XEXP (SET_DEST (x), 0))))
+		  <= GET_MODE_PRECISION (GET_MODE (XEXP (SET_DEST (x), 0))))
 	      && ! side_effects_p (XEXP (SET_DEST (x), 0)))
 	    {
 	      HOST_WIDE_INT pos = INTVAL (XEXP (SET_DEST (x), 2));
@@ -4699,7 +4698,7 @@ find_split_point (rtx *loc, rtx insn, bool set_src)
 	      rtx or_mask;
 
 	      if (BITS_BIG_ENDIAN)
-		pos = GET_MODE_BITSIZE (mode) - len - pos;
+		pos = GET_MODE_PRECISION (mode) - len - pos;
 
 	      or_mask = gen_int_mode (src << pos, mode);
 	      if (src == mask)
@@ -4792,7 +4791,7 @@ find_split_point (rtx *loc, rtx insn, bool set_src)
 	    break;
 
 	  pos = 0;
-	  len = GET_MODE_BITSIZE (GET_MODE (inner));
+	  len = GET_MODE_PRECISION (GET_MODE (inner));
 	  unsignedp = 0;
 	  break;
 
@@ -4806,7 +4805,7 @@ find_split_point (rtx *loc, rtx insn, bool set_src)
 	      pos = INTVAL (XEXP (SET_SRC (x), 2));
 
 	      if (BITS_BIG_ENDIAN)
-		pos = GET_MODE_BITSIZE (GET_MODE (inner)) - len - pos;
+		pos = GET_MODE_PRECISION (GET_MODE (inner)) - len - pos;
 	      unsignedp = (code == ZERO_EXTRACT);
 	    }
 	  break;
@@ -4815,7 +4814,8 @@ find_split_point (rtx *loc, rtx insn, bool set_src)
 	  break;
 	}
 
-      if (len && pos >= 0 && pos + len <= GET_MODE_BITSIZE (GET_MODE (inner)))
+      if (len && pos >= 0
+	  && pos + len <= GET_MODE_PRECISION (GET_MODE (inner)))
 	{
 	  enum machine_mode mode = GET_MODE (SET_SRC (x));
@@ -4846,9 +4846,9 @@ find_split_point (rtx *loc, rtx insn, bool set_src)
 		 (unsignedp ? LSHIFTRT : ASHIFTRT, mode,
 		  gen_rtx_ASHIFT (mode,
 				  gen_lowpart (mode, inner),
-				  GEN_INT (GET_MODE_BITSIZE (mode)
+				  GEN_INT (GET_MODE_PRECISION (mode)
 					   - len - pos)),
-		  GEN_INT (GET_MODE_BITSIZE (mode) - len)));
+		  GEN_INT (GET_MODE_PRECISION (mode) - len)));
 
 	  split = find_split_point (&SET_SRC (x), insn, true);
 	  if (split && split != &SET_SRC (x))
@@ -5545,7 +5545,7 @@ combine_simplify_rtx (rtx x, enum machine_mode op0_mode, int in_dest,
 
 	  if (GET_CODE (temp) == ASHIFTRT
 	      && CONST_INT_P (XEXP (temp, 1))
-	      && INTVAL (XEXP (temp, 1)) == GET_MODE_BITSIZE (mode) - 1)
+	      && INTVAL (XEXP (temp, 1)) == GET_MODE_PRECISION (mode) - 1)
 	    return simplify_shift_const (NULL_RTX, LSHIFTRT, mode,
 					 XEXP (temp, 0),
 					 INTVAL (XEXP (temp, 1)));
@@ -5564,8 +5564,8 @@ combine_simplify_rtx (rtx x, enum machine_mode op0_mode, int in_dest,
 	      rtx temp1 = simplify_shift_const
 		(NULL_RTX, ASHIFTRT, mode,
 		 simplify_shift_const (NULL_RTX, ASHIFT, mode, temp,
-				       GET_MODE_BITSIZE (mode) - 1 - i),
-		 GET_MODE_BITSIZE (mode) - 1 - i);
+				       GET_MODE_PRECISION (mode) - 1 - i),
+		 GET_MODE_PRECISION (mode) - 1 - i);
 
 	      /* If all we did was surround TEMP with the two shifts, we
 		 haven't improved anything, so don't use it.  Otherwise,
@@ -5584,7 +5584,7 @@ combine_simplify_rtx (rtx x, enum machine_mode op0_mode, int in_dest,
       if (GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
 	break;
 
-      if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT)
+      if (HWI_COMPUTABLE_MODE_P (mode))
 	SUBST (XEXP (x, 0),
 	       force_to_mode (XEXP (x, 0), GET_MODE (XEXP (x, 0)),
 			      GET_MODE_MASK (mode), 0));
@@ -5596,7 +5596,7 @@ combine_simplify_rtx (rtx x, enum machine_mode op0_mode, int in_dest,
       /* Similarly to what we do in simplify-rtx.c, a truncate of a register
 	 whose value is a comparison can be replaced with a subreg if
 	 STORE_FLAG_VALUE permits.  */
-      if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
+      if (HWI_COMPUTABLE_MODE_P (mode)
 	  && (STORE_FLAG_VALUE & ~GET_MODE_MASK (mode)) == 0
 	  && (temp = get_last_value (XEXP (x, 0)))
 	  && COMPARISON_P (temp))
@@ -5634,20 +5634,20 @@ combine_simplify_rtx (rtx x, enum machine_mode op0_mode, int in_dest,
 	  && INTVAL (XEXP (x, 1)) == -INTVAL (XEXP (XEXP (x, 0), 1))
 	  && ((i = exact_log2 (UINTVAL (XEXP (XEXP (x, 0), 1)))) >= 0
 	      || (i = exact_log2 (UINTVAL (XEXP (x, 1)))) >= 0)
-	  && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
+	  && HWI_COMPUTABLE_MODE_P (mode)
 	  && ((GET_CODE (XEXP (XEXP (x, 0), 0)) == AND
 	       && CONST_INT_P (XEXP (XEXP (XEXP (x, 0), 0), 1))
 	       && (UINTVAL (XEXP (XEXP (XEXP (x, 0), 0), 1))
 		   == ((unsigned HOST_WIDE_INT) 1 << (i + 1)) - 1))
 	      || (GET_CODE (XEXP (XEXP (x, 0), 0)) == ZERO_EXTEND
-		  && (GET_MODE_BITSIZE (GET_MODE (XEXP (XEXP (XEXP (x, 0), 0), 0)))
+		  && (GET_MODE_PRECISION (GET_MODE (XEXP (XEXP (XEXP (x, 0), 0), 0)))
 		      == (unsigned int) i + 1))))
 	return simplify_shift_const
 	  (NULL_RTX, ASHIFTRT, mode,
 	   simplify_shift_const (NULL_RTX, ASHIFT, mode,
 				 XEXP (XEXP (XEXP (x, 0), 0), 0),
-				 GET_MODE_BITSIZE (mode) - (i + 1)),
-	   GET_MODE_BITSIZE (mode) - (i + 1));
+				 GET_MODE_PRECISION (mode) - (i + 1)),
+	   GET_MODE_PRECISION (mode) - (i + 1));
 
       /* If only the low-order bit of X is possibly nonzero, (plus x -1)
 	 can become (ashiftrt (ashift (xor x 1) C) C) where C is
@@ -5661,15 +5661,15 @@ combine_simplify_rtx (rtx x, enum machine_mode op0_mode, int in_dest,
 	return simplify_shift_const
 	  (NULL_RTX, ASHIFTRT, mode,
 	   simplify_shift_const (NULL_RTX, ASHIFT, mode,
 				 gen_rtx_XOR (mode, XEXP (x, 0), const1_rtx),
-				 GET_MODE_BITSIZE (mode) - 1),
-	   GET_MODE_BITSIZE (mode) - 1);
+				 GET_MODE_PRECISION (mode) - 1),
+	   GET_MODE_PRECISION (mode) - 1);
 
       /* If we are adding two things that have no bits in common, convert
 	 the addition into an IOR.  This will often be further simplified,
 	 for example in cases like ((a & 1) + (a & 2)), which can
 	 become a & 3.  */
-      if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
+      if (HWI_COMPUTABLE_MODE_P (mode)
 	  && (nonzero_bits (XEXP (x, 0), mode)
 	      & nonzero_bits (XEXP (x, 1), mode)) == 0)
 	{
@@ -5794,7 +5794,7 @@ combine_simplify_rtx (rtx x, enum machine_mode op0_mode, int in_dest,
 	  && op1 == const0_rtx
 	  && mode == GET_MODE (op0)
 	  && (num_sign_bit_copies (op0, mode)
-	      == GET_MODE_BITSIZE (mode)))
+	      == GET_MODE_PRECISION (mode)))
 	{
 	  op0 = expand_compound_operation (op0);
 	  return simplify_gen_unary (NEG, mode,
@@ -5819,7 +5819,7 @@ combine_simplify_rtx (rtx x, enum machine_mode op0_mode, int in_dest,
 	  && op1 == const0_rtx
 	  && mode == GET_MODE (op0)
 	  && (num_sign_bit_copies (op0, mode)
-	      == GET_MODE_BITSIZE (mode)))
+	      == GET_MODE_PRECISION (mode)))
 	{
 	  op0 = expand_compound_operation (op0);
 	  return plus_constant (gen_lowpart (mode, op0), 1);
@@ -5834,7 +5834,7 @@ combine_simplify_rtx (rtx x, enum machine_mode op0_mode, int in_dest,
 	  && new_code == NE && GET_MODE_CLASS (mode) == MODE_INT
 	  && op1 == const0_rtx
 	  && (num_sign_bit_copies (op0, mode)
-	      == GET_MODE_BITSIZE (mode)))
+	      == GET_MODE_PRECISION (mode)))
 	return gen_lowpart (mode,
 			    expand_compound_operation (op0));
@@ -5855,7 +5855,7 @@ combine_simplify_rtx (rtx x, enum machine_mode op0_mode, int in_dest,
 	  && op1 == const0_rtx
 	  && mode == GET_MODE (op0)
 	  && (num_sign_bit_copies (op0, mode)
-	      == GET_MODE_BITSIZE (mode)))
+	      == GET_MODE_PRECISION (mode)))
 	{
 	  op0 = expand_compound_operation (op0);
 	  return simplify_gen_unary (NOT, mode,
@@ -5880,7 +5880,7 @@ combine_simplify_rtx (rtx x, enum machine_mode op0_mode, int in_dest,
 	 AND with STORE_FLAG_VALUE when we are done, since we are only
 	 going to test the sign bit.  */
       if (new_code == NE && GET_MODE_CLASS (mode) == MODE_INT
-	  && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
+	  && HWI_COMPUTABLE_MODE_P (mode)
 	  && val_signbit_p (mode, STORE_FLAG_VALUE)
 	  && op1 == const0_rtx
 	  && mode == GET_MODE (op0)
@@ -5888,7 +5888,7 @@ combine_simplify_rtx (rtx x, enum machine_mode op0_mode, int in_dest,
 	{
 	  x = simplify_shift_const (NULL_RTX, ASHIFT, mode,
 				    expand_compound_operation (op0),
-				    GET_MODE_BITSIZE (mode) - 1 - i);
+				    GET_MODE_PRECISION (mode) - 1 - i);
 	  if (GET_CODE (x) == AND && XEXP (x, 1) == const_true_rtx)
 	    return XEXP (x, 0);
 	  else
@@ -5939,7 +5939,9 @@ combine_simplify_rtx (rtx x, enum machine_mode op0_mode, int in_dest,
       else if (SHIFT_COUNT_TRUNCATED && !REG_P (XEXP (x, 1)))
 	SUBST (XEXP (x, 1),
 	       force_to_mode (XEXP (x, 1), GET_MODE (XEXP (x, 1)),
-			      targetm.shift_truncation_mask (GET_MODE (x)),
+			      ((unsigned HOST_WIDE_INT) 1
+			       << exact_log2 (GET_MODE_BITSIZE (GET_MODE (x))))
+			      - 1,
 			      0));
       break;
@@ -6012,7 +6014,7 @@ simplify_if_then_else (rtx x)
 	}
       else if (true_code == EQ && true_val == const0_rtx
 	       && (num_sign_bit_copies (from, GET_MODE (from))
-		   == GET_MODE_BITSIZE (GET_MODE (from))))
+		   == GET_MODE_PRECISION (GET_MODE (from))))
 	{
 	  false_code = EQ;
 	  false_val = constm1_rtx;
@@ -6182,8 +6184,8 @@ simplify_if_then_else (rtx x)
 	   && rtx_equal_p (SUBREG_REG (XEXP (XEXP (t, 0), 0)), f)
 	   && (num_sign_bit_copies (f, GET_MODE (f))
 	       > (unsigned int)
-		 (GET_MODE_BITSIZE (mode)
-		  - GET_MODE_BITSIZE (GET_MODE (XEXP (XEXP (t, 0), 0))))))
+		 (GET_MODE_PRECISION (mode)
+		  - GET_MODE_PRECISION (GET_MODE (XEXP (XEXP (t, 0), 0))))))
 	{
 	  c1 = XEXP (XEXP (t, 0), 1); z = f; op = GET_CODE (XEXP (t, 0));
 	  extend_op = SIGN_EXTEND;
@@ -6198,8 +6200,8 @@ simplify_if_then_else (rtx x)
 	   && rtx_equal_p (SUBREG_REG (XEXP (XEXP (t, 0), 1)), f)
 	   && (num_sign_bit_copies (f, GET_MODE (f))
 	       > (unsigned int)
-		 (GET_MODE_BITSIZE (mode)
-		  - GET_MODE_BITSIZE (GET_MODE (XEXP (XEXP (t, 0),
-						      1))))))
+		 (GET_MODE_PRECISION (mode)
+		  - GET_MODE_PRECISION (GET_MODE (XEXP (XEXP (t, 0), 1))))))
 	{
 	  c1 = XEXP (XEXP (t, 0), 0); z = f; op = GET_CODE (XEXP (t, 0));
 	  extend_op = SIGN_EXTEND;
@@ -6214,7 +6216,7 @@ simplify_if_then_else (rtx x)
 	       || GET_CODE (XEXP (t, 0)) == LSHIFTRT
 	       || GET_CODE (XEXP (t, 0)) == ASHIFTRT)
 	   && GET_CODE (XEXP (XEXP (t, 0), 0)) == SUBREG
-	   && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
+	   && HWI_COMPUTABLE_MODE_P (mode)
 	   && subreg_lowpart_p (XEXP (XEXP (t, 0), 0))
 	   && rtx_equal_p (SUBREG_REG (XEXP (XEXP (t, 0), 0)), f)
 	   && ((nonzero_bits (f, GET_MODE (f))
@@ -6230,7 +6232,7 @@ simplify_if_then_else (rtx x)
 	       || GET_CODE (XEXP (t, 0)) == IOR
 	       || GET_CODE (XEXP (t, 0)) == XOR)
 	   && GET_CODE (XEXP (XEXP (t, 0), 1)) == SUBREG
-	   && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
+	   && HWI_COMPUTABLE_MODE_P (mode)
 	   && subreg_lowpart_p (XEXP (XEXP (t, 0), 1))
 	   && rtx_equal_p (SUBREG_REG (XEXP (XEXP (t, 0), 1)), f)
 	   && ((nonzero_bits (f, GET_MODE (f))
@@ -6270,7 +6272,7 @@ simplify_if_then_else (rtx x)
       && ((1 == nonzero_bits (XEXP (cond, 0), mode)
 	   && (i = exact_log2 (UINTVAL (true_rtx))) >= 0)
 	  || ((num_sign_bit_copies (XEXP (cond, 0), mode)
-	       == GET_MODE_BITSIZE (mode))
+	       == GET_MODE_PRECISION (mode))
 	      && (i = exact_log2 (-UINTVAL (true_rtx))) >= 0)))
     return
       simplify_shift_const (NULL_RTX, ASHIFT, mode,
@@ -6301,15 +6303,14 @@ simplify_set (rtx x)
   rtx *cc_use;
 
   /* (set (pc) (return)) gets written as (return).  */
-  if (GET_CODE (dest) == PC && GET_CODE (src) == RETURN)
+  if (GET_CODE (dest) == PC && ANY_RETURN_P (src))
     return src;
 
   /* Now that we know for sure which bits of SRC we are using, see if we can
      simplify the expression for the object knowing that we only need the
      low-order bits.  */
 
-  if (GET_MODE_CLASS (mode) == MODE_INT
-      && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT)
+  if (GET_MODE_CLASS (mode) == MODE_INT && HWI_COMPUTABLE_MODE_P (mode))
     {
       src = force_to_mode (src, mode, ~(unsigned HOST_WIDE_INT) 0, 0);
       SUBST (SET_SRC (x), src);
@@ -6444,7 +6445,7 @@ simplify_set (rtx x)
       if (((old_code == NE && new_code == EQ)
 	   || (old_code == EQ && new_code == NE))
 	  && ! other_changed_previously && op1 == const0_rtx
-	  && GET_MODE_BITSIZE (GET_MODE (op0)) <= HOST_BITS_PER_WIDE_INT
+	  && HWI_COMPUTABLE_MODE_P (GET_MODE (op0))
 	  && exact_log2 (mask = nonzero_bits (op0, GET_MODE (op0))) >= 0)
 	{
 	  rtx pat = PATTERN (other_insn), note = 0;
@@ -6537,8 +6538,8 @@ simplify_set (rtx x)
   if (dest == cc0_rtx
       && GET_CODE (src) == SUBREG
       && subreg_lowpart_p (src)
-      && (GET_MODE_BITSIZE (GET_MODE (src))
-	  < GET_MODE_BITSIZE (GET_MODE (SUBREG_REG (src)))))
+      && (GET_MODE_PRECISION (GET_MODE (src))
+	  < GET_MODE_PRECISION (GET_MODE (SUBREG_REG (src)))))
     {
       rtx inner = SUBREG_REG (src);
       enum machine_mode inner_mode = GET_MODE (inner);
@@ -6590,7 +6591,7 @@ simplify_set (rtx x)
 #endif
       && (num_sign_bit_copies (XEXP (XEXP (src, 0), 0),
 			       GET_MODE (XEXP (XEXP (src, 0), 0)))
-	  == GET_MODE_BITSIZE (GET_MODE (XEXP (XEXP (src, 0), 0))))
+	  == GET_MODE_PRECISION (GET_MODE (XEXP (XEXP (src, 0), 0))))
       && ! side_effects_p (src))
     {
       rtx true_rtx = (GET_CODE (XEXP (src, 0)) == NE
@@ -6657,7 +6658,7 @@ simplify_logical (rtx x)
 	 any (sign) bits when converting INTVAL (op1) to
 	 "unsigned HOST_WIDE_INT".  */
       if (CONST_INT_P (op1)
-	  && (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
+	  && (HWI_COMPUTABLE_MODE_P (mode)
 	      || INTVAL (op1) > 0))
 	{
 	  x = simplify_and_const_int (x, mode, op0, INTVAL (op1));
@@ -6766,7 +6767,7 @@ expand_compound_operation (rtx x)
       if (! SCALAR_INT_MODE_P (GET_MODE (XEXP (x, 0))))
 	return x;
 
-      len = GET_MODE_BITSIZE (GET_MODE (XEXP (x, 0)));
+      len = GET_MODE_PRECISION (GET_MODE (XEXP (x, 0)));
       /* If the inner object has VOIDmode (the only way this can happen
 	 is if it is an ASM_OPERANDS), we can't do anything since we don't
 	 know how much masking to do.  */
@@ -6800,11 +6801,11 @@ expand_compound_operation (rtx x)
       pos = INTVAL (XEXP (x, 2));
 
       /* This should stay within the object being extracted, fail otherwise.  */
-      if (len + pos > GET_MODE_BITSIZE (GET_MODE (XEXP (x, 0))))
+      if (len + pos > GET_MODE_PRECISION (GET_MODE (XEXP (x, 0))))
 	return x;
 
       if (BITS_BIG_ENDIAN)
-	pos = GET_MODE_BITSIZE (GET_MODE (XEXP (x, 0))) - len - pos;
+	pos = GET_MODE_PRECISION (GET_MODE (XEXP (x, 0))) - len - pos;
 
       break;
@@ -6815,7 +6816,7 @@ expand_compound_operation (rtx x)
      bit is not set, as this is easier to optimize.  It will be converted
      back to cheaper alternative in make_extraction.  */
   if (GET_CODE (x) == SIGN_EXTEND
-      && (GET_MODE_BITSIZE (GET_MODE (x)) <= HOST_BITS_PER_WIDE_INT
+      && (HWI_COMPUTABLE_MODE_P (GET_MODE (x))
 	  && ((nonzero_bits (XEXP (x, 0), GET_MODE (XEXP (x, 0)))
 	       & ~(((unsigned HOST_WIDE_INT)
 		    GET_MODE_MASK (GET_MODE (XEXP (x, 0))))
@@ -6826,11 +6827,11 @@ expand_compound_operation (rtx x)
       rtx temp2 = expand_compound_operation (temp);
 
       /* Make sure this is a profitable operation.  */
-      if (rtx_cost (x, SET, optimize_this_for_speed_p)
-          > rtx_cost (temp2, SET, optimize_this_for_speed_p))
+      if (set_src_cost (x, optimize_this_for_speed_p)
+          > set_src_cost (temp2, optimize_this_for_speed_p))
        return temp2;
-      else if (rtx_cost (x, SET, optimize_this_for_speed_p)
-               > rtx_cost (temp, SET, optimize_this_for_speed_p))
+      else if (set_src_cost (x, optimize_this_for_speed_p)
+               > set_src_cost (temp, optimize_this_for_speed_p))
        return temp;
       else
        return x;
@@ -6844,7 +6845,7 @@ expand_compound_operation (rtx x)
 	 set.  */
       if (GET_CODE (XEXP (x, 0)) == TRUNCATE
 	  && GET_MODE (XEXP (XEXP (x, 0), 0)) == GET_MODE (x)
-	  && GET_MODE_BITSIZE (GET_MODE (x)) <= HOST_BITS_PER_WIDE_INT
+	  && HWI_COMPUTABLE_MODE_P (GET_MODE (x))
 	  && (nonzero_bits (XEXP (XEXP (x, 0), 0), GET_MODE (x))
 	      & ~GET_MODE_MASK (GET_MODE (XEXP (x, 0)))) == 0)
 	return XEXP (XEXP (x, 0), 0);
@@ -6853,7 +6854,7 @@ expand_compound_operation (rtx x)
       if (GET_CODE (XEXP (x, 0)) == SUBREG
 	  && GET_MODE (SUBREG_REG (XEXP (x, 0))) == GET_MODE (x)
 	  && subreg_lowpart_p (XEXP (x, 0))
-	  && GET_MODE_BITSIZE (GET_MODE (x)) <= HOST_BITS_PER_WIDE_INT
+	  && HWI_COMPUTABLE_MODE_P (GET_MODE (x))
 	  && (nonzero_bits (SUBREG_REG (XEXP (x, 0)), GET_MODE (x))
 	      & ~GET_MODE_MASK (GET_MODE (XEXP (x, 0)))) == 0)
 	return SUBREG_REG (XEXP (x, 0));
@@ -6865,7 +6866,7 @@ expand_compound_operation (rtx x)
       if (GET_CODE (XEXP (x, 0)) == TRUNCATE
 	  && GET_MODE (XEXP (XEXP (x, 0), 0)) == GET_MODE (x)
 	  && COMPARISON_P (XEXP (XEXP (x, 0), 0))
-	  && (GET_MODE_BITSIZE (GET_MODE (XEXP (x, 0)))
+	  && (GET_MODE_PRECISION (GET_MODE (XEXP (x, 0)))
 	      <= HOST_BITS_PER_WIDE_INT)
 	  && (STORE_FLAG_VALUE & ~GET_MODE_MASK (GET_MODE (XEXP (x, 0)))) == 0)
 	return XEXP (XEXP (x, 0), 0);
@@ -6875,7 +6876,7 @@ expand_compound_operation (rtx x)
 	  && GET_MODE (SUBREG_REG (XEXP (x, 0))) == GET_MODE (x)
 	  && subreg_lowpart_p (XEXP (x, 0))
 	  && COMPARISON_P (SUBREG_REG (XEXP (x, 0)))
-	  && (GET_MODE_BITSIZE (GET_MODE (XEXP (x, 0)))
+	  && (GET_MODE_PRECISION (GET_MODE (XEXP (x, 0)))
 	      <= HOST_BITS_PER_WIDE_INT)
 	  && (STORE_FLAG_VALUE & ~GET_MODE_MASK (GET_MODE (XEXP (x, 0)))) == 0)
 	return SUBREG_REG (XEXP (x, 0));
@@ -6897,7 +6898,7 @@ expand_compound_operation (rtx x)
      extraction.
      Then the constant of 31 would be substituted in to produce such a
      position.  */
 
-  modewidth = GET_MODE_BITSIZE (GET_MODE (x));
+  modewidth = GET_MODE_PRECISION (GET_MODE (x));
   if (modewidth >= pos + len)
     {
       enum machine_mode mode = GET_MODE (x);
@@ -6951,7 +6952,7 @@ expand_field_assignment (const_rtx x)
 	  && GET_CODE (XEXP (SET_DEST (x), 0)) == SUBREG)
 	{
 	  inner = SUBREG_REG (XEXP (SET_DEST (x), 0));
-	  len = GET_MODE_BITSIZE (GET_MODE (XEXP (SET_DEST (x), 0)));
+	  len = GET_MODE_PRECISION (GET_MODE (XEXP (SET_DEST (x), 0)));
 	  pos = GEN_INT (subreg_lsb (XEXP (SET_DEST (x), 0)));
 	}
       else if (GET_CODE (SET_DEST (x)) == ZERO_EXTRACT
@@ -6963,23 +6964,23 @@ expand_field_assignment (const_rtx x)
 	  /* A constant position should stay within the width of INNER.  */
 	  if (CONST_INT_P (pos)
-	      && INTVAL (pos) + len > GET_MODE_BITSIZE (GET_MODE (inner)))
+	      && INTVAL (pos) + len > GET_MODE_PRECISION (GET_MODE (inner)))
 	    break;
 
 	  if (BITS_BIG_ENDIAN)
 	    {
 	      if (CONST_INT_P (pos))
-		pos = GEN_INT (GET_MODE_BITSIZE (GET_MODE (inner)) - len
+		pos = GEN_INT (GET_MODE_PRECISION (GET_MODE (inner)) - len
 			       - INTVAL (pos));
 	      else if (GET_CODE (pos) == MINUS
 		       && CONST_INT_P (XEXP (pos, 1))
 		       && (INTVAL (XEXP (pos, 1))
-			   == GET_MODE_BITSIZE (GET_MODE (inner)) - len))
+			   == GET_MODE_PRECISION (GET_MODE (inner)) - len))
 		/* If position is ADJUST - X, new position is X.  */
 		pos = XEXP (pos, 0);
 	      else
 		pos = simplify_gen_binary (MINUS, GET_MODE (pos),
-					   GEN_INT (GET_MODE_BITSIZE (
+					   GEN_INT (GET_MODE_PRECISION (
 						    GET_MODE (inner))
 						    - len),
 					   pos);
@@ -7154,7 +7155,7 @@ make_extraction (enum machine_mode mode, rtx inner, HOST_WIDE_INT pos,
 		     : BITS_PER_UNIT)) == 0
 	       /* We can't do this if we are widening INNER_MODE (it
 		  may not be aligned, for one thing).  */
-	       && GET_MODE_BITSIZE (inner_mode) >= GET_MODE_BITSIZE (tmode)
+	       && GET_MODE_PRECISION (inner_mode) >= GET_MODE_PRECISION (tmode)
 	       && (inner_mode == tmode
 		   || (! mode_dependent_address_p (XEXP (inner, 0))
 		       && ! MEM_VOLATILE_P (inner))))))
@@ -7172,7 +7173,7 @@ make_extraction (enum machine_mode mode, rtx inner, HOST_WIDE_INT pos,
 	  /* POS counts from lsb, but make OFFSET count in memory order.  */
 	  if (BYTES_BIG_ENDIAN)
-	    offset = (GET_MODE_BITSIZE (is_mode) - len - pos) / BITS_PER_UNIT;
+	    offset = (GET_MODE_PRECISION (is_mode) - len - pos) / BITS_PER_UNIT;
 	  else
 	    offset = pos / BITS_PER_UNIT;
@@ -7242,11 +7243,9 @@ make_extraction (enum machine_mode mode, rtx inner, HOST_WIDE_INT pos,
 	 bit is not set, convert the extraction to the cheaper of
 	 sign and zero extension, that are equivalent in these cases.  */
       if (flag_expensive_optimizations
-	  && (GET_MODE_BITSIZE (tmode) <= HOST_BITS_PER_WIDE_INT
+	  && (HWI_COMPUTABLE_MODE_P (tmode)
 	      && ((nonzero_bits (new_rtx, tmode)
-		   & ~(((unsigned HOST_WIDE_INT)
-			GET_MODE_MASK (tmode))
-		       >> 1))
+		   & ~(((unsigned HOST_WIDE_INT)GET_MODE_MASK (tmode)) >> 1))
 		  == 0)))
 	{
 	  rtx temp = gen_rtx_ZERO_EXTEND (mode, new_rtx);
@@ -7254,8 +7253,8 @@ make_extraction (enum machine_mode mode, rtx inner, HOST_WIDE_INT pos,
 	  /* Prefer ZERO_EXTENSION, since it gives more information to
 	     backends.  */
-	  if (rtx_cost (temp, SET, optimize_this_for_speed_p)
-	      <= rtx_cost (temp1, SET, optimize_this_for_speed_p))
+	  if (set_src_cost (temp, optimize_this_for_speed_p)
+	      <= set_src_cost (temp1, optimize_this_for_speed_p))
 	    return temp;
 	  return temp1;
 	}
@@ -7279,7 +7278,7 @@ make_extraction (enum machine_mode mode, rtx inner, HOST_WIDE_INT pos,
      other cases, we would only be going outside our object in cases when
     an original shift would have been undefined.  */
   if (MEM_P (inner)
-      && ((pos_rtx == 0 && pos + len > GET_MODE_BITSIZE (is_mode))
+      && ((pos_rtx == 0 && pos + len > GET_MODE_PRECISION (is_mode))
 	  || (pos_rtx != 0 && len != 1)))
     return 0;
@@ -7445,7 +7444,7 @@ make_extraction (enum machine_mode mode, rtx inner, HOST_WIDE_INT pos,
 	 SIGN_EXTENSION or ZERO_EXTENSION, that are equivalent in these cases.  */
       if (flag_expensive_optimizations
-	  && (GET_MODE_BITSIZE (GET_MODE (pos_rtx)) <= HOST_BITS_PER_WIDE_INT
+	  && (HWI_COMPUTABLE_MODE_P (GET_MODE (pos_rtx))
 	      && ((nonzero_bits (pos_rtx, GET_MODE (pos_rtx))
 		   & ~(((unsigned HOST_WIDE_INT)
 			GET_MODE_MASK (GET_MODE (pos_rtx)))
@@ -7456,8 +7455,8 @@ make_extraction (enum machine_mode mode, rtx inner, HOST_WIDE_INT pos,
 	  /* Prefer ZERO_EXTENSION, since it gives more information to
 	     backends.  */
-	  if (rtx_cost (temp1, SET, optimize_this_for_speed_p)
-	      < rtx_cost (temp, SET, optimize_this_for_speed_p))
+	  if (set_src_cost (temp1, optimize_this_for_speed_p)
+	      < set_src_cost (temp, optimize_this_for_speed_p))
 	    temp = temp1;
 	}
       pos_rtx = temp;
@@ -7554,7 +7553,7 @@ make_compound_operation (rtx x, enum rtx_code in_code)
 {
   enum rtx_code code = GET_CODE (x);
   enum machine_mode mode = GET_MODE (x);
-  int mode_width = GET_MODE_BITSIZE (mode);
+  int mode_width = GET_MODE_PRECISION (mode);
   rtx rhs, lhs;
   enum rtx_code next_code;
   int i, j;
@@ -7713,7 +7712,7 @@ make_compound_operation (rtx x, enum rtx_code in_code)
 	{
 	  new_rtx = make_compound_operation (XEXP (XEXP (x, 0), 0), next_code);
 	  new_rtx = make_extraction (mode, new_rtx,
-				     (GET_MODE_BITSIZE (mode)
+				     (GET_MODE_PRECISION (mode)
 				      - INTVAL (XEXP (XEXP (x, 0), 1))),
 				     NULL_RTX, i, 1, 0, in_code == COMPARE);
 	}
@@ -7788,6 +7787,7 @@ make_compound_operation (rtx x, enum rtx_code in_code)
 	  && GET_CODE (lhs) == ASHIFT
 	  && CONST_INT_P (XEXP (lhs, 1))
 	  && INTVAL (rhs) >= INTVAL (XEXP (lhs, 1))
+	  && INTVAL (XEXP (lhs, 1)) >= 0
 	  && INTVAL (rhs) < mode_width)
 	{
 	  new_rtx = make_compound_operation (XEXP (lhs, 0), next_code);
@@ -8104,7 +8104,7 @@ force_to_mode (rtx x, enum machine_mode mode, unsigned HOST_WIDE_INT mask,
   /* It is not valid to do a right-shift in a narrower mode
      than the one it came in with.  */
   if ((code == LSHIFTRT || code == ASHIFTRT)
-      && GET_MODE_BITSIZE (mode) < GET_MODE_BITSIZE (GET_MODE (x)))
+      && GET_MODE_PRECISION (mode) < GET_MODE_PRECISION (GET_MODE (x)))
     op_mode = GET_MODE (x);
 
   /* Truncate MASK to fit OP_MODE.  */
@@ -8207,12 +8207,12 @@ force_to_mode (rtx x, enum machine_mode mode, unsigned HOST_WIDE_INT mask,
       if (GET_CODE (x) == AND && CONST_INT_P (XEXP (x, 1))
 	  && GET_MODE_MASK (GET_MODE (x)) != mask
-	  && GET_MODE_BITSIZE (GET_MODE (x)) <= HOST_BITS_PER_WIDE_INT)
+	  && HWI_COMPUTABLE_MODE_P (GET_MODE (x)))
 	{
 	  unsigned HOST_WIDE_INT cval
 	    = UINTVAL (XEXP (x, 1))
 	      | (GET_MODE_MASK (GET_MODE (x)) & ~mask);
-	  int width = GET_MODE_BITSIZE (GET_MODE (x));
+	  int width = GET_MODE_PRECISION (GET_MODE (x));
 	  rtx y;
 
 	  /* If MODE is narrower than HOST_WIDE_INT and CVAL is a negative
@@ -8223,8 +8223,8 @@ force_to_mode (rtx x, enum machine_mode mode, unsigned HOST_WIDE_INT mask,
 	  y = simplify_gen_binary (AND, GET_MODE (x),
 				   XEXP (x, 0), GEN_INT (cval));
-	  if (rtx_cost (y, SET, optimize_this_for_speed_p)
-	      < rtx_cost (x, SET, optimize_this_for_speed_p))
+	  if (set_src_cost (y, optimize_this_for_speed_p)
+	      < set_src_cost (x, optimize_this_for_speed_p))
 	    x = y;
 	}
@@ -8240,7 +8240,7 @@ force_to_mode (rtx x, enum machine_mode mode, unsigned HOST_WIDE_INT mask,
 	 This may eliminate that PLUS and, later, the AND.  */
       {
-	unsigned int width = GET_MODE_BITSIZE (mode);
+	unsigned int width = GET_MODE_PRECISION (mode);
 	unsigned HOST_WIDE_INT smask = mask;
 
 	/* If MODE is narrower than HOST_WIDE_INT and mask is a negative
@@ -8308,7 +8308,7 @@ force_to_mode (rtx x, enum machine_mode mode, unsigned HOST_WIDE_INT mask,
 	  && CONST_INT_P (XEXP (x, 1))
 	  && ((INTVAL (XEXP (XEXP (x, 0), 1))
 	       + floor_log2 (INTVAL (XEXP (x, 1))))
-	      < GET_MODE_BITSIZE (GET_MODE (x)))
+	      < GET_MODE_PRECISION (GET_MODE (x)))
 	  && (UINTVAL (XEXP (x, 1))
 	      & ~nonzero_bits (XEXP (x, 0), GET_MODE (x))) == 0)
 	{
@@ -8353,10 +8353,10 @@ force_to_mode (rtx x, enum machine_mode mode, unsigned HOST_WIDE_INT mask,
       if (! (CONST_INT_P (XEXP (x, 1))
 	     && INTVAL (XEXP (x, 1)) >= 0
-	     && INTVAL (XEXP (x, 1)) < GET_MODE_BITSIZE (mode))
+	     && INTVAL (XEXP (x, 1)) < GET_MODE_PRECISION (mode))
 	  && ! (GET_MODE (XEXP (x, 1)) != VOIDmode
 		&& (nonzero_bits (XEXP (x, 1), GET_MODE (XEXP (x, 1)))
-		    < (unsigned HOST_WIDE_INT) GET_MODE_BITSIZE (mode))))
+		    < (unsigned HOST_WIDE_INT) GET_MODE_PRECISION (mode))))
 	break;
 
       /* If the shift count is a constant and we can do arithmetic in
@@ -8364,8 +8364,8 @@ force_to_mode (rtx x, enum machine_mode mode, unsigned HOST_WIDE_INT mask,
 	 conservative form of the mask.  */
       if (CONST_INT_P (XEXP (x, 1))
 	  && INTVAL (XEXP (x, 1)) >= 0
-	  && INTVAL (XEXP (x, 1)) < GET_MODE_BITSIZE (op_mode)
-	  && GET_MODE_BITSIZE (op_mode) <= HOST_BITS_PER_WIDE_INT)
+	  && INTVAL (XEXP (x, 1)) < GET_MODE_PRECISION (op_mode)
+	  && HWI_COMPUTABLE_MODE_P (op_mode))
 	mask >>= INTVAL (XEXP (x, 1));
       else
 	mask = fuller_mask;
@@ -8385,7 +8385,7 @@ force_to_mode (rtx x, enum machine_mode mode, unsigned HOST_WIDE_INT mask,
       if (CONST_INT_P (XEXP (x, 1))
 	  && INTVAL (XEXP (x, 1)) < HOST_BITS_PER_WIDE_INT
-	  && GET_MODE_BITSIZE (op_mode) <= HOST_BITS_PER_WIDE_INT)
+	  && HWI_COMPUTABLE_MODE_P (op_mode))
 	{
 	  rtx inner = XEXP (x, 0);
 	  unsigned HOST_WIDE_INT inner_mask;
@@ -8415,17 +8415,17 @@ force_to_mode (rtx x, enum machine_mode mode, unsigned HOST_WIDE_INT mask,
 	     bit.  */
 	  && ((INTVAL (XEXP (x, 1))
 	       + num_sign_bit_copies (XEXP (x, 0), GET_MODE (XEXP (x, 0))))
-	      >= GET_MODE_BITSIZE (GET_MODE (x)))
+	      >= GET_MODE_PRECISION (GET_MODE (x)))
 	  && exact_log2 (mask + 1) >= 0
 	  /* Number of bits left after the shift must be more than the mask
 	     needs.  */
 	  && ((INTVAL (XEXP (x, 1)) + exact_log2 (mask + 1))
-	      <= GET_MODE_BITSIZE (GET_MODE (x)))
+	      <= GET_MODE_PRECISION (GET_MODE (x)))
 	  /* Must be more sign bit copies than the mask needs.  */
 	  && ((int) num_sign_bit_copies (XEXP (x, 0), GET_MODE (XEXP (x, 0)))
 	      >= exact_log2 (mask + 1)))
 	x = simplify_gen_binary (LSHIFTRT, GET_MODE (x), XEXP (x, 0),
-				 GEN_INT (GET_MODE_BITSIZE (GET_MODE (x))
+				 GEN_INT (GET_MODE_PRECISION (GET_MODE (x))
 					  - exact_log2 (mask + 1)));
 
       goto shiftrt;
@@ -8452,20 +8452,20 @@ force_to_mode (rtx x, enum machine_mode mode, unsigned HOST_WIDE_INT mask,
 	 represent a mask for all its bits in a single scalar.
 	 But we only care about the lower bits, so calculate these.  */
 
-      if (GET_MODE_BITSIZE (GET_MODE (x)) > HOST_BITS_PER_WIDE_INT)
+      if (GET_MODE_PRECISION (GET_MODE (x)) > HOST_BITS_PER_WIDE_INT)
 	{
 	  nonzero = ~(unsigned HOST_WIDE_INT) 0;
 
-	  /* GET_MODE_BITSIZE (GET_MODE (x)) - INTVAL (XEXP (x, 1))
+	  /* GET_MODE_PRECISION (GET_MODE (x)) - INTVAL (XEXP (x, 1))
 	     is the number of bits a full-width mask would have set.
 	     We need only shift if these are fewer than nonzero can
 	     hold.  If not, we must keep all bits set in nonzero.  */
-	  if (GET_MODE_BITSIZE (GET_MODE (x)) - INTVAL (XEXP (x, 1))
+	  if (GET_MODE_PRECISION (GET_MODE (x)) - INTVAL (XEXP (x, 1))
 	      < HOST_BITS_PER_WIDE_INT)
 	    nonzero >>= INTVAL (XEXP (x, 1))
 			+ HOST_BITS_PER_WIDE_INT
-			- GET_MODE_BITSIZE (GET_MODE (x)) ;
+			- GET_MODE_PRECISION (GET_MODE (x)) ;
 	}
       else
 	{
@@ -8485,7 +8485,7 @@ force_to_mode (rtx x, enum machine_mode mode, unsigned HOST_WIDE_INT mask,
 	{
 	  x = simplify_shift_const
 	      (NULL_RTX, LSHIFTRT, GET_MODE (x), XEXP (x, 0),
-	       GET_MODE_BITSIZE (GET_MODE (x)) - 1 - i);
+	       GET_MODE_PRECISION (GET_MODE (x)) - 1 - i);
 
 	  if (GET_CODE (x) != ASHIFTRT)
 	    return force_to_mode (x, mode, mask, next_select);
@@ -8508,7 +8508,7 @@ force_to_mode (rtx x, enum machine_mode mode, unsigned HOST_WIDE_INT mask,
 	  && CONST_INT_P (XEXP (x, 1))
 	  && INTVAL (XEXP (x, 1)) >= 0
 	  && (INTVAL (XEXP (x, 1))
-	      <= GET_MODE_BITSIZE (GET_MODE (x)) - (floor_log2 (mask) + 1))
+	      <= GET_MODE_PRECISION (GET_MODE (x)) - (floor_log2 (mask) + 1))
 	  && GET_CODE (XEXP (x, 0)) == ASHIFT
 	  && XEXP (XEXP (x, 0), 1) == XEXP (x, 1))
 	return force_to_mode (XEXP (XEXP (x, 0), 0), mode, mask,
@@ -8556,7 +8556,7 @@ force_to_mode (rtx x, enum machine_mode mode, unsigned HOST_WIDE_INT mask,
 	  && CONST_INT_P (XEXP (XEXP (x, 0), 1))
 	  && INTVAL (XEXP (XEXP (x, 0), 1)) >= 0
 	  && (INTVAL (XEXP (XEXP (x, 0), 1)) + floor_log2 (mask)
-	      < GET_MODE_BITSIZE (GET_MODE (x)))
+	      < GET_MODE_PRECISION (GET_MODE (x)))
 	  && INTVAL (XEXP (XEXP (x, 0), 1)) < HOST_BITS_PER_WIDE_INT)
 	{
 	  temp = gen_int_mode (mask << INTVAL (XEXP (XEXP (x, 0), 1)),
@@ -8808,15 +8808,14 @@ if_then_else_cond (rtx x, rtx *ptrue, rtx *pfalse)
     false values when testing X.  */
   else if (x == constm1_rtx || x == const0_rtx
 	   || (mode != VOIDmode
-	       && num_sign_bit_copies (x, mode) == GET_MODE_BITSIZE (mode)))
+	       && num_sign_bit_copies (x, mode) == GET_MODE_PRECISION (mode)))
     {
       *ptrue = constm1_rtx, *pfalse = const0_rtx;
       return x;
     }
 
   /* Likewise for 0 or a single bit.  */
-  else if (SCALAR_INT_MODE_P (mode)
-	   && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
+  else if (HWI_COMPUTABLE_MODE_P (mode)
 	   && exact_log2 (nz = nonzero_bits (x, mode)) >= 0)
     {
       *ptrue = gen_int_mode (nz, mode), *pfalse = const0_rtx;
@@ -9141,8 +9140,8 @@ make_field_assignment (rtx x)
     return x;
 
   pos = get_pos_from_mask ((~c1) & GET_MODE_MASK (GET_MODE (dest)), &len);
-  if (pos < 0 || pos + len > GET_MODE_BITSIZE (GET_MODE (dest))
-      || GET_MODE_BITSIZE (GET_MODE (dest)) > HOST_BITS_PER_WIDE_INT
+  if (pos < 0 || pos + len > GET_MODE_PRECISION (GET_MODE (dest))
+      || GET_MODE_PRECISION (GET_MODE (dest)) > HOST_BITS_PER_WIDE_INT
       || (c1 & nonzero_bits (other, GET_MODE (dest))) != 0)
     return x;
@@ -9163,7 +9162,7 @@ make_field_assignment (rtx x)
 						other, pos),
 			       dest);
   src = force_to_mode (src, mode,
-		       GET_MODE_BITSIZE (mode) >= HOST_BITS_PER_WIDE_INT
+		       GET_MODE_PRECISION (mode) >= HOST_BITS_PER_WIDE_INT
 		       ? ~(unsigned HOST_WIDE_INT) 0
 		       : ((unsigned HOST_WIDE_INT) 1 << len) - 1,
 		       0);
@@ -9378,8 +9377,8 @@ distribute_and_simplify_rtx (rtx x, int n)
   tmp = apply_distributive_law (simplify_gen_binary (inner_code, mode,
 						     new_op0, new_op1));
   if (GET_CODE (tmp) != outer_code
-      && rtx_cost (tmp, SET, optimize_this_for_speed_p)
-	 < rtx_cost (x, SET, optimize_this_for_speed_p))
+      && (set_src_cost (tmp, optimize_this_for_speed_p)
+	  < set_src_cost (x, optimize_this_for_speed_p)))
     return tmp;
 
   return NULL_RTX;
@@ -9585,7 +9584,7 @@ reg_nonzero_bits_for_combine (const_rtx x, enum machine_mode mode,
     {
       unsigned HOST_WIDE_INT mask = rsp->nonzero_bits;
 
-      if (GET_MODE_BITSIZE (GET_MODE (x)) < GET_MODE_BITSIZE (mode))
+      if (GET_MODE_PRECISION (GET_MODE (x)) < GET_MODE_PRECISION (mode))
 	/* We don't know anything about the upper bits.  */
 	mask |= GET_MODE_MASK (mode) ^ GET_MODE_MASK (GET_MODE (x));
       *nonzero &= mask;
@@ -9631,7 +9630,7 @@ reg_num_sign_bit_copies_for_combine (const_rtx x, enum machine_mode mode,
     return tem;
 
   if (nonzero_sign_valid && rsp->sign_bit_copies != 0
-      && GET_MODE_BITSIZE (GET_MODE (x)) == GET_MODE_BITSIZE (mode))
+      && GET_MODE_PRECISION (GET_MODE (x)) == GET_MODE_PRECISION (mode))
     *result = rsp->sign_bit_copies;
 
   return NULL;
@@ -9655,8 +9654,8 @@ extended_count (const_rtx x, enum machine_mode mode, int unsignedp)
     return 0;
 
   return (unsignedp
-	  ? (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
-	     ? (unsigned int) (GET_MODE_BITSIZE (mode) - 1
+	  ? (HWI_COMPUTABLE_MODE_P (mode)
+	     ? (unsigned int) (GET_MODE_PRECISION (mode) - 1
 			       - floor_log2 (nonzero_bits (x, mode)))
 	     : 0)
 	  : num_sign_bit_copies (x, mode) - 1);
@@ -9807,7 +9806,7 @@ try_widen_shift_mode (enum rtx_code code, rtx op, int count,
 {
   if (orig_mode == mode)
     return mode;
-  gcc_assert (GET_MODE_BITSIZE (mode) > GET_MODE_BITSIZE (orig_mode));
+  gcc_assert (GET_MODE_PRECISION (mode) > GET_MODE_PRECISION (orig_mode));
 
   /* In general we can't perform in wider mode for right shift and rotate.  */
   switch (code)
@@ -9816,14 +9815,14 @@ try_widen_shift_mode (enum rtx_code code, rtx op, int count,
       /* We can still widen if the bits brought in from the left are identical
 	 to the sign bit of ORIG_MODE.  */
       if (num_sign_bit_copies (op, mode)
-	  > (unsigned) (GET_MODE_BITSIZE (mode)
-			- GET_MODE_BITSIZE (orig_mode)))
+	  > (unsigned) (GET_MODE_PRECISION (mode)
+			- GET_MODE_PRECISION (orig_mode)))
 	return mode;
       return orig_mode;
 
     case LSHIFTRT:
       /* Similarly here but with zero bits.  */
-      if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
+      if (HWI_COMPUTABLE_MODE_P (mode)
 	  && (nonzero_bits (op, mode) & ~GET_MODE_MASK (orig_mode)) == 0)
 	return mode;
@@ -9834,7 +9833,7 @@ try_widen_shift_mode (enum rtx_code code, rtx op, int count,
 	  int care_bits = low_bitmask_len (orig_mode, outer_const);
 
 	  if (care_bits >= 0
-	      && GET_MODE_BITSIZE (orig_mode) - care_bits >= count)
+	      && GET_MODE_PRECISION (orig_mode) - care_bits >= count)
 	    return mode;
 	}
       /* fall through */
@@ -9850,9 +9849,9 @@ try_widen_shift_mode (enum rtx_code code, rtx op, int count,
     }
 }
 
-/* Simplify a shift of VAROP by COUNT bits.  CODE says what kind of shift.
-   The result of the shift is RESULT_MODE.  Return NULL_RTX if we cannot
-   simplify it.  Otherwise, return a simplified value.
+/* Simplify a shift of VAROP by ORIG_COUNT bits.  CODE says what kind
+   of shift.  The result of the shift is RESULT_MODE.  Return NULL_RTX
+   if we cannot simplify it.  Otherwise, return a simplified value.
 
    The shift is normally computed in the widest mode we find in VAROP, as
   long as it isn't a different number of words than RESULT_MODE.
   Exceptions
@@ -9879,12 +9878,12 @@ simplify_shift_const_1 (enum rtx_code code, enum machine_mode result_mode,
      want to do this inside the loop as it makes it more difficult to
      combine shifts.  */
   if (SHIFT_COUNT_TRUNCATED)
-    orig_count &= targetm.shift_truncation_mask (mode);
+    orig_count &= GET_MODE_BITSIZE (mode) - 1;
 
   /* If we were given an invalid count, don't do anything except exactly
      what was requested.  */
 
-  if (orig_count < 0 || orig_count >= (int) GET_MODE_BITSIZE (mode))
+  if (orig_count < 0 || orig_count >= (int) GET_MODE_PRECISION (mode))
     return NULL_RTX;
 
   count = orig_count;
@@ -9901,7 +9900,7 @@ simplify_shift_const_1 (enum rtx_code code, enum machine_mode result_mode,
       /* Convert ROTATERT to ROTATE.  */
       if (code == ROTATERT)
 	{
-	  unsigned int bitsize = GET_MODE_BITSIZE (result_mode);;
+	  unsigned int bitsize = GET_MODE_PRECISION (result_mode);
 	  code = ROTATE;
 	  if (VECTOR_MODE_P (result_mode))
 	    count = bitsize / GET_MODE_NUNITS (result_mode) - count;
@@ -9922,12 +9921,12 @@ simplify_shift_const_1 (enum rtx_code code, enum machine_mode result_mode,
 	 multiple operations, each of which are defined, we know what the
 	 result is supposed to be.  */
 
-      if (count > (GET_MODE_BITSIZE (shift_mode) - 1))
+      if (count > (GET_MODE_PRECISION (shift_mode) - 1))
 	{
 	  if (code == ASHIFTRT)
-	    count = GET_MODE_BITSIZE (shift_mode) - 1;
+	    count = GET_MODE_PRECISION (shift_mode) - 1;
 	  else if (code == ROTATE || code == ROTATERT)
-	    count %= GET_MODE_BITSIZE (shift_mode);
+	    count %= GET_MODE_PRECISION (shift_mode);
 	  else
 	    {
 	      /* We can't simply return zero because there may be an
@@ -9947,7 +9946,7 @@ simplify_shift_const_1 (enum rtx_code code, enum machine_mode result_mode,
 	 is a no-op.  */
       if (code == ASHIFTRT
 	  && (num_sign_bit_copies (varop, shift_mode)
-	      == GET_MODE_BITSIZE (shift_mode)))
+	      == GET_MODE_PRECISION (shift_mode)))
 	{
 	  count = 0;
 	  break;
@@ -9960,8 +9959,8 @@ simplify_shift_const_1 (enum rtx_code code, enum machine_mode result_mode,
       if (code == ASHIFTRT
 	  && (count + num_sign_bit_copies (varop, shift_mode)
-	      >= GET_MODE_BITSIZE (shift_mode)))
-	count = GET_MODE_BITSIZE (shift_mode) - 1;
+	      >= GET_MODE_PRECISION (shift_mode)))
+	count = GET_MODE_PRECISION (shift_mode) - 1;
 
       /* We simplify the tests below and elsewhere by converting
 	 ASHIFTRT to LSHIFTRT if we know the sign bit is clear.
@@ -9973,10 +9972,10 @@ simplify_shift_const_1 (enum rtx_code code, enum machine_mode result_mode,
 	code = LSHIFTRT;
 
       if (((code == LSHIFTRT
-	    && GET_MODE_BITSIZE (shift_mode) <= HOST_BITS_PER_WIDE_INT
+	    && HWI_COMPUTABLE_MODE_P (shift_mode)
 	    && !(nonzero_bits (varop, shift_mode) >> count))
 	   || (code == ASHIFT
-	       && GET_MODE_BITSIZE (shift_mode) <= HOST_BITS_PER_WIDE_INT
+	       && HWI_COMPUTABLE_MODE_P (shift_mode)
 	       && !((nonzero_bits (varop, shift_mode) << count)
 		    & GET_MODE_MASK (shift_mode))))
 	  && !side_effects_p (varop))
@@ -10091,9 +10090,9 @@ simplify_shift_const_1 (enum rtx_code code, enum machine_mode result_mode,
 	 AND of a new shift with a mask.  We compute the result below.  */
       if (CONST_INT_P (XEXP (varop, 1))
 	  && INTVAL (XEXP (varop, 1)) >= 0
-	  && INTVAL (XEXP (varop, 1)) < GET_MODE_BITSIZE (GET_MODE (varop))
-	  && GET_MODE_BITSIZE (result_mode) <= HOST_BITS_PER_WIDE_INT
-	  && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
+	  && INTVAL (XEXP (varop, 1)) < GET_MODE_PRECISION (GET_MODE (varop))
+	  && HWI_COMPUTABLE_MODE_P (result_mode)
+	  && HWI_COMPUTABLE_MODE_P (mode)
 	  && !VECTOR_MODE_P (result_mode))
 	{
 	  enum rtx_code first_code = GET_CODE (varop);
@@ -10106,11 +10105,11 @@ simplify_shift_const_1 (enum rtx_code code, enum machine_mode result_mode,
 	     we have (ashift:M1 (subreg:M1 (ashiftrt:M2 FOO C1) 0) C2)
 	     with C2 == GET_MODE_BITSIZE (M1) - GET_MODE_BITSIZE (M2),
 	     we can convert it to
-	     (ashiftrt:M1 (ashift:M1 (and:M1 (subreg:M1 FOO 0 C2) C3) C1).
+	     (ashiftrt:M1 (ashift:M1 (and:M1 (subreg:M1 FOO 0) C3) C2) C1).
 	     This simplifies certain SIGN_EXTEND operations.  */
 	  if (code == ASHIFT && first_code == ASHIFTRT
-	      && count == (GET_MODE_BITSIZE (result_mode)
-			   - GET_MODE_BITSIZE (GET_MODE (varop))))
+	      && count == (GET_MODE_PRECISION (result_mode)
+			   - GET_MODE_PRECISION (GET_MODE (varop))))
 	    {
 	      /* C3 has the low-order C1 bits zero.  */
@@ -10178,7 +10177,7 @@ simplify_shift_const_1 (enum rtx_code code, enum machine_mode result_mode,
 	      if (code == ASHIFTRT
 		  || (code == ROTATE && first_code == ASHIFTRT)
-		  || GET_MODE_BITSIZE (mode) > HOST_BITS_PER_WIDE_INT
+		  || GET_MODE_PRECISION (mode) > HOST_BITS_PER_WIDE_INT
 		  || (GET_MODE (varop) != result_mode
 		      && (first_code == ASHIFTRT || first_code == LSHIFTRT
 			  || first_code == ROTATE
@@ -10266,7 +10265,7 @@ simplify_shift_const_1 (enum rtx_code code, enum machine_mode result_mode,
 	      && XEXP (XEXP (varop, 0), 1) == constm1_rtx
 	      && (STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1)
 	      && (code == LSHIFTRT || code == ASHIFTRT)
-	      && count == (GET_MODE_BITSIZE (GET_MODE (varop)) - 1)
+	      && count == (GET_MODE_PRECISION (GET_MODE (varop)) - 1)
 	      && rtx_equal_p (XEXP (XEXP (varop, 0), 0), XEXP (varop, 1)))
 	    {
 	      count = 0;
@@ -10328,13 +10327,13 @@ simplify_shift_const_1 (enum rtx_code code, enum machine_mode result_mode,
 	case EQ:
 	  /* Convert (lshiftrt (eq FOO 0) C) to (xor FOO 1) if STORE_FLAG_VALUE
 	     says that the sign bit can be tested, FOO has mode MODE, C is
-	     GET_MODE_BITSIZE (MODE) - 1, and FOO has only its low-order bit
+	     GET_MODE_PRECISION (MODE) - 1, and FOO has only its low-order bit
 	     that may be nonzero.  */
 	  if (code == LSHIFTRT
 	      && XEXP (varop, 1) == const0_rtx
 	      && GET_MODE (XEXP (varop, 0)) == result_mode
-	      && count == (GET_MODE_BITSIZE (result_mode) - 1)
-	      && GET_MODE_BITSIZE (result_mode) <= HOST_BITS_PER_WIDE_INT
+	      && count == (GET_MODE_PRECISION (result_mode) - 1)
+	      && HWI_COMPUTABLE_MODE_P (result_mode)
 	      && STORE_FLAG_VALUE == -1
 	      && nonzero_bits (XEXP (varop, 0), result_mode) == 1
 	      && merge_outer_ops (&outer_op, &outer_const, XOR, 1, result_mode,
@@ -10350,7 +10349,7 @@ simplify_shift_const_1 (enum rtx_code code, enum machine_mode result_mode,
 	  /* (lshiftrt (neg A) C) where A is either 0 or 1 and C is one less
 	     than the number of bits in the mode is equivalent to A.  */
 	  if (code == LSHIFTRT
-	      && count == (GET_MODE_BITSIZE (result_mode) - 1)
+	      && count == (GET_MODE_PRECISION (result_mode) - 1)
 	      && nonzero_bits (XEXP (varop, 0), result_mode) == 1)
 	    {
 	      varop = XEXP (varop, 0);
@@ -10374,7 +10373,7 @@ simplify_shift_const_1 (enum rtx_code code, enum machine_mode result_mode,
 	  /* (lshiftrt (xor A 1) C) where A is either 0 or 1 and C
 	     is one less than the number of bits in the mode is
 	     equivalent to (xor A 1).  */
 	  if (code == LSHIFTRT
-	      && count == (GET_MODE_BITSIZE (result_mode) - 1)
+	      && count == (GET_MODE_PRECISION (result_mode) - 1)
 	      && XEXP (varop, 1) == constm1_rtx
 	      && nonzero_bits (XEXP (varop, 0), result_mode) == 1
 	      && merge_outer_ops (&outer_op, &outer_const, XOR, 1, result_mode,
@@ -10402,7 +10401,7 @@ simplify_shift_const_1 (enum rtx_code code, enum machine_mode result_mode,
 	    }
 	  else if ((code == ASHIFTRT || code == LSHIFTRT)
 		   && count < HOST_BITS_PER_WIDE_INT
-		   && GET_MODE_BITSIZE (result_mode) <= HOST_BITS_PER_WIDE_INT
+		   && HWI_COMPUTABLE_MODE_P (result_mode)
 		   && 0 == (nonzero_bits (XEXP (varop, 0), result_mode)
 			    >> count)
 		   && 0 == (nonzero_bits (XEXP (varop, 0), result_mode)
@@ -10458,7 +10457,7 @@ simplify_shift_const_1 (enum rtx_code code, enum machine_mode result_mode,
 	  if ((STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1)
 	      && GET_CODE (XEXP (varop, 0)) == ASHIFTRT
-	      && count == (GET_MODE_BITSIZE (GET_MODE (varop)) - 1)
+	      && count == (GET_MODE_PRECISION (GET_MODE (varop)) - 1)
 	      && (code == LSHIFTRT || code == ASHIFTRT)
 	      && CONST_INT_P (XEXP (XEXP (varop, 0), 1))
 	      && INTVAL (XEXP (XEXP (varop, 0), 1)) == count
@@ -10482,8 +10481,8 @@ simplify_shift_const_1 (enum rtx_code code, enum machine_mode result_mode,
 	      && GET_CODE (XEXP (varop, 0)) == LSHIFTRT
 	      && CONST_INT_P (XEXP (XEXP (varop, 0), 1))
 	      && (INTVAL (XEXP (XEXP (varop, 0), 1))
-		  >= (GET_MODE_BITSIZE (GET_MODE (XEXP (varop, 0)))
-		      - GET_MODE_BITSIZE (GET_MODE (varop)))))
+		  >= (GET_MODE_PRECISION (GET_MODE (XEXP (varop, 0)))
+		      - GET_MODE_PRECISION (GET_MODE (varop)))))
 	    {
 	      rtx varop_inner = XEXP (varop, 0);
@@ -10555,7 +10554,7 @@ simplify_shift_const_1 (enum rtx_code code, enum machine_mode result_mode,
   if (outer_op != UNKNOWN)
     {
       if (GET_RTX_CLASS (outer_op) != RTX_UNARY
-	  && GET_MODE_BITSIZE (result_mode) < HOST_BITS_PER_WIDE_INT)
+	  && GET_MODE_PRECISION (result_mode) < HOST_BITS_PER_WIDE_INT)
 	outer_const = trunc_int_for_mode (outer_const, result_mode);
 
       if (outer_op == AND)
@@ -10857,7 +10856,7 @@ static enum rtx_code
 simplify_compare_const (enum rtx_code code, rtx op0, rtx *pop1)
 {
   enum machine_mode mode = GET_MODE (op0);
-  unsigned int mode_width = GET_MODE_BITSIZE (mode);
+  unsigned int mode_width = GET_MODE_PRECISION (mode);
   HOST_WIDE_INT const_op = INTVAL (*pop1);
 
   /* Get the constant we are comparing against and turn off all bits
@@ -11070,8 +11069,8 @@ simplify_comparison (enum rtx_code code, rtx *pop0, rtx *pop1)
 	  && XEXP (op0, 1) == XEXP (XEXP (op0, 0), 1)
 	  && XEXP (op0, 1) == XEXP (XEXP (op1, 0), 1)
 	  && (INTVAL (XEXP (op0, 1))
-	      == (GET_MODE_BITSIZE (GET_MODE (op0))
-		  - (GET_MODE_BITSIZE
+	      == (GET_MODE_PRECISION (GET_MODE (op0))
+		  - (GET_MODE_PRECISION
 		     (GET_MODE (SUBREG_REG (XEXP (XEXP (op0, 0), 0))))))))
 	{
 	  op0 = SUBREG_REG (XEXP (XEXP (op0, 0), 0));
@@ -11084,7 +11083,7 @@ simplify_comparison (enum rtx_code code, rtx *pop0, rtx *pop1)
 	 this shift are known to be zero for both inputs and if the type of
 	 comparison is compatible with the shift.  */
       if (GET_CODE (op0) == GET_CODE (op1)
-	  && GET_MODE_BITSIZE (GET_MODE (op0)) <= HOST_BITS_PER_WIDE_INT
+	  && HWI_COMPUTABLE_MODE_P (GET_MODE(op0))
 	  && ((GET_CODE (op0) == ROTATE && (code == NE || code == EQ))
 	      || ((GET_CODE (op0) == LSHIFTRT || GET_CODE (op0) == ASHIFT)
 		  && (code != GT && code != LT && code != GE && code != LE))
@@ -11139,7 +11138,7 @@ simplify_comparison (enum rtx_code code, rtx *pop0, rtx *pop1)
 	      && GET_CODE (inner_op1) == SUBREG
 	      && (GET_MODE (SUBREG_REG (inner_op0))
 		  == GET_MODE (SUBREG_REG (inner_op1)))
-	      && (GET_MODE_BITSIZE (GET_MODE (SUBREG_REG (inner_op0)))
+	      && (GET_MODE_PRECISION (GET_MODE (SUBREG_REG (inner_op0)))
 		  <= HOST_BITS_PER_WIDE_INT)
 	      && (0 == ((~c0) & nonzero_bits (SUBREG_REG (inner_op0),
 					      GET_MODE (SUBREG_REG (inner_op0)))))
@@ -11202,7 +11201,7 @@ simplify_comparison (enum rtx_code code, rtx *pop0, rtx *pop1)
   while (CONST_INT_P (op1))
     {
       enum machine_mode mode = GET_MODE (op0);
-      unsigned int mode_width = GET_MODE_BITSIZE (mode);
+      unsigned int mode_width = GET_MODE_PRECISION (mode);
       unsigned HOST_WIDE_INT mask = GET_MODE_MASK (mode);
       int equality_comparison_p;
       int sign_bit_comparison_p;
@@ -11233,11 +11232,10 @@ simplify_comparison (enum rtx_code code, rtx *pop0, rtx *pop1)
       /* If this is a sign bit comparison and we can do arithmetic in
 	 MODE, say that we will only be needing the sign bit of OP0.  */
-      if (sign_bit_comparison_p
-	  && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT)
+      if (sign_bit_comparison_p && HWI_COMPUTABLE_MODE_P (mode))
 	op0 = force_to_mode (op0, mode,
 			     (unsigned HOST_WIDE_INT) 1
-			     << (GET_MODE_BITSIZE (mode) - 1),
+			     << (GET_MODE_PRECISION (mode) - 1),
 			     0);
 
       /* Now try cases based on the opcode of OP0.  If none of the cases
@@ -11268,7 +11266,7 @@ simplify_comparison (enum rtx_code code, rtx *pop0, rtx *pop1)
 	      else
 		{
 		  mode = new_mode;
-		  i = (GET_MODE_BITSIZE (mode) - 1 - i);
+		  i = (GET_MODE_PRECISION (mode) - 1 - i);
 		}
 	    }
@@ -11432,7 +11430,7 @@ simplify_comparison (enum rtx_code code, rtx *pop0, rtx *pop1)
 	  if (mode_width <= HOST_BITS_PER_WIDE_INT
 	      && subreg_lowpart_p (op0)
-	      && GET_MODE_BITSIZE (GET_MODE (SUBREG_REG (op0))) > mode_width
+	      && GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op0))) > mode_width
 	      && GET_CODE (SUBREG_REG (op0)) == PLUS
 	      && CONST_INT_P (XEXP (SUBREG_REG (op0), 1)))
 	    {
@@ -11452,14 +11450,14 @@ simplify_comparison (enum rtx_code code, rtx *pop0, rtx *pop1)
 		      /* (A - C1) sign-extends if it is positive and 1-extends
 			 if it is negative, C2 both sign- and 1-extends.  */
 		      || (num_sign_bit_copies (a, inner_mode)
-			  > (unsigned int) (GET_MODE_BITSIZE (inner_mode)
+			  > (unsigned int) (GET_MODE_PRECISION (inner_mode)
 					    - mode_width)
 			  && const_op < 0)))
 		  || ((unsigned HOST_WIDE_INT) c1
 		      < (unsigned HOST_WIDE_INT) 1 << (mode_width - 2)
 		      /* (A - C1) always sign-extends, like C2.  */
 		      && num_sign_bit_copies (a, inner_mode)
-			 > (unsigned int) (GET_MODE_BITSIZE (inner_mode)
+			 > (unsigned int) (GET_MODE_PRECISION (inner_mode)
 					   - (mode_width - 1))))
 		{
 		  op0 = SUBREG_REG (op0);
@@ -11470,7 +11468,7 @@ simplify_comparison (enum rtx_code code, rtx *pop0, rtx *pop1)
 	  /* If the inner mode is narrower and we are extracting the low part,
 	     we can treat the SUBREG as if it were a ZERO_EXTEND.  */
 	  if (subreg_lowpart_p (op0)
-	      && GET_MODE_BITSIZE (GET_MODE (SUBREG_REG (op0))) < mode_width)
+	      && GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op0))) < mode_width)
 	    /* Fall through */ ;
 	  else
 	    break;
@@ -11481,7 +11479,7 @@ simplify_comparison (enum rtx_code code, rtx *pop0, rtx *pop1)
 	  mode = GET_MODE (XEXP (op0, 0));
 	  if (mode != VOIDmode && GET_MODE_CLASS (mode) == MODE_INT
 	      && (unsigned_comparison_p || equality_comparison_p)
-	      && (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT)
+	      && HWI_COMPUTABLE_MODE_P (mode)
 	      && ((unsigned HOST_WIDE_INT) const_op < GET_MODE_MASK (mode))
 	      && have_insn_for (COMPARE, mode))
 	    {
@@ -11719,14 +11717,14 @@ simplify_comparison (enum rtx_code code, rtx *pop0, rtx *pop1)
 		 the code has been changed.  */
 	      && (0
 #ifdef WORD_REGISTER_OPERATIONS
-		  || (mode_width > GET_MODE_BITSIZE (tmode)
+		  || (mode_width > GET_MODE_PRECISION (tmode)
 		      && mode_width <= BITS_PER_WORD)
 #endif
-		  || (mode_width <= GET_MODE_BITSIZE (tmode)
+		  || (mode_width <= GET_MODE_PRECISION (tmode)
 		      && subreg_lowpart_p (XEXP (op0, 0))))
 	      && CONST_INT_P (XEXP (op0, 1))
 	      && mode_width <= HOST_BITS_PER_WIDE_INT
-	      && GET_MODE_BITSIZE (tmode) <= HOST_BITS_PER_WIDE_INT
+	      && HWI_COMPUTABLE_MODE_P (tmode)
 	      && ((c1 = INTVAL (XEXP (op0, 1))) & ~mask) == 0
 	      && (c1 & ~GET_MODE_MASK (tmode)) == 0
 	      && c1 != mask
@@ -11765,7 +11763,7 @@ simplify_comparison (enum rtx_code code, rtx *pop0, rtx *pop1)
 	      || (GET_CODE (shift_op) == XOR
 		  && CONST_INT_P (XEXP (shift_op, 1))
 		  && CONST_INT_P (shift_count)
-		  && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
+		  && HWI_COMPUTABLE_MODE_P (mode)
 		  && (UINTVAL (XEXP (shift_op, 1))
 		      == (unsigned HOST_WIDE_INT) 1
 			 << INTVAL (shift_count))))
@@ -11989,7 +11987,7 @@ simplify_comparison (enum rtx_code code, rtx *pop0, rtx *pop1)
 		  op1 = gen_lowpart (GET_MODE (op0), op1);
 		}
 	    }
-	  else if ((GET_MODE_BITSIZE (GET_MODE (SUBREG_REG (op0)))
+	  else if ((GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op0)))
 		    <= HOST_BITS_PER_WIDE_INT)
 		   && (nonzero_bits (SUBREG_REG (op0),
 				     GET_MODE (SUBREG_REG (op0)))
@@ -12014,8 +12012,7 @@ simplify_comparison (enum rtx_code code, rtx *pop0, rtx *pop1)
       && GET_MODE_SIZE (mode) < UNITS_PER_WORD
       && ! have_insn_for (COMPARE, mode))
     for (tmode = GET_MODE_WIDER_MODE (mode);
-	 (tmode != VOIDmode
-	  && GET_MODE_BITSIZE (tmode) <= HOST_BITS_PER_WIDE_INT);
+	 (tmode != VOIDmode && HWI_COMPUTABLE_MODE_P (tmode));
 	 tmode = GET_MODE_WIDER_MODE (tmode))
       if (have_insn_for (COMPARE, tmode))
 	{
@@ -12026,7 +12023,7 @@ simplify_comparison (enum rtx_code code, rtx *pop0, rtx *pop1)
 	     a paradoxical subreg to extend OP0.  */
 	  if (op1 == const0_rtx && (code == LT || code == GE)
-	      && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT)
+	      && HWI_COMPUTABLE_MODE_P (mode))
 	    {
 	      op0 = simplify_gen_binary (AND, tmode,
 					 gen_lowpart (tmode, op0),
@@ -12052,11 +12049,11 @@ simplify_comparison (enum rtx_code code, rtx *pop0, rtx *pop1)
 	  if (zero_extended
 	      || ((num_sign_bit_copies (op0, tmode)
-		   > (unsigned int) (GET_MODE_BITSIZE (tmode)
-				     - GET_MODE_BITSIZE (mode)))
+		   > (unsigned int) (GET_MODE_PRECISION (tmode)
+				     - GET_MODE_PRECISION (mode)))
 		  && (num_sign_bit_copies (op1, tmode)
-		      > (unsigned int) (GET_MODE_BITSIZE (tmode)
-					- GET_MODE_BITSIZE (mode)))))
+		      > (unsigned int) (GET_MODE_PRECISION (tmode)
+					- GET_MODE_PRECISION (mode)))))
 	    {
 	      /* If OP0 is an AND and we don't have an AND in MODE either,
 		 make a new AND in the proper mode.  */
*/ @@ -12318,7 +12315,7 @@ record_value_for_reg (rtx reg, rtx insn, rtx value) subst_low_luid = DF_INSN_LUID (insn); rsp->last_set_mode = mode; if (GET_MODE_CLASS (mode) == MODE_INT - && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT) + && HWI_COMPUTABLE_MODE_P (mode)) mode = nonzero_bits_mode; rsp->last_set_nonzero_bits = nonzero_bits (value, mode); rsp->last_set_sign_bit_copies @@ -12355,7 +12352,7 @@ record_dead_and_set_regs_1 (rtx dest, const_rtx setter, void *data) else if (GET_CODE (setter) == SET && GET_CODE (SET_DEST (setter)) == SUBREG && SUBREG_REG (SET_DEST (setter)) == dest - && GET_MODE_BITSIZE (GET_MODE (dest)) <= BITS_PER_WORD + && GET_MODE_PRECISION (GET_MODE (dest)) <= BITS_PER_WORD && subreg_lowpart_p (SET_DEST (setter))) record_value_for_reg (dest, record_dead_insn, gen_lowpart (GET_MODE (dest), @@ -12452,7 +12449,7 @@ record_promoted_value (rtx insn, rtx subreg) unsigned int regno = REGNO (SUBREG_REG (subreg)); enum machine_mode mode = GET_MODE (subreg); - if (GET_MODE_BITSIZE (mode) > HOST_BITS_PER_WIDE_INT) + if (GET_MODE_PRECISION (mode) > HOST_BITS_PER_WIDE_INT) return; for (links = LOG_LINKS (insn); links;) @@ -13276,8 +13273,20 @@ distribute_notes (rtx notes, rtx from_insn, rtx i3, rtx i2, rtx elim_i2, } break; + case REG_ARGS_SIZE: + /* ??? How to distribute between i3-i1. Assume i3 contains the + entire adjustment. Assert i3 contains at least some adjust. */ + if (!noop_move_p (i3)) + { + int old_size, args_size = INTVAL (XEXP (note, 0)); + old_size = fixup_args_size_notes (PREV_INSN (i3), i3, args_size); + gcc_assert (old_size != args_size); + } + break; + case REG_NORETURN: case REG_SETJMP: + case REG_TM: /* These notes must remain with the call. It should not be possible for both I2 and I3 to be a call. */ if (CALL_P (i3))