+/* Return true if BF is a bit-field reference that we can handle like a
+ scalar: its base object either is a gimple register, or has a
+ non-BLKmode type and either has no side effects or is no wider than a
+ word. */
+
+static bool
+scalar_bitfield_p (tree bf)
+{
+ return (TREE_CODE (bf) == BIT_FIELD_REF
+ && (is_gimple_reg (TREE_OPERAND (bf, 0))
+ || (TYPE_MODE (TREE_TYPE (TREE_OPERAND (bf, 0))) != BLKmode
+ && (!TREE_SIDE_EFFECTS (TREE_OPERAND (bf, 0))
+ || (GET_MODE_BITSIZE (TYPE_MODE (TREE_TYPE
+ (TREE_OPERAND (bf, 0))))
+ <= BITS_PER_WORD)))));
+}
+
+/* Create a statement sequence that assigns SRC to DST. */
+
+static gimple_seq
+sra_build_assignment (tree dst, tree src)
+{
+ gimple stmt;
+ gimple_seq seq = NULL;
+ /* Turning BIT_FIELD_REFs into bit operations enables other passes
+ to do a much better job at optimizing the code.
+ From dst = BIT_FIELD_REF <var, sz, off> we produce
+
+ SR.1 = (scalar type) var;
+ SR.2 = SR.1 >> off;
+ SR.3 = SR.2 & ((1 << sz) - 1);
+ ... possible sign extension of SR.3 ...
+ dst = (destination type) SR.3;
+ */
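+ /* For instance (a hypothetical little-endian case): for a 4-bit
+ field at bit offset 8 of a 32-bit base, dst = BIT_FIELD_REF
+ <var, 4, 8> expands to roughly
+
+ SR.1 = (unsigned int) var;
+ SR.2 = SR.1 >> 8;
+ SR.3 = SR.2 & 15;
+ dst = (destination type) SR.3; */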
+ if (scalar_bitfield_p (src))
+ {
+ tree var, shift, width;
+ tree utype, stype, stmp, utmp, dtmp;
+ bool unsignedp = (INTEGRAL_TYPE_P (TREE_TYPE (src))
+ ? TYPE_UNSIGNED (TREE_TYPE (src)) : true);
+
+ var = TREE_OPERAND (src, 0);
+ width = TREE_OPERAND (src, 1);
+ /* The offset needs to be adjusted to a right shift quantity
+ depending on the endianness: on big-endian targets bit offsets
+ count from the most significant bit, so the shift amount becomes
+ size - (offset + width). */
+ if (BYTES_BIG_ENDIAN)
+ {
+ tree tmp = size_binop (PLUS_EXPR, width, TREE_OPERAND (src, 2));
+ shift = size_binop (MINUS_EXPR, TYPE_SIZE (TREE_TYPE (var)), tmp);
+ }
+ else
+ shift = TREE_OPERAND (src, 2);
+
+ /* In weird cases we have non-integral types for the source or
+ destination object.
+ ??? For unknown reasons we also want an unsigned scalar type. */
+ stype = TREE_TYPE (var);
+ if (!INTEGRAL_TYPE_P (stype))
+ stype = lang_hooks.types.type_for_size (TREE_INT_CST_LOW
+ (TYPE_SIZE (stype)), 1);
+ else if (!TYPE_UNSIGNED (stype))
+ stype = unsigned_type_for (stype);
+
+ utype = TREE_TYPE (dst);
+ if (!INTEGRAL_TYPE_P (utype))
+ utype = lang_hooks.types.type_for_size (TREE_INT_CST_LOW
+ (TYPE_SIZE (utype)), 1);
+ else if (!TYPE_UNSIGNED (utype))
+ utype = unsigned_type_for (utype);
+
+ stmp = make_rename_temp (stype, "SR");
+
+ /* Convert the base var of the BIT_FIELD_REF to the scalar type
+ we use for computation if we cannot use it directly. */
+ if (!useless_type_conversion_p (stype, TREE_TYPE (var)))
+ {
+ if (INTEGRAL_TYPE_P (TREE_TYPE (var)))
+ stmt = gimple_build_assign (stmp, fold_convert (stype, var));
+ else
+ stmt = gimple_build_assign (stmp, fold_build1 (VIEW_CONVERT_EXPR,
+ stype, var));
+ gimple_seq_add_stmt (&seq, stmt);
+ var = stmp;
+ }
+
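+ /* Do the shifting, if it needs doing. */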
+ if (!integer_zerop (shift))
+ {
+ stmt = gimple_build_assign (stmp, fold_build2 (RSHIFT_EXPR, stype,
+ var, shift));
+ gimple_seq_add_stmt (&seq, stmt);
+ var = stmp;
+ }
+
+ /* If we need a masking operation, produce one. */
+ if (TREE_INT_CST_LOW (width) == TYPE_PRECISION (stype))
+ unsignedp = true;
+ else
+ {
+ tree one = build_int_cst_wide (stype, 1, 0);
+ tree mask = int_const_binop (LSHIFT_EXPR, one, width, 0);
+ mask = int_const_binop (MINUS_EXPR, mask, one, 0);
+
+ stmt = gimple_build_assign (stmp, fold_build2 (BIT_AND_EXPR, stype,
+ var, mask));
+ gimple_seq_add_stmt (&seq, stmt);
+ var = stmp;
+ }
+
+ /* After shifting and masking, convert to the target type. */
+ utmp = stmp;
+ if (!useless_type_conversion_p (utype, stype))
+ {
+ utmp = make_rename_temp (utype, "SR");
+
+ stmt = gimple_build_assign (utmp, fold_convert (utype, var));
+ gimple_seq_add_stmt (&seq, stmt);
+
+ var = utmp;
+ }
+
+ /* Perform sign extension, if required, using the identity
+ sign_extend (x) == (x ^ signbit) - signbit, where signbit is
+ 1 << (width - 1): the XOR flips the field's sign bit, and the
+ subtraction then propagates it through the upper bits.
+ ??? This should never be necessary. */
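+ /* For example (hypothetical values): with width 4, signbit is 8;
+ the field value x = 0b1010 (10 unsigned) sign-extends as
+ (10 ^ 8) - 8 == 2 - 8 == -6. */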
+ if (!unsignedp)
+ {
+ tree signbit = int_const_binop (LSHIFT_EXPR,
+ build_int_cst_wide (utype, 1, 0),
+ size_binop (MINUS_EXPR, width,
+ bitsize_int (1)), 0);
+
+ stmt = gimple_build_assign (utmp, fold_build2 (BIT_XOR_EXPR, utype,
+ var, signbit));
+ gimple_seq_add_stmt (&seq, stmt);
+
+ stmt = gimple_build_assign (utmp, fold_build2 (MINUS_EXPR, utype,
+ utmp, signbit));
+ gimple_seq_add_stmt (&seq, stmt);
+
+ var = utmp;
+ }
+
+ /* fold_build3 (BIT_FIELD_REF, ...) sometimes returns a cast. */
+ STRIP_NOPS (dst);
+
+ /* Finally, move and convert to the destination. */
+ if (!useless_type_conversion_p (TREE_TYPE (dst), TREE_TYPE (var)))
+ {
+ if (INTEGRAL_TYPE_P (TREE_TYPE (dst)))
+ var = fold_convert (TREE_TYPE (dst), var);
+ else
+ var = fold_build1 (VIEW_CONVERT_EXPR, TREE_TYPE (dst), var);
+
+ /* If the destination is not a register the conversion needs
+ to be a separate statement. */
+ if (!is_gimple_reg (dst))
+ {
+ dtmp = make_rename_temp (TREE_TYPE (dst), "SR");
+ stmt = gimple_build_assign (dtmp, var);
+ gimple_seq_add_stmt (&seq, stmt);
+ var = dtmp;
+ }
+ }
+ stmt = gimple_build_assign (dst, var);
+ gimple_seq_add_stmt (&seq, stmt);
+
+ return seq;
+ }
+
+ /* fold_build3 (BIT_FIELD_REF, ...) sometimes returns a cast. */
+ if (CONVERT_EXPR_P (dst))
+ {
+ STRIP_NOPS (dst);
+ src = fold_convert (TREE_TYPE (dst), src);
+ }
+ /* It was hoped that we could perform some type sanity checking
+ here, but since front-ends can emit accesses of fields in types
+ different from their nominal types and copy structures containing
+ them as a whole, we'd have to handle such differences here.
+ Since such accesses under different types require compatibility
+ anyway, there's little point in making tests and/or adding
+ conversions to ensure the types of src and dst are the same.
+ So we just assume type differences at this point are ok.
+ The only exception we make here is for pointer types, which can
+ differ e.g. between structurally equal but non-identical
+ RECORD_TYPEs. */
+ else if (POINTER_TYPE_P (TREE_TYPE (dst))
+ && !useless_type_conversion_p (TREE_TYPE (dst), TREE_TYPE (src)))
+ src = fold_convert (TREE_TYPE (dst), src);
+
+ stmt = gimple_build_assign (dst, src);
+ gimple_seq_add_stmt (&seq, stmt);
+ return seq;
+}
+
+/* BIT_FIELD_REFs must not be shared. sra_build_elt_assignment()
+ takes care of assignments, but we must create copies for uses. */
+#define REPLDUP(t) (TREE_CODE (t) != BIT_FIELD_REF ? (t) : unshare_expr (t))
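+/* (unshare_expr returns a copy of the tree, so each use gets its own
+ BIT_FIELD_REF node.) */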
+
+/* Emit an assignment from SRC to DST, but if DST is a scalarizable
+ BIT_FIELD_REF, turn it into bit operations. */
+
+static gimple_seq
+sra_build_bf_assignment (tree dst, tree src)
+{
+ tree var, type, utype, tmp, tmp2, tmp3;
+ gimple_seq seq;
+ gimple stmt;
+ tree cst, cst2, mask;
+ tree minshift, maxshift;
+
+ if (TREE_CODE (dst) != BIT_FIELD_REF)
+ return sra_build_assignment (dst, src);
+
+ var = TREE_OPERAND (dst, 0);
+
+ if (!scalar_bitfield_p (dst))
+ return sra_build_assignment (REPLDUP (dst), src);
+
+ seq = NULL;
+
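+ /* For BIT_FIELD_REF <var, sz, off> = src we produce a
+ read-modify-write sequence, roughly (a sketch; the exact
+ temporaries and conversions depend on the types involved, and OFF
+ is first adjusted for endianness as below):
+
+ MASK = ~(((1 << sz) - 1) << off);
+ SR.1 = (unsigned type) var;
+ SR.2 = SR.1 & MASK;
+ SR.3 = (unsigned type) src << off;
+ var = SR.2 | SR.3; */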
+ cst = fold_convert (bitsizetype, TREE_OPERAND (dst, 2));
+ cst2 = size_binop (PLUS_EXPR,
+ fold_convert (bitsizetype, TREE_OPERAND (dst, 1)),
+ cst);
+
+ if (BYTES_BIG_ENDIAN)
+ {
+ maxshift = size_binop (MINUS_EXPR, TYPE_SIZE (TREE_TYPE (var)), cst);
+ minshift = size_binop (MINUS_EXPR, TYPE_SIZE (TREE_TYPE (var)), cst2);
+ }
+ else
+ {
+ maxshift = cst2;
+ minshift = cst;
+ }
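+ /* [MINSHIFT, MAXSHIFT) now delimits the field's bits counted from
+ the least significant bit of VAR, regardless of endianness. */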
+
+ type = TREE_TYPE (var);
+ if (!INTEGRAL_TYPE_P (type))
+ type = lang_hooks.types.type_for_size
+ (TREE_INT_CST_LOW (TYPE_SIZE (TREE_TYPE (var))), 1);
+ if (TYPE_UNSIGNED (type))
+ utype = type;
+ else
+ utype = unsigned_type_for (type);
+
+ mask = build_int_cst_wide (utype, 1, 0);
+ if (TREE_INT_CST_LOW (maxshift) == TYPE_PRECISION (utype))
+ cst = build_int_cst_wide (utype, 0, 0);
+ else
+ cst = int_const_binop (LSHIFT_EXPR, mask, maxshift, true);
+ if (integer_zerop (minshift))
+ cst2 = mask;
+ else
+ cst2 = int_const_binop (LSHIFT_EXPR, mask, minshift, true);
+ mask = int_const_binop (MINUS_EXPR, cst, cst2, true);
+ mask = fold_build1 (BIT_NOT_EXPR, utype, mask);
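+ /* MASK now has zeros in the field's bits and ones elsewhere. */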
+
+ if (TYPE_MAIN_VARIANT (utype) != TYPE_MAIN_VARIANT (TREE_TYPE (var))
+ && !integer_zerop (mask))
+ {
+ tmp = var;
+ if (!is_gimple_variable (tmp))
+ tmp = unshare_expr (var);
+
+ tmp2 = make_rename_temp (utype, "SR");
+
+ if (INTEGRAL_TYPE_P (TREE_TYPE (var)))
+ stmt = gimple_build_assign (tmp2, fold_convert (utype, tmp));
+ else
+ stmt = gimple_build_assign (tmp2, fold_build1 (VIEW_CONVERT_EXPR,
+ utype, tmp));
+ gimple_seq_add_stmt (&seq, stmt);
+ }
+ else
+ tmp2 = var;
+
+ if (!integer_zerop (mask))
+ {
+ tmp = make_rename_temp (utype, "SR");
+ stmt = gimple_build_assign (tmp, fold_build2 (BIT_AND_EXPR, utype,
+ tmp2, mask));
+ gimple_seq_add_stmt (&seq, stmt);
+ }
+ else
+ tmp = mask;
+
+ if (is_gimple_reg (src) && INTEGRAL_TYPE_P (TREE_TYPE (src)))
+ tmp2 = src;
+ else if (INTEGRAL_TYPE_P (TREE_TYPE (src)))
+ {
+ gimple_seq tmp_seq;
+ tmp2 = make_rename_temp (TREE_TYPE (src), "SR");
+ tmp_seq = sra_build_assignment (tmp2, src);
+ gimple_seq_add_seq (&seq, tmp_seq);
+ }
+ else
+ {
+ gimple_seq tmp_seq;
+ tmp2 = make_rename_temp
+ (lang_hooks.types.type_for_size
+ (TREE_INT_CST_LOW (TYPE_SIZE (TREE_TYPE (src))),
+ 1), "SR");
+ tmp_seq = sra_build_assignment (tmp2, fold_build1 (VIEW_CONVERT_EXPR,
+ TREE_TYPE (tmp2), src));
+ gimple_seq_add_seq (&seq, tmp_seq);
+ }
+
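+ /* If the source value is signed, mask away the bits above the
+ field's width so that sign extension cannot leak into the bits
+ cleared above. */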
+ if (!TYPE_UNSIGNED (TREE_TYPE (tmp2)))
+ {
+ gimple_seq tmp_seq;
+ tree ut = unsigned_type_for (TREE_TYPE (tmp2));
+ tmp3 = make_rename_temp (ut, "SR");
+ tmp2 = fold_convert (ut, tmp2);
+ tmp_seq = sra_build_assignment (tmp3, tmp2);
+ gimple_seq_add_seq (&seq, tmp_seq);
+
+ tmp2 = fold_build1 (BIT_NOT_EXPR, utype, mask);
+ tmp2 = int_const_binop (RSHIFT_EXPR, tmp2, minshift, true);
+ tmp2 = fold_convert (ut, tmp2);
+ tmp2 = fold_build2 (BIT_AND_EXPR, ut, tmp3, tmp2);
+
+ if (tmp3 != tmp2)
+ {
+ tmp3 = make_rename_temp (ut, "SR");
+ tmp_seq = sra_build_assignment (tmp3, tmp2);
+ gimple_seq_add_seq (&seq, tmp_seq);
+ }
+
+ tmp2 = tmp3;
+ }
+
+ if (TYPE_MAIN_VARIANT (TREE_TYPE (tmp2)) != TYPE_MAIN_VARIANT (utype))
+ {
+ gimple_seq tmp_seq;
+ tmp3 = make_rename_temp (utype, "SR");
+ tmp2 = fold_convert (utype, tmp2);
+ tmp_seq = sra_build_assignment (tmp3, tmp2);
+ gimple_seq_add_seq (&seq, tmp_seq);
+ tmp2 = tmp3;
+ }
+
+ if (!integer_zerop (minshift))
+ {
+ tmp3 = make_rename_temp (utype, "SR");
+ stmt = gimple_build_assign (tmp3, fold_build2 (LSHIFT_EXPR, utype,
+ tmp2, minshift));
+ gimple_seq_add_stmt (&seq, stmt);
+ tmp2 = tmp3;
+ }
+
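+ /* OR the shifted value into the cleared base object and store the
+ result back into VAR. */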
+ if (utype != TREE_TYPE (var))
+ tmp3 = make_rename_temp (utype, "SR");
+ else
+ tmp3 = var;
+ stmt = gimple_build_assign (tmp3, fold_build2 (BIT_IOR_EXPR, utype,
+ tmp, tmp2));
+ gimple_seq_add_stmt (&seq, stmt);
+
+ if (tmp3 != var)
+ {
+ if (TREE_TYPE (var) == type)
+ stmt = gimple_build_assign (var, fold_convert (type, tmp3));
+ else
+ stmt = gimple_build_assign (var, fold_build1 (VIEW_CONVERT_EXPR,
+ TREE_TYPE (var), tmp3));
+ gimple_seq_add_stmt (&seq, stmt);
+ }
+
+ return seq;
+}
+
+/* Expand an assignment of SRC to the scalarized representation of
+ ELT. If it is a field group, try to widen the assignment to cover
+ the full variable. */
+
+static gimple_seq
+sra_build_elt_assignment (struct sra_elt *elt, tree src)
+{
+ tree dst = elt->replacement;
+ tree var, tmp, cst, cst2;
+ gimple stmt;
+ gimple_seq seq;
+
+ if (TREE_CODE (dst) != BIT_FIELD_REF
+ || !elt->in_bitfld_block)
+ return sra_build_assignment (REPLDUP (dst), src);
+
+ var = TREE_OPERAND (dst, 0);
+
+ /* Try to widen the assignment to the entire variable.
+ The source must be a BIT_FIELD_REF as well, such that for
+ BIT_FIELD_REF<d,sz,dp> = BIT_FIELD_REF<s,sz,sp>
+ the conditions are by design met that let us turn it into
+ d = BIT_FIELD_REF<s,dw,sp-dp>. */
+ if (elt->in_bitfld_block == 2
+ && TREE_CODE (src) == BIT_FIELD_REF)
+ {
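+ /* CST is the width of the whole destination variable, and
+ CST2 = sp - dp is where that widened destination starts within
+ the source object. */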
+ tmp = src;
+ cst = TYPE_SIZE (TREE_TYPE (var));
+ cst2 = size_binop (MINUS_EXPR, TREE_OPERAND (src, 2),
+ TREE_OPERAND (dst, 2));
+
+ src = TREE_OPERAND (src, 0);
+
+ /* Avoid full-width bit-fields: when the access covers the whole
+ source object, a plain conversion does the job. */
+ if (integer_zerop (cst2)
+ && tree_int_cst_equal (cst, TYPE_SIZE (TREE_TYPE (src))))
+ {
+ if (INTEGRAL_TYPE_P (TREE_TYPE (src))
+ && !TYPE_UNSIGNED (TREE_TYPE (src)))
+ src = fold_convert (unsigned_type_for (TREE_TYPE (src)), src);
+
+ /* If a single conversion won't do, we'll need a statement
+ sequence. */
+ if (TYPE_MAIN_VARIANT (TREE_TYPE (var))
+ != TYPE_MAIN_VARIANT (TREE_TYPE (src)))
+ {
+ gimple_seq tmp_seq;
+ seq = NULL;
+
+ if (!INTEGRAL_TYPE_P (TREE_TYPE (src)))
+ src = fold_build1 (VIEW_CONVERT_EXPR,
+ lang_hooks.types.type_for_size
+ (TREE_INT_CST_LOW
+ (TYPE_SIZE (TREE_TYPE (src))),
+ 1), src);
+ gcc_assert (TYPE_UNSIGNED (TREE_TYPE (src)));
+
+ tmp = make_rename_temp (TREE_TYPE (src), "SR");
+ stmt = gimple_build_assign (tmp, src);
+ gimple_seq_add_stmt (&seq, stmt);
+
+ tmp_seq = sra_build_assignment (var,
+ fold_convert (TREE_TYPE (var),
+ tmp));
+ gimple_seq_add_seq (&seq, tmp_seq);
+
+ return seq;
+ }
+
+ src = fold_convert (TREE_TYPE (var), src);
+ }
+ else
+ {
+ src = fold_convert (TREE_TYPE (var), tmp);
+ }
+
+ return sra_build_assignment (var, src);
+ }
+
+ return sra_build_bf_assignment (dst, src);
+}
+