+ if (modifier != EXPAND_WRITE)
+ {
+ tree t;
+
+ t = fold_read_from_constant_string (exp);
+ if (t)
+ return expand_expr (t, target, tmode, modifier);
+ }
+
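+ /* Pick up the address space of the pointed-to object, so that
+ the address arithmetic below is done in that space's address
+ mode rather than the default one. */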
+ if (POINTER_TYPE_P (TREE_TYPE (exp1)))
+ {
+ as = TYPE_ADDR_SPACE (TREE_TYPE (TREE_TYPE (exp1)));
+ address_mode = targetm.addr_space.address_mode (as);
+ }
+
+ op0 = expand_expr (exp1, NULL_RTX, VOIDmode, EXPAND_SUM);
+ op0 = memory_address_addr_space (mode, op0, as);
+
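+ /* For ALIGN_INDIRECT_REF, force the natural alignment of TYPE by
+ masking off the low-order address bits: for a power-of-two
+ alignment, -align is exactly the required mask. */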
+ if (code == ALIGN_INDIRECT_REF)
+ {
+ int align = TYPE_ALIGN_UNIT (type);
+ op0 = gen_rtx_AND (address_mode, op0, GEN_INT (-align));
+ op0 = memory_address_addr_space (mode, op0, as);
+ }
+
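+ /* Wrap the legitimized address in a MEM and attach the alias,
+ alignment and address-space attributes taken from the tree. */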
+ temp = gen_rtx_MEM (mode, op0);
+
+ set_mem_attributes (temp, exp, 0);
+ set_mem_addr_space (temp, as);
+
+ /* Resolve the misalignment now, so that we don't have to remember
+ to resolve it later. Of course, this only works for reads. */
+ if (code == MISALIGNED_INDIRECT_REF)
+ {
+ int icode;
+ rtx reg, insn;
+
+ gcc_assert (modifier == EXPAND_NORMAL
+ || modifier == EXPAND_STACK_PARM);
+
+ /* The vectorizer should have already checked the mode. */
+ icode = optab_handler (movmisalign_optab, mode)->insn_code;
+ gcc_assert (icode != CODE_FOR_nothing);
+
+ /* We've already validated the memory, and we're creating a
+ new pseudo destination. The predicates really can't fail. */
+ reg = gen_reg_rtx (mode);
+
+ /* Nor can the insn generator. */
+ insn = GEN_FCN (icode) (reg, temp);
+ emit_insn (insn);
+
+ return reg;
+ }
+
+ return temp;
+ }
+
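+ /* A TARGET_MEM_REF describes a memory reference in a form already
+ matched to the target's addressing modes; rebuild the RTL address
+ from its components and wrap it in a MEM. */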
+ case TARGET_MEM_REF:
+ {
+ addr_space_t as = TYPE_ADDR_SPACE (TREE_TYPE (exp));
+ struct mem_address addr;
+
+ get_address_description (exp, &addr);
+ op0 = addr_for_mem_ref (&addr, as, true);
+ op0 = memory_address_addr_space (mode, op0, as);
+ temp = gen_rtx_MEM (mode, op0);
+ set_mem_attributes (temp, TMR_ORIGINAL (exp), 0);
+ set_mem_addr_space (temp, as);
+ }
+ return temp;
+
+ case ARRAY_REF:
+
+ {
+ tree array = treeop0;
+ tree index = treeop1;
+
+ /* Fold an expression like: "foo"[2].
+ This is not done in fold so it won't happen inside &.
+ Don't fold if this is for wide characters since it's too
+ difficult to do correctly and this is a very rare case. */
+
+ if (modifier != EXPAND_CONST_ADDRESS
+ && modifier != EXPAND_INITIALIZER
+ && modifier != EXPAND_MEMORY)
+ {
+ tree t = fold_read_from_constant_string (exp);
+
+ if (t)
+ return expand_expr (t, target, tmode, modifier);
+ }
+
+ /* If this is a constant index into a constant array,
+ just get the value from the array. Handle both the case where
+ we have an explicit constructor and the case where our operand
+ is a variable that was declared const. */
+
+ if (modifier != EXPAND_CONST_ADDRESS
+ && modifier != EXPAND_INITIALIZER
+ && modifier != EXPAND_MEMORY
+ && TREE_CODE (array) == CONSTRUCTOR
+ && ! TREE_SIDE_EFFECTS (array)
+ && TREE_CODE (index) == INTEGER_CST)
+ {
+ unsigned HOST_WIDE_INT ix;
+ tree field, value;
+
+ FOR_EACH_CONSTRUCTOR_ELT (CONSTRUCTOR_ELTS (array), ix,
+ field, value)
+ if (tree_int_cst_equal (field, index))
+ {
+ if (!TREE_SIDE_EFFECTS (value))
+ return expand_expr (fold (value), target, tmode, modifier);
+ break;
+ }
+ }
+
+ else if (optimize >= 1
+ && modifier != EXPAND_CONST_ADDRESS
+ && modifier != EXPAND_INITIALIZER
+ && modifier != EXPAND_MEMORY
+ && TREE_READONLY (array) && ! TREE_SIDE_EFFECTS (array)
+ && TREE_CODE (array) == VAR_DECL && DECL_INITIAL (array)
+ && TREE_CODE (DECL_INITIAL (array)) != ERROR_MARK
+ && targetm.binds_local_p (array))
+ {
+ if (TREE_CODE (index) == INTEGER_CST)
+ {
+ tree init = DECL_INITIAL (array);
+
+ if (TREE_CODE (init) == CONSTRUCTOR)
+ {
+ unsigned HOST_WIDE_INT ix;
+ tree field, value;
+
+ FOR_EACH_CONSTRUCTOR_ELT (CONSTRUCTOR_ELTS (init), ix,
+ field, value)
+ if (tree_int_cst_equal (field, index))
+ {
+ if (TREE_SIDE_EFFECTS (value))
+ break;
+
+ if (TREE_CODE (value) == CONSTRUCTOR)
+ {
+ /* If VALUE is a CONSTRUCTOR, this
+ optimization is only useful if
+ this doesn't store the CONSTRUCTOR
+ into memory. If it does, it is more
+ efficient to just load the data from
+ the array directly. */
+ rtx ret = expand_constructor (value, target,
+ modifier, true);
+ if (ret == NULL_RTX)
+ break;
+ }
+
+ return expand_expr (fold (value), target, tmode,
+ modifier);
+ }
+ }
+ else if (TREE_CODE (init) == STRING_CST)
+ {
+ tree index1 = index;
+ tree low_bound = array_ref_low_bound (exp);
+ index1 = fold_convert_loc (loc, sizetype,
+ treeop1);
+
+ /* Optimize the special case of a zero lower bound.
+
+ We convert the low_bound to sizetype to avoid some problems
+ with constant folding. (E.g. suppose the lower bound is 1,
+ and its mode is QI. Without the conversion, (ARRAY
+ +(INDEX-(unsigned char)1)) becomes ((ARRAY+(-(unsigned char)1))
+ +INDEX), which becomes (ARRAY+255+INDEX). Oops!) */
+
+ if (! integer_zerop (low_bound))
+ index1 = size_diffop_loc (loc, index1,
+ fold_convert_loc (loc, sizetype,
+ low_bound));
+
+ if (0 > compare_tree_int (index1,
+ TREE_STRING_LENGTH (init)))
+ {
+ tree type = TREE_TYPE (TREE_TYPE (init));
+ enum machine_mode mode = TYPE_MODE (type);
+
+ if (GET_MODE_CLASS (mode) == MODE_INT
+ && GET_MODE_SIZE (mode) == 1)
+ return gen_int_mode (TREE_STRING_POINTER (init)
+ [TREE_INT_CST_LOW (index1)],
+ mode);
+ }
+ }
+ }
+ }
+ }
+ goto normal_inner_ref;
+
+ case COMPONENT_REF:
+ /* If the operand is a CONSTRUCTOR, we can just extract the
+ appropriate field if it is present. */
+ if (TREE_CODE (treeop0) == CONSTRUCTOR)
+ {
+ unsigned HOST_WIDE_INT idx;
+ tree field, value;
+
+ FOR_EACH_CONSTRUCTOR_ELT (CONSTRUCTOR_ELTS (treeop0),
+ idx, field, value)
+ if (field == treeop1
+ /* We can normally use the value of the field in the
+ CONSTRUCTOR. However, if this is a bitfield in
+ an integral mode that we can fit in a HOST_WIDE_INT,
+ we must mask only the number of bits in the bitfield,
+ since this is done implicitly by the constructor. If
+ the bitfield does not meet either of those conditions,
+ we can't do this optimization. */
+ && (! DECL_BIT_FIELD (field)
+ || ((GET_MODE_CLASS (DECL_MODE (field)) == MODE_INT)
+ && (GET_MODE_BITSIZE (DECL_MODE (field))
+ <= HOST_BITS_PER_WIDE_INT))))
+ {
+ if (DECL_BIT_FIELD (field)
+ && modifier == EXPAND_STACK_PARM)
+ target = 0;
+ op0 = expand_expr (value, target, tmode, modifier);
+ if (DECL_BIT_FIELD (field))
+ {
+ HOST_WIDE_INT bitsize = TREE_INT_CST_LOW (DECL_SIZE (field));
+ enum machine_mode imode = TYPE_MODE (TREE_TYPE (field));
+
+ if (TYPE_UNSIGNED (TREE_TYPE (field)))
+ {
+ op1 = GEN_INT (((HOST_WIDE_INT) 1 << bitsize) - 1);
+ op0 = expand_and (imode, op0, op1, target);
+ }
+ else
+ {
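+ /* Sign-extend the field value: shift it to the top of
+ IMODE, then shift back down with an arithmetic right
+ shift. */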
+ tree count
+ = build_int_cst (NULL_TREE,
+ GET_MODE_BITSIZE (imode) - bitsize);
+
+ op0 = expand_shift (LSHIFT_EXPR, imode, op0, count,
+ target, 0);
+ op0 = expand_shift (RSHIFT_EXPR, imode, op0, count,
+ target, 0);
+ }
+ }
+
+ return op0;
+ }
+ }
+ goto normal_inner_ref;
+
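+ /* The remaining component references, and the cases above that
+ could not be handled specially, are expanded by decomposing the
+ reference with get_inner_reference and extracting the piece by
+ hand. */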
+ case BIT_FIELD_REF:
+ case ARRAY_RANGE_REF:
+ normal_inner_ref:
+ {
+ enum machine_mode mode1, mode2;
+ HOST_WIDE_INT bitsize, bitpos;
+ tree offset;
+ int volatilep = 0, must_force_mem;
+ tree tem = get_inner_reference (exp, &bitsize, &bitpos, &offset,
+ &mode1, &unsignedp, &volatilep, true);
+ rtx orig_op0, memloc;
+
+ /* If we got back the original object, something is wrong. Perhaps
+ we are evaluating an expression too early. In any event, don't
+ infinitely recurse. */
+ gcc_assert (tem != exp);
+
+ /* If TEM's type is a union of variable size, pass TARGET to the inner
+ computation, since it will need a temporary and TARGET is known
+ to suffice. This occurs in unchecked conversion in Ada. */
+ orig_op0 = op0
+ = expand_expr (tem,
+ (TREE_CODE (TREE_TYPE (tem)) == UNION_TYPE
+ && (TREE_CODE (TYPE_SIZE (TREE_TYPE (tem)))
+ != INTEGER_CST)
+ && modifier != EXPAND_STACK_PARM
+ ? target : NULL_RTX),
+ VOIDmode,
+ (modifier == EXPAND_INITIALIZER
+ || modifier == EXPAND_CONST_ADDRESS
+ || modifier == EXPAND_STACK_PARM)
+ ? modifier : EXPAND_NORMAL);
+
+ mode2
+ = CONSTANT_P (op0) ? TYPE_MODE (TREE_TYPE (tem)) : GET_MODE (op0);
+
+ /* If we have either an offset, a BLKmode result, or a reference
+ outside the underlying object, we must force it to memory.
+ Such a case can occur in Ada if we have unchecked conversion
+ of an expression from a scalar type to an aggregate type or
+ for an ARRAY_RANGE_REF whose type is BLKmode, or if we were
+ passed a partially uninitialized object or a view-conversion
+ to a larger size. */
+ must_force_mem = (offset
+ || mode1 == BLKmode
+ || bitpos + bitsize > GET_MODE_BITSIZE (mode2));
+
+ /* Handle CONCAT first. */
+ if (GET_CODE (op0) == CONCAT && !must_force_mem)
+ {
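+ /* A CONCAT holds its value in two parts, e.g. the real and
+ imaginary halves of a complex number. We can return the whole
+ value or either half directly when the reference lines up
+ exactly; anything else must go through memory. */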
+ if (bitpos == 0
+ && bitsize == GET_MODE_BITSIZE (GET_MODE (op0)))
+ return op0;
+ if (bitpos == 0
+ && bitsize == GET_MODE_BITSIZE (GET_MODE (XEXP (op0, 0)))
+ && bitsize)
+ {
+ op0 = XEXP (op0, 0);
+ mode2 = GET_MODE (op0);
+ }
+ else if (bitpos == GET_MODE_BITSIZE (GET_MODE (XEXP (op0, 0)))
+ && bitsize == GET_MODE_BITSIZE (GET_MODE (XEXP (op0, 1)))
+ && bitpos
+ && bitsize)
+ {
+ op0 = XEXP (op0, 1);
+ bitpos = 0;
+ mode2 = GET_MODE (op0);
+ }
+ else
+ /* Otherwise force into memory. */
+ must_force_mem = 1;
+ }
+
+ /* If this is a constant, put it in a register if it is a legitimate
+ constant and we don't need a memory reference. */
+ if (CONSTANT_P (op0)
+ && mode2 != BLKmode
+ && LEGITIMATE_CONSTANT_P (op0)
+ && !must_force_mem)
+ op0 = force_reg (mode2, op0);
+
+ /* Otherwise, if this is a constant, try to force it to the constant
+ pool. Note that back-ends, e.g. MIPS, may refuse to do so if it
+ is a legitimate constant. */
+ else if (CONSTANT_P (op0) && (memloc = force_const_mem (mode2, op0)))
+ op0 = validize_mem (memloc);
+
+ /* Otherwise, if this is a constant or the object is not in memory
+ but needs to be, put it there. */
+ else if (CONSTANT_P (op0) || (!MEM_P (op0) && must_force_mem))
+ {
+ tree nt = build_qualified_type (TREE_TYPE (tem),
+ (TYPE_QUALS (TREE_TYPE (tem))
+ | TYPE_QUAL_CONST));
+ memloc = assign_temp (nt, 1, 1, 1);
+ emit_move_insn (memloc, op0);
+ op0 = memloc;
+ }
+
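+ /* Apply any variable offset, first converting it to the address
+ mode of OP0's address space. */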
+ if (offset)
+ {
+ enum machine_mode address_mode;
+ rtx offset_rtx = expand_expr (offset, NULL_RTX, VOIDmode,
+ EXPAND_SUM);
+
+ gcc_assert (MEM_P (op0));
+
+ address_mode
+ = targetm.addr_space.address_mode (MEM_ADDR_SPACE (op0));
+ if (GET_MODE (offset_rtx) != address_mode)
+ offset_rtx = convert_to_mode (address_mode, offset_rtx, 0);
+
+ if (GET_MODE (op0) == BLKmode
+ /* A constant address in OP0 can have VOIDmode, we must
+ not try to call force_reg in that case. */
+ && GET_MODE (XEXP (op0, 0)) != VOIDmode
+ && bitsize != 0
+ && (bitpos % bitsize) == 0
+ && (bitsize % GET_MODE_ALIGNMENT (mode1)) == 0
+ && MEM_ALIGN (op0) == GET_MODE_ALIGNMENT (mode1))
+ {
+ op0 = adjust_address (op0, mode1, bitpos / BITS_PER_UNIT);
+ bitpos = 0;
+ }
+
+ op0 = offset_address (op0, offset_rtx,
+ highest_pow2_factor (offset));
+ }
+
+ /* If OFFSET is making OP0 more aligned than BIGGEST_ALIGNMENT,
+ record its alignment as BIGGEST_ALIGNMENT. */
+ if (MEM_P (op0) && bitpos == 0 && offset != 0
+ && is_aligning_offset (offset, tem))
+ set_mem_align (op0, BIGGEST_ALIGNMENT);
+
+ /* Don't forget about volatility even if this is a bitfield. */
+ if (MEM_P (op0) && volatilep && ! MEM_VOLATILE_P (op0))
+ {
+ if (op0 == orig_op0)
+ op0 = copy_rtx (op0);
+
+ MEM_VOLATILE_P (op0) = 1;
+ }
+
+ /* In cases where an aligned union has an unaligned object
+ as a field, we might be extracting a BLKmode value from
+ an integer-mode (e.g., SImode) object. Handle this case
+ by doing the extract into an object as wide as the field
+ (which we know to be the width of a basic mode), then
+ storing into memory, and changing the mode to BLKmode. */
+ if (mode1 == VOIDmode
+ || REG_P (op0) || GET_CODE (op0) == SUBREG
+ || (mode1 != BLKmode && ! direct_load[(int) mode1]
+ && GET_MODE_CLASS (mode) != MODE_COMPLEX_INT
+ && GET_MODE_CLASS (mode) != MODE_COMPLEX_FLOAT
+ && modifier != EXPAND_CONST_ADDRESS
+ && modifier != EXPAND_INITIALIZER)
+ /* If the field isn't aligned enough to fetch as a memref,
+ fetch it as a bit field. */
+ || (mode1 != BLKmode
+ && (((TYPE_ALIGN (TREE_TYPE (tem)) < GET_MODE_ALIGNMENT (mode)
+ || (bitpos % GET_MODE_ALIGNMENT (mode) != 0)
+ || (MEM_P (op0)
+ && (MEM_ALIGN (op0) < GET_MODE_ALIGNMENT (mode1)
+ || (bitpos % GET_MODE_ALIGNMENT (mode1) != 0))))
+ && ((modifier == EXPAND_CONST_ADDRESS
+ || modifier == EXPAND_INITIALIZER)
+ ? STRICT_ALIGNMENT
+ : SLOW_UNALIGNED_ACCESS (mode1, MEM_ALIGN (op0))))
+ || (bitpos % BITS_PER_UNIT != 0)))
+ /* If the type and the field have a constant size and the
+ size of the type isn't the same as that of the bitfield,
+ we must use bitfield operations. */
+ || (bitsize >= 0
+ && TYPE_SIZE (TREE_TYPE (exp))
+ && TREE_CODE (TYPE_SIZE (TREE_TYPE (exp))) == INTEGER_CST
+ && 0 != compare_tree_int (TYPE_SIZE (TREE_TYPE (exp)),
+ bitsize)))
+ {
+ enum machine_mode ext_mode = mode;
+
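+ /* If the extraction mode is BLKmode, look for an integer mode
+ wide enough to hold the field, unless a direct byte-aligned
+ block copy between MEMs is possible; if no usable mode is
+ found, fall back to emit_block_move below. */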
+ if (ext_mode == BLKmode
+ && ! (target != 0 && MEM_P (op0)
+ && MEM_P (target)
+ && bitpos % BITS_PER_UNIT == 0))
+ ext_mode = mode_for_size (bitsize, MODE_INT, 1);
+
+ if (ext_mode == BLKmode)
+ {
+ if (target == 0)
+ target = assign_temp (type, 0, 1, 1);
+
+ if (bitsize == 0)
+ return target;
+
+ /* In this case, BITPOS must start at a byte boundary and
+ TARGET, if specified, must be a MEM. */
+ gcc_assert (MEM_P (op0)
+ && (!target || MEM_P (target))
+ && !(bitpos % BITS_PER_UNIT));
+
+ emit_block_move (target,
+ adjust_address (op0, VOIDmode,
+ bitpos / BITS_PER_UNIT),
+ GEN_INT ((bitsize + BITS_PER_UNIT - 1)
+ / BITS_PER_UNIT),
+ (modifier == EXPAND_STACK_PARM
+ ? BLOCK_OP_CALL_PARM : BLOCK_OP_NORMAL));
+
+ return target;
+ }