static tree init_noncopied_parts PROTO((tree, tree));
static int safe_from_p PROTO((rtx, tree));
static int fixed_type_p PROTO((tree));
+static rtx var_rtx PROTO((tree));
static int get_pointer_alignment PROTO((tree, unsigned));
static tree string_constant PROTO((tree, tree *));
static tree c_strlen PROTO((tree));
if (unsignedp && GET_MODE_CLASS (mode) == MODE_INT
&& GET_MODE_BITSIZE (mode) == 2 * HOST_BITS_PER_WIDE_INT
&& GET_CODE (x) == CONST_INT && INTVAL (x) < 0)
- return immed_double_const (INTVAL (x), (HOST_WIDE_INT) 0, mode);
+ {
+ HOST_WIDE_INT val = INTVAL (x);
+
+ if (oldmode != VOIDmode
+ && HOST_BITS_PER_WIDE_INT > GET_MODE_BITSIZE (oldmode))
+ {
+ int width = GET_MODE_BITSIZE (oldmode);
+
+ /* We need to zero extend VAL. */
+ val &= ((HOST_WIDE_INT) 1 << width) - 1;
+ }
+
+ return immed_double_const (val, (HOST_WIDE_INT) 0, mode);
+ }
/* We can do this with a gen_lowpart if both desired and current modes
are integer, and this is either a constant integer, a register, or a
}
/* The code above should have handled everything. */
- if (data.len != 0)
+ if (data.len > 0)
abort ();
}
{
if (GET_MODE (target_reg) == GET_MODE (y))
source = y;
+ /* Allow for the target_reg to be smaller than the input register
+ to allow for AIX with 4 DF arguments after a single SI arg. The
+ last DF argument will only load 1 word into the integer registers,
+ but load a DF value into the float registers. */
else if (GET_MODE_SIZE (GET_MODE (target_reg))
- == GET_MODE_SIZE (GET_MODE (y)))
+ <= GET_MODE_SIZE (GET_MODE (y)))
source = gen_rtx (SUBREG, GET_MODE (target_reg), y, 0);
else
abort ();
plus_constant (XEXP (x, 0),
INTVAL (XEXP (element, 1))));
else if (XEXP (element, 1) == const0_rtx)
- target = x;
+ {
+ target = x;
+ if (GET_MODE (target) != GET_MODE (source_reg))
+ target = gen_lowpart (GET_MODE (source_reg), target);
+ }
else
abort ();
int alignment;
push_temp_slots ();
- tem = get_inner_reference (to, &bitsize, &bitpos, &offset,
- &mode1, &unsignedp, &volatilep);
+ tem = get_inner_reference (to, &bitsize, &bitpos, &offset, &mode1,
+ &unsignedp, &volatilep, &alignment);
/* If we are going to use store_bit_field and extract_bit_field,
make sure to_rtx will be safe for multiple use. */
if (mode1 == VOIDmode && want_value)
tem = stabilize_reference (tem);
- alignment = TYPE_ALIGN (TREE_TYPE (tem)) / BITS_PER_UNIT;
to_rtx = expand_expr (tem, NULL_RTX, VOIDmode, 0);
if (offset != 0)
{
to_rtx = change_address (to_rtx, VOIDmode,
gen_rtx (PLUS, ptr_mode, XEXP (to_rtx, 0),
force_reg (ptr_mode, offset_rtx)));
- /* If we have a variable offset, the known alignment
- is only that of the innermost structure containing the field.
- (Actually, we could sometimes do better by using the
- align of an element of the innermost array, but no need.) */
- if (TREE_CODE (to) == COMPONENT_REF
- || TREE_CODE (to) == BIT_FIELD_REF)
- alignment
- = TYPE_ALIGN (TREE_TYPE (TREE_OPERAND (to, 0))) / BITS_PER_UNIT;
}
if (volatilep)
{
/* If we don't want a value, we can do the conversion inside EXP,
which will often result in some optimizations. Do the conversion
in two steps: first change the signedness, if needed, then
- the extend. */
- if (! want_value)
+ the extend. But don't do this if the type of EXP is a subtype
+ of something else since then the conversion might involve
+ more than just converting modes. */
+ if (! want_value && INTEGRAL_TYPE_P (TREE_TYPE (exp))
+ && TREE_TYPE (TREE_TYPE (exp)) == 0)
{
if (TREE_UNSIGNED (TREE_TYPE (exp))
!= SUBREG_PROMOTED_UNSIGNED_P (target))
{
#ifdef TARGET_MEM_FUNCTIONS
emit_library_call (memset_libfunc, 0, VOIDmode, 3,
- addr, Pmode,
+ addr, ptr_mode,
const0_rtx, TYPE_MODE (integer_type_node),
convert_to_mode (TYPE_MODE (sizetype),
size,
TYPE_MODE (sizetype));
#else
emit_library_call (bzero_libfunc, 0, VOIDmode, 2,
- addr, Pmode,
+ addr, ptr_mode,
convert_to_mode (TYPE_MODE (integer_type_node),
size,
TREE_UNSIGNED (integer_type_node)),
It is also needed to check for missing elements. */
for (elt = CONSTRUCTOR_ELTS (exp);
elt != NULL_TREE;
- elt = TREE_CHAIN (elt), i++)
+ elt = TREE_CHAIN (elt))
{
tree index = TREE_PURPOSE (elt);
HOST_WIDE_INT this_node_count;
giving the variable offset (in units) in *POFFSET.
This offset is in addition to the bit position.
If the position is not variable, we store 0 in *POFFSET.
+ We set *PALIGNMENT to the alignment in bytes of the address that will be
+ computed. This is the alignment of the thing we return if *POFFSET
+ is zero, but can be less strictly aligned if *POFFSET is nonzero.
If any of the extraction expressions is volatile,
we store 1 in *PVOLATILEP. Otherwise we don't change that.
If the field describes a variable-sized object, *PMODE is set to
VOIDmode and *PBITSIZE is set to -1. An access cannot be made in
- this case, but the address of the object can be found. */
+ this case, but the address of the object can be found. */
tree
get_inner_reference (exp, pbitsize, pbitpos, poffset, pmode,
- punsignedp, pvolatilep)
+ punsignedp, pvolatilep, palignment)
tree exp;
int *pbitsize;
int *pbitpos;
enum machine_mode *pmode;
int *punsignedp;
int *pvolatilep;
+ int *palignment;
{
tree orig_exp = exp;
tree size_tree = 0;
enum machine_mode mode = VOIDmode;
tree offset = integer_zero_node;
+ int alignment = BIGGEST_ALIGNMENT;
if (TREE_CODE (exp) == COMPONENT_REF)
{
/* If any reference in the chain is volatile, the effect is volatile. */
if (TREE_THIS_VOLATILE (exp))
*pvolatilep = 1;
+
+ /* If the offset is non-constant already, then we can't assume any
+ alignment more than the alignment here. */
+ if (! integer_zerop (offset))
+ alignment = MIN (alignment, TYPE_ALIGN (TREE_TYPE (exp)));
+
exp = TREE_OPERAND (exp, 0);
}
+ if (TREE_CODE_CLASS (TREE_CODE (exp)) == 'd')
+ alignment = MIN (alignment, DECL_ALIGN (exp));
+ else if (TREE_TYPE (exp) != 0)
+ alignment = MIN (alignment, TYPE_ALIGN (TREE_TYPE (exp)));
+
if (integer_zerop (offset))
offset = 0;
*pmode = mode;
*poffset = offset;
+ *palignment = alignment / BITS_PER_UNIT;
return exp;
}
\f
return 1;
return 0;
}
+
+/* Subroutine of expand_expr: return rtx if EXP is a
+ variable or parameter; else return 0. */
+
+static rtx
+var_rtx (exp)
+ tree exp;
+{
+ STRIP_NOPS (exp);
+ switch (TREE_CODE (exp))
+ {
+ case PARM_DECL:
+ case VAR_DECL:
+ return DECL_RTL (exp);
+ default:
+ return 0;
+ }
+}
\f
/* expand_expr: generate code for computing expression EXP.
An rtx for the computed value is returned. The value is never null.
else
{
- if (target == 0 || ! safe_from_p (target, exp))
+ /* Handle calls that pass values in multiple non-contiguous
+ locations. The Irix 6 ABI has examples of this. */
+ if (target == 0 || ! safe_from_p (target, exp)
+ || GET_CODE (target) == PARALLEL)
{
if (mode != BLKmode && ! TREE_ADDRESSABLE (exp))
target = gen_reg_rtx (tmode != VOIDmode ? tmode : mode);
int bitpos;
tree offset;
int volatilep = 0;
- tree tem = get_inner_reference (exp, &bitsize, &bitpos, &offset,
- &mode1, &unsignedp, &volatilep);
int alignment;
+ tree tem = get_inner_reference (exp, &bitsize, &bitpos, &offset,
+ &mode1, &unsignedp, &volatilep,
+ &alignment);
/* If we got back the original object, something is wrong. Perhaps
we are evaluating an expression too early. In any event, don't
op0 = validize_mem (force_const_mem (mode, op0));
}
- alignment = TYPE_ALIGN (TREE_TYPE (tem)) / BITS_PER_UNIT;
if (offset != 0)
{
rtx offset_rtx = expand_expr (offset, NULL_RTX, VOIDmode, 0);
op0 = change_address (op0, VOIDmode,
gen_rtx (PLUS, ptr_mode, XEXP (op0, 0),
force_reg (ptr_mode, offset_rtx)));
- /* If we have a variable offset, the known alignment
- is only that of the innermost structure containing the field.
- (Actually, we could sometimes do better by using the
- size of an element of the innermost array, but no need.) */
- if (TREE_CODE (exp) == COMPONENT_REF
- || TREE_CODE (exp) == BIT_FIELD_REF)
- alignment = (TYPE_ALIGN (TREE_TYPE (TREE_OPERAND (exp, 0)))
- / BITS_PER_UNIT);
}
/* Don't forget about volatility even if this is a bitfield. */
|| GET_CODE (op0) == REG || GET_CODE (op0) == SUBREG
|| (modifier != EXPAND_CONST_ADDRESS
&& modifier != EXPAND_INITIALIZER
- && ((mode1 != BLKmode && ! direct_load[(int) mode1])
+ && ((mode1 != BLKmode && ! direct_load[(int) mode1]
+ && GET_MODE_CLASS (mode) != MODE_COMPLEX_INT
+ && GET_MODE_CLASS (mode) != MODE_COMPLEX_FLOAT)
/* If the field isn't aligned enough to fetch as a memref,
fetch it as a bit field. */
|| (SLOW_UNALIGNED_ACCESS
return target;
}
- /* If we are not to produce a result, we have no target. Otherwise,
- if a target was specified use it; it will not be used as an
- intermediate target unless it is safe. If no target, use a
- temporary. */
-
- if (ignore)
- temp = 0;
- else if (original_target
- && safe_from_p (original_target, TREE_OPERAND (exp, 0))
- && GET_MODE (original_target) == mode
- && ! (GET_CODE (original_target) == MEM
- && MEM_VOLATILE_P (original_target)))
- temp = original_target;
- else if (TREE_ADDRESSABLE (type))
- abort ();
- else
- temp = assign_temp (type, 0, 0, 1);
-
/* Check for X ? A + B : A. If we have this, we can copy
A to the output and conditionally add B. Similarly for unary
operations. Don't do this if X has side-effects because
TREE_OPERAND (TREE_OPERAND (exp, 2), 0), 0))
singleton = TREE_OPERAND (exp, 1), unary_op = TREE_OPERAND (exp, 2);
+ /* If we are not to produce a result, we have no target. Otherwise,
+ if a target was specified use it; it will not be used as an
+ intermediate target unless it is safe. If no target, use a
+ temporary. */
+
+ if (ignore)
+ temp = 0;
+ else if (original_target
+ && (safe_from_p (original_target, TREE_OPERAND (exp, 0))
+ || (singleton && GET_CODE (original_target) == REG
+ && REGNO (original_target) >= FIRST_PSEUDO_REGISTER
+ && original_target == var_rtx (singleton)))
+ && GET_MODE (original_target) == mode
+ && ! (GET_CODE (original_target) == MEM
+ && MEM_VOLATILE_P (original_target)))
+ temp = original_target;
+ else if (TREE_ADDRESSABLE (type))
+ abort ();
+ else
+ temp = assign_temp (type, 0, 0, 1);
+
/* If we had X ? A + 1 : A and we can do the test of X as a store-flag
operation, do this as A + (X != 0). Similarly for other simple
binary operators. */
return enqueue_insn (op0, GEN_FCN (icode) (op0, op0, op1));
}
+ if (icode != (int) CODE_FOR_nothing && GET_CODE (op0) == MEM)
+ {
+ rtx addr = force_reg (Pmode, XEXP (op0, 0));
+ rtx temp, result;
+
+ op0 = change_address (op0, VOIDmode, addr);
+ temp = force_reg (GET_MODE (op0), op0);
+ if (! (*insn_operand_predicate[icode][2]) (op1, mode))
+ op1 = force_reg (mode, op1);
+
+ /* The increment queue is LIFO, thus we have to `queue'
+ the instructions in reverse order. */
+ enqueue_insn (op0, gen_move_insn (op0, temp));
+ result = enqueue_insn (temp, GEN_FCN (icode) (temp, temp, op1));
+ return result;
+ }
}
/* Preincrement, or we can't increment with one simple insn. */
tree type;
tree offset;
int volatilep = 0;
+ int alignment;
/* Get description of this reference. We don't actually care
about the underlying object here. */
get_inner_reference (exp, &bitsize, &bitpos, &offset,
- &mode, &unsignedp, &volatilep);
+ &mode, &unsignedp, &volatilep,
+ &alignment);
type = type_for_size (bitsize, unsignedp);
if (! SLOW_BYTE_ACCESS
else
{
+ rtx seq1, seq2;
+ tree cleanups_left_side, cleanups_right_side, old_cleanups;
+
register rtx label1 = gen_label_rtx ();
drop_through_label = gen_label_rtx ();
+
do_jump (TREE_OPERAND (exp, 0), label1, NULL_RTX);
+
+ /* We need to save the cleanups for the lhs and rhs separately.
+ Keep track of the cleanups seen before the lhs. */
+ old_cleanups = cleanups_this_call;
+ start_sequence ();
/* Now the THEN-expression. */
do_jump (TREE_OPERAND (exp, 1),
if_false_label ? if_false_label : drop_through_label,
/* In case the do_jump just above never jumps. */
do_pending_stack_adjust ();
emit_label (label1);
+ seq1 = get_insns ();
+ /* Now grab the cleanups for the lhs. */
+ cleanups_left_side = defer_cleanups_to (old_cleanups);
+ end_sequence ();
+
+ /* And keep track of where we start before the rhs. */
+ old_cleanups = cleanups_this_call;
+ start_sequence ();
/* Now the ELSE-expression. */
do_jump (TREE_OPERAND (exp, 2),
if_false_label ? if_false_label : drop_through_label,
if_true_label ? if_true_label : drop_through_label);
+ seq2 = get_insns ();
+ /* Grab the cleanups for the rhs. */
+ cleanups_right_side = defer_cleanups_to (old_cleanups);
+ end_sequence ();
+
+ if (cleanups_left_side || cleanups_right_side)
+ {
+ /* Make the cleanups for the THEN and ELSE clauses
+ conditional based on which half is executed. */
+ rtx flag = gen_reg_rtx (word_mode);
+ tree new_cleanups;
+ tree cond;
+
+ /* Set the flag to 0 so that we know we executed the lhs. */
+ emit_move_insn (flag, const0_rtx);
+ emit_insns (seq1);
+
+ /* Set the flag to 1 so that we know we executed the rhs. */
+ emit_move_insn (flag, const1_rtx);
+ emit_insns (seq2);
+
+ /* Make sure the cleanup lives on the function_obstack. */
+ push_obstacks_nochange ();
+ resume_temporary_allocation ();
+
+ /* Now, build up a COND_EXPR that tests the value of the
+ flag, and then either do the cleanups for the lhs or the
+ rhs. */
+ cond = make_node (RTL_EXPR);
+ TREE_TYPE (cond) = integer_type_node;
+ RTL_EXPR_RTL (cond) = flag;
+ RTL_EXPR_SEQUENCE (cond) = NULL_RTX;
+ cond = save_expr (cond);
+
+ new_cleanups = build (COND_EXPR, void_type_node,
+ truthvalue_conversion (cond),
+ cleanups_right_side, cleanups_left_side);
+ new_cleanups = fold (new_cleanups);
+
+ pop_obstacks ();
+
+ /* Now add in the conditionalized cleanups. */
+ cleanups_this_call
+ = tree_cons (NULL_TREE, new_cleanups, cleanups_this_call);
+ expand_eh_region_start ();
+ }
+ else
+ {
+ /* No cleanups were needed, so emit the two sequences
+ directly. */
+ emit_insns (seq1);
+ emit_insns (seq2);
+ }
}
break;
}
/* If this failed, we have to do this with set/compare/jump/set code. */
- if (target == 0 || GET_CODE (target) != REG
+ if (GET_CODE (target) != REG
|| reg_mentioned_p (target, op0) || reg_mentioned_p (target, op1))
target = gen_reg_rtx (GET_MODE (target));