static mem_attrs *get_mem_attrs PARAMS ((HOST_WIDE_INT, tree, rtx,
rtx, unsigned int,
enum machine_mode));
+static tree component_ref_for_mem_expr PARAMS ((tree));
/* Probability of the conditional branch currently proceeded by try_split.
Set to -1 otherwise. */
return (p->alias ^ (p->align * 1000)
^ ((p->offset ? INTVAL (p->offset) : 0) * 50000)
^ ((p->size ? INTVAL (p->size) : 0) * 2500000)
- ^ (long) p->decl);
+ ^ (size_t) p->expr);
}
/* Returns non-zero if the value represented by X (which is really a
mem_attrs *p = (mem_attrs *) x;
mem_attrs *q = (mem_attrs *) y;
- return (p->alias == q->alias && p->decl == q->decl && p->offset == q->offset
+ return (p->alias == q->alias && p->expr == q->expr && p->offset == q->offset
&& p->size == q->size && p->align == q->align);
}
{
mem_attrs *p = (mem_attrs *) x;
- if (p->decl)
- ggc_mark_tree (p->decl);
+ if (p->expr)
+ ggc_mark_tree (p->expr);
if (p->offset)
ggc_mark_rtx (p->offset);
MEM of mode MODE. */
static mem_attrs *
-get_mem_attrs (alias, decl, offset, size, align, mode)
+get_mem_attrs (alias, expr, offset, size, align, mode)
HOST_WIDE_INT alias;
- tree decl;
+ tree expr;
rtx offset;
rtx size;
unsigned int align;
void **slot;
/* If everything is the default, we can just return zero. */
- if (alias == 0 && decl == 0 && offset == 0
+ if (alias == 0 && expr == 0 && offset == 0
&& (size == 0
|| (mode != BLKmode && GET_MODE_SIZE (mode) == INTVAL (size)))
- && (align == 1
+ && (align == BITS_PER_UNIT
|| (mode != BLKmode && align == GET_MODE_ALIGNMENT (mode))))
return 0;
attrs.alias = alias;
- attrs.decl = decl;
+ attrs.expr = expr;
attrs.offset = offset;
attrs.size = size;
attrs.align = align;
long i[4]; /* Only the low 32 bits of each 'long' are used. */
int endian = WORDS_BIG_ENDIAN ? 1 : 0;
+ /* Convert 'r' into an array of four 32-bit words in target word
+ order. */
REAL_VALUE_FROM_CONST_DOUBLE (r, x);
switch (GET_MODE_BITSIZE (GET_MODE (x)))
{
case 32:
- REAL_VALUE_TO_TARGET_SINGLE (r, i[endian]);
- i[1 - endian] = 0;
- break;
+ REAL_VALUE_TO_TARGET_SINGLE (r, i[3 * endian]);
+ i[1] = 0;
+ i[2] = 0;
+ i[3 - 3 * endian] = 0;
+ break;
case 64:
- REAL_VALUE_TO_TARGET_DOUBLE (r, i);
- break;
+ REAL_VALUE_TO_TARGET_DOUBLE (r, i + 2 * endian);
+ i[2 - 2 * endian] = 0;
+ i[3 - 2 * endian] = 0;
+ break;
case 96:
REAL_VALUE_TO_TARGET_LONG_DOUBLE (r, i + endian);
- i[3-3*endian] = 0;
+ i[3 - 3 * endian] = 0;
break;
case 128:
REAL_VALUE_TO_TARGET_LONG_DOUBLE (r, i);
default:
abort ();
}
-
/* Now, pack the 32-bit elements of the array into a CONST_DOUBLE
and return it. */
#if HOST_BITS_PER_WIDE_INT == 32
- return immed_double_const (i[endian], i[1 - endian], mode);
+ return immed_double_const (i[3 * endian], i[1 + endian], mode);
#else
- {
- int c;
-
- if (HOST_BITS_PER_WIDE_INT != 64)
- abort ();
-
- for (c = 0; c < 4; c++)
- i[c] &= ~ (0L);
+ if (HOST_BITS_PER_WIDE_INT != 64)
+ abort ();
- switch (GET_MODE_BITSIZE (GET_MODE (x)))
- {
- case 32:
- case 64:
- return immed_double_const (((unsigned long) i[endian]) |
- (((HOST_WIDE_INT) i[1-endian]) << 32),
- 0, mode);
- case 96:
- case 128:
- return immed_double_const (((unsigned long) i[endian*3]) |
- (((HOST_WIDE_INT) i[1+endian]) << 32),
- ((unsigned long) i[2-endian]) |
- (((HOST_WIDE_INT) i[3-endian*3]) << 32),
- mode);
- default:
- abort ();
- }
- }
+ return immed_double_const ((((unsigned long) i[3 * endian])
+ | ((HOST_WIDE_INT) i[1 + endian] << 32)),
+ (((unsigned long) i[2 - endian])
+ | ((HOST_WIDE_INT) i[3 - 3 * endian] << 32)),
+ mode);
#endif
}
#endif /* ifndef REAL_ARITHMETIC */
&& REG_P (x)
&& REGNO (x) < FIRST_PSEUDO_REGISTER)
internal_error
- ("Can't access real part of complex value in hard register");
+ ("can't access real part of complex value in hard register");
else if (WORDS_BIG_ENDIAN)
return gen_highpart (mode, x);
else
??? This is a potential portability problem and should
be fixed at some point.
- We must excercise caution with the sign bit. By definition there
+ We must exercise caution with the sign bit. By definition there
are 32 significant bits in K; there may be more in a HOST_WIDE_INT.
Consider a host with a 32-bit long and a 64-bit HOST_WIDE_INT.
So we explicitly mask and sign-extend as necessary. */
}
}
\f
+/* Within a MEM_EXPR, we care about either (1) a component ref of a decl,
+   or (2) a component ref of something variable.  Represent the latter with
+   a NULL expression.  */
+
+static tree
+component_ref_for_mem_expr (ref)
+ tree ref;
+{
+ tree inner = TREE_OPERAND (ref, 0);
+
+ /* Recurse so a nested chain of COMPONENT_REFs is canonicalized
+    from the innermost reference outward.  */
+ if (TREE_CODE (inner) == COMPONENT_REF)
+ inner = component_ref_for_mem_expr (inner);
+ else
+ {
+ tree placeholder_ptr = 0;
+
+ /* Now remove any conversions: they don't change what the underlying
+ object is.  Likewise for SAVE_EXPR.  Also handle PLACEHOLDER_EXPR.  */
+ while (TREE_CODE (inner) == NOP_EXPR || TREE_CODE (inner) == CONVERT_EXPR
+ || TREE_CODE (inner) == NON_LVALUE_EXPR
+ || TREE_CODE (inner) == VIEW_CONVERT_EXPR
+ || TREE_CODE (inner) == SAVE_EXPR
+ || TREE_CODE (inner) == PLACEHOLDER_EXPR)
+ if (TREE_CODE (inner) == PLACEHOLDER_EXPR)
+ inner = find_placeholder (inner, &placeholder_ptr);
+ else
+ inner = TREE_OPERAND (inner, 0);
+
+ /* Anything other than a decl is "something variable"; represent
+    it with a NULL inner expression per the comment above.  */
+ if (! DECL_P (inner))
+ inner = NULL_TREE;
+ }
+
+ /* If nothing changed, reuse REF rather than building a new node.  */
+ if (inner == TREE_OPERAND (ref, 0))
+ return ref;
+ else
+ return build (COMPONENT_REF, TREE_TYPE (ref), inner,
+ TREE_OPERAND (ref, 1));
+}
/* Given REF, a MEM, and T, either the type of X or the expression
corresponding to REF, set the memory attributes. OBJECTP is nonzero
int objectp;
{
HOST_WIDE_INT alias = MEM_ALIAS_SET (ref);
- tree decl = MEM_DECL (ref);
+ tree expr = MEM_EXPR (ref);
rtx offset = MEM_OFFSET (ref);
rtx size = MEM_SIZE (ref);
unsigned int align = MEM_ALIGN (ref);
if (TREE_THIS_VOLATILE (t))
MEM_VOLATILE_P (ref) = 1;
- /* Now remove any NOPs: they don't change what the underlying object is.
- Likewise for SAVE_EXPR. */
+ /* Now remove any conversions: they don't change what the underlying
+ object is. Likewise for SAVE_EXPR. */
while (TREE_CODE (t) == NOP_EXPR || TREE_CODE (t) == CONVERT_EXPR
- || TREE_CODE (t) == NON_LVALUE_EXPR || TREE_CODE (t) == SAVE_EXPR)
+ || TREE_CODE (t) == NON_LVALUE_EXPR
+ || TREE_CODE (t) == VIEW_CONVERT_EXPR
+ || TREE_CODE (t) == SAVE_EXPR)
t = TREE_OPERAND (t, 0);
/* If this expression can't be addressed (e.g., it contains a reference
/* If this is a decl, set the attributes of the MEM from it. */
if (DECL_P (t))
{
- decl = t;
- offset = GEN_INT (0);
+ expr = t;
+ offset = const0_rtx;
size = (DECL_SIZE_UNIT (t)
&& host_integerp (DECL_SIZE_UNIT (t), 1)
? GEN_INT (tree_low_cst (DECL_SIZE_UNIT (t), 1)) : 0);
align = CONSTANT_ALIGNMENT (t, align);
#endif
}
+
+ /* If this is a field reference and not a bit-field, record it. */
+ /* ??? There is some information that can be gleaned from bit-fields,
+ such as the word offset in the structure that might be modified.
+ But skip it for now. */
+ else if (TREE_CODE (t) == COMPONENT_REF
+ && ! DECL_BIT_FIELD (TREE_OPERAND (t, 1)))
+ {
+ expr = component_ref_for_mem_expr (t);
+ offset = const0_rtx;
+ /* ??? Any reason the field size would be different than
+ the size we got from the type? */
+ }
+
+ /* If this is an array reference, look for an outer field reference. */
+ else if (TREE_CODE (t) == ARRAY_REF)
+ {
+ tree off_tree = size_zero_node;
+
+ do
+ {
+ off_tree
+ = fold (build (PLUS_EXPR, sizetype,
+ fold (build (MULT_EXPR, sizetype,
+ TREE_OPERAND (t, 1),
+ TYPE_SIZE_UNIT (TREE_TYPE (t)))),
+ off_tree));
+ t = TREE_OPERAND (t, 0);
+ }
+ while (TREE_CODE (t) == ARRAY_REF);
+
+ if (TREE_CODE (t) == COMPONENT_REF)
+ {
+ expr = component_ref_for_mem_expr (t);
+ if (host_integerp (off_tree, 1))
+ offset = GEN_INT (tree_low_cst (off_tree, 1));
+ /* ??? Any reason the field size would be different than
+ the size we got from the type? */
+ }
+ }
}
/* Now set the attributes we computed above. */
MEM_ATTRS (ref)
- = get_mem_attrs (alias, decl, offset, size, align, GET_MODE (ref));
+ = get_mem_attrs (alias, expr, offset, size, align, GET_MODE (ref));
/* If this is already known to be a scalar or aggregate, we are done. */
if (MEM_IN_STRUCT_P (ref) || MEM_SCALAR_P (ref))
abort ();
#endif
- MEM_ATTRS (mem) = get_mem_attrs (set, MEM_DECL (mem), MEM_OFFSET (mem),
+ MEM_ATTRS (mem) = get_mem_attrs (set, MEM_EXPR (mem), MEM_OFFSET (mem),
MEM_SIZE (mem), MEM_ALIGN (mem),
GET_MODE (mem));
}
rtx mem;
unsigned int align;
{
- MEM_ATTRS (mem) = get_mem_attrs (MEM_ALIAS_SET (mem), MEM_DECL (mem),
+ MEM_ATTRS (mem) = get_mem_attrs (MEM_ALIAS_SET (mem), MEM_EXPR (mem),
MEM_OFFSET (mem), MEM_SIZE (mem), align,
GET_MODE (mem));
}
-/* Set the decl for MEM to DECL. */
+/* Set the expr for MEM to EXPR. */
void
-set_mem_decl (mem, decl)
+set_mem_expr (mem, expr)
rtx mem;
- tree decl;
+ tree expr;
{
MEM_ATTRS (mem)
- = get_mem_attrs (MEM_ALIAS_SET (mem), decl, MEM_OFFSET (mem),
+ = get_mem_attrs (MEM_ALIAS_SET (mem), expr, MEM_OFFSET (mem),
MEM_SIZE (mem), MEM_ALIGN (mem), GET_MODE (mem));
}
+
+/* Set the offset of MEM to OFFSET by rebuilding its shared mem_attrs
+   structure; all other attributes are carried over unchanged.  */
+
+void
+set_mem_offset (mem, offset)
+ rtx mem, offset;
+{
+ MEM_ATTRS (mem) = get_mem_attrs (MEM_ALIAS_SET (mem), MEM_EXPR (mem),
+ offset, MEM_SIZE (mem), MEM_ALIGN (mem),
+ GET_MODE (mem));
+}
\f
/* Return a memory reference like MEMREF, but with its mode changed to MODE
and its address changed to ADDR. (VOIDmode means don't change the mode.
else if (MEM_SIZE (memref))
size = plus_constant (MEM_SIZE (memref), -offset);
- MEM_ATTRS (new) = get_mem_attrs (MEM_ALIAS_SET (memref), MEM_DECL (memref),
+ MEM_ATTRS (new) = get_mem_attrs (MEM_ALIAS_SET (memref), MEM_EXPR (memref),
memoffset, size, memalign, GET_MODE (new));
/* At some point, we should validate that this offset is within the object,
/* Update the alignment to reflect the offset. Reset the offset, which
we don't know. */
- MEM_ATTRS (new) = get_mem_attrs (MEM_ALIAS_SET (memref), MEM_DECL (memref),
+ MEM_ATTRS (new) = get_mem_attrs (MEM_ALIAS_SET (memref), MEM_EXPR (memref),
0, 0, MIN (MEM_ALIGN (memref),
pow2 * BITS_PER_UNIT),
GET_MODE (new));
&& !find_reg_note (insn, REG_BR_PROB, 0))
{
/* We can preserve the REG_BR_PROB notes only if exactly
- one jump is created, otherwise the machinde description
+ one jump is created, otherwise the machine description
is responsible for this step using
split_branch_probability variable. */
if (njumps != 1)
{
set_block_for_insn (insn, bb);
/* Should not happen as first in the BB is always
- eigther NOTE or LABEL. */
+ either NOTE or LABEL. */
if (bb->end == after
/* Avoid clobbering of structure when creating new BB. */
&& GET_CODE (insn) != BARRIER
{
set_block_for_insn (insn, bb);
/* Should not happen as first in the BB is always
- eigther NOTE or LABEl. */
+ either NOTE or LABEL. */
if (bb->head == insn
/* Avoid clobbering of structure when creating new BB. */
&& GET_CODE (insn) != BARRIER
{
rtx note = find_reg_note (insn, kind, NULL_RTX);
- /* Don't add ASM_OPERAND REG_EQUAL/REG_EQUIV notes.
- It serves no useful purpose and breaks eliminate_regs. */
- if ((kind == REG_EQUAL || kind == REG_EQUIV)
- && GET_CODE (datum) == ASM_OPERANDS)
- return NULL_RTX;
+ switch (kind)
+ {
+ case REG_EQUAL:
+ case REG_EQUIV:
+ /* Don't add REG_EQUAL/REG_EQUIV notes if the insn
+ has multiple sets (some callers assume single_set
+ means the insn only has one set, when in fact it
+ means the insn only has one * useful * set). */
+ if (GET_CODE (PATTERN (insn)) == PARALLEL && multiple_sets (insn))
+ {
+ if (note)
+ abort ();
+ return NULL_RTX;
+ }
+
+ /* Don't add ASM_OPERAND REG_EQUAL/REG_EQUIV notes.
+ It serves no useful purpose and breaks eliminate_regs. */
+ if (GET_CODE (datum) == ASM_OPERANDS)
+ return NULL_RTX;
+ break;
+
+ default:
+ break;
+ }
if (note)
{
#ifdef INIT_EXPANDERS
/* This is to initialize {init|mark|free}_machine_status before the first
call to push_function_context_to. This is needed by the Chill front
- end which calls push_function_context_to before the first cal to
+ end which calls push_function_context_to before the first call to
init_function_start. */
INIT_EXPANDERS;
#endif