/* Convert tree expression to rtl instructions, for GNU compiler.
- Copyright (C) 1988, 92-98, 1999 Free Software Foundation, Inc.
+ Copyright (C) 1988, 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000
+ Free Software Foundation, Inc.
This file is part of GNU CC.
#include "defaults.h"
#include "toplev.h"
#include "ggc.h"
+#include "intl.h"
#include "tm_p.h"
-#define CEIL(x,y) (((x) + (y) - 1) / (y))
+#ifndef ACCUMULATE_OUTGOING_ARGS
+#define ACCUMULATE_OUTGOING_ARGS 0
+#endif
+
+/* Supply a default definition for PUSH_ARGS. */
+#ifndef PUSH_ARGS
+#ifdef PUSH_ROUNDING
+#define PUSH_ARGS !ACCUMULATE_OUTGOING_ARGS
+#else
+#define PUSH_ARGS 0
+#endif
+#endif
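
With both defaults in place, a port that defines PUSH_ROUNDING but neither new macro gets PUSH_ARGS == 1, while a port that accumulates outgoing arguments gets 0. A minimal standalone sketch of how the resolution plays out (the PUSH_ROUNDING definition here is hypothetical, not taken from any real port):

#include <stdio.h>

/* Hypothetical port fragment: word-aligned pushes, nothing else defined.  */
#define PUSH_ROUNDING(BYTES) (((BYTES) + 3) & ~3)

#ifndef ACCUMULATE_OUTGOING_ARGS
#define ACCUMULATE_OUTGOING_ARGS 0
#endif

#ifndef PUSH_ARGS
#ifdef PUSH_ROUNDING
#define PUSH_ARGS !ACCUMULATE_OUTGOING_ARGS
#else
#define PUSH_ARGS 0
#endif
#endif

int
main ()
{
  /* Prints "0 1": push insns are used, since nothing accumulates args.  */
  printf ("%d %d\n", ACCUMULATE_OUTGOING_ARGS, PUSH_ARGS);
  return 0;
}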
/* Decide whether a function's arguments should be processed
from first to last or from last to first.
extern struct obstack permanent_obstack;
-static rtx get_push_address PROTO ((int));
-
-static rtx enqueue_insn PROTO((rtx, rtx));
-static int move_by_pieces_ninsns PROTO((unsigned int, int));
-static void move_by_pieces_1 PROTO((rtx (*) (rtx, ...), enum machine_mode,
- struct move_by_pieces *));
-static void clear_by_pieces PROTO((rtx, int, int));
-static void clear_by_pieces_1 PROTO((rtx (*) (rtx, ...),
- enum machine_mode,
- struct clear_by_pieces *));
-static int is_zeros_p PROTO((tree));
-static int mostly_zeros_p PROTO((tree));
-static void store_constructor_field PROTO((rtx, int, int, enum machine_mode,
- tree, tree, int, int));
-static void store_constructor PROTO((tree, rtx, int, int));
-static rtx store_field PROTO((rtx, int, int, enum machine_mode, tree,
- enum machine_mode, int, int,
- int, int));
+static rtx get_push_address PARAMS ((int));
+
+static rtx enqueue_insn PARAMS ((rtx, rtx));
+static int move_by_pieces_ninsns PARAMS ((unsigned int, unsigned int));
+static void move_by_pieces_1 PARAMS ((rtx (*) (rtx, ...), enum machine_mode,
+ struct move_by_pieces *));
+static void clear_by_pieces PARAMS ((rtx, int, unsigned int));
+static void clear_by_pieces_1 PARAMS ((rtx (*) (rtx, ...),
+ enum machine_mode,
+ struct clear_by_pieces *));
+static int is_zeros_p PARAMS ((tree));
+static int mostly_zeros_p PARAMS ((tree));
+static void store_constructor_field PARAMS ((rtx, unsigned HOST_WIDE_INT,
+ HOST_WIDE_INT, enum machine_mode,
+ tree, tree, unsigned int, int));
+static void store_constructor PARAMS ((tree, rtx, unsigned int, int,
+ HOST_WIDE_INT));
+static rtx store_field PARAMS ((rtx, HOST_WIDE_INT,
+ HOST_WIDE_INT, enum machine_mode,
+ tree, enum machine_mode, int,
+ unsigned int, HOST_WIDE_INT, int));
static enum memory_use_mode
- get_memory_usage_from_modifier PROTO((enum expand_modifier));
-static tree save_noncopied_parts PROTO((tree, tree));
-static tree init_noncopied_parts PROTO((tree, tree));
-static int safe_from_p PROTO((rtx, tree, int));
-static int fixed_type_p PROTO((tree));
-static rtx var_rtx PROTO((tree));
-static int readonly_fields_p PROTO((tree));
-static rtx expand_expr_unaligned PROTO((tree, int *));
-static rtx expand_increment PROTO((tree, int, int));
-static void preexpand_calls PROTO((tree));
-static void do_jump_by_parts_greater PROTO((tree, int, rtx, rtx));
-static void do_jump_by_parts_equality PROTO((tree, rtx, rtx));
-static void do_compare_and_jump PROTO((tree, enum rtx_code, enum rtx_code, rtx, rtx));
-static rtx do_store_flag PROTO((tree, rtx, enum machine_mode, int));
+ get_memory_usage_from_modifier PARAMS ((enum expand_modifier));
+static tree save_noncopied_parts PARAMS ((tree, tree));
+static tree init_noncopied_parts PARAMS ((tree, tree));
+static int safe_from_p PARAMS ((rtx, tree, int));
+static int fixed_type_p PARAMS ((tree));
+static rtx var_rtx PARAMS ((tree));
+static int readonly_fields_p PARAMS ((tree));
+static rtx expand_expr_unaligned PARAMS ((tree, unsigned int *));
+static rtx expand_increment PARAMS ((tree, int, int));
+static void preexpand_calls PARAMS ((tree));
+static void do_jump_by_parts_greater PARAMS ((tree, int, rtx, rtx));
+static void do_jump_by_parts_equality PARAMS ((tree, rtx, rtx));
+static void do_compare_and_jump PARAMS ((tree, enum rtx_code, enum rtx_code,
+ rtx, rtx));
+static rtx do_store_flag PARAMS ((tree, rtx, enum machine_mode, int));
/* Record for each mode whether we can move a register directly to or
from an object of that mode in memory. If we can't, we won't try
/* This macro is used to determine whether move_by_pieces should be called
to perform a structure copy. */
#ifndef MOVE_BY_PIECES_P
-#define MOVE_BY_PIECES_P(SIZE, ALIGN) (move_by_pieces_ninsns \
- (SIZE, ALIGN) < MOVE_RATIO)
+#define MOVE_BY_PIECES_P(SIZE, ALIGN) \
+ (move_by_pieces_ninsns (SIZE, ALIGN) < MOVE_RATIO)
#endif
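
For a sense of the count that MOVE_BY_PIECES_P compares against MOVE_RATIO, here is a standalone sketch of the greedy accounting move_by_pieces_ninsns performs, under the assumption of a target with usable 4-, 2- and 1-byte integer modes and alignment wide enough for all of them:

/* A sketch, not GCC code: one move per widest-mode chunk, then fall
   back to narrower modes for the remainder.  */
unsigned int
ninsns_sketch (unsigned int len)
{
  static const unsigned int mode_size[] = { 4, 2, 1 };
  unsigned int i, n_insns = 0;

  for (i = 0; i < 3; i++)
    n_insns += len / mode_size[i], len %= mode_size[i];

  /* ninsns_sketch (15) == 5: three SImode moves, one HImode move and
     one QImode move, so a 15-byte copy is inlined when MOVE_RATIO > 5.  */
  return n_insns;
}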
/* This array records the insn_code of insns to perform block moves. */
/* SLOW_UNALIGNED_ACCESS is non-zero if unaligned accesses are very slow. */
#ifndef SLOW_UNALIGNED_ACCESS
-#define SLOW_UNALIGNED_ACCESS STRICT_ALIGNMENT
+#define SLOW_UNALIGNED_ACCESS(MODE, ALIGN) STRICT_ALIGNMENT
#endif
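
The macro now receives the access mode and the known alignment, which (like ALIGN throughout this patch) is measured in bits rather than bytes, so a port can make the answer depend on both. A hypothetical override, shown only to illustrate the new interface:

/* Hypothetical port definition: unaligned integer accesses are cheap,
   unaligned floating-point accesses are not.  */
#define SLOW_UNALIGNED_ACCESS(MODE, ALIGN)		\
  (GET_MODE_CLASS (MODE) == MODE_FLOAT			\
   && (ALIGN) < GET_MODE_ALIGNMENT (MODE))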
\f
/* This is run once per compilation to set up which modes can be used
void
init_expr ()
{
- current_function->expr
- = (struct expr_status *) xmalloc (sizeof (struct expr_status));
+ cfun->expr = (struct expr_status *) xmalloc (sizeof (struct expr_status));
pending_chain = 0;
pending_stack_adjust = 0;
+ stack_pointer_delta = 0;
inhibit_defer_pop = 0;
saveregs_value = 0;
apply_args_value = 0;
/* MOVE_MAX_PIECES is the number of bytes at a time which we can
move efficiently, as opposed to MOVE_MAX which is the maximum
- number of bhytes we can move with a single instruction. */
+ number of bytes we can move with a single instruction. */
#ifndef MOVE_MAX_PIECES
#define MOVE_MAX_PIECES MOVE_MAX
from block FROM to block TO. (These are MEM rtx's with BLKmode).
The caller must pass FROM and TO
through protect_from_queue before calling.
- ALIGN (in bytes) is maximum alignment we can assume. */
+ ALIGN is maximum alignment we can assume. */
void
move_by_pieces (to, from, len, align)
rtx to, from;
- int len, align;
+ int len;
+ unsigned int align;
{
struct move_by_pieces data;
rtx to_addr = XEXP (to, 0), from_addr = XEXP (from, 0);
- int max_size = MOVE_MAX_PIECES + 1;
+ unsigned int max_size = MOVE_MAX_PIECES + 1;
enum machine_mode mode = VOIDmode, tmode;
enum insn_code icode;
data.to_addr = copy_addr_to_reg (to_addr);
}
- if (! SLOW_UNALIGNED_ACCESS
- || align > MOVE_MAX || align >= BIGGEST_ALIGNMENT / BITS_PER_UNIT)
- align = MOVE_MAX;
+ if (! SLOW_UNALIGNED_ACCESS (word_mode, align)
+ || align > MOVE_MAX * BITS_PER_UNIT || align >= BIGGEST_ALIGNMENT)
+ align = MOVE_MAX * BITS_PER_UNIT;
/* First move what we can in the largest integer mode, then go to
successively smaller modes. */
break;
icode = mov_optab->handlers[(int) mode].insn_code;
- if (icode != CODE_FOR_nothing
- && align >= MIN (BIGGEST_ALIGNMENT / BITS_PER_UNIT,
- GET_MODE_SIZE (mode)))
+ if (icode != CODE_FOR_nothing && align >= GET_MODE_ALIGNMENT (mode))
move_by_pieces_1 (GEN_FCN (icode), mode, &data);
max_size = GET_MODE_SIZE (mode);
static int
move_by_pieces_ninsns (l, align)
unsigned int l;
- int align;
+ unsigned int align;
{
register int n_insns = 0;
- int max_size = MOVE_MAX + 1;
+ unsigned int max_size = MOVE_MAX + 1;
- if (! SLOW_UNALIGNED_ACCESS
- || align > MOVE_MAX || align >= BIGGEST_ALIGNMENT / BITS_PER_UNIT)
- align = MOVE_MAX;
+ if (! SLOW_UNALIGNED_ACCESS (word_mode, align)
+ || align > MOVE_MAX * BITS_PER_UNIT || align >= BIGGEST_ALIGNMENT)
+ align = MOVE_MAX * BITS_PER_UNIT;
while (max_size > 1)
{
break;
icode = mov_optab->handlers[(int) mode].insn_code;
- if (icode != CODE_FOR_nothing
- && align >= MIN (BIGGEST_ALIGNMENT / BITS_PER_UNIT,
- GET_MODE_SIZE (mode)))
+ if (icode != CODE_FOR_nothing && align >= GET_MODE_ALIGNMENT (mode))
n_insns += l / GET_MODE_SIZE (mode), l %= GET_MODE_SIZE (mode);
max_size = GET_MODE_SIZE (mode);
static void
move_by_pieces_1 (genfun, mode, data)
- rtx (*genfun) PROTO ((rtx, ...));
+ rtx (*genfun) PARAMS ((rtx, ...));
enum machine_mode mode;
struct move_by_pieces *data;
{
Both X and Y must be MEM rtx's (perhaps inside VOLATILE)
with mode BLKmode.
SIZE is an rtx that says how long they are.
- ALIGN is the maximum alignment we can assume they have,
- measured in bytes.
+ ALIGN is the maximum alignment we can assume they have.
Return the address of the new block, if memcpy is called and returns it,
0 otherwise. */
emit_block_move (x, y, size, align)
rtx x, y;
rtx size;
- int align;
+ unsigned int align;
{
rtx retval = 0;
#ifdef TARGET_MEM_FUNCTIONS
including more than one in the machine description unless
the more limited one has some advantage. */
- rtx opalign = GEN_INT (align);
+ rtx opalign = GEN_INT (align / BITS_PER_UNIT);
enum machine_mode mode;
for (mode = GET_CLASS_NARROWEST_MODE (MODE_INT); mode != VOIDmode;
void
emit_group_load (dst, orig_src, ssize, align)
rtx dst, orig_src;
- int align, ssize;
+ unsigned int align;
+ int ssize;
{
rtx *tmps, src;
int start, i;
for (i = start; i < XVECLEN (dst, 0); i++)
{
enum machine_mode mode = GET_MODE (XEXP (XVECEXP (dst, 0, i), 0));
- int bytepos = INTVAL (XEXP (XVECEXP (dst, 0, i), 1));
- int bytelen = GET_MODE_SIZE (mode);
+ HOST_WIDE_INT bytepos = INTVAL (XEXP (XVECEXP (dst, 0, i), 1));
+ unsigned int bytelen = GET_MODE_SIZE (mode);
int shift = 0;
/* Handle trailing fragments that run over the size of the struct. */
shift = (bytelen - (ssize - bytepos)) * BITS_PER_UNIT;
bytelen = ssize - bytepos;
if (bytelen <= 0)
- abort();
+ abort ();
}
/* Optimize the access just a bit. */
if (GET_CODE (src) == MEM
- && align*BITS_PER_UNIT >= GET_MODE_ALIGNMENT (mode)
- && bytepos*BITS_PER_UNIT % GET_MODE_ALIGNMENT (mode) == 0
+ && align >= GET_MODE_ALIGNMENT (mode)
+ && bytepos * BITS_PER_UNIT % GET_MODE_ALIGNMENT (mode) == 0
&& bytelen == GET_MODE_SIZE (mode))
{
tmps[i] = gen_reg_rtx (mode);
abort ();
}
else
- {
- tmps[i] = extract_bit_field (src, bytelen*BITS_PER_UNIT,
- bytepos*BITS_PER_UNIT, 1, NULL_RTX,
- mode, mode, align, ssize);
- }
+ tmps[i] = extract_bit_field (src, bytelen * BITS_PER_UNIT,
+ bytepos * BITS_PER_UNIT, 1, NULL_RTX,
+ mode, mode, align, ssize);
if (BYTES_BIG_ENDIAN && shift)
- {
- expand_binop (mode, ashl_optab, tmps[i], GEN_INT (shift),
- tmps[i], 0, OPTAB_WIDEN);
- }
+ expand_binop (mode, ashl_optab, tmps[i], GEN_INT (shift),
+ tmps[i], 0, OPTAB_WIDEN);
}
+
emit_queue();
/* Copy the extracted pieces into the proper (probable) hard regs. */
void
emit_group_store (orig_dst, src, ssize, align)
rtx orig_dst, src;
- int ssize, align;
+ int ssize;
+ unsigned int align;
{
rtx *tmps, dst;
int start, i;
/* Process the pieces. */
for (i = start; i < XVECLEN (src, 0); i++)
{
- int bytepos = INTVAL (XEXP (XVECEXP (src, 0, i), 1));
+ HOST_WIDE_INT bytepos = INTVAL (XEXP (XVECEXP (src, 0, i), 1));
enum machine_mode mode = GET_MODE (tmps[i]);
- int bytelen = GET_MODE_SIZE (mode);
+ unsigned int bytelen = GET_MODE_SIZE (mode);
/* Handle trailing fragments that run over the size of the struct. */
if (ssize >= 0 && bytepos + bytelen > ssize)
/* Optimize the access just a bit. */
if (GET_CODE (dst) == MEM
- && align*BITS_PER_UNIT >= GET_MODE_ALIGNMENT (mode)
- && bytepos*BITS_PER_UNIT % GET_MODE_ALIGNMENT (mode) == 0
+ && align >= GET_MODE_ALIGNMENT (mode)
+ && bytepos * BITS_PER_UNIT % GET_MODE_ALIGNMENT (mode) == 0
&& bytelen == GET_MODE_SIZE (mode))
- {
- emit_move_insn (change_address (dst, mode,
- plus_constant (XEXP (dst, 0),
- bytepos)),
- tmps[i]);
- }
+ emit_move_insn (change_address (dst, mode,
+ plus_constant (XEXP (dst, 0),
+ bytepos)),
+ tmps[i]);
else
- {
- store_bit_field (dst, bytelen*BITS_PER_UNIT, bytepos*BITS_PER_UNIT,
+ store_bit_field (dst, bytelen * BITS_PER_UNIT, bytepos * BITS_PER_UNIT,
mode, tmps[i], align, ssize);
- }
}
+
emit_queue();
/* Copy from the pseudo into the (probable) hard reg. */
The primary purpose of this routine is to handle functions
that return BLKmode structures in registers. Some machines
(the PA for example) want to return all small structures
- in registers regardless of the structure's alignment.
- */
+ in registers regardless of the structure's alignment. */
rtx
-copy_blkmode_from_reg(tgtblk,srcreg,type)
+copy_blkmode_from_reg (tgtblk, srcreg, type)
rtx tgtblk;
rtx srcreg;
tree type;
{
- int bytes = int_size_in_bytes (type);
- rtx src = NULL, dst = NULL;
- int bitsize = MIN (TYPE_ALIGN (type), (unsigned int) BITS_PER_WORD);
- int bitpos, xbitpos, big_endian_correction = 0;
-
- if (tgtblk == 0)
- {
- tgtblk = assign_stack_temp (BLKmode, bytes, 0);
- MEM_SET_IN_STRUCT_P (tgtblk, AGGREGATE_TYPE_P (type));
- preserve_temp_slots (tgtblk);
- }
+ unsigned HOST_WIDE_INT bytes = int_size_in_bytes (type);
+ rtx src = NULL, dst = NULL;
+ unsigned HOST_WIDE_INT bitsize = MIN (TYPE_ALIGN (type), BITS_PER_WORD);
+ unsigned HOST_WIDE_INT bitpos, xbitpos, big_endian_correction = 0;
+
+ if (tgtblk == 0)
+ {
+ tgtblk = assign_stack_temp (BLKmode, bytes, 0);
+ MEM_SET_IN_STRUCT_P (tgtblk, AGGREGATE_TYPE_P (type));
+ preserve_temp_slots (tgtblk);
+ }
- /* This code assumes srcreg is at least a full word. If it isn't,
- copy it into a new pseudo which is a full word. */
- if (GET_MODE (srcreg) != BLKmode
- && GET_MODE_SIZE (GET_MODE (srcreg)) < UNITS_PER_WORD)
- srcreg = convert_to_mode (word_mode, srcreg,
- TREE_UNSIGNED (type));
-
- /* Structures whose size is not a multiple of a word are aligned
- to the least significant byte (to the right). On a BYTES_BIG_ENDIAN
- machine, this means we must skip the empty high order bytes when
- calculating the bit offset. */
- if (BYTES_BIG_ENDIAN && bytes % UNITS_PER_WORD)
- big_endian_correction = (BITS_PER_WORD - ((bytes % UNITS_PER_WORD)
- * BITS_PER_UNIT));
-
- /* Copy the structure BITSIZE bites at a time.
-
- We could probably emit more efficient code for machines
- which do not use strict alignment, but it doesn't seem
- worth the effort at the current time. */
- for (bitpos = 0, xbitpos = big_endian_correction;
- bitpos < bytes * BITS_PER_UNIT;
- bitpos += bitsize, xbitpos += bitsize)
- {
-
- /* We need a new source operand each time xbitpos is on a
- word boundary and when xbitpos == big_endian_correction
- (the first time through). */
- if (xbitpos % BITS_PER_WORD == 0
- || xbitpos == big_endian_correction)
- src = operand_subword_force (srcreg,
- xbitpos / BITS_PER_WORD,
- BLKmode);
-
- /* We need a new destination operand each time bitpos is on
- a word boundary. */
- if (bitpos % BITS_PER_WORD == 0)
- dst = operand_subword (tgtblk, bitpos / BITS_PER_WORD, 1, BLKmode);
+ /* This code assumes srcreg is at least a full word. If it isn't,
+ copy it into a new pseudo which is a full word. */
+ if (GET_MODE (srcreg) != BLKmode
+ && GET_MODE_SIZE (GET_MODE (srcreg)) < UNITS_PER_WORD)
+ srcreg = convert_to_mode (word_mode, srcreg, TREE_UNSIGNED (type));
+
+ /* Structures whose size is not a multiple of a word are aligned
+ to the least significant byte (to the right). On a BYTES_BIG_ENDIAN
+ machine, this means we must skip the empty high order bytes when
+ calculating the bit offset. */
+ if (BYTES_BIG_ENDIAN && bytes % UNITS_PER_WORD)
+ big_endian_correction
+ = (BITS_PER_WORD - ((bytes % UNITS_PER_WORD) * BITS_PER_UNIT));
+
+  /* Copy the structure BITSIZE bits at a time.
+
+ We could probably emit more efficient code for machines which do not use
+ strict alignment, but it doesn't seem worth the effort at the current
+ time. */
+ for (bitpos = 0, xbitpos = big_endian_correction;
+ bitpos < bytes * BITS_PER_UNIT;
+ bitpos += bitsize, xbitpos += bitsize)
+ {
+ /* We need a new source operand each time xbitpos is on a
+ word boundary and when xbitpos == big_endian_correction
+ (the first time through). */
+ if (xbitpos % BITS_PER_WORD == 0
+ || xbitpos == big_endian_correction)
+ src = operand_subword_force (srcreg, xbitpos / BITS_PER_WORD, BLKmode);
+
+ /* We need a new destination operand each time bitpos is on
+ a word boundary. */
+ if (bitpos % BITS_PER_WORD == 0)
+ dst = operand_subword (tgtblk, bitpos / BITS_PER_WORD, 1, BLKmode);
- /* Use xbitpos for the source extraction (right justified) and
- xbitpos for the destination store (left justified). */
- store_bit_field (dst, bitsize, bitpos % BITS_PER_WORD, word_mode,
- extract_bit_field (src, bitsize,
- xbitpos % BITS_PER_WORD, 1,
- NULL_RTX, word_mode,
- word_mode,
- bitsize / BITS_PER_UNIT,
- BITS_PER_WORD),
- bitsize / BITS_PER_UNIT, BITS_PER_WORD);
- }
- return tgtblk;
+ /* Use xbitpos for the source extraction (right justified) and
+ xbitpos for the destination store (left justified). */
+ store_bit_field (dst, bitsize, bitpos % BITS_PER_WORD, word_mode,
+ extract_bit_field (src, bitsize,
+ xbitpos % BITS_PER_WORD, 1,
+ NULL_RTX, word_mode, word_mode,
+ bitsize, BITS_PER_WORD),
+ bitsize, BITS_PER_WORD);
+ }
+
+ return tgtblk;
}
}
}
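
To make the big_endian_correction arithmetic in copy_blkmode_from_reg concrete, a worked example under the assumption of a 32-bit word: a 6-byte structure fills one word plus 2 bytes, those 2 bytes sit right-justified in the final word on a big-endian machine, so the copy must skip 32 - (6 % 4) * 8 = 16 empty high-order bits. A standalone sketch of just that computation:

/* Sketch, assuming BITS_PER_WORD == 32 and UNITS_PER_WORD == 4:
   empty high-order bits in the last word of a structure of the given
   size.  big_endian_skip (6) == 16.  */
unsigned int
big_endian_skip (unsigned int bytes)
{
  return bytes % 4 ? 32 - (bytes % 4) * 8 : 0;
}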
\f
-/* Generate several move instructions to clear LEN bytes of block TO.
- (A MEM rtx with BLKmode). The caller must pass TO through
- protect_from_queue before calling. ALIGN (in bytes) is maximum alignment
- we can assume. */
+/* Generate several move instructions to clear LEN bytes of block TO. (A MEM
+ rtx with BLKmode). The caller must pass TO through protect_from_queue
+ before calling. ALIGN is maximum alignment we can assume. */
static void
clear_by_pieces (to, len, align)
rtx to;
- int len, align;
+ int len;
+ unsigned int align;
{
struct clear_by_pieces data;
rtx to_addr = XEXP (to, 0);
- int max_size = MOVE_MAX_PIECES + 1;
+ unsigned int max_size = MOVE_MAX_PIECES + 1;
enum machine_mode mode = VOIDmode, tmode;
enum insn_code icode;
data.to_addr = copy_addr_to_reg (to_addr);
}
- if (! SLOW_UNALIGNED_ACCESS
- || align > MOVE_MAX || align >= BIGGEST_ALIGNMENT / BITS_PER_UNIT)
- align = MOVE_MAX;
+ if (! SLOW_UNALIGNED_ACCESS (word_mode, align)
+ || align > MOVE_MAX * BITS_PER_UNIT || align >= BIGGEST_ALIGNMENT)
+ align = MOVE_MAX * BITS_PER_UNIT;
/* First move what we can in the largest integer mode, then go to
successively smaller modes. */
break;
icode = mov_optab->handlers[(int) mode].insn_code;
- if (icode != CODE_FOR_nothing
- && align >= MIN (BIGGEST_ALIGNMENT / BITS_PER_UNIT,
- GET_MODE_SIZE (mode)))
+ if (icode != CODE_FOR_nothing && align >= GET_MODE_ALIGNMENT (mode))
clear_by_pieces_1 (GEN_FCN (icode), mode, &data);
max_size = GET_MODE_SIZE (mode);
static void
clear_by_pieces_1 (genfun, mode, data)
- rtx (*genfun) PROTO ((rtx, ...));
+ rtx (*genfun) PARAMS ((rtx, ...));
enum machine_mode mode;
struct clear_by_pieces *data;
{
}
}
\f
-/* Write zeros through the storage of OBJECT.
- If OBJECT has BLKmode, SIZE is its length in bytes and ALIGN is
- the maximum alignment we can is has, measured in bytes.
+/* Write zeros through the storage of OBJECT. If OBJECT has BLKmode, SIZE is
+   its length in bytes and ALIGN is the maximum alignment we can assume it has.
If we call a function that returns the length of the block, return it. */
clear_storage (object, size, align)
rtx object;
rtx size;
- int align;
+ unsigned int align;
{
#ifdef TARGET_MEM_FUNCTIONS
static tree fn;
if (GET_CODE (size) == CONST_INT
&& MOVE_BY_PIECES_P (INTVAL (size), align))
clear_by_pieces (object, INTVAL (size), align);
-
else
{
/* Try the most limited insn first, because there's no point
including more than one in the machine description unless
the more limited one has some advantage. */
- rtx opalign = GEN_INT (align);
+ rtx opalign = GEN_INT (align / BITS_PER_UNIT);
enum machine_mode mode;
for (mode = GET_CLASS_NARROWEST_MODE (MODE_INT); mode != VOIDmode;
enum machine_mode mode = GET_MODE (x);
enum machine_mode submode;
enum mode_class class = GET_MODE_CLASS (mode);
- int i;
+ unsigned int i;
if (mode >= MAX_MACHINE_MODE)
abort ();
}
else
{
+ rtx realpart_x, realpart_y;
+ rtx imagpart_x, imagpart_y;
+
/* If this is a complex value with each part being smaller than a
word, the usual calling sequence will likely pack the pieces into
a single register. Unfortunately, SUBREG of hard registers only
rtx cmem = change_address (mem, mode, NULL_RTX);
- current_function->cannot_inline
- = "function uses short complex types";
+	      cfun->cannot_inline
+		= N_("function using short complex types cannot be inline");
if (packed_dest_p)
{
}
}
- /* Show the output dies here. This is necessary for pseudos;
+ realpart_x = gen_realpart (submode, x);
+ realpart_y = gen_realpart (submode, y);
+ imagpart_x = gen_imagpart (submode, x);
+ imagpart_y = gen_imagpart (submode, y);
+
+ /* Show the output dies here. This is necessary for SUBREGs
+ of pseudos since we cannot track their lifetimes correctly;
hard regs shouldn't appear here except as return values.
We never want to emit such a clobber after reload. */
if (x != y
- && ! (reload_in_progress || reload_completed))
+ && ! (reload_in_progress || reload_completed)
+ && (GET_CODE (realpart_x) == SUBREG
+ || GET_CODE (imagpart_x) == SUBREG))
{
emit_insn (gen_rtx_CLOBBER (VOIDmode, x));
}
emit_insn (GEN_FCN (mov_optab->handlers[(int) submode].insn_code)
- (gen_realpart (submode, x), gen_realpart (submode, y)));
+ (realpart_x, realpart_y));
emit_insn (GEN_FCN (mov_optab->handlers[(int) submode].insn_code)
- (gen_imagpart (submode, x), gen_imagpart (submode, y)));
+ (imagpart_x, imagpart_y));
}
return get_last_insn ();
else if (GET_MODE_SIZE (mode) > UNITS_PER_WORD)
{
rtx last_insn = 0;
+ rtx seq;
+ int need_clobber;
#ifdef PUSH_ROUNDING
}
#endif
- /* Show the output dies here. This is necessary for pseudos;
- hard regs shouldn't appear here except as return values.
- We never want to emit such a clobber after reload. */
- if (x != y
- && ! (reload_in_progress || reload_completed))
- {
- emit_insn (gen_rtx_CLOBBER (VOIDmode, x));
- }
+ start_sequence ();
+ need_clobber = 0;
for (i = 0;
i < (GET_MODE_SIZE (mode) + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD;
i++)
if (xpart == 0 || ypart == 0)
abort ();
+ need_clobber |= (GET_CODE (xpart) == SUBREG);
+
last_insn = emit_move_insn (xpart, ypart);
}
+ seq = gen_sequence ();
+ end_sequence ();
+
+ /* Show the output dies here. This is necessary for SUBREGs
+ of pseudos since we cannot track their lifetimes correctly;
+ hard regs shouldn't appear here except as return values.
+ We never want to emit such a clobber after reload. */
+ if (x != y
+ && ! (reload_in_progress || reload_completed)
+ && need_clobber != 0)
+ {
+ emit_insn (gen_rtx_CLOBBER (VOIDmode, x));
+ }
+
+ emit_insn (seq);
+
return last_insn;
}
else
anti_adjust_stack (temp);
}
-#if defined (STACK_GROWS_DOWNWARD) \
- || (defined (ARGS_GROW_DOWNWARD) \
- && !defined (ACCUMULATE_OUTGOING_ARGS))
-
- /* Return the lowest stack address when STACK or ARGS grow downward and
- we are not aaccumulating outgoing arguments (the c4x port uses such
- conventions). */
- temp = virtual_outgoing_args_rtx;
- if (extra != 0 && below)
- temp = plus_constant (temp, extra);
+#ifndef STACK_GROWS_DOWNWARD
+#ifdef ARGS_GROW_DOWNWARD
+ if (!ACCUMULATE_OUTGOING_ARGS)
#else
- if (GET_CODE (size) == CONST_INT)
- temp = plus_constant (virtual_outgoing_args_rtx,
- - INTVAL (size) - (below ? 0 : extra));
- else if (extra != 0 && !below)
- temp = gen_rtx_PLUS (Pmode, virtual_outgoing_args_rtx,
- negate_rtx (Pmode, plus_constant (size, extra)));
- else
- temp = gen_rtx_PLUS (Pmode, virtual_outgoing_args_rtx,
- negate_rtx (Pmode, size));
+ if (0)
+#endif
+#else
+ if (1)
#endif
+ {
+      /* Return the lowest stack address when STACK or ARGS grow downward and
+	 we are not accumulating outgoing arguments (the c4x port uses such
+	 conventions).  */
+ temp = virtual_outgoing_args_rtx;
+ if (extra != 0 && below)
+ temp = plus_constant (temp, extra);
+ }
+ else
+ {
+ if (GET_CODE (size) == CONST_INT)
+ temp = plus_constant (virtual_outgoing_args_rtx,
+ - INTVAL (size) - (below ? 0 : extra));
+ else if (extra != 0 && !below)
+ temp = gen_rtx_PLUS (Pmode, virtual_outgoing_args_rtx,
+ negate_rtx (Pmode, plus_constant (size, extra)));
+ else
+ temp = gen_rtx_PLUS (Pmode, virtual_outgoing_args_rtx,
+ negate_rtx (Pmode, size));
+ }
return memory_address (GET_CLASS_NARROWEST_MODE (MODE_INT), temp);
}
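
The restructured conditional in push_block preserves the old behavior; the preprocessor test becomes an ordinary if so that ACCUMULATE_OUTGOING_ARGS can now be an expression rather than a conditional-compilation switch. As a worked example with size = 16, extra = 0 and below = 0: the first arm yields virtual_outgoing_args_rtx itself, the lowest address of the block, while the second arm yields virtual_outgoing_args_rtx - 16, since there the block ends at virtual_outgoing_args_rtx and must begin 16 bytes before it.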
SIZE is an rtx for the size of data to be copied (in bytes),
needed only if X is BLKmode.
- ALIGN (in bytes) is maximum alignment we can assume.
+ ALIGN is maximum alignment we can assume.
If PARTIAL and REG are both nonzero, then copy that many of the first
words of X into registers starting with REG, and push the rest of X.
enum machine_mode mode;
tree type;
rtx size;
- int align;
+ unsigned int align;
int partial;
rtx reg;
int extra;
and if there is no difficulty with push insns that skip bytes
on the stack for alignment purposes. */
if (args_addr == 0
+ && PUSH_ARGS
&& GET_CODE (size) == CONST_INT
&& skip == 0
&& (MOVE_BY_PIECES_P ((unsigned) INTVAL (size) - used, align))
/* Here we avoid the case of a structure whose weak alignment
forces many pushes of a small amount of data,
and such small pushes do rounding that causes trouble. */
- && ((! SLOW_UNALIGNED_ACCESS)
- || align >= BIGGEST_ALIGNMENT / BITS_PER_UNIT
+ && ((! SLOW_UNALIGNED_ACCESS (word_mode, align))
+ || align >= BIGGEST_ALIGNMENT
|| PUSH_ROUNDING (align) == align)
&& PUSH_ROUNDING (INTVAL (size)) == INTVAL (size))
{
&& where_pad != none && where_pad != stack_direction)
anti_adjust_stack (GEN_INT (extra));
+ stack_pointer_delta += INTVAL (size) - used;
move_by_pieces (gen_rtx_MEM (BLKmode, gen_push_operand ()), xinner,
INTVAL (size) - used, align);
/* TEMP is the address of the block. Copy the data there. */
if (GET_CODE (size) == CONST_INT
- && (MOVE_BY_PIECES_P ((unsigned) INTVAL (size), align)))
+ && MOVE_BY_PIECES_P ((unsigned) INTVAL (size), align))
{
move_by_pieces (gen_rtx_MEM (BLKmode, temp), xinner,
INTVAL (size), align);
}
else
{
- rtx opalign = GEN_INT (align);
+ rtx opalign = GEN_INT (align / BITS_PER_UNIT);
enum machine_mode mode;
rtx target = gen_rtx_MEM (BLKmode, temp);
}
}
-#ifndef ACCUMULATE_OUTGOING_ARGS
- /* If the source is referenced relative to the stack pointer,
- copy it to another register to stabilize it. We do not need
- to do this if we know that we won't be changing sp. */
+ if (!ACCUMULATE_OUTGOING_ARGS)
+ {
+ /* If the source is referenced relative to the stack pointer,
+ copy it to another register to stabilize it. We do not need
+ to do this if we know that we won't be changing sp. */
- if (reg_mentioned_p (virtual_stack_dynamic_rtx, temp)
- || reg_mentioned_p (virtual_outgoing_args_rtx, temp))
- temp = copy_to_reg (temp);
-#endif
+ if (reg_mentioned_p (virtual_stack_dynamic_rtx, temp)
+ || reg_mentioned_p (virtual_outgoing_args_rtx, temp))
+ temp = copy_to_reg (temp);
+ }
/* Make inhibit_defer_pop nonzero around the library call
to force it to pop the bcopy-arguments right away. */
anti_adjust_stack (GEN_INT (extra));
#ifdef PUSH_ROUNDING
- if (args_addr == 0)
- addr = gen_push_operand ();
+ if (args_addr == 0 && PUSH_ARGS)
+ {
+ addr = gen_push_operand ();
+ stack_pointer_delta += PUSH_ROUNDING (GET_MODE_SIZE (mode));
+ }
else
#endif
{
|| TREE_CODE (to) == ARRAY_REF)
{
enum machine_mode mode1;
- int bitsize;
- int bitpos;
+ HOST_WIDE_INT bitsize, bitpos;
tree offset;
int unsignedp;
int volatilep = 0;
tree tem;
- int alignment;
+ unsigned int alignment;
push_temp_slots ();
tem = get_inner_reference (to, &bitsize, &bitpos, &offset, &mode1,
&& bitsize
&& (bitpos % bitsize) == 0
&& (bitsize % GET_MODE_ALIGNMENT (mode1)) == 0
- && (alignment * BITS_PER_UNIT) == GET_MODE_ALIGNMENT (mode1))
+ && alignment == GET_MODE_ALIGNMENT (mode1))
{
rtx temp = change_address (to_rtx, mode1,
plus_constant (XEXP (to_rtx, 0),
TYPE_MODE (integer_type_node));
}
- result = store_field (to_rtx, bitsize, bitpos, mode1, from,
- (want_value
- /* Spurious cast makes HPUX compiler happy. */
- ? (enum machine_mode) TYPE_MODE (TREE_TYPE (to))
- : VOIDmode),
- unsignedp,
- /* Required alignment of containing datum. */
- alignment,
- int_size_in_bytes (TREE_TYPE (tem)),
- get_alias_set (to));
- preserve_temp_slots (result);
- free_temp_slots ();
- pop_temp_slots ();
+ /* If this is a varying-length object, we must get the address of
+ the source and do an explicit block move. */
+ if (bitsize < 0)
+ {
+ unsigned int from_align;
+ rtx from_rtx = expand_expr_unaligned (from, &from_align);
+ rtx inner_to_rtx
+ = change_address (to_rtx, VOIDmode,
+ plus_constant (XEXP (to_rtx, 0),
+ bitpos / BITS_PER_UNIT));
+
+ emit_block_move (inner_to_rtx, from_rtx, expr_size (from),
+ MIN (alignment, from_align));
+ free_temp_slots ();
+ pop_temp_slots ();
+ return to_rtx;
+ }
+ else
+ {
+ result = store_field (to_rtx, bitsize, bitpos, mode1, from,
+ (want_value
+ /* Spurious cast for HPUX compiler. */
+ ? ((enum machine_mode)
+ TYPE_MODE (TREE_TYPE (to)))
+ : VOIDmode),
+ unsignedp,
+ alignment,
+ int_size_in_bytes (TREE_TYPE (tem)),
+ get_alias_set (to));
- /* If the value is meaningful, convert RESULT to the proper mode.
- Otherwise, return nothing. */
- return (want_value ? convert_modes (TYPE_MODE (TREE_TYPE (to)),
- TYPE_MODE (TREE_TYPE (from)),
- result,
- TREE_UNSIGNED (TREE_TYPE (to)))
- : NULL_RTX);
+ preserve_temp_slots (result);
+ free_temp_slots ();
+ pop_temp_slots ();
+
+ /* If the value is meaningful, convert RESULT to the proper mode.
+ Otherwise, return nothing. */
+ return (want_value ? convert_modes (TYPE_MODE (TREE_TYPE (to)),
+ TYPE_MODE (TREE_TYPE (from)),
+ result,
+ TREE_UNSIGNED (TREE_TYPE (to)))
+ : NULL_RTX);
+ }
}
/* If the rhs is a function call and its value is not an aggregate,
val = setjmp (buf) on machines where reference to val
requires loading up part of an address in a separate insn.
- Don't do this if TO is a VAR_DECL whose DECL_RTL is REG since it might be
- a promoted variable where the zero- or sign- extension needs to be done.
- Handling this in the normal way is safe because no computation is done
- before the call. */
+ Don't do this if TO is a VAR_DECL or PARM_DECL whose DECL_RTL is REG
+ since it might be a promoted variable where the zero- or sign- extension
+ needs to be done. Handling this in the normal way is safe because no
+ computation is done before the call. */
if (TREE_CODE (from) == CALL_EXPR && ! aggregate_value_p (from)
&& TREE_CODE (TYPE_SIZE (TREE_TYPE (from))) == INTEGER_CST
- && ! (TREE_CODE (to) == VAR_DECL && GET_CODE (DECL_RTL (to)) == REG))
+ && ! ((TREE_CODE (to) == VAR_DECL || TREE_CODE (to) == PARM_DECL)
+ && GET_CODE (DECL_RTL (to)) == REG))
{
rtx value;
The Irix 6 ABI has examples of this. */
if (GET_CODE (to_rtx) == PARALLEL)
emit_group_load (to_rtx, value, int_size_in_bytes (TREE_TYPE (from)),
- TYPE_ALIGN (TREE_TYPE (from)) / BITS_PER_UNIT);
+ TYPE_ALIGN (TREE_TYPE (from)));
else if (GET_MODE (to_rtx) == BLKmode)
emit_block_move (to_rtx, value, expr_size (from),
- TYPE_ALIGN (TREE_TYPE (from)) / BITS_PER_UNIT);
+ TYPE_ALIGN (TREE_TYPE (from)));
else
{
#ifdef POINTERS_EXTEND_UNSIGNED
if (GET_CODE (to_rtx) == PARALLEL)
emit_group_load (to_rtx, temp, int_size_in_bytes (TREE_TYPE (from)),
- TYPE_ALIGN (TREE_TYPE (from)) / BITS_PER_UNIT);
+ TYPE_ALIGN (TREE_TYPE (from)));
else
emit_move_insn (to_rtx, temp);
size = expr_size (exp);
if (GET_CODE (size) == CONST_INT
&& INTVAL (size) < TREE_STRING_LENGTH (exp))
- emit_block_move (target, temp, size,
- TYPE_ALIGN (TREE_TYPE (exp)) / BITS_PER_UNIT);
+ emit_block_move (target, temp, size, TYPE_ALIGN (TREE_TYPE (exp)));
else
{
/* Compute the size of the data to copy from the string. */
tree copy_size
= size_binop (MIN_EXPR,
make_tree (sizetype, size),
- convert (sizetype,
- build_int_2 (TREE_STRING_LENGTH (exp), 0)));
+ size_int (TREE_STRING_LENGTH (exp)));
rtx copy_size_rtx = expand_expr (copy_size, NULL_RTX,
VOIDmode, 0);
rtx label = 0;
/* Copy that much. */
emit_block_move (target, temp, copy_size_rtx,
- TYPE_ALIGN (TREE_TYPE (exp)) / BITS_PER_UNIT);
+ TYPE_ALIGN (TREE_TYPE (exp)));
/* Figure out how much is left in TARGET that we have to clear.
Do all calculations in ptr_mode. */
The Irix 6 ABI has examples of this. */
else if (GET_CODE (target) == PARALLEL)
emit_group_load (target, temp, int_size_in_bytes (TREE_TYPE (exp)),
- TYPE_ALIGN (TREE_TYPE (exp)) / BITS_PER_UNIT);
+ TYPE_ALIGN (TREE_TYPE (exp)));
else if (GET_MODE (temp) == BLKmode)
emit_block_move (target, temp, expr_size (exp),
- TYPE_ALIGN (TREE_TYPE (exp)) / BITS_PER_UNIT);
+ TYPE_ALIGN (TREE_TYPE (exp)));
else
emit_move_insn (target, temp);
}
return is_zeros_p (TREE_OPERAND (exp, 0));
case INTEGER_CST:
- return TREE_INT_CST_LOW (exp) == 0 && TREE_INT_CST_HIGH (exp) == 0;
+ return integer_zerop (exp);
case COMPLEX_CST:
return
store_constructor_field (target, bitsize, bitpos,
mode, exp, type, align, cleared)
rtx target;
- int bitsize, bitpos;
+ unsigned HOST_WIDE_INT bitsize;
+ HOST_WIDE_INT bitpos;
enum machine_mode mode;
tree exp, type;
- int align;
+ unsigned int align;
int cleared;
{
if (TREE_CODE (exp) == CONSTRUCTOR
? BLKmode : VOIDmode,
plus_constant (XEXP (target, 0),
bitpos / BITS_PER_UNIT));
- store_constructor (exp, target, align, cleared);
+ store_constructor (exp, target, align, cleared, bitsize / BITS_PER_UNIT);
}
else
- store_field (target, bitsize, bitpos, mode, exp, VOIDmode, 0,
- (align + BITS_PER_UNIT - 1) / BITS_PER_UNIT,
- int_size_in_bytes (type), cleared);
+ store_field (target, bitsize, bitpos, mode, exp, VOIDmode, 0, align,
+ int_size_in_bytes (type), 0);
}
/* Store the value of constructor EXP into the rtx TARGET.
TARGET is either a REG or a MEM.
- ALIGN is the maximum known alignment for TARGET, in bits.
- CLEARED is true if TARGET is known to have been zero'd. */
+ ALIGN is the maximum known alignment for TARGET.
+ CLEARED is true if TARGET is known to have been zero'd.
+ SIZE is the number of bytes of TARGET we are allowed to modify: this
+ may not be the same as the size of EXP if we are assigning to a field
+ which has been packed to exclude padding bits. */
static void
-store_constructor (exp, target, align, cleared)
+store_constructor (exp, target, align, cleared, size)
tree exp;
rtx target;
- int align;
+ unsigned int align;
int cleared;
+ HOST_WIDE_INT size;
{
tree type = TREE_TYPE (exp);
#ifdef WORD_REGISTER_OPERATIONS
- rtx exp_size = expr_size (exp);
+ HOST_WIDE_INT exp_size = int_size_in_bytes (type);
#endif
/* We know our target cannot conflict, since safe_from_p has been called. */
if (GET_CODE (target) == REG && REGNO (target) < FIRST_PSEUDO_REGISTER)
{
rtx temp = gen_reg_rtx (GET_MODE (target));
- store_constructor (exp, temp, 0);
+ store_constructor (exp, temp, align, cleared, size);
emit_move_insn (target, temp);
return;
}
/* If the constructor is empty, clear the union. */
if (! CONSTRUCTOR_ELTS (exp) && ! cleared)
- clear_storage (target, expr_size (exp),
- TYPE_ALIGN (type) / BITS_PER_UNIT);
+ clear_storage (target, expr_size (exp), TYPE_ALIGN (type));
}
/* If we are building a static constructor into a register,
/* If the constructor has fewer fields than the structure
or if we are initializing the structure to mostly zeros,
clear the whole structure first. */
- else if ((list_length (CONSTRUCTOR_ELTS (exp))
- != list_length (TYPE_FIELDS (type)))
- || mostly_zeros_p (exp))
+ else if (size > 0
+ && ((list_length (CONSTRUCTOR_ELTS (exp))
+ != fields_length (type))
+ || mostly_zeros_p (exp)))
{
if (! cleared)
- clear_storage (target, expr_size (exp),
- (align + BITS_PER_UNIT - 1) / BITS_PER_UNIT);
+ clear_storage (target, GEN_INT (size), align);
cleared = 1;
}
tree value = TREE_VALUE (elt);
#endif
register enum machine_mode mode;
- int bitsize;
- int bitpos = 0;
+ HOST_WIDE_INT bitsize;
+ HOST_WIDE_INT bitpos = 0;
int unsignedp;
- tree pos, constant = 0, offset = 0;
+ tree offset;
rtx to_rtx = target;
/* Just ignore missing fields.
if (cleared && is_zeros_p (TREE_VALUE (elt)))
continue;
- if (TREE_CODE (DECL_SIZE (field)) == INTEGER_CST)
- bitsize = TREE_INT_CST_LOW (DECL_SIZE (field));
+ if (host_integerp (DECL_SIZE (field), 1))
+ bitsize = tree_low_cst (DECL_SIZE (field), 1);
else
bitsize = -1;
if (DECL_BIT_FIELD (field))
mode = VOIDmode;
- pos = DECL_FIELD_BITPOS (field);
- if (TREE_CODE (pos) == INTEGER_CST)
- constant = pos;
- else if (TREE_CODE (pos) == PLUS_EXPR
- && TREE_CODE (TREE_OPERAND (pos, 1)) == INTEGER_CST)
- constant = TREE_OPERAND (pos, 1), offset = TREE_OPERAND (pos, 0);
+ offset = DECL_FIELD_OFFSET (field);
+ if (host_integerp (offset, 0)
+ && host_integerp (bit_position (field), 0))
+ {
+ bitpos = int_bit_position (field);
+ offset = 0;
+ }
else
- offset = pos;
-
- if (constant)
- bitpos = TREE_INT_CST_LOW (constant);
-
+ bitpos = tree_low_cst (DECL_FIELD_BIT_OFFSET (field), 0);
+
if (offset)
{
rtx offset_rtx;
offset = build (WITH_RECORD_EXPR, sizetype,
offset, make_tree (TREE_TYPE (exp), target));
- offset = size_binop (EXACT_DIV_EXPR, offset,
- size_int (BITS_PER_UNIT));
-
offset_rtx = expand_expr (offset, NULL_RTX, VOIDmode, 0);
if (GET_CODE (to_rtx) != MEM)
abort ();
gen_rtx_PLUS (ptr_mode, XEXP (to_rtx, 0),
force_reg (ptr_mode,
offset_rtx)));
+ align = DECL_OFFSET_ALIGN (field);
}
if (TREE_READONLY (field))
start of a word, try to widen it to a full word.
This special case allows us to output C++ member function
initializations in a form that the optimizers can understand. */
- if (constant
- && GET_CODE (target) == REG
+ if (GET_CODE (target) == REG
&& bitsize < BITS_PER_WORD
&& bitpos % BITS_PER_WORD == 0
&& GET_MODE_CLASS (mode) == MODE_INT
&& TREE_CODE (value) == INTEGER_CST
- && GET_CODE (exp_size) == CONST_INT
- && bitpos + BITS_PER_WORD <= INTVAL (exp_size) * BITS_PER_UNIT)
+ && exp_size >= 0
+ && bitpos + BITS_PER_WORD <= exp_size * BITS_PER_UNIT)
{
tree type = TREE_TYPE (value);
if (TYPE_PRECISION (type) < BITS_PER_WORD)
}
#endif
store_constructor_field (to_rtx, bitsize, bitpos, mode,
- TREE_VALUE (elt), type,
- MIN (align,
- DECL_ALIGN (TREE_PURPOSE (elt))),
- cleared);
+ TREE_VALUE (elt), type, align, cleared);
}
}
else if (TREE_CODE (type) == ARRAY_TYPE)
{
tree index = TREE_PURPOSE (elt);
HOST_WIDE_INT this_node_count;
+
if (index != NULL_TREE && TREE_CODE (index) == RANGE_EXPR)
{
tree lo_index = TREE_OPERAND (index, 0);
tree hi_index = TREE_OPERAND (index, 1);
- if (TREE_CODE (lo_index) != INTEGER_CST
- || TREE_CODE (hi_index) != INTEGER_CST)
+
+ if (! host_integerp (lo_index, 1)
+ || ! host_integerp (hi_index, 1))
{
need_to_clear = 1;
break;
}
- this_node_count = TREE_INT_CST_LOW (hi_index)
- - TREE_INT_CST_LOW (lo_index) + 1;
+
+ this_node_count = (tree_low_cst (hi_index, 1)
+ - tree_low_cst (lo_index, 1) + 1);
}
else
this_node_count = 1;
|| 4 * zero_count >= 3 * count)
need_to_clear = 1;
}
- if (need_to_clear)
+ if (need_to_clear && size > 0)
{
if (! cleared)
- clear_storage (target, expr_size (exp),
- (align + BITS_PER_UNIT - 1) / BITS_PER_UNIT);
+ clear_storage (target, GEN_INT (size), align);
cleared = 1;
}
else
elt = TREE_CHAIN (elt), i++)
{
register enum machine_mode mode;
- int bitsize;
- int bitpos;
+ HOST_WIDE_INT bitsize;
+ HOST_WIDE_INT bitpos;
int unsignedp;
tree value = TREE_VALUE (elt);
- int align = TYPE_ALIGN (TREE_TYPE (value));
+ unsigned int align = TYPE_ALIGN (TREE_TYPE (value));
tree index = TREE_PURPOSE (elt);
rtx xtarget = target;
unsignedp = TREE_UNSIGNED (elttype);
mode = TYPE_MODE (elttype);
if (mode == BLKmode)
- {
- if (TREE_CODE (TYPE_SIZE (elttype)) == INTEGER_CST
- && TREE_INT_CST_HIGH (TYPE_SIZE (elttype)) == 0)
- bitsize = TREE_INT_CST_LOW (TYPE_SIZE (elttype));
- else
- bitsize = -1;
- }
+ bitsize = (host_integerp (TYPE_SIZE (elttype), 1)
+ ? tree_low_cst (TYPE_SIZE (elttype), 1)
+ : -1);
else
bitsize = GET_MODE_BITSIZE (mode);
tree position;
/* If the range is constant and "small", unroll the loop. */
- if (TREE_CODE (lo_index) == INTEGER_CST
- && TREE_CODE (hi_index) == INTEGER_CST
- && (lo = TREE_INT_CST_LOW (lo_index),
- hi = TREE_INT_CST_LOW (hi_index),
+ if (host_integerp (lo_index, 0)
+ && host_integerp (hi_index, 0)
+ && (lo = tree_low_cst (lo_index, 0),
+ hi = tree_low_cst (hi_index, 0),
count = hi - lo + 1,
(GET_CODE (target) != MEM
|| count <= 2
- || (TREE_CODE (TYPE_SIZE (elttype)) == INTEGER_CST
- && TREE_INT_CST_LOW (TYPE_SIZE (elttype)) * count
- <= 40 * 8))))
+ || (host_integerp (TYPE_SIZE (elttype), 1)
+ && (tree_low_cst (TYPE_SIZE (elttype), 1) * count
+ <= 40 * 8)))))
{
lo -= minelt; hi -= minelt;
for (; lo <= hi; lo++)
{
- bitpos = lo * TREE_INT_CST_LOW (TYPE_SIZE (elttype));
+ bitpos = lo * tree_low_cst (TYPE_SIZE (elttype), 0);
store_constructor_field (target, bitsize, bitpos, mode,
value, type, align, cleared);
}
loop = expand_start_loop (0);
/* Assign value to element index. */
- position = size_binop (EXACT_DIV_EXPR, TYPE_SIZE (elttype),
- size_int (BITS_PER_UNIT));
- position = size_binop (MULT_EXPR,
- size_binop (MINUS_EXPR, index,
- TYPE_MIN_VALUE (domain)),
- position);
+ position
+ = convert (ssizetype,
+ fold (build (MINUS_EXPR, TREE_TYPE (index),
+ index, TYPE_MIN_VALUE (domain))));
+ position = size_binop (MULT_EXPR, position,
+ convert (ssizetype,
+ TYPE_SIZE_UNIT (elttype)));
+
pos_rtx = expand_expr (position, 0, VOIDmode, 0);
addr = gen_rtx_PLUS (Pmode, XEXP (target, 0), pos_rtx);
xtarget = change_address (target, mode, addr);
if (TREE_CODE (value) == CONSTRUCTOR)
- store_constructor (value, xtarget, align, cleared);
+ store_constructor (value, xtarget, align, cleared,
+ bitsize / BITS_PER_UNIT);
else
store_expr (value, xtarget, 0);
index, integer_one_node), 0, 0);
expand_end_loop ();
emit_label (loop_end);
-
- /* Needed by stupid register allocation. to extend the
- lifetime of pseudo-regs used by target past the end
- of the loop. */
- emit_insn (gen_rtx_USE (GET_MODE (target), target));
}
}
- else if ((index != 0 && TREE_CODE (index) != INTEGER_CST)
- || TREE_CODE (TYPE_SIZE (elttype)) != INTEGER_CST)
+ else if ((index != 0 && ! host_integerp (index, 0))
+ || ! host_integerp (TYPE_SIZE (elttype), 1))
{
rtx pos_rtx, addr;
tree position;
if (index == 0)
- index = size_int (i);
+		index = ssize_int (i);
if (minelt)
- index = size_binop (MINUS_EXPR, index,
- TYPE_MIN_VALUE (domain));
- position = size_binop (EXACT_DIV_EXPR, TYPE_SIZE (elttype),
- size_int (BITS_PER_UNIT));
- position = size_binop (MULT_EXPR, index, position);
+ index = convert (ssizetype,
+ fold (build (MINUS_EXPR, index,
+ TYPE_MIN_VALUE (domain))));
+
+ position = size_binop (MULT_EXPR, index,
+ convert (ssizetype,
+ TYPE_SIZE_UNIT (elttype)));
pos_rtx = expand_expr (position, 0, VOIDmode, 0);
addr = gen_rtx_PLUS (Pmode, XEXP (target, 0), pos_rtx);
xtarget = change_address (target, mode, addr);
else
{
if (index != 0)
- bitpos = ((TREE_INT_CST_LOW (index) - minelt)
- * TREE_INT_CST_LOW (TYPE_SIZE (elttype)));
+ bitpos = ((tree_low_cst (index, 0) - minelt)
+ * tree_low_cst (TYPE_SIZE (elttype), 1));
else
- bitpos = (i * TREE_INT_CST_LOW (TYPE_SIZE (elttype)));
+ bitpos = (i * tree_low_cst (TYPE_SIZE (elttype), 1));
+
store_constructor_field (target, bitsize, bitpos, mode, value,
type, align, cleared);
}
}
}
- /* set constructor assignments */
+
+  /* Set constructor assignments.  */
else if (TREE_CODE (type) == SET_TYPE)
{
tree elt = CONSTRUCTOR_ELTS (exp);
- int nbytes = int_size_in_bytes (type), nbits;
+ unsigned HOST_WIDE_INT nbytes = int_size_in_bytes (type), nbits;
tree domain = TYPE_DOMAIN (type);
tree domain_min, domain_max, bitlength;
bzero/memset), and set the bits we want. */
/* Check for all zeros. */
- if (elt == NULL_TREE)
+ if (elt == NULL_TREE && size > 0)
{
if (!cleared)
- clear_storage (target, expr_size (exp),
- TYPE_ALIGN (type) / BITS_PER_UNIT);
+ clear_storage (target, GEN_INT (size), TYPE_ALIGN (type));
return;
}
domain_min = convert (sizetype, TYPE_MIN_VALUE (domain));
domain_max = convert (sizetype, TYPE_MAX_VALUE (domain));
bitlength = size_binop (PLUS_EXPR,
- size_binop (MINUS_EXPR, domain_max, domain_min),
- size_one_node);
+ size_diffop (domain_max, domain_min),
+ ssize_int (1));
- if (nbytes < 0 || TREE_CODE (bitlength) != INTEGER_CST)
- abort ();
- nbits = TREE_INT_CST_LOW (bitlength);
+ nbits = tree_low_cst (bitlength, 1);
/* For "small" sets, or "medium-sized" (up to 32 bytes) sets that
are "complicated" (more than one range), initialize (the
if (GET_MODE (target) != BLKmode || nbits <= 2 * BITS_PER_WORD
|| (nbytes <= 32 && TREE_CHAIN (elt) != NULL_TREE))
{
- int set_word_size = TYPE_ALIGN (TREE_TYPE (exp));
+ unsigned int set_word_size = TYPE_ALIGN (TREE_TYPE (exp));
enum machine_mode mode = mode_for_size (set_word_size, MODE_INT, 1);
char *bit_buffer = (char *) alloca (nbits);
HOST_WIDE_INT word = 0;
- int bit_pos = 0;
- int ibit = 0;
- int offset = 0; /* In bytes from beginning of set. */
+ unsigned int bit_pos = 0;
+ unsigned int ibit = 0;
+ unsigned int offset = 0; /* In bytes from beginning of set. */
+
elt = get_set_constructor_bits (exp, bit_buffer, nbits);
for (;;)
{
else
word |= 1 << bit_pos;
}
+
bit_pos++; ibit++;
if (bit_pos >= set_word_size || ibit == nbits)
{
{
rtx datum = GEN_INT (word);
rtx to_rtx;
+
/* The assumption here is that it is safe to use
XEXP if the set is multi-word, but not if
it's single-word. */
abort ();
emit_move_insn (to_rtx, datum);
}
+
if (ibit == nbits)
break;
word = 0;
}
}
else if (!cleared)
- {
- /* Don't bother clearing storage if the set is all ones. */
- if (TREE_CHAIN (elt) != NULL_TREE
- || (TREE_PURPOSE (elt) == NULL_TREE
- ? nbits != 1
- : (TREE_CODE (TREE_VALUE (elt)) != INTEGER_CST
- || TREE_CODE (TREE_PURPOSE (elt)) != INTEGER_CST
- || (TREE_INT_CST_LOW (TREE_VALUE (elt))
- - TREE_INT_CST_LOW (TREE_PURPOSE (elt)) + 1
- != nbits))))
- clear_storage (target, expr_size (exp),
- TYPE_ALIGN (type) / BITS_PER_UNIT);
- }
+ /* Don't bother clearing storage if the set is all ones. */
+ if (TREE_CHAIN (elt) != NULL_TREE
+ || (TREE_PURPOSE (elt) == NULL_TREE
+ ? nbits != 1
+		: (! host_integerp (TREE_VALUE (elt), 0)
+ || ! host_integerp (TREE_PURPOSE (elt), 0)
+ || (tree_low_cst (TREE_VALUE (elt), 0)
+ - tree_low_cst (TREE_PURPOSE (elt), 0) + 1
+ != (HOST_WIDE_INT) nbits))))
+ clear_storage (target, expr_size (exp), TYPE_ALIGN (type));
for (; elt != NULL_TREE; elt = TREE_CHAIN (elt))
{
#ifdef TARGET_MEM_FUNCTIONS
HOST_WIDE_INT startb, endb;
#endif
- rtx bitlength_rtx, startbit_rtx, endbit_rtx, targetx;
+ rtx bitlength_rtx, startbit_rtx, endbit_rtx, targetx;
bitlength_rtx = expand_expr (bitlength,
- NULL_RTX, MEM, EXPAND_CONST_ADDRESS);
+ NULL_RTX, MEM, EXPAND_CONST_ADDRESS);
/* handle non-range tuple element like [ expr ] */
if (startbit == NULL_TREE)
startbit = save_expr (endbit);
endbit = startbit;
}
+
startbit = convert (sizetype, startbit);
endbit = convert (sizetype, endbit);
if (! integer_zerop (domain_min))
0);
emit_move_insn (targetx, target);
}
+
else if (GET_CODE (target) == MEM)
targetx = target;
else
}
else
#endif
- {
- emit_library_call (gen_rtx_SYMBOL_REF (Pmode, "__setbits"),
- 0, VOIDmode, 4, XEXP (targetx, 0), Pmode,
- bitlength_rtx, TYPE_MODE (sizetype),
- startbit_rtx, TYPE_MODE (sizetype),
- endbit_rtx, TYPE_MODE (sizetype));
- }
+ emit_library_call (gen_rtx_SYMBOL_REF (Pmode, "__setbits"),
+ 0, VOIDmode, 4, XEXP (targetx, 0), Pmode,
+ bitlength_rtx, TYPE_MODE (sizetype),
+ startbit_rtx, TYPE_MODE (sizetype),
+ endbit_rtx, TYPE_MODE (sizetype));
+
if (REG_P (target))
emit_move_insn (target, targetx);
}
has mode VALUE_MODE if that is convenient to do.
In this case, UNSIGNEDP must be nonzero if the value is an unsigned type.
- ALIGN is the alignment that TARGET is known to have, measured in bytes.
+ ALIGN is the alignment that TARGET is known to have.
TOTAL_SIZE is the size in bytes of the structure, or -1 if varying.
ALIAS_SET is the alias set for the destination. This value will
store_field (target, bitsize, bitpos, mode, exp, value_mode,
unsignedp, align, total_size, alias_set)
rtx target;
- int bitsize, bitpos;
+ HOST_WIDE_INT bitsize;
+ HOST_WIDE_INT bitpos;
enum machine_mode mode;
tree exp;
enum machine_mode value_mode;
int unsignedp;
- int align;
- int total_size;
+ unsigned int align;
+ HOST_WIDE_INT total_size;
int alias_set;
{
HOST_WIDE_INT width_mask = 0;
return blk_object;
}
+ if (GET_CODE (target) == CONCAT)
+ {
+ /* We're storing into a struct containing a single __complex. */
+
+ if (bitpos != 0)
+ abort ();
+ return store_expr (exp, target, 0);
+ }
+
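
A source-level case that can reach the new CONCAT path (a hypothetical illustration; whether the structure is actually represented as a CONCAT of two registers depends on the target and on register allocation):

/* GNU C sketch: c may live as a CONCAT of two floating registers
   holding the real and imaginary parts, so storing the lone member
   must store the whole pair at bit position 0.  */
struct wrap { __complex__ double z; };

__complex__ double
roundtrip (__complex__ double v)
{
  struct wrap c;
  c.z = v;
  return c.z;
}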
/* If the structure is in a register or if the component
is a bit field, we cannot use addressing to access it.
Use bit-field techniques or SUBREG to store in it. */
|| GET_CODE (target) == SUBREG
/* If the field isn't aligned enough to store as an ordinary memref,
store it as a bit field. */
- || (mode != BLKmode && SLOW_UNALIGNED_ACCESS
- && (align * BITS_PER_UNIT < GET_MODE_ALIGNMENT (mode)
+ || (mode != BLKmode && SLOW_UNALIGNED_ACCESS (mode, align)
+ && (align < GET_MODE_ALIGNMENT (mode)
|| bitpos % GET_MODE_ALIGNMENT (mode)))
- || (mode == BLKmode && SLOW_UNALIGNED_ACCESS
- && (TYPE_ALIGN (TREE_TYPE (exp)) > align * BITS_PER_UNIT
+ || (mode == BLKmode && SLOW_UNALIGNED_ACCESS (mode, align)
+ && (TYPE_ALIGN (TREE_TYPE (exp)) > align
|| bitpos % TYPE_ALIGN (TREE_TYPE (exp)) != 0))
/* If the RHS and field are a constant size and the size of the
RHS isn't the same size as the bitfield, we must use bitfield
operations. */
- || ((bitsize >= 0
- && TREE_CODE (TYPE_SIZE (TREE_TYPE (exp))) == INTEGER_CST)
- && (TREE_INT_CST_HIGH (TYPE_SIZE (TREE_TYPE (exp))) != 0
- || TREE_INT_CST_LOW (TYPE_SIZE (TREE_TYPE (exp))) != bitsize)))
+ || (bitsize >= 0
+ && TREE_CODE (TYPE_SIZE (TREE_TYPE (exp))) == INTEGER_CST
+ && compare_tree_int (TYPE_SIZE (TREE_TYPE (exp)), bitsize) != 0))
{
rtx temp = expand_expr (exp, NULL_RTX, VOIDmode, 0);
boundary. If so, we simply do a block copy. */
if (GET_MODE (target) == BLKmode && GET_MODE (temp) == BLKmode)
{
+ unsigned int exp_align = expr_align (exp);
+
if (GET_CODE (target) != MEM || GET_CODE (temp) != MEM
|| bitpos % BITS_PER_UNIT != 0)
abort ();
plus_constant (XEXP (target, 0),
bitpos / BITS_PER_UNIT));
+ /* Make sure that ALIGN is no stricter than the alignment of EXP. */
+ align = MIN (exp_align, align);
+
/* Find an alignment that is consistent with the bit position. */
- while ((bitpos % (align * BITS_PER_UNIT)) != 0)
+ while ((bitpos % align) != 0)
align >>= 1;
emit_block_move (target, temp,
giving the variable offset (in units) in *POFFSET.
This offset is in addition to the bit position.
If the position is not variable, we store 0 in *POFFSET.
- We set *PALIGNMENT to the alignment in bytes of the address that will be
+ We set *PALIGNMENT to the alignment of the address that will be
computed. This is the alignment of the thing we return if *POFFSET
   is zero, but can be less strictly aligned if *POFFSET is nonzero.
get_inner_reference (exp, pbitsize, pbitpos, poffset, pmode,
punsignedp, pvolatilep, palignment)
tree exp;
- int *pbitsize;
- int *pbitpos;
+ HOST_WIDE_INT *pbitsize;
+ HOST_WIDE_INT *pbitpos;
tree *poffset;
enum machine_mode *pmode;
int *punsignedp;
int *pvolatilep;
- int *palignment;
+ unsigned int *palignment;
{
- tree orig_exp = exp;
tree size_tree = 0;
enum machine_mode mode = VOIDmode;
- tree offset = integer_zero_node;
+ tree offset = size_zero_node;
+ tree bit_offset = bitsize_zero_node;
unsigned int alignment = BIGGEST_ALIGNMENT;
+ tree tem;
+ /* First get the mode, signedness, and size. We do this from just the
+ outermost expression. */
if (TREE_CODE (exp) == COMPONENT_REF)
{
size_tree = DECL_SIZE (TREE_OPERAND (exp, 1));
if (! DECL_BIT_FIELD (TREE_OPERAND (exp, 1)))
mode = DECL_MODE (TREE_OPERAND (exp, 1));
+
*punsignedp = TREE_UNSIGNED (TREE_OPERAND (exp, 1));
}
else if (TREE_CODE (exp) == BIT_FIELD_REF)
else
{
mode = TYPE_MODE (TREE_TYPE (exp));
+ *punsignedp = TREE_UNSIGNED (TREE_TYPE (exp));
+
if (mode == BLKmode)
size_tree = TYPE_SIZE (TREE_TYPE (exp));
-
- *pbitsize = GET_MODE_BITSIZE (mode);
- *punsignedp = TREE_UNSIGNED (TREE_TYPE (exp));
+ else
+ *pbitsize = GET_MODE_BITSIZE (mode);
}
- if (size_tree)
+ if (size_tree != 0)
{
- if (TREE_CODE (size_tree) != INTEGER_CST)
+ if (! host_integerp (size_tree, 1))
mode = BLKmode, *pbitsize = -1;
else
- *pbitsize = TREE_INT_CST_LOW (size_tree);
+ *pbitsize = tree_low_cst (size_tree, 1);
}
/* Compute cumulative bit-offset for nested component-refs and array-refs,
and find the ultimate containing object. */
-
- *pbitpos = 0;
-
while (1)
{
- if (TREE_CODE (exp) == COMPONENT_REF || TREE_CODE (exp) == BIT_FIELD_REF)
+ if (TREE_CODE (exp) == BIT_FIELD_REF)
+ bit_offset = size_binop (PLUS_EXPR, bit_offset, TREE_OPERAND (exp, 2));
+ else if (TREE_CODE (exp) == COMPONENT_REF)
{
- tree pos = (TREE_CODE (exp) == COMPONENT_REF
- ? DECL_FIELD_BITPOS (TREE_OPERAND (exp, 1))
- : TREE_OPERAND (exp, 2));
- tree constant = integer_zero_node, var = pos;
+ tree field = TREE_OPERAND (exp, 1);
+ tree this_offset = DECL_FIELD_OFFSET (field);
/* If this field hasn't been filled in yet, don't go
past it. This should only happen when folding expressions
made during type construction. */
- if (pos == 0)
+ if (this_offset == 0)
break;
+ else if (! TREE_CONSTANT (this_offset)
+ && contains_placeholder_p (this_offset))
+ this_offset = build (WITH_RECORD_EXPR, sizetype, this_offset, exp);
- /* Assume here that the offset is a multiple of a unit.
- If not, there should be an explicitly added constant. */
- if (TREE_CODE (pos) == PLUS_EXPR
- && TREE_CODE (TREE_OPERAND (pos, 1)) == INTEGER_CST)
- constant = TREE_OPERAND (pos, 1), var = TREE_OPERAND (pos, 0);
- else if (TREE_CODE (pos) == INTEGER_CST)
- constant = pos, var = integer_zero_node;
+ offset = size_binop (PLUS_EXPR, offset, this_offset);
+ bit_offset = size_binop (PLUS_EXPR, bit_offset,
+ DECL_FIELD_BIT_OFFSET (field));
- *pbitpos += TREE_INT_CST_LOW (constant);
- offset = size_binop (PLUS_EXPR, offset,
- size_binop (EXACT_DIV_EXPR, var,
- size_int (BITS_PER_UNIT)));
+ if (! host_integerp (offset, 0))
+ alignment = MIN (alignment, DECL_OFFSET_ALIGN (field));
}
else if (TREE_CODE (exp) == ARRAY_REF)
{
- /* This code is based on the code in case ARRAY_REF in expand_expr
- below. We assume here that the size of an array element is
- always an integral multiple of BITS_PER_UNIT. */
-
tree index = TREE_OPERAND (exp, 1);
tree domain = TYPE_DOMAIN (TREE_TYPE (TREE_OPERAND (exp, 0)));
- tree low_bound
- = domain ? TYPE_MIN_VALUE (domain) : integer_zero_node;
- tree index_type = TREE_TYPE (index);
- tree xindex;
-
- if (TYPE_PRECISION (index_type) != TYPE_PRECISION (sizetype))
- {
- index = convert (type_for_size (TYPE_PRECISION (sizetype), 0),
- index);
- index_type = TREE_TYPE (index);
- }
+ tree low_bound = (domain ? TYPE_MIN_VALUE (domain) : 0);
+ tree unit_size = TYPE_SIZE_UNIT (TREE_TYPE (exp));
+
+ /* We assume all arrays have sizes that are a multiple of a byte.
+ First subtract the lower bound, if any, in the type of the
+ index, then convert to sizetype and multiply by the size of the
+ array element. */
+ if (low_bound != 0 && ! integer_zerop (low_bound))
+ index = fold (build (MINUS_EXPR, TREE_TYPE (index),
+ index, low_bound));
+
+	  /* If the index has a self-referential type, pass it to a
+	     WITH_RECORD_EXPR; if the component size is self-referential,
+	     pass our component to one.  */
+ if (! TREE_CONSTANT (index)
+ && contains_placeholder_p (index))
+ index = build (WITH_RECORD_EXPR, TREE_TYPE (index), index, exp);
+ if (! TREE_CONSTANT (unit_size)
+ && contains_placeholder_p (unit_size))
+ unit_size = build (WITH_RECORD_EXPR, sizetype, unit_size,
+ TREE_OPERAND (exp, 0));
- /* Optimize the special-case of a zero lower bound.
-
- We convert the low_bound to sizetype to avoid some problems
- with constant folding. (E.g. suppose the lower bound is 1,
- and its mode is QI. Without the conversion, (ARRAY
- +(INDEX-(unsigned char)1)) becomes ((ARRAY+(-(unsigned char)1))
- +INDEX), which becomes (ARRAY+255+INDEX). Oops!)
-
- But sizetype isn't quite right either (especially if
- the lowbound is negative). FIXME */
-
- if (! integer_zerop (low_bound))
- index = fold (build (MINUS_EXPR, index_type, index,
- convert (sizetype, low_bound)));
-
- if (TREE_CODE (index) == INTEGER_CST)
- {
- index = convert (sbitsizetype, index);
- index_type = TREE_TYPE (index);
- }
-
- xindex = fold (build (MULT_EXPR, sbitsizetype, index,
- convert (sbitsizetype,
- TYPE_SIZE (TREE_TYPE (exp)))));
-
- if (TREE_CODE (xindex) == INTEGER_CST
- && TREE_INT_CST_HIGH (xindex) == 0)
- *pbitpos += TREE_INT_CST_LOW (xindex);
- else
- {
- /* Either the bit offset calculated above is not constant, or
- it overflowed. In either case, redo the multiplication
- against the size in units. This is especially important
- in the non-constant case to avoid a division at runtime. */
- xindex = fold (build (MULT_EXPR, ssizetype, index,
- convert (ssizetype,
- TYPE_SIZE_UNIT (TREE_TYPE (exp)))));
-
- if (contains_placeholder_p (xindex))
- xindex = build (WITH_RECORD_EXPR, sizetype, xindex, exp);
-
- offset = size_binop (PLUS_EXPR, offset, xindex);
- }
+ offset = size_binop (PLUS_EXPR, offset,
+ size_binop (MULT_EXPR,
+ convert (sizetype, index),
+ unit_size));
}
+
else if (TREE_CODE (exp) != NON_LVALUE_EXPR
&& ! ((TREE_CODE (exp) == NOP_EXPR
|| TREE_CODE (exp) == CONVERT_EXPR)
/* If the offset is non-constant already, then we can't assume any
alignment more than the alignment here. */
- if (! integer_zerop (offset))
+ if (! TREE_CONSTANT (offset))
alignment = MIN (alignment, TYPE_ALIGN (TREE_TYPE (exp)));
exp = TREE_OPERAND (exp, 0);
}
- if (TREE_CODE_CLASS (TREE_CODE (exp)) == 'd')
+ if (DECL_P (exp))
alignment = MIN (alignment, DECL_ALIGN (exp));
else if (TREE_TYPE (exp) != 0)
alignment = MIN (alignment, TYPE_ALIGN (TREE_TYPE (exp)));
- if (integer_zerop (offset))
- offset = 0;
-
- if (offset != 0 && contains_placeholder_p (offset))
- offset = build (WITH_RECORD_EXPR, sizetype, offset, orig_exp);
+ /* If OFFSET is constant, see if we can return the whole thing as a
+ constant bit position. Otherwise, split it up. */
+ if (host_integerp (offset, 0)
+ && 0 != (tem = size_binop (MULT_EXPR, convert (bitsizetype, offset),
+ bitsize_unit_node))
+ && 0 != (tem = size_binop (PLUS_EXPR, tem, bit_offset))
+ && host_integerp (tem, 0))
+ *pbitpos = tree_low_cst (tem, 0), *poffset = 0;
+ else
+ *pbitpos = tree_low_cst (bit_offset, 0), *poffset = offset;
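+
+ /* Illustrative example: with 8-bit units, a constant OFFSET of 4
+ bytes and a BIT_OFFSET of 3 fold to *PBITPOS == 35 with a zero
+ *POFFSET. */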
*pmode = mode;
- *poffset = offset;
- *palignment = alignment / BITS_PER_UNIT;
+ *palignment = alignment;
return exp;
}
 /* Subroutine of expand_expr: compute memory_usage from modifier. */
+
static enum memory_use_mode
get_memory_usage_from_modifier (modifier)
enum expand_modifier modifier;
So we assume here that something at a higher level has prevented a
clash. This is somewhat bogus, but the best we can do. Only
do this when X is BLKmode and when we are at the top level. */
- || (top_p && TREE_TYPE (exp) != 0 && TYPE_SIZE (TREE_TYPE (exp)) != 0
+ || (top_p && TREE_TYPE (exp) != 0 && COMPLETE_TYPE_P (TREE_TYPE (exp))
&& TREE_CODE (TYPE_SIZE (TREE_TYPE (exp))) != INTEGER_CST
&& (TREE_CODE (TREE_TYPE (exp)) != ARRAY_TYPE
|| TYPE_ARRAY_MAX_SIZE (TREE_TYPE (exp)) == NULL_TREE
case VAR_DECL:
/* If a static var's type was incomplete when the decl was written,
but the type is complete now, lay out the decl now. */
- if (DECL_SIZE (exp) == 0 && TYPE_SIZE (TREE_TYPE (exp)) != 0
+ if (DECL_SIZE (exp) == 0 && COMPLETE_TYPE_P (TREE_TYPE (exp))
&& (TREE_STATIC (exp) || DECL_EXTERNAL (exp)))
{
push_obstacks_nochange ();
memory protection).
Aggregates are not checked here; they're handled elsewhere. */
- if (current_function && current_function_check_memory_usage
+ if (cfun && current_function_check_memory_usage
&& code == VAR_DECL
&& GET_CODE (DECL_RTL (exp)) == MEM
&& ! AGGREGATE_TYPE_P (TREE_TYPE (exp)))
if (temp != 0)
{
if (GET_CODE (temp) == MEM && GET_CODE (XEXP (temp, 0)) == REG)
- mark_reg_pointer (XEXP (temp, 0),
- DECL_ALIGN (exp) / BITS_PER_UNIT);
+ mark_reg_pointer (XEXP (temp, 0), DECL_ALIGN (exp));
return temp;
}
case INTEGER_CST:
return immed_double_const (TREE_INT_CST_LOW (exp),
- TREE_INT_CST_HIGH (exp),
- mode);
+ TREE_INT_CST_HIGH (exp), mode);
case CONST_DECL:
return expand_expr (DECL_INITIAL (exp), target, VOIDmode,
&& ((mode == BLKmode
&& ! (target != 0 && safe_from_p (target, exp, 1)))
|| TREE_ADDRESSABLE (exp)
- || (TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST
- && (!MOVE_BY_PIECES_P
- (TREE_INT_CST_LOW (TYPE_SIZE (type))/BITS_PER_UNIT,
- TYPE_ALIGN (type) / BITS_PER_UNIT))
+ || (host_integerp (TYPE_SIZE_UNIT (type), 1)
+ && (! MOVE_BY_PIECES_P
+ (tree_low_cst (TYPE_SIZE_UNIT (type), 1),
+ TYPE_ALIGN (type)))
&& ! mostly_zeros_p (exp))))
|| (modifier == EXPAND_INITIALIZER && TREE_CONSTANT (exp)))
{
rtx constructor = output_constant_def (exp);
+
if (modifier != EXPAND_CONST_ADDRESS
&& modifier != EXPAND_INITIALIZER
&& modifier != EXPAND_SUM
RTX_UNCHANGING_P (target) = 1;
}
- store_constructor (exp, target, TYPE_ALIGN (TREE_TYPE (exp)), 0);
+ store_constructor (exp, target, TYPE_ALIGN (TREE_TYPE (exp)), 0,
+ int_size_in_bytes (TREE_TYPE (exp)));
return target;
}
tree exp2;
tree index;
tree string = string_constant (exp1, &index);
- int i;
/* Try to optimize reads from const strings. */
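+ /* E.g. a read of "abc"[1] is folded here to the constant 'b'. */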
if (string
&& TREE_CODE (string) == STRING_CST
&& TREE_CODE (index) == INTEGER_CST
- && !TREE_INT_CST_HIGH (index)
- && (i = TREE_INT_CST_LOW (index)) < TREE_STRING_LENGTH (string)
+ && compare_tree_int (index, TREE_STRING_LENGTH (string)) < 0
&& GET_MODE_CLASS (mode) == MODE_INT
&& GET_MODE_SIZE (mode) == 1
&& modifier != EXPAND_MEMORY_USE_WO)
- return GEN_INT (TREE_STRING_POINTER (string)[i]);
+ return
+ GEN_INT (TREE_STRING_POINTER (string)[TREE_INT_CST_LOW (index)]);
op0 = expand_expr (exp1, NULL_RTX, VOIDmode, EXPAND_SUM);
op0 = memory_address (mode, op0);
- if (current_function && current_function_check_memory_usage
+ if (cfun && current_function_check_memory_usage
&& ! AGGREGATE_TYPE_P (TREE_TYPE (exp)))
{
enum memory_use_mode memory_usage;
tree array = TREE_OPERAND (exp, 0);
tree domain = TYPE_DOMAIN (TREE_TYPE (array));
tree low_bound = domain ? TYPE_MIN_VALUE (domain) : integer_zero_node;
- tree index = TREE_OPERAND (exp, 1);
- tree index_type = TREE_TYPE (index);
+ tree index = convert (sizetype, TREE_OPERAND (exp, 1));
HOST_WIDE_INT i;
/* Optimize the special-case of a zero lower bound.
with constant folding. (E.g. suppose the lower bound is 1,
and its mode is QI. Without the conversion, (ARRAY
+(INDEX-(unsigned char)1)) becomes ((ARRAY+(-(unsigned char)1))
- +INDEX), which becomes (ARRAY+255+INDEX). Oops!)
-
- But sizetype isn't quite right either (especially if
- the lowbound is negative). FIXME */
+ +INDEX), which becomes (ARRAY+255+INDEX). Oops!) */
if (! integer_zerop (low_bound))
- index = fold (build (MINUS_EXPR, index_type, index,
- convert (sizetype, low_bound)));
+ index = size_diffop (index, convert (sizetype, low_bound));
/* Fold an expression like: "foo"[2].
This is not done in fold so it won't happen inside &.
if (TREE_CODE (array) == STRING_CST
&& TREE_CODE (index) == INTEGER_CST
- && !TREE_INT_CST_HIGH (index)
- && (i = TREE_INT_CST_LOW (index)) < TREE_STRING_LENGTH (array)
+ && compare_tree_int (index, TREE_STRING_LENGTH (array)) < 0
&& GET_MODE_CLASS (mode) == MODE_INT
&& GET_MODE_SIZE (mode) == 1)
- return GEN_INT (TREE_STRING_POINTER (array)[i]);
+ return
+ GEN_INT (TREE_STRING_POINTER (array)[TREE_INT_CST_LOW (index)]);
/* If this is a constant index into a constant array,
just get the value from the array. Handle both the cases when
we have an explicit constructor and when our operand is a variable
that was declared const. */
- if (TREE_CODE (array) == CONSTRUCTOR && ! TREE_SIDE_EFFECTS (array))
+ if (TREE_CODE (array) == CONSTRUCTOR && ! TREE_SIDE_EFFECTS (array)
+ && TREE_CODE (index) == INTEGER_CST
+ && 0 > compare_tree_int (index,
+ list_length (CONSTRUCTOR_ELTS
+ (TREE_OPERAND (exp, 0)))))
{
- if (TREE_CODE (index) == INTEGER_CST
- && TREE_INT_CST_HIGH (index) == 0)
- {
- tree elem = CONSTRUCTOR_ELTS (TREE_OPERAND (exp, 0));
-
- i = TREE_INT_CST_LOW (index);
- while (elem && i--)
- elem = TREE_CHAIN (elem);
- if (elem)
- return expand_expr (fold (TREE_VALUE (elem)), target,
- tmode, ro_modifier);
- }
+ tree elem;
+
+ for (elem = CONSTRUCTOR_ELTS (TREE_OPERAND (exp, 0)),
+ i = TREE_INT_CST_LOW (index);
+ elem != 0 && i != 0; i--, elem = TREE_CHAIN (elem))
+ ;
+
+ if (elem)
+ return expand_expr (fold (TREE_VALUE (elem)), target,
+ tmode, ro_modifier);
}
else if (optimize >= 1
{
tree init = DECL_INITIAL (array);
- i = TREE_INT_CST_LOW (index);
if (TREE_CODE (init) == CONSTRUCTOR)
{
- tree elem = CONSTRUCTOR_ELTS (init);
+ tree elem;
+
+ for (elem = CONSTRUCTOR_ELTS (init);
+ (elem
+ && !tree_int_cst_equal (TREE_PURPOSE (elem), index));
+ elem = TREE_CHAIN (elem))
+ ;
- while (elem
- && !tree_int_cst_equal (TREE_PURPOSE (elem), index))
- elem = TREE_CHAIN (elem);
if (elem)
return expand_expr (fold (TREE_VALUE (elem)), target,
tmode, ro_modifier);
}
else if (TREE_CODE (init) == STRING_CST
- && TREE_INT_CST_HIGH (index) == 0
- && (TREE_INT_CST_LOW (index)
- < TREE_STRING_LENGTH (init)))
+ && 0 > compare_tree_int (index,
+ TREE_STRING_LENGTH (init)))
return (GEN_INT
(TREE_STRING_POINTER
(init)[TREE_INT_CST_LOW (index)]));
op0 = expand_expr (TREE_VALUE (elt), target, tmode, modifier);
if (DECL_BIT_FIELD (TREE_PURPOSE (elt)))
{
- int bitsize = DECL_FIELD_SIZE (TREE_PURPOSE (elt));
+ HOST_WIDE_INT bitsize
+ = TREE_INT_CST_LOW (DECL_SIZE (TREE_PURPOSE (elt)));
if (TREE_UNSIGNED (TREE_TYPE (TREE_PURPOSE (elt))))
{
{
enum machine_mode mode1;
- int bitsize;
- int bitpos;
+ HOST_WIDE_INT bitsize, bitpos;
tree offset;
int volatilep = 0;
- int alignment;
+ unsigned int alignment;
tree tem = get_inner_reference (exp, &bitsize, &bitpos, &offset,
&mode1, &unsignedp, &volatilep,
&alignment);
&& bitsize != 0
&& (bitpos % bitsize) == 0
&& (bitsize % GET_MODE_ALIGNMENT (mode1)) == 0
- && (alignment * BITS_PER_UNIT) == GET_MODE_ALIGNMENT (mode1))
+ && alignment == GET_MODE_ALIGNMENT (mode1))
{
rtx temp = change_address (op0, mode1,
plus_constant (XEXP (op0, 0),
&& GET_MODE_CLASS (mode) != MODE_COMPLEX_FLOAT)
/* If the field isn't aligned enough to fetch as a memref,
fetch it as a bit field. */
- || (mode1 != BLKmode && SLOW_UNALIGNED_ACCESS
+ || (mode1 != BLKmode
+ && SLOW_UNALIGNED_ACCESS (mode1, alignment)
&& ((TYPE_ALIGN (TREE_TYPE (tem))
- < (unsigned int) GET_MODE_ALIGNMENT (mode))
- || (bitpos % GET_MODE_ALIGNMENT (mode) != 0)))))
+ < GET_MODE_ALIGNMENT (mode))
+ || (bitpos % GET_MODE_ALIGNMENT (mode) != 0)))
+ /* If the type and the field have constant sizes and the
+ size of the type isn't the same as that of the bitfield,
+ we must use bitfield operations. */
+ || ((bitsize >= 0
+ && (TREE_CODE (TYPE_SIZE (TREE_TYPE (exp)))
+ == INTEGER_CST)
+ && 0 != compare_tree_int (TYPE_SIZE (TREE_TYPE (exp)),
+ bitsize)))))
|| (modifier != EXPAND_CONST_ADDRESS
&& modifier != EXPAND_INITIALIZER
&& mode == BLKmode
- && SLOW_UNALIGNED_ACCESS
- && (TYPE_ALIGN (type) > alignment * BITS_PER_UNIT
+ && SLOW_UNALIGNED_ACCESS (mode, alignment)
+ && (TYPE_ALIGN (type) > alignment
|| bitpos % TYPE_ALIGN (type) != 0)))
{
enum machine_mode ext_mode = mode;
emit_block_move (target, op0,
GEN_INT ((bitsize + BITS_PER_UNIT - 1)
/ BITS_PER_UNIT),
- 1);
+ BITS_PER_UNIT);
return target;
}
* BITS_PER_UNIT),
GET_MODE_BITSIZE (mode)),
0, TYPE_MODE (valtype), TREE_OPERAND (exp, 0),
- VOIDmode, 0, 1, int_size_in_bytes (type), 0);
+ VOIDmode, 0, BITS_PER_UNIT,
+ int_size_in_bytes (type), 0);
else
abort ();
/* If this mode is an integer too wide to compare properly,
compare word by word. Rely on cse to optimize constant cases. */
- if (GET_MODE_CLASS (mode) == MODE_INT && ! can_compare_p (mode, ccp_jump))
+ if (GET_MODE_CLASS (mode) == MODE_INT
+ && ! can_compare_p (GE, mode, ccp_jump))
{
if (code == MAX_EXPR)
do_jump_by_parts_greater_rtx (mode, TREE_UNSIGNED (type),
case GE_EXPR:
case EQ_EXPR:
case NE_EXPR:
+ case UNORDERED_EXPR:
+ case ORDERED_EXPR:
+ case UNLT_EXPR:
+ case UNLE_EXPR:
+ case UNGT_EXPR:
+ case UNGE_EXPR:
+ case UNEQ_EXPR:
preexpand_calls (exp);
temp = do_store_flag (exp, target, tmode != VOIDmode ? tmode : mode, 0);
if (temp != 0)
|| TREE_CODE (rhs) == BIT_AND_EXPR)
&& TREE_OPERAND (rhs, 0) == lhs
&& TREE_CODE (TREE_OPERAND (rhs, 1)) == COMPONENT_REF
- && TREE_INT_CST_LOW (DECL_SIZE (TREE_OPERAND (lhs, 1))) == 1
- && TREE_INT_CST_LOW (DECL_SIZE (TREE_OPERAND (TREE_OPERAND (rhs, 1), 1))) == 1)
+ && integer_onep (DECL_SIZE (TREE_OPERAND (lhs, 1)))
+ && integer_onep (DECL_SIZE (TREE_OPERAND (TREE_OPERAND (rhs, 1), 1))))
{
rtx label = gen_label_rtx ();
if (GET_CODE (op0) == REG
&& ! REG_USERVAR_P (op0))
- mark_reg_pointer (op0, TYPE_ALIGN (TREE_TYPE (type)) / BITS_PER_UNIT);
+ mark_reg_pointer (op0, TYPE_ALIGN (TREE_TYPE (type)));
/* If we might have had a temp slot, add an equivalent address
for it. */
static rtx
expand_expr_unaligned (exp, palign)
register tree exp;
- int *palign;
+ unsigned int *palign;
{
register rtx op0;
tree type = TREE_TYPE (exp);
tree array = TREE_OPERAND (exp, 0);
tree domain = TYPE_DOMAIN (TREE_TYPE (array));
tree low_bound = domain ? TYPE_MIN_VALUE (domain) : integer_zero_node;
- tree index = TREE_OPERAND (exp, 1);
- tree index_type = TREE_TYPE (index);
+ tree index = convert (sizetype, TREE_OPERAND (exp, 1));
HOST_WIDE_INT i;
if (TREE_CODE (TREE_TYPE (TREE_OPERAND (exp, 0))) != ARRAY_TYPE)
with constant folding. (E.g. suppose the lower bound is 1,
and its mode is QI. Without the conversion, (ARRAY
+(INDEX-(unsigned char)1)) becomes ((ARRAY+(-(unsigned char)1))
- +INDEX), which becomes (ARRAY+255+INDEX). Oops!)
-
- But sizetype isn't quite right either (especially if
- the lowbound is negative). FIXME */
+ +INDEX), which becomes (ARRAY+255+INDEX). Oops!) */
if (! integer_zerop (low_bound))
- index = fold (build (MINUS_EXPR, index_type, index,
- convert (sizetype, low_bound)));
+ index = size_diffop (index, convert (sizetype, low_bound));
/* If this is a constant index into a constant array,
just get the value from the array. Handle both the cases when
we have an explicit constructor and when our operand is a variable
that was declared const. */
- if (TREE_CODE (array) == CONSTRUCTOR && ! TREE_SIDE_EFFECTS (array))
+ if (TREE_CODE (array) == CONSTRUCTOR && ! TREE_SIDE_EFFECTS (array)
+ && TREE_CODE (index) == INTEGER_CST
+ && 0 > compare_tree_int (index,
+ list_length (CONSTRUCTOR_ELTS
+ (TREE_OPERAND (exp, 0)))))
{
- if (TREE_CODE (index) == INTEGER_CST
- && TREE_INT_CST_HIGH (index) == 0)
- {
- tree elem = CONSTRUCTOR_ELTS (TREE_OPERAND (exp, 0));
-
- i = TREE_INT_CST_LOW (index);
- while (elem && i--)
- elem = TREE_CHAIN (elem);
- if (elem)
- return expand_expr_unaligned (fold (TREE_VALUE (elem)),
- palign);
- }
+ tree elem;
+
+ for (elem = CONSTRUCTOR_ELTS (TREE_OPERAND (exp, 0)),
+ i = TREE_INT_CST_LOW (index);
+ elem != 0 && i != 0; i--, elem = TREE_CHAIN (elem))
+ ;
+
+ if (elem)
+ return expand_expr_unaligned (fold (TREE_VALUE (elem)), palign);
}
else if (optimize >= 1
{
tree init = DECL_INITIAL (array);
- i = TREE_INT_CST_LOW (index);
if (TREE_CODE (init) == CONSTRUCTOR)
{
- tree elem = CONSTRUCTOR_ELTS (init);
+ tree elem;
+
+ for (elem = CONSTRUCTOR_ELTS (init);
+ elem != 0 && ! tree_int_cst_equal (TREE_PURPOSE (elem), index);
+ elem = TREE_CHAIN (elem))
+ ;
- while (elem
- && !tree_int_cst_equal (TREE_PURPOSE (elem), index))
- elem = TREE_CHAIN (elem);
if (elem)
return expand_expr_unaligned (fold (TREE_VALUE (elem)),
palign);
{
enum machine_mode mode1;
- int bitsize;
- int bitpos;
+ HOST_WIDE_INT bitsize, bitpos;
tree offset;
int volatilep = 0;
- int alignment;
+ unsigned int alignment;
int unsignedp;
tree tem = get_inner_reference (exp, &bitsize, &bitpos, &offset,
&mode1, &unsignedp, &volatilep,
EXPAND_INITIALIZER), then we must not copy to a temporary. */
if (mode1 == VOIDmode
|| GET_CODE (op0) == REG || GET_CODE (op0) == SUBREG
- || (SLOW_UNALIGNED_ACCESS
- && (TYPE_ALIGN (type) > alignment * BITS_PER_UNIT
+ || (SLOW_UNALIGNED_ACCESS (mode1, alignment)
+ && (TYPE_ALIGN (type) > alignment
|| bitpos % TYPE_ALIGN (type) != 0)))
{
enum machine_mode ext_mode = mode_for_size (bitsize, MODE_INT, 1);
return expand_expr (exp, NULL_RTX, VOIDmode, EXPAND_NORMAL);
}
\f
-/* Return the tree node and offset if a given argument corresponds to
- a string constant. */
+/* Return the tree node if ARG corresponds to a string constant, or zero
+ if it doesn't. If we return non-zero, set *PTR_OFFSET to the offset
+ in bytes within the string that ARG is accessing. The type of the
+ offset will be `sizetype'. */
tree
string_constant (arg, ptr_offset)
if (TREE_CODE (arg) == ADDR_EXPR
&& TREE_CODE (TREE_OPERAND (arg, 0)) == STRING_CST)
{
- *ptr_offset = integer_zero_node;
+ *ptr_offset = size_zero_node;
return TREE_OPERAND (arg, 0);
}
else if (TREE_CODE (arg) == PLUS_EXPR)
if (TREE_CODE (arg0) == ADDR_EXPR
&& TREE_CODE (TREE_OPERAND (arg0, 0)) == STRING_CST)
{
- *ptr_offset = arg1;
+ *ptr_offset = convert (sizetype, arg1);
return TREE_OPERAND (arg0, 0);
}
else if (TREE_CODE (arg1) == ADDR_EXPR
&& TREE_CODE (TREE_OPERAND (arg1, 0)) == STRING_CST)
{
- *ptr_offset = arg0;
+ *ptr_offset = convert (sizetype, arg0);
return TREE_OPERAND (arg1, 0);
}
}
&& EXIT_IGNORE_STACK
&& ! (DECL_INLINE (current_function_decl) && ! flag_no_inline)
&& ! flag_inline_functions)
- pending_stack_adjust = 0;
+ {
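+ /* Keep stack_pointer_delta in sync with the adjustment we are
+ discarding. */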
+ stack_pointer_delta -= pending_stack_adjust;
+ pending_stack_adjust = 0;
+ }
#endif
}
if (! SLOW_BYTE_ACCESS
&& TREE_CODE (TREE_OPERAND (exp, 1)) == INTEGER_CST
&& TYPE_PRECISION (TREE_TYPE (exp)) <= HOST_BITS_PER_WIDE_INT
- && (i = floor_log2 (TREE_INT_CST_LOW (TREE_OPERAND (exp, 1)))) >= 0
+ && (i = tree_floor_log2 (TREE_OPERAND (exp, 1))) >= 0
&& (mode = mode_for_size (i + 1, MODE_INT, 0)) != BLKmode
&& (type = type_for_mode (mode, 1)) != 0
&& TYPE_PRECISION (type) < TYPE_PRECISION (TREE_TYPE (exp))
case BIT_FIELD_REF:
case ARRAY_REF:
{
- int bitsize, bitpos, unsignedp;
+ HOST_WIDE_INT bitsize, bitpos;
+ int unsignedp;
enum machine_mode mode;
tree type;
tree offset;
int volatilep = 0;
- int alignment;
+ unsigned int alignment;
/* Get description of this reference. We don't actually care
about the underlying object here. */
- get_inner_reference (exp, &bitsize, &bitpos, &offset,
- &mode, &unsignedp, &volatilep,
- &alignment);
+ get_inner_reference (exp, &bitsize, &bitpos, &offset, &mode,
+ &unsignedp, &volatilep, &alignment);
type = type_for_size (bitsize, unsignedp);
if (! SLOW_BYTE_ACCESS
do_jump (TREE_OPERAND (exp, 0), if_true_label, if_false_label);
else if (GET_MODE_CLASS (TYPE_MODE (inner_type)) == MODE_INT
- && !can_compare_p (TYPE_MODE (inner_type), ccp_jump))
+ && !can_compare_p (EQ, TYPE_MODE (inner_type), ccp_jump))
do_jump_by_parts_equality (exp, if_false_label, if_true_label);
else
do_compare_and_jump (exp, EQ, EQ, if_false_label, if_true_label);
do_jump (TREE_OPERAND (exp, 0), if_false_label, if_true_label);
else if (GET_MODE_CLASS (TYPE_MODE (inner_type)) == MODE_INT
- && !can_compare_p (TYPE_MODE (inner_type), ccp_jump))
+ && !can_compare_p (NE, TYPE_MODE (inner_type), ccp_jump))
do_jump_by_parts_equality (exp, if_true_label, if_false_label);
else
do_compare_and_jump (exp, NE, NE, if_false_label, if_true_label);
case LT_EXPR:
mode = TYPE_MODE (TREE_TYPE (TREE_OPERAND (exp, 0)));
if (GET_MODE_CLASS (mode) == MODE_INT
- && ! can_compare_p (mode, ccp_jump))
+ && ! can_compare_p (LT, mode, ccp_jump))
do_jump_by_parts_greater (exp, 1, if_false_label, if_true_label);
else
do_compare_and_jump (exp, LT, LTU, if_false_label, if_true_label);
case LE_EXPR:
mode = TYPE_MODE (TREE_TYPE (TREE_OPERAND (exp, 0)));
if (GET_MODE_CLASS (mode) == MODE_INT
- && ! can_compare_p (mode, ccp_jump))
+ && ! can_compare_p (LE, mode, ccp_jump))
do_jump_by_parts_greater (exp, 0, if_true_label, if_false_label);
else
do_compare_and_jump (exp, LE, LEU, if_false_label, if_true_label);
case GT_EXPR:
mode = TYPE_MODE (TREE_TYPE (TREE_OPERAND (exp, 0)));
if (GET_MODE_CLASS (mode) == MODE_INT
- && ! can_compare_p (mode, ccp_jump))
+ && ! can_compare_p (GT, mode, ccp_jump))
do_jump_by_parts_greater (exp, 0, if_false_label, if_true_label);
else
do_compare_and_jump (exp, GT, GTU, if_false_label, if_true_label);
case GE_EXPR:
mode = TYPE_MODE (TREE_TYPE (TREE_OPERAND (exp, 0)));
if (GET_MODE_CLASS (mode) == MODE_INT
- && ! can_compare_p (mode, ccp_jump))
+ && ! can_compare_p (GE, mode, ccp_jump))
do_jump_by_parts_greater (exp, 1, if_true_label, if_false_label);
else
do_compare_and_jump (exp, GE, GEU, if_false_label, if_true_label);
break;
+ case UNORDERED_EXPR:
+ case ORDERED_EXPR:
+ {
+ enum rtx_code cmp, rcmp;
+ int do_rev;
+
+ if (code == UNORDERED_EXPR)
+ cmp = UNORDERED, rcmp = ORDERED;
+ else
+ cmp = ORDERED, rcmp = UNORDERED;
+ mode = TYPE_MODE (TREE_TYPE (TREE_OPERAND (exp, 0)));
+
+ do_rev = 0;
+ if (! can_compare_p (cmp, mode, ccp_jump)
+ && (can_compare_p (rcmp, mode, ccp_jump)
+ /* If the target doesn't provide either UNORDERED or ORDERED
+ comparisons, canonicalize on UNORDERED for the library. */
+ || rcmp == UNORDERED))
+ do_rev = 1;
+
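+ /* Reversing swaps the branch targets: e.g. on a target with only
+ UNORDERED jumps, "ORDERED (a, b) -> true, false" is emitted as
+ "UNORDERED (a, b) -> false, true". */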
+ if (! do_rev)
+ do_compare_and_jump (exp, cmp, cmp, if_false_label, if_true_label);
+ else
+ do_compare_and_jump (exp, rcmp, rcmp, if_true_label, if_false_label);
+ }
+ break;
+
+ {
+ enum rtx_code rcode1;
+ enum tree_code tcode2;
+
+ case UNLT_EXPR:
+ rcode1 = UNLT;
+ tcode2 = LT_EXPR;
+ goto unordered_bcc;
+ case UNLE_EXPR:
+ rcode1 = UNLE;
+ tcode2 = LE_EXPR;
+ goto unordered_bcc;
+ case UNGT_EXPR:
+ rcode1 = UNGT;
+ tcode2 = GT_EXPR;
+ goto unordered_bcc;
+ case UNGE_EXPR:
+ rcode1 = UNGE;
+ tcode2 = GE_EXPR;
+ goto unordered_bcc;
+ case UNEQ_EXPR:
+ rcode1 = UNEQ;
+ tcode2 = EQ_EXPR;
+ goto unordered_bcc;
+
+ unordered_bcc:
+ mode = TYPE_MODE (TREE_TYPE (TREE_OPERAND (exp, 0)));
+ if (can_compare_p (rcode1, mode, ccp_jump))
+ do_compare_and_jump (exp, rcode1, rcode1, if_false_label,
+ if_true_label);
+ else
+ {
+ tree op0 = save_expr (TREE_OPERAND (exp, 0));
+ tree op1 = save_expr (TREE_OPERAND (exp, 1));
+ tree cmp0, cmp1;
+
+ /* If the target doesn't support combined unordered
+ compares, decompose into UNORDERED + comparison. */
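+ /* E.g. UNLT_EXPR is jumped here as UNORDERED (op0, op1)
+ || op0 < op1, via the TRUTH_ORIF_EXPR built below. */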
+ cmp0 = fold (build (UNORDERED_EXPR, TREE_TYPE (exp), op0, op1));
+ cmp1 = fold (build (tcode2, TREE_TYPE (exp), op0, op1));
+ exp = build (TRUTH_ORIF_EXPR, TREE_TYPE (exp), cmp0, cmp1);
+ do_jump (exp, if_false_label, if_true_label);
+ }
+ }
+ break;
+
default:
normal:
temp = expand_expr (exp, NULL_RTX, VOIDmode, 0);
emit_jump (target);
}
else if (GET_MODE_CLASS (GET_MODE (temp)) == MODE_INT
- && ! can_compare_p (GET_MODE (temp), ccp_jump))
+ && ! can_compare_p (NE, GET_MODE (temp), ccp_jump))
/* Note swapping the labels gives us not-equal. */
do_jump_by_parts_equality_rtx (temp, if_true_label, if_false_label);
else if (GET_MODE (temp) != VOIDmode)
int unsignedp;
enum machine_mode mode;
rtx size;
- int align;
+ unsigned int align;
{
rtx tem;
int unsignedp;
enum machine_mode mode;
rtx size;
- int align;
+ unsigned int align;
rtx if_false_label, if_true_label;
{
rtx tem;
enum rtx_code signed_code, unsigned_code;
rtx if_false_label, if_true_label;
{
- int align0, align1;
+ unsigned int align0, align1;
register rtx op0, op1;
register tree type;
register enum machine_mode mode;
do_compare_rtx_and_jump (op0, op1, code, unsignedp, mode,
((mode == BLKmode)
? expr_size (TREE_OPERAND (exp, 0)) : NULL_RTX),
- MIN (align0, align1) / BITS_PER_UNIT,
+ MIN (align0, align1),
if_false_label, if_true_label);
}
\f
else
code = unsignedp ? GEU : GE;
break;
+
+ case UNORDERED_EXPR:
+ code = UNORDERED;
+ break;
+ case ORDERED_EXPR:
+ code = ORDERED;
+ break;
+ case UNLT_EXPR:
+ code = UNLT;
+ break;
+ case UNLE_EXPR:
+ code = UNLE;
+ break;
+ case UNGT_EXPR:
+ code = UNGT;
+ break;
+ case UNGE_EXPR:
+ code = UNGE;
+ break;
+ case UNEQ_EXPR:
+ code = UNEQ;
+ break;
+
default:
abort ();
}
if (TREE_CODE (inner) == RSHIFT_EXPR
&& TREE_CODE (TREE_OPERAND (inner, 1)) == INTEGER_CST
&& TREE_INT_CST_HIGH (TREE_OPERAND (inner, 1)) == 0
- && (bitnum + TREE_INT_CST_LOW (TREE_OPERAND (inner, 1))
- < TYPE_PRECISION (type)))
+ && bitnum < TYPE_PRECISION (type)
+ && 0 > compare_tree_int (TREE_OPERAND (inner, 1),
+ TYPE_PRECISION (type) - bitnum))
{
bitnum += TREE_INT_CST_LOW (TREE_OPERAND (inner, 1));
inner = TREE_OPERAND (inner, 0);
}
/* Now see if we are likely to be able to do this. Return if not. */
- if (! can_compare_p (operand_mode, ccp_store_flag))
+ if (! can_compare_p (code, operand_mode, ccp_store_flag))
return 0;
+
icode = setcc_gen_code[(int) code];
if (icode == CODE_FOR_nothing
|| (only_cheap && insn_data[(int) icode].operand[0].mode != mode))