+2004-08-23  Eric Christopher  <echristo@redhat.com>
+
+ * defaults.h (VECTOR_MODE_SUPPORTED_P): Remove macro.
+ * system.h (VECTOR_MODE_SUPPORTED_P): Poison.
+ * target-def.h (TARGET_VECTOR_MODE_SUPPORTED_P): Define.
+ * target.h: Ditto.
+ * hooks.h: Include machmode.h.
+ (hook_bool_mode_false): Declare.
+ * hooks.c (hook_bool_mode_false): Define.
+ * expr.c (vector_mode_valid_p): Use targetm.vector_mode_supported_p.
+ * stor-layout.c (layout_type): Ditto.
+ * config/alpha/alpha.c (alpha_vector_mode_supported_p): New function.
+ Define to target macro.
+ * config/alpha/alpha.h (VECTOR_MODE_SUPPORTED_P): Delete.
+ * config/arm/arm.c: Ditto. Use.
+ * config/arm/arm.h: Ditto.
+ * config/arm/arm-protos.h: Ditto.
+ * config/i386/i386.c: Ditto.
+ * config/i386/i386.h: Ditto.
+ * config/rs6000/rs6000.c: Ditto.
+ * config/rs6000/rs6000.h: Ditto.
+ * config/sh/sh.c: Ditto.
+ * config/sh/sh.h: Ditto.
+ * config/sh/sh-protos.h: Ditto.
+ * config/sh/sh.md: Use.
+ * doc/tm.texi: Move documentation for VECTOR_MODE_SUPPORTED_P
+ to TARGET_VECTOR_MODE_SUPPORTED_P.
+
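[Editor's note: for readers following the conversion above, the pieces named in this entry fit together roughly as follows. This is a sketch, not the literal patch text; the default's name follows the hooks.c hook_bool_<arguments>_<result> convention, hooks.h gains the machmode.h include so the prototype compiles, and the exact target-def.h guard shape is an assumption.]

    /* hooks.c: generic default, for targets without vector support.  */
    bool
    hook_bool_mode_false (enum machine_mode mode ATTRIBUTE_UNUSED)
    {
      return false;
    }

    /* target-def.h: the hook defaults to the generic "no" answer
       unless a backend overrides it.  */
    #ifndef TARGET_VECTOR_MODE_SUPPORTED_P
    #define TARGET_VECTOR_MODE_SUPPORTED_P hook_bool_mode_false
    #endif

    /* Callers such as vector_mode_valid_p (expr.c) and layout_type
       (stor-layout.c) now ask the target vector instead of the
       poisoned VECTOR_MODE_SUPPORTED_P macro:  */
    if (!targetm.vector_mode_supported_p (mode))
      return 0;
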
2004-08-23  Nathan Sidwell  <nathan@codesourcery.com>

* Makefile.in (BUILD_ERRORS): Set to build-errors.
(copyprop_hardreg_forward_1): Update call to kill_value_regno.
2004-08-20  Daniel Berlin  <dberlin@dberlin.org>
-
+
Fix PR tree-optimization/17111
* tree-ssa-pre.c (create_value_expr_from): Don't change the types
of non-value_handles.
(override_options): Added checks for the new options.
(s390_emit_prologue): Emit stack check and trap code and perform
compile time stack size checking.
-
- * config/s390/s390.h (TARGET_OPTIONS): Added new options
+
+ * config/s390/s390.h (TARGET_OPTIONS): Added new options
"warn-framesize", "warn-dynamicstack", "stack-size" and
"stack-guard".
/* Subroutines used for code generation on the DEC Alpha.
Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999,
- 2000, 2001, 2002, 2003, 2004 Free Software Foundation, Inc.
+ 2000, 2001, 2002, 2003, 2004 Free Software Foundation, Inc.
Contributed by Richard Kenner (kenner@vlsi1.ultra.nyu.edu)
This file is part of GCC.
/* Specify which cpu to schedule for. */
enum processor_type alpha_cpu;
-static const char * const alpha_cpu_name[] =
+static const char * const alpha_cpu_name[] =
{
"ev4", "ev5", "ev6"
};
{ "21264a", PROCESSOR_EV6, EV6_MASK|MASK_CIX },
{ 0, 0, 0 }
};
-
+
/* Unicos/Mk doesn't have shared libraries. */
if (TARGET_ABI_UNICOSMK && flag_pic)
{
flag_pic = 0;
}
- /* On Unicos/Mk, the native compiler consistently generates /d suffices for
+ /* On Unicos/Mk, the native compiler consistently generates /d suffices for
floating-point instructions. Make that the default for this target. */
if (TARGET_ABI_UNICOSMK)
alpha_fprm = ALPHA_FPRM_DYN;
alpha_tp = ALPHA_TP_PROG;
alpha_fptm = ALPHA_FPTM_N;
- /* We cannot use su and sui qualifiers for conversion instructions on
+ /* We cannot use su and sui qualifiers for conversion instructions on
Unicos/Mk. I'm not sure if this is due to assembler or hardware
limitations. Right now, we issue a warning if -mieee is specified
and then ignore it; eventually, we should either get it right or
&& ISDIGIT ((unsigned char)alpha_mlat_string[1])
&& alpha_mlat_string[2] == '\0')
{
- static int const cache_latency[][4] =
+ static int const cache_latency[][4] =
{
{ 3, 30, -1 }, /* ev4 -- Bcache is a guess */
{ 2, 12, 38 }, /* ev5 -- Bcache from PC164 LMbench numbers */
return 1;
}
-/* Return true if OP is valid for a particular TLS relocation.
+/* Return true if OP is valid for a particular TLS relocation.
We are already guaranteed that OP is a CONST. */
int
}
}
+/* Implements target hook vector_mode_supported_p. */
+static bool
+alpha_vector_mode_supported_p (enum machine_mode mode)
+{
+ if (TARGET_MAX
+ && ((mode == V8QImode)
+ || (mode == V4HImode)
+ || (mode == V2SImode)))
+ return true;
+
+ return false;
+}
+
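+/* [Editor's note: illustrative calls showing the effect of the hook
+   just defined; derived from the function above, not patch text.
+
+     With -mmax in effect (TARGET_MAX set):
+       targetm.vector_mode_supported_p (V8QImode);   -- true
+       targetm.vector_mode_supported_p (V4HImode);   -- true
+       targetm.vector_mode_supported_p (V2SImode);   -- true
+       targetm.vector_mode_supported_p (V2SFmode);   -- false, not a MAX mode
+     Without -mmax, every vector mode reports false.]  */
+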
/* Return 1 if this function can directly return via $26. */
int
tga = get_tls_get_addr ();
dest = gen_reg_rtx (Pmode);
seq = GEN_INT (alpha_next_sequence_number++);
-
+
emit_insn (gen_movdi_er_tlsgd (r16, pic_offset_table_rtx, x, seq));
insn = gen_call_value_osf_tlsgd (r0, tga, seq);
insn = emit_call_insn (insn);
small symbolic operand until after reload. At which point we need
to replace (mem (symbol_ref)) with (mem (lo_sum $29 symbol_ref))
so that sched2 has the proper dependency information. */
-/*
+/*
{"some_small_symbolic_operand", {SET, PARALLEL, PREFETCH, UNSPEC, \
UNSPEC_VOLATILE}},
*/
return false;
}
-
+
/* Try a machine-dependent way of reloading an illegitimate address
operand. If we find one, push the reload and return the new rtx. */
-
+
rtx
alpha_legitimize_reload_address (rtx x,
enum machine_mode mode ATTRIBUTE_UNUSED,
else
*total = COSTS_N_INSNS (2);
return true;
-
+
case CONST:
case SYMBOL_REF:
case LABEL_REF:
/* Otherwise we do a load from the GOT. */
*total = COSTS_N_INSNS (optimize_size ? 1 : alpha_memory_latency);
return true;
-
+
case PLUS:
case MINUS:
if (float_mode_p)
*pbitnum = GEN_INT ((offset & 3) * 8);
}
-/* Similar, but just get the address. Handle the two reload cases.
+/* Similar, but just get the address. Handle the two reload cases.
Add EXTRA_OFFSET to the address we return. */
rtx
}
/* On the Alpha, all (non-symbolic) constants except zero go into
- a floating-point register via memory. Note that we cannot
+ a floating-point register via memory. Note that we cannot
return anything that is not a subset of CLASS, and that some
symbolic constants cannot be dropped to memory. */
/* Loading and storing HImode or QImode values to and from memory
usually requires a scratch register. The exceptions are loading
QImode and HImode from an aligned address to a general register
- unless byte instructions are permitted.
+ unless byte instructions are permitted.
We also cannot load an unaligned address or a paradoxical SUBREG
- into an FP register.
+ into an FP register.
We also cannot do integral arithmetic into FP regs, as might result
from register elimination into a DImode fp register. */
if (GET_CODE (ref) != MEM)
return;
- /* This is only called from alpha.md, after having had something
+ /* This is only called from alpha.md, after having had something
generated from one of the insn patterns. So if everything is
zero, the pattern is already up-to-date. */
if (!MEM_VOLATILE_P (ref)
{
/* We used to use copy_to_suggested_reg (GEN_INT (c), target, mode)
but that meant that we can't handle INT_MIN on 32-bit machines
- (like NT/Alpha), because we recurse indefinitely through
+ (like NT/Alpha), because we recurse indefinitely through
emit_move_insn to gen_movdi. So instead, since we know exactly
what we want, create it explicitly. */
else
{
/* ??? We mark the branch mode to be CCmode to prevent the
- compare and branch from being combined, since the compare
+ compare and branch from being combined, since the compare
insn follows IEEE rules that the branch does not. */
branch_mode = CCmode;
}
rtx libcall;
};
-static GTY(()) struct xfloating_op xfloating_ops[] =
+static GTY(()) struct xfloating_op xfloating_ops[] =
{
{ PLUS, "_OtsAddX", "OTS$ADD_X", 0 },
{ MINUS, "_OtsSubX", "OTS$SUB_X", 0 },
Note that these functions do not follow normal calling conventions:
TFmode arguments are passed in two integer registers (as opposed to
- indirect); TFmode return values appear in R16+R17.
+ indirect); TFmode return values appear in R16+R17.
FUNC is the function to call.
TARGET is where the output belongs.
out_operands[0] = operands[1];
out_operands[1] = operands[2];
out_operands[2] = GEN_INT (mode);
- alpha_emit_xfloating_libcall (func, operands[0], out_operands, 3,
+ alpha_emit_xfloating_libcall (func, operands[0], out_operands, 3,
gen_rtx_fmt_ee (code, TFmode, operands[1],
operands[2]));
}
abort ();
}
-/* Implement negtf2 or abstf2. Op0 is destination, op1 is source,
- op2 is a register containing the sign bit, operation is the
+/* Implement negtf2 or abstf2. Op0 is destination, op1 is source,
+ op2 is a register containing the sign bit, operation is the
logical operation to be performed. */
void
mema = force_reg (Pmode, mema);
/* AND addresses cannot be in any alias set, since they may implicitly
- alias surrounding code. Ideally we'd have some alias set that
+ alias surrounding code. Ideally we'd have some alias set that
covered all types except those with alignment 8 or higher. */
tmp = change_address (mem, DImode,
- gen_rtx_AND (DImode,
+ gen_rtx_AND (DImode,
plus_constant (mema, ofs),
GEN_INT (-8)));
set_mem_alias_set (tmp, 0);
emit_move_insn (meml, tmp);
tmp = change_address (mem, DImode,
- gen_rtx_AND (DImode,
+ gen_rtx_AND (DImode,
plus_constant (mema, ofs + size - 1),
GEN_INT (-8)));
set_mem_alias_set (tmp, 0);
addr for the target, because addr is marked as a pointer and combine
knows that pointers are always sign-extended 32 bit values. */
addr = expand_binop (DImode, ior_optab, extl, exth, tgt, 1, OPTAB_WIDEN);
- addr = expand_binop (DImode, ashr_optab, addr, GEN_INT (48),
+ addr = expand_binop (DImode, ashr_optab, addr, GEN_INT (48),
addr, 1, OPTAB_WIDEN);
}
else
HOST_WIDE_INT size, HOST_WIDE_INT ofs)
{
rtx dstl, dsth, addr, insl, insh, meml, memh, dsta;
-
+
dstl = gen_reg_rtx (DImode);
dsth = gen_reg_rtx (DImode);
insl = gen_reg_rtx (DImode);
dsta = force_reg (Pmode, dsta);
/* AND addresses cannot be in any alias set, since they may implicitly
- alias surrounding code. Ideally we'd have some alias set that
+ alias surrounding code. Ideally we'd have some alias set that
covered all types except those with alignment 8 or higher. */
meml = change_address (dst, DImode,
- gen_rtx_AND (DImode,
+ gen_rtx_AND (DImode,
plus_constant (dsta, ofs),
GEN_INT (-8)));
set_mem_alias_set (meml, 0);
memh = change_address (dst, DImode,
- gen_rtx_AND (DImode,
+ gen_rtx_AND (DImode,
plus_constant (dsta, ofs + size - 1),
GEN_INT (-8)));
set_mem_alias_set (memh, 0);
dsth = expand_binop (DImode, ior_optab, insh, dsth, dsth, 0, OPTAB_WIDEN);
dstl = expand_binop (DImode, ior_optab, insl, dstl, dstl, 0, OPTAB_WIDEN);
}
-
+
if (WORDS_BIG_ENDIAN)
{
emit_move_insn (meml, dstl);
if (ofs != 0)
smem = adjust_address (smem, GET_MODE (smem), ofs);
-
+
/* Load up all of the source data. */
for (i = 0; i < words; ++i)
{
emit_move_insn (data_regs[words], tmp);
/* Extract the half-word fragments. Unfortunately DEC decided to make
- extxh with offset zero a noop instead of zeroing the register, so
+ extxh with offset zero a noop instead of zeroing the register, so
we must take care of that edge condition ourselves with cmov. */
sreg = copy_addr_to_reg (smema);
- areg = expand_binop (DImode, and_optab, sreg, GEN_INT (7), NULL,
+ areg = expand_binop (DImode, and_optab, sreg, GEN_INT (7), NULL,
1, OPTAB_WIDEN);
if (WORDS_BIG_ENDIAN)
emit_move_insn (sreg, plus_constant (sreg, 7));
ins_tmps[i] = gen_reg_rtx(DImode);
st_tmp_1 = gen_reg_rtx(DImode);
st_tmp_2 = gen_reg_rtx(DImode);
-
+
if (ofs != 0)
dmem = adjust_address (dmem, GET_MODE (dmem), ofs);
rtx data_regs[2 * MAX_MOVE_WORDS + 16];
rtx tmp;
unsigned int i, words, ofs, nregs = 0;
-
+
if (orig_bytes <= 0)
return 1;
else if (orig_bytes > MAX_MOVE_WORDS * UNITS_PER_WORD)
src_align = 16;
}
}
-
+
tmp = XEXP (orig_dst, 0);
if (GET_CODE (tmp) == REG)
dst_align = MAX (dst_align, REGNO_POINTER_ALIGN (REGNO (tmp)));
else
alpha_expand_unaligned_store_words (data_regs + i, orig_dst,
words, ofs);
-
+
i += words;
ofs += words * 8;
}
rtx orig_dst = operands[0];
rtx tmp;
int i, words, ofs = 0;
-
+
if (orig_bytes <= 0)
return 1;
if (orig_bytes > MAX_MOVE_WORDS * UNITS_PER_WORD)
static struct machine_function *
alpha_init_machine_status (void)
{
- return ((struct machine_function *)
+ return ((struct machine_function *)
ggc_alloc_cleared (sizeof (struct machine_function)));
}
{
case ALPHA_FPRM_NORM:
return NULL;
- case ALPHA_FPRM_MINF:
+ case ALPHA_FPRM_MINF:
return "m";
case ALPHA_FPRM_CHOP:
return "c";
if (GET_CODE (x) != CONST_INT
|| (unsigned HOST_WIDE_INT) INTVAL (x) >= (WORDS_BIG_ENDIAN
? 56
- : 64)
+ : 64)
|| (INTVAL (x) & 7) != 0)
output_operand_lossage ("invalid %%s value");
if (offset)
fprintf (file, "+" HOST_WIDE_INT_PRINT_DEC, offset);
-
+
addr = XEXP (addr, 0);
if (GET_CODE (addr) == REG)
basereg = REGNO (addr);
code. CXT is an RTX for the static chain value for the function.
The three offset parameters are for the individual template's
- layout. A JMPOFS < 0 indicates that the trampoline does not
+ layout. A JMPOFS < 0 indicates that the trampoline does not
contain instructions at all.
We assume here that a function will be called many more times than
break;
default:
- /* ??? We get called on all sorts of random stuff from
+ /* ??? We get called on all sorts of random stuff from
aggregate_value_p. We can't abort, but it's not clear
what's safe to return. Pretend it's a struct I guess. */
return true;
return gen_rtx_REG (mode, regnum);
}
-/* TCmode complex values are passed by invisible reference. We
+/* TCmode complex values are passed by invisible reference. We
should not split these values. */
static bool
in order to account for the integer arg registers which are counted
in argsize above, but which are not actually stored on the stack.
Must further be careful here about structures straddling the last
- integer argument register; that futzes with pretend_args_size,
+ integer argument register; that futzes with pretend_args_size,
which changes the meaning of AP. */
if (NUM_ARGS <= 6)
zero in the prologue of _Unwind_RaiseException et al. */
imask |= 1UL << 31;
}
-
+
/* If any register spilled, then spill the return address also. */
/* ??? This is required by the Digital stack unwind specification
and isn't needed if we're doing Dwarf2 unwinding. */
if (current_function_has_nonlocal_goto)
return 1;
- /* If we need a GP (we have a LDSYM insn or a CALL_INSN), load it first.
+ /* If we need a GP (we have a LDSYM insn or a CALL_INSN), load it first.
Even if we are a static function, we still need to do this in case
our address is taken and passed to something like qsort. */
frame_size = get_frame_size ();
if (TARGET_ABI_OPEN_VMS)
- frame_size = ALPHA_ROUND (sa_size
+ frame_size = ALPHA_ROUND (sa_size
+ (alpha_procedure_type == PT_STACK ? 8 : 0)
+ frame_size
+ current_function_pretend_args_size);
4096 bytes (we can probably get away without the latter test) and
every 8192 bytes in between. If the frame size is > 32768, we
do this in a loop. Otherwise, we generate the explicit probe
- instructions.
+ instructions.
Note that we are only allowed to adjust sp once in the prologue. */
/* For NT stack unwind (done by 'reverse execution'), it's
not OK to take the result of a loop, even though the value
is already in ptr, so we reload it via a single operation
- and subtract it to sp.
+ and subtract it to sp.
Yes, that's correct -- we have to reload the whole constant
into a temporary via ldah+lda then subtract from sp. */
if (low + sa_size <= 0x8000)
bias = reg_offset - low, reg_offset = low;
- else
+ else
bias = reg_offset, reg_offset = 0;
sa_reg = gen_rtx_REG (DImode, 24);
FRP (emit_insn (gen_adddi3 (sa_reg, stack_pointer_rtx,
GEN_INT (bias))));
}
-
+
/* Save regs in stack order. Beginning with VMS PV. */
if (TARGET_ABI_OPEN_VMS && alpha_procedure_type == PT_STACK)
{
if (current_function_outgoing_args_size != 0)
{
rtx seq
- = emit_move_insn (stack_pointer_rtx,
+ = emit_move_insn (stack_pointer_rtx,
plus_constant
(hard_frame_pointer_rtx,
- (ALPHA_ROUND
(current_function_outgoing_args_size))));
-
+
/* Only set FRAME_RELATED_P on the stack adjustment we just emitted
if ! frame_pointer_needed. Setting the bit will change the CFA
computation rule to use sp again, which would be wrong if we had
(clobber:BLK (scratch)), but this doesn't work for fp insns. So we
have to prevent all such scheduling with a blockage.
- Linux, on the other hand, never bothered to implement OSF/1's
+ Linux, on the other hand, never bothered to implement OSF/1's
exception handling, and so doesn't care about such things. Anyone
planning to use dwarf2 frame-unwind info can also omit the blockage. */
frame_size = get_frame_size ();
if (TARGET_ABI_OPEN_VMS)
- frame_size = ALPHA_ROUND (sa_size
+ frame_size = ALPHA_ROUND (sa_size
+ (alpha_procedure_type == PT_STACK ? 8 : 0)
+ frame_size
+ current_function_pretend_args_size);
/* Write function epilogue. */
-/* ??? At some point we will want to support full unwind, and so will
+/* ??? At some point we will want to support full unwind, and so will
need to mark the epilogue as well. At the moment, we just confuse
dwarf2out. */
#undef FRP
frame_size = get_frame_size ();
if (TARGET_ABI_OPEN_VMS)
- frame_size = ALPHA_ROUND (sa_size
+ frame_size = ALPHA_ROUND (sa_size
+ (alpha_procedure_type == PT_STACK ? 8 : 0)
+ frame_size
+ current_function_pretend_args_size);
if (low + sa_size <= 0x8000)
bias = reg_offset - low, reg_offset = low;
- else
+ else
bias = reg_offset, reg_offset = 0;
sa_reg = gen_rtx_REG (DImode, 22);
FRP (emit_move_insn (sa_reg, sa_reg_exp));
}
-
+
/* Restore registers in order, excepting a true frame pointer. */
mem = gen_rtx_MEM (DImode, plus_constant (sa_reg, reg_offset));
FRP (emit_move_insn (stack_pointer_rtx,
gen_rtx_PLUS (DImode, sp_adj1, sp_adj2)));
}
- else
+ else
{
if (TARGET_ABI_OPEN_VMS && alpha_procedure_type == PT_REGISTER)
{
case NEG: case NOT: case SIGN_EXTEND: case ZERO_EXTEND:
case TRUNCATE: case FLOAT_EXTEND: case FLOAT_TRUNCATE: case FLOAT:
case FIX: case UNSIGNED_FLOAT: case UNSIGNED_FIX: case ABS:
- case SQRT: case FFS:
+ case SQRT: case FFS:
summarize_insn (XEXP (x, 0), sum, 0);
break;
shadow.used.fp = 0;
shadow.used.mem = 0;
shadow.defd = shadow.used;
-
+
for (i = get_insns (); i ; i = NEXT_INSN (i))
{
if (GET_CODE (i) == NOTE)
}
}
-/* IN_USE is a mask of the slots currently filled within the insn group.
+/* IN_USE is a mask of the slots currently filled within the insn group.
The mask bits come from alphaev4_pipe above. If EV4_IBX is set, then
- the insn in EV4_IB0 can be swapped by the hardware into EV4_IB1.
+ the insn in EV4_IB0 can be swapped by the hardware into EV4_IB1.
LEN is, of course, the length of the group in bytes. */
abort();
}
len += 4;
-
+
/* Haifa doesn't do well scheduling branches. */
if (GET_CODE (insn) == JUMP_INSN)
goto next_and_done;
return insn;
}
-/* IN_USE is a mask of the slots currently filled within the insn group.
+/* IN_USE is a mask of the slots currently filled within the insn group.
The mask bits come from alphaev5_pipe above. If EV5_E01 is set, then
- the insn in EV5_E0 can be swapped by the hardware into EV5_E1.
+ the insn in EV5_E0 can be swapped by the hardware into EV5_E1.
LEN is, of course, the length of the group in bytes. */
len = get_attr_length (insn);
goto next_and_done;
- /* ??? Most of the places below, we would like to abort, as
- it would indicate an error either in Haifa, or in the
- scheduling description. Unfortunately, Haifa never
+ /* ??? Most of the places below, we would like to abort, as
+ it would indicate an error either in Haifa, or in the
+ scheduling description. Unfortunately, Haifa never
schedules the last instruction of the BB, so we don't
have an accurate TI bit to go off. */
case EV5_E01:
abort();
}
len += 4;
-
+
/* Haifa doesn't do well scheduling branches. */
/* ??? If this is predicted not-taken, slotting continues, except
that no more IBR, FBR, or JSR insns may be slotted. */
else
where = i;
- do
+ do
emit_insn_before ((*next_nop)(&prev_in_use), where);
while (--nop_count);
ofs = 0;
if (TARGET_SUPPORT_ARCH | TARGET_BWX | TARGET_MAX | TARGET_FIX | TARGET_CIX)
fprintf (asm_out_file,
"\t.arch %s\n",
- TARGET_CPU_EV6 ? "ev6"
+ TARGET_CPU_EV6 ? "ev6"
: (TARGET_CPU_EV5
? (TARGET_MAX ? "pca56" : TARGET_BWX ? "ev56" : "ev5")
: "ev4"));
if (!alpha_funcs_tree)
alpha_funcs_tree = splay_tree_new_ggc ((splay_tree_compare_fn)
splay_tree_compare_pointers);
-
+
cfaf = (struct alpha_funcs *) ggc_alloc (sizeof (struct alpha_funcs));
cfaf->links = 0;
al->rkind = KIND_CODEADDR;
else
al->rkind = KIND_LINKAGE;
-
+
if (lflag)
return gen_rtx_MEM (Pmode, plus_constant (al->linkage, 8));
else
/* Record an element in the table of global constructors. SYMBOL is
a SYMBOL_REF of the function to be called; PRIORITY is a number
- between 0 and MAX_INIT_PRIORITY.
+ between 0 and MAX_INIT_PRIORITY.
Differs from default_ctors_section_asm_out_constructor in that the
width of the .ctors entry is always 64 bits, rather than the 32 bits
unicosmk_initial_elimination_offset (int from, int to)
{
int fixed_size;
-
+
fixed_size = alpha_sa_size();
if (fixed_size != 0)
fixed_size += 48;
if (from == FRAME_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
- return -fixed_size;
+ return -fixed_size;
else if (from == ARG_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
return 0;
else if (from == FRAME_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
+ ALPHA_ROUND (get_frame_size()));
else if (from == ARG_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
return (ALPHA_ROUND (fixed_size)
- + ALPHA_ROUND (get_frame_size()
+ + ALPHA_ROUND (get_frame_size()
+ current_function_outgoing_args_size));
else
abort ();
unsigned len = strlen (name);
char *clean_name = alloca (len + 2);
char *ptr = clean_name;
-
+
/* CAM only accepts module names that start with a letter or '$'. We
prefix the module name with a '$' if necessary. */
const char *name;
int len;
- if (!decl)
+ if (!decl)
abort ();
name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl));
{
char *string;
- /* It is essential that we prefix the section name here because
- otherwise the section names generated for constructors and
+ /* It is essential that we prefix the section name here because
+ otherwise the section names generated for constructors and
destructors confuse collect2. */
string = alloca (len + 6);
/* Output an alignment directive. We have to use the macro 'gcc@code@align'
in code sections because .align fills unused space with zeroes. */
-
+
void
unicosmk_output_align (FILE *file, int align)
{
unicosmk_defer_case_vector (rtx lab, rtx vec)
{
struct machine_function *machine = cfun->machine;
-
+
vec = gen_rtx_EXPR_LIST (VOIDmode, lab, vec);
machine->addr_list = gen_rtx_EXPR_LIST (VOIDmode, vec,
- machine->addr_list);
+ machine->addr_list);
}
/* Output a case vector. */
static const char *
unicosmk_ssib_name (void)
{
- /* This is ok since CAM won't be able to deal with names longer than that
+ /* This is ok since CAM won't be able to deal with names longer than that
anyway. */
static char name[256];
return name;
}
-/* Set up the dynamic subprogram information block (DSIB) and update the
- frame pointer register ($15) for subroutines which have a frame. If the
+/* Set up the dynamic subprogram information block (DSIB) and update the
+ frame pointer register ($15) for subroutines which have a frame. If the
subroutine doesn't have a frame, simply increment $15. */
static void
unicosmk_text_section (void)
{
static int count = 0;
- sprintf (unicosmk_section_buf, "\t.endp\n\n\t.psect\tgcc@text___%d,code",
+ sprintf (unicosmk_section_buf, "\t.endp\n\n\t.psect\tgcc@text___%d,code",
count++);
return unicosmk_section_buf;
}
unicosmk_data_section (void)
{
static int count = 1;
- sprintf (unicosmk_section_buf, "\t.endp\n\n\t.psect\tgcc@data___%d,data",
+ sprintf (unicosmk_section_buf, "\t.endp\n\n\t.psect\tgcc@data___%d,data",
count++);
return unicosmk_section_buf;
}
len = strlen (user_label_prefix);
for (p = unicosmk_extern_head; p != 0; p = p->next)
{
- /* We have to strip the encoding and possibly remove user_label_prefix
+ /* We have to strip the encoding and possibly remove user_label_prefix
from the identifier in order to handle -fleading-underscore and
explicit asm names correctly (cf. gcc.dg/asm-names-1.c). */
real_name = default_strip_name_encoding (p->name);
if (len && p->name[0] == '*'
&& !memcmp (real_name, user_label_prefix, len))
real_name += len;
-
+
name_tree = get_identifier (real_name);
if (! TREE_ASM_WRITTEN (name_tree))
{
}
}
}
-
+
/* Record an extern. */
void
const char *name;
};
-/* List of identifiers which have been replaced by DEX expressions. The DEX
+/* List of identifiers which have been replaced by DEX expressions. The DEX
number is determined by the position in the list. */
-static struct unicosmk_dex *unicosmk_dex_list = NULL;
+static struct unicosmk_dex *unicosmk_dex_list = NULL;
/* The number of elements in the DEX list. */
struct unicosmk_dex *dex;
const char *name;
int i;
-
+
if (GET_CODE (x) != SYMBOL_REF)
return 0;
return i;
--i;
}
-
+
dex = (struct unicosmk_dex *) xmalloc (sizeof (struct unicosmk_dex));
dex->name = name;
dex->next = unicosmk_dex_list;
putc ('\n', file);
--i;
}
-
+
fprintf (file, "\t.dexend\n");
}
/* Output text to appear at the beginning of an assembler file. */
-static void
+static void
unicosmk_file_start (void)
{
int i;
unicosmk_output_externs (asm_out_file);
- /* Output dex definitions used for functions whose names conflict with
+ /* Output dex definitions used for functions whose names conflict with
register names. */
unicosmk_output_dex (asm_out_file);
#define TARGET_SPLIT_COMPLEX_ARG alpha_split_complex_arg
#undef TARGET_GIMPLIFY_VA_ARG_EXPR
#define TARGET_GIMPLIFY_VA_ARG_EXPR alpha_gimplify_va_arg
+#undef TARGET_VECTOR_MODE_SUPPORTED_P
+#define TARGET_VECTOR_MODE_SUPPORTED_P alpha_vector_mode_supported_p
#undef TARGET_BUILD_BUILTIN_VA_LIST
#define TARGET_BUILD_BUILTIN_VA_LIST alpha_build_builtin_va_list
\f
#include "gt-alpha.h"
-
#define MASK_SMALL_TEXT (1 << 15)
#define TARGET_SMALL_TEXT (target_flags & MASK_SMALL_TEXT)
-/* This means use IEEE quad-format for long double. Assumes the
+/* This means use IEEE quad-format for long double. Assumes the
presence of the GEM support library routines. */
#define MASK_LONG_DOUBLE_128 (1 << 16)
#define TARGET_LONG_DOUBLE_128 (target_flags & MASK_LONG_DOUBLE_128)
#define WCHAR_TYPE_SIZE 32
/* Define this macro if it is advisable to hold scalars in registers
- in a wider mode than that declared by the program. In such cases,
+ in a wider mode than that declared by the program. In such cases,
the value is constrained to be within the bounds of the declared
type, but kept valid in the wider mode. The signedness of the
extension may differ from that of the type.
We define all 32 integer registers, even though $31 is always zero,
and all 32 floating-point registers, even though $f31 is also
always zero. We do not bother defining the FP status register and
- there are no other registers.
+ there are no other registers.
Since $31 is always zero, we will use register number 31 as the
argument pointer. It will never appear in the generated code
? (MODE) == SFmode || (MODE) == DFmode || (MODE) == DImode \
: 1)
-/* Value is 1 if MODE is a supported vector mode. */
-
-#define VECTOR_MODE_SUPPORTED_P(MODE) \
- (TARGET_MAX \
- && ((MODE) == V8QImode || (MODE) == V4HImode || (MODE) == V2SImode))
-
/* A C expression that is nonzero if a value of mode
MODE1 is accessible in mode MODE2 without copying.
/* Base register for access to local variables of function. */
#define FRAME_POINTER_REGNUM 63
-/* Register in which static-chain is passed to a function.
+/* Register in which static-chain is passed to a function.
For the Alpha, this is based on an example; the calling sequence
doesn't seem to specify this. */
For any two classes, it is very desirable that there be another
class that represents their union. */
-
+
enum reg_class {
NO_REGS, R0_REG, R24_REG, R25_REG, R27_REG,
GENERAL_REGS, FLOAT_REGS, ALL_REGS,
`R' is a SYMBOL_REF that has SYMBOL_REF_FLAG set or is the current
function.
- 'S' is a 6-bit constant (valid for a shift insn).
+ 'S' is a 6-bit constant (valid for a shift insn).
'T' is a HIGH.
? reg_classes_intersect_p (FLOAT_REGS, CLASS) : 0)
/* Define the cost of moving between registers of various classes. Moving
- between FLOAT_REGS and anything else except float regs is expensive.
+ between FLOAT_REGS and anything else except float regs is expensive.
In fact, we make it quite expensive because we really don't want to
do these moves unless it is clearly worth it. Optimizations may
reduce the impact of not being able to allocate a pseudo to a
#define ASM_DECLARE_FUNCTION_SIZE(FILE,NAME,DECL) \
alpha_end_function(FILE,NAME,DECL)
-
+
/* Output any profiling code before the prologue. */
#define PROFILE_BEFORE_PROLOGUE 1
of a trampoline, leaving space for the variable parts.
The trampoline should set the static chain pointer to value placed
- into the trampoline and should branch to the specified routine.
+ into the trampoline and should branch to the specified routine.
Note that $27 has been set to the address of the trampoline, so we can
use it for addressability of the two data items. */
/* Try a machine-dependent way of reloading an illegitimate address
operand. If we find one, push the reload and jump to WIN. This
macro is used in only one place: `find_reloads_address' in reload.c. */
-
+
#define LEGITIMIZE_RELOAD_ADDRESS(X,MODE,OPNUM,TYPE,IND_L,WIN) \
do { \
rtx new_x = alpha_legitimize_reload_address (X, MODE, OPNUM, TYPE, IND_L); \
/* Nonzero if access to memory by bytes is no faster than for words.
Also nonzero if doing byte operations (specifically shifts) in registers
- is undesirable.
+ is undesirable.
On the Alpha, we want to not use the byte operation and instead use
masking operations to access fields; these will save instructions. */
extern void arm_encode_call_attribute (tree, int);
#endif
#ifdef RTX_CODE
+extern bool arm_vector_mode_supported_p (enum machine_mode);
extern int arm_hard_regno_mode_ok (unsigned int, enum machine_mode);
extern int const_ok_for_arm (HOST_WIDE_INT);
extern int arm_split_constant (RTX_CODE, enum machine_mode, rtx,
extern rtx arm_function_value(tree, tree);
#endif
-#if defined AOF_ASSEMBLER
+#if defined AOF_ASSEMBLER
extern rtx aof_pic_entry (rtx);
extern char *aof_text_section (void);
extern char *aof_data_section (void);
along with GCC; see the file COPYING. If not, write to
the Free Software Foundation, 59 Temple Place - Suite 330,
Boston, MA 02111-1307, USA. */
-
+
#include "config.h"
#include "system.h"
#include "coretypes.h"
#undef TARGET_ADDRESS_COST
#define TARGET_ADDRESS_COST arm_address_cost
+#undef TARGET_VECTOR_MODE_SUPPORTED_P
+#define TARGET_VECTOR_MODE_SUPPORTED_P arm_vector_mode_supported_p
+
#undef TARGET_MACHINE_DEPENDENT_REORG
#define TARGET_MACHINE_DEPENDENT_REORG arm_reorg
int thumb_code = 0;
/* Nonzero if we should define __THUMB_INTERWORK__ in the
- preprocessor.
+ preprocessor.
XXX This is a bit of a hack, it's intended to help work around
problems in GLD which doesn't understand that armv5t code is
interworking clean. */
/* ARM Architectures */
/* We don't specify rtx_costs here as it will be figured out
from the core. */
-
+
{"armv2", arm2, "2", FL_CO_PROC | FL_MODE26 | FL_FOR_ARCH2, NULL},
{"armv2a", arm2, "2", FL_CO_PROC | FL_MODE26 | FL_FOR_ARCH2, NULL},
{"armv3", arm6, "3", FL_CO_PROC | FL_MODE26 | FL_FOR_ARCH3, NULL},
struct arm_cpu_select arm_select[] =
{
- /* string name processors */
+ /* string name processors */
{ NULL, "-mcpu=", all_cores },
{ NULL, "-march=", all_architectures },
{ NULL, "-mtune=", all_cores }
bit_count (unsigned long value)
{
unsigned long count = 0;
-
+
while (value)
{
count++;
set_optab_libfunc (smul_optab, SFmode, "__aeabi_fmul");
set_optab_libfunc (neg_optab, SFmode, "__aeabi_fneg");
set_optab_libfunc (sub_optab, SFmode, "__aeabi_fsub");
-
+
/* Single-precision comparisons. Table 5. */
set_optab_libfunc (eq_optab, SFmode, "__aeabi_fcmpeq");
set_optab_libfunc (ne_optab, SFmode, NULL);
for (i = ARRAY_SIZE (arm_select); i--;)
{
struct arm_cpu_select * ptr = arm_select + i;
-
+
if (ptr->string != NULL && ptr->string[0] != '\0')
{
const struct processors * sel;
if (insn_flags != 0 && (insn_flags ^ sel->flags))
warning ("switch -mcpu=%s conflicts with -march= switch",
ptr->string);
-
+
insn_flags = sel->flags;
}
-
+
break;
}
error ("bad value (%s) for %s switch", ptr->string, ptr->name);
}
}
-
+
/* If the user did not specify a processor, choose one for them. */
if (insn_flags == 0)
{
/* Now check to see if the user has specified some command line
switch that require certain abilities from the cpu. */
sought = 0;
-
+
if (TARGET_INTERWORK || TARGET_THUMB)
{
sought |= (FL_THUMB | FL_MODE32);
-
+
/* There are no ARM processors that support both APCS-26 and
interworking. Therefore we force FL_MODE26 to be removed
from insn_flags here (if it was set), so that the search
below will always be able to find a compatible processor. */
insn_flags &= ~FL_MODE26;
}
-
+
if (sought != 0 && ((sought & insn_flags) != sought))
{
/* Try to locate a CPU type that supports all of the abilities
{
unsigned current_bit_count = 0;
const struct processors * best_fit = NULL;
-
+
/* Ideally we would like to issue an error message here
saying that it was not possible to find a CPU compatible
with the default CPU, but which also supports the command
if (arm_tune == arm_none)
arm_tune = (enum processor_type) (sel - all_cores);
}
-
+
/* The processor for which we should tune should now have been
chosen. */
if (arm_tune == arm_none)
abort ();
-
+
tune_flags = all_cores[(int)arm_tune].flags;
if (optimize_size)
targetm.rtx_costs = arm_size_rtx_costs;
warning ("target CPU does not support interworking" );
target_flags &= ~ARM_FLAG_INTERWORK;
}
-
+
if (TARGET_THUMB && !(insn_flags & FL_THUMB))
{
warning ("target CPU does not support THUMB instructions");
warning ("-mapcs-stack-check incompatible with -mno-apcs-frame");
target_flags |= ARM_FLAG_APCS_FRAME;
}
-
+
if (TARGET_POKE_FUNCTION_NAME)
target_flags |= ARM_FLAG_APCS_FRAME;
-
+
if (TARGET_APCS_REENT && flag_pic)
error ("-fpic and -mapcs-reent are incompatible");
-
+
if (TARGET_APCS_REENT)
warning ("APCS reentrant code not supported. Ignored");
-
+
/* If this target is normally configured to use APCS frames, warn if they
are turned off and debugging is turned on. */
if (TARGET_ARM
&& !TARGET_APCS_FRAME
&& (TARGET_DEFAULT & ARM_FLAG_APCS_FRAME))
warning ("-g with -mno-apcs-frame may not give sensible debugging");
-
+
/* If stack checking is disabled, we can use r10 as the PIC register,
which keeps r9 available. */
if (flag_pic)
arm_pic_register = TARGET_APCS_STACK ? 9 : 10;
-
+
if (TARGET_APCS_FLOAT)
warning ("passing floating point arguments in fp regs not yet supported");
-
+
/* Initialize boolean versions of the flags, for use in the arm.md file. */
arm_arch3m = (insn_flags & FL_ARCH3M) != 0;
arm_arch4 = (insn_flags & FL_ARCH4) != 0;
/* If soft-float is specified then don't use FPU. */
if (TARGET_SOFT_FLOAT)
arm_fpu_arch = FPUTYPE_NONE;
-
+
/* For arm2/3 there is no need to do any scheduling if there is only
a floating point emulator, or we are doing software floating-point. */
if ((TARGET_SOFT_FLOAT
|| arm_fpu_tune == FPUTYPE_FPA_EMU3)
&& (tune_flags & FL_MODE32) == 0)
flag_schedule_insns = flag_schedule_insns_after_reload = 0;
-
+
/* Override the default structure alignment for AAPCS ABI. */
if (arm_abi == ARM_ABI_AAPCS)
arm_structure_size_boundary = 8;
unsigned long type = ARM_FT_UNKNOWN;
tree a;
tree attr;
-
+
if (TREE_CODE (current_function_decl) != FUNCTION_DECL)
abort ();
&& TREE_NOTHROW (current_function_decl)
&& TREE_THIS_VOLATILE (current_function_decl))
type |= ARM_FT_VOLATILE;
-
+
if (cfun->static_chain_decl != NULL)
type |= ARM_FT_NESTED;
attr = DECL_ATTRIBUTES (current_function_decl);
-
+
a = lookup_attribute ("naked", attr);
if (a != NULL_TREE)
type |= ARM_FT_NAKED;
a = lookup_attribute ("isr", attr);
if (a == NULL_TREE)
a = lookup_attribute ("interrupt", attr);
-
+
if (a == NULL_TREE)
type |= TARGET_INTERWORK ? ARM_FT_INTERWORKED : ARM_FT_NORMAL;
else
type |= arm_isr_value (TREE_VALUE (a));
-
+
return type;
}
return cfun->machine->func_type;
}
\f
-/* Return 1 if it is possible to return using a single instruction.
+/* Return 1 if it is possible to return using a single instruction.
If SIBLING is non-null, this is a test for a return before a sibling
call. SIBLING is the call insn, so we can examine its register usage. */
pointer won't be correctly restored if the instruction takes a
page fault. We work around this problem by popping r3 along with
the other registers, since that is never slower than executing
- another instruction.
+ another instruction.
We test for !arm_arch5 here, because code for any architecture
less than this could potentially be run on one of the buggy
taken and multiple registers have been stacked. */
if (iscond && arm_is_strong)
{
- /* Conditional return when just the LR is stored is a simple
+ /* Conditional return when just the LR is stored is a simple
conditional-load instruction, that's not expensive. */
if (saved_int_regs != 0 && saved_int_regs != (1 << LR_REGNUM))
return 0;
{
unsigned HOST_WIDE_INT mask = ~(unsigned HOST_WIDE_INT)0xFF;
- /* For machines with >32 bit HOST_WIDE_INT, the bits above bit 31 must
+ /* For machines with >32 bit HOST_WIDE_INT, the bits above bit 31 must
be all zero, or all one. */
if ((i & ~(unsigned HOST_WIDE_INT) 0xffffffff) != 0
&& ((i & ~(unsigned HOST_WIDE_INT) 0xffffffff)
!= ((~(unsigned HOST_WIDE_INT) 0)
& ~(unsigned HOST_WIDE_INT) 0xffffffff)))
return FALSE;
-
+
/* Fast return for 0 and powers of 2 */
if ((i & (i - 1)) == 0)
return TRUE;
*/
if (!after_arm_reorg
&& !cond
- && (arm_gen_constant (code, mode, NULL_RTX, val, target, source,
+ && (arm_gen_constant (code, mode, NULL_RTX, val, target, source,
1, 0)
> arm_constant_limit + (code != SET)))
{
}
}
- return arm_gen_constant (code, mode, cond, val, target, source, subtargets,
+ return arm_gen_constant (code, mode, cond, val, target, source, subtargets,
1);
}
do
{
int end;
-
+
if (i <= 0)
i += 32;
if (remainder & (3 << (i - 2)))
{
if (generate)
emit_constant_insn (cond,
- gen_rtx_SET (VOIDmode, target,
+ gen_rtx_SET (VOIDmode, target,
gen_rtx_MINUS (mode, GEN_INT (val),
source)));
return 1;
if (generate)
emit_constant_insn (cond,
gen_rtx_SET (VOIDmode, target,
- (source
+ (source
? gen_rtx_fmt_ee (code, mode, source,
GEN_INT (val))
: GEN_INT (val))));
if (set_sign_bit_copies > 1)
{
if (const_ok_for_arm
- (temp1 = ARM_SIGN_EXTEND (remainder
+ (temp1 = ARM_SIGN_EXTEND (remainder
<< (set_sign_bit_copies - 1))))
{
if (generate)
{
rtx new_src = subtargets ? gen_reg_rtx (mode) : target;
emit_constant_insn (cond,
- gen_rtx_SET (VOIDmode, new_src,
+ gen_rtx_SET (VOIDmode, new_src,
GEN_INT (temp1)));
emit_constant_insn (cond,
- gen_ashrsi3 (target, new_src,
+ gen_ashrsi3 (target, new_src,
GEN_INT (set_sign_bit_copies - 1)));
}
return 2;
gen_rtx_SET (VOIDmode, new_src,
GEN_INT (temp1)));
emit_constant_insn (cond,
- gen_ashrsi3 (target, new_src,
+ gen_ashrsi3 (target, new_src,
GEN_INT (set_sign_bit_copies - 1)));
}
return 2;
source, subtargets, generate);
source = new_src;
if (generate)
- emit_constant_insn
+ emit_constant_insn
(cond,
gen_rtx_SET
(VOIDmode, target,
rtx sub = subtargets ? gen_reg_rtx (mode) : target;
emit_constant_insn (cond,
- gen_rtx_SET (VOIDmode, sub,
+ gen_rtx_SET (VOIDmode, sub,
GEN_INT (val)));
emit_constant_insn (cond,
- gen_rtx_SET (VOIDmode, target,
+ gen_rtx_SET (VOIDmode, target,
gen_rtx_fmt_ee (code, mode,
source, sub)));
}
rtx sub = subtargets ? gen_reg_rtx (mode) : target;
rtx shift = GEN_INT (set_sign_bit_copies);
- emit_constant_insn
+ emit_constant_insn
(cond,
gen_rtx_SET (VOIDmode, sub,
- gen_rtx_NOT (mode,
+ gen_rtx_NOT (mode,
gen_rtx_ASHIFT (mode,
- source,
+ source,
shift))));
- emit_constant_insn
+ emit_constant_insn
(cond,
gen_rtx_SET (VOIDmode, target,
gen_rtx_NOT (mode,
gen_rtx_LSHIFTRT (mode,
source,
shift))));
- emit_constant_insn
+ emit_constant_insn
(cond,
gen_rtx_SET (VOIDmode, target,
gen_rtx_NOT (mode,
sub = gen_reg_rtx (mode);
emit_constant_insn (cond,
gen_rtx_SET (VOIDmode, sub,
- gen_rtx_AND (mode, source,
+ gen_rtx_AND (mode, source,
GEN_INT (temp1))));
emit_constant_insn (cond,
gen_rtx_SET (VOIDmode, target,
if (generate)
{
rtx new_src = subtargets ? gen_reg_rtx (mode) : target;
- insns = arm_gen_constant (AND, mode, cond,
+ insns = arm_gen_constant (AND, mode, cond,
remainder | shift_mask,
new_src, source, subtargets, 1);
source = new_src;
if (clear_zero_bit_copies >= 16 && clear_zero_bit_copies < 24)
{
HOST_WIDE_INT shift_mask = (1 << clear_zero_bit_copies) - 1;
-
+
if ((remainder | shift_mask) != 0xffffffff)
{
if (generate)
rather than having to synthesize both large constants from scratch.
Therefore, we calculate how many insns would be required to emit
- the constant starting from `best_start', and also starting from
- zero (ie with bit 31 first to be output). If `best_start' doesn't
+ the constant starting from `best_start', and also starting from
+ zero (ie with bit 31 first to be output). If `best_start' doesn't
yield a shorter sequence, we may as well use zero. */
if (best_start != 0
&& ((((unsigned HOST_WIDE_INT) 1) << best_start) < remainder)
- && (count_insns_for_constant (remainder, 0) <=
+ && (count_insns_for_constant (remainder, 0) <=
count_insns_for_constant (remainder, best_start)))
best_start = 0;
temp1_rtx = gen_rtx_fmt_ee (code, mode, source, temp1_rtx);
emit_constant_insn (cond,
- gen_rtx_SET (VOIDmode, new_src,
+ gen_rtx_SET (VOIDmode, new_src,
temp1_rtx));
source = new_src;
}
int unsignedp ATTRIBUTE_UNUSED;
rtx r ATTRIBUTE_UNUSED;
-
+
mode = TYPE_MODE (type);
/* Promote integer types. */
if (INTEGRAL_TYPE_P (type))
larger than a word (or are variable size). */
return (size < 0 || size > UNITS_PER_WORD);
}
-
+
/* For the arm-wince targets we choose to be compatible with Microsoft's
ARM and Thumb compilers, which always return aggregates in memory. */
#ifndef ARM_WINCE
we will want to return it via memory and not in a register. */
if (size < 0 || size > UNITS_PER_WORD)
return 1;
-
+
if (TREE_CODE (type) == RECORD_TYPE)
{
tree field;
has an offset of zero. For practical purposes this means
that the structure can have at most one non bit-field element
and that this element must be the first one in the structure. */
-
+
/* Find the first field, ignoring non FIELD_DECL things which will
have been created by C++. */
for (field = TYPE_FIELDS (type);
field && TREE_CODE (field) != FIELD_DECL;
field = TREE_CHAIN (field))
continue;
-
+
if (field == NULL)
return 0; /* An empty structure. Allowed by an extension to ANSI C. */
{
if (TREE_CODE (field) != FIELD_DECL)
continue;
-
+
if (!DECL_BIT_FIELD_TYPE (field))
return 1;
}
return 0;
}
-
+
if (TREE_CODE (type) == UNION_TYPE)
{
tree field;
if (FLOAT_TYPE_P (TREE_TYPE (field)))
return 1;
-
+
if (RETURN_IN_MEMORY (TREE_TYPE (field)))
return 1;
}
-
+
return 0;
}
-#endif /* not ARM_WINCE */
-
+#endif /* not ARM_WINCE */
+
/* Return all other types in memory. */
return 1;
}
for a call to a function whose data type is FNTYPE.
For a library call, FNTYPE is NULL. */
void
-arm_init_cumulative_args (CUMULATIVE_ARGS *pcum, tree fntype,
+arm_init_cumulative_args (CUMULATIVE_ARGS *pcum, tree fntype,
rtx libname ATTRIBUTE_UNUSED,
tree fndecl ATTRIBUTE_UNUSED)
{
pcum->nregs = ((fntype && aggregate_value_p (TREE_TYPE (fntype), fntype)) ? 1 : 0);
pcum->iwmmxt_nregs = 0;
pcum->can_split = true;
-
+
pcum->call_cookie = CALL_NORMAL;
if (TARGET_LONG_CALLS)
pcum->call_cookie = CALL_LONG;
-
+
/* Check for long call/short call attributes. The attributes
override any command line option. */
if (fntype)
/* Varargs vectors are treated the same as long long.
named_count avoids having to change the way arm handles 'named' */
if (TARGET_IWMMXT_ABI
- && VECTOR_MODE_SUPPORTED_P (mode)
+ && arm_vector_mode_supported_p (mode)
&& pcum->named_count > pcum->nargs + 1)
{
if (pcum->iwmmxt_nregs <= 9)
if (!named || pcum->nregs + nregs > NUM_ARG_REGS)
return NULL_RTX;
-
+
return gen_rtx_REG (mode, pcum->nregs);
}
/* Whereas these functions are always known to reside within the 26 bit
addressing range. */
{ "short_call", 0, 0, false, true, true, NULL },
- /* Interrupt Service Routines have special prologue and epilogue requirements. */
+ /* Interrupt Service Routines have special prologue and epilogue requirements. */
{ "isr", 0, 1, false, false, false, arm_handle_isr_attribute },
{ "interrupt", 0, 1, false, false, false, arm_handle_isr_attribute },
{ "naked", 0, 0, true, false, false, arm_handle_fndecl_attribute },
arm_comp_type_attributes (tree type1, tree type2)
{
int l1, l2, s1, s2;
-
+
/* Check for mismatch of non-default calling convention. */
if (TREE_CODE (type1) != FUNCTION_TYPE)
return 1;
if ((l1 & s2) || (l2 & s1))
return 0;
}
-
+
/* Check for mismatched ISR attribute. */
l1 = lookup_attribute ("isr", TYPE_ATTRIBUTES (type1)) != NULL;
if (! l1)
or 3. the target function has __attribute__ ((section))
However we do not generate a long call if the function:
-
+
d. has an __attribute__ ((short_call))
or e. is inside the scope of a #pragma no_long_calls
or f. is defined within the current compilation unit.
-
+
This function will be called by C fragments contained in the machine
description file. SYM_REF and CALL_COOKIE correspond to the matched
rtl operands. CALL_SYMBOL is used to distinguish between
if (current_file_function_operand (sym_ref))
return 0;
-
+
return (call_cookie & CALL_LONG)
|| ENCODED_LONG_CALL_ATTR_P (XSTR (sym_ref, 0))
|| TARGET_LONG_CALLS;
return false;
/* If we are interworking and the function is not declared static
- then we can't tail-call it unless we know that it exists in this
+ then we can't tail-call it unless we know that it exists in this
compilation unit (since it might be a Thumb routine). */
if (TARGET_INTERWORK && TREE_PUBLIC (decl) && !TREE_ASM_WRITTEN (decl))
return false;
emit_insn (gen_pic_load_addr_thumb (address, orig));
if ((GET_CODE (orig) == LABEL_REF
- || (GET_CODE (orig) == SYMBOL_REF &&
+ || (GET_CODE (orig) == SYMBOL_REF &&
SYMBOL_REF_LOCAL_P (orig)))
&& NEED_GOT_RELOC)
pic_ref = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, address);
pic_tmp2 = gen_rtx_CONST (VOIDmode, global_offset_table);
pic_rtx = gen_rtx_CONST (Pmode, gen_rtx_MINUS (Pmode, pic_tmp2, pic_tmp));
-
+
if (TARGET_ARM)
{
emit_insn (gen_pic_load_addr_arm (pic_offset_table_rtx, pic_rtx));
{
bool use_ldrd;
enum rtx_code code = GET_CODE (x);
-
+
if (arm_address_register_rtx_p (x, strict_p))
return 1;
}
/* Return nonzero if x is a legitimate Thumb-state address.
-
+
The AP may be eliminated to either the SP or the FP, so we use the
least common denominator, e.g. SImode, and offsets from 0 to 64.
case ASHIFT:
case ASHIFTRT:
case LSHIFTRT:
- case ROTATERT:
+ case ROTATERT:
case PLUS:
case MINUS:
case COMPARE:
case NEG:
- case NOT:
+ case NOT:
return COSTS_N_INSNS (1);
-
- case MULT:
- if (GET_CODE (XEXP (x, 1)) == CONST_INT)
- {
- int cycles = 0;
+
+ case MULT:
+ if (GET_CODE (XEXP (x, 1)) == CONST_INT)
+ {
+ int cycles = 0;
unsigned HOST_WIDE_INT i = INTVAL (XEXP (x, 1));
-
- while (i)
- {
- i >>= 2;
- cycles++;
- }
- return COSTS_N_INSNS (2) + cycles;
+
+ while (i)
+ {
+ i >>= 2;
+ cycles++;
+ }
+ return COSTS_N_INSNS (2) + cycles;
}
return COSTS_N_INSNS (1) + 16;
-
- case SET:
- return (COSTS_N_INSNS (1)
- + 4 * ((GET_CODE (SET_SRC (x)) == MEM)
+
+ case SET:
+ return (COSTS_N_INSNS (1)
+ + 4 * ((GET_CODE (SET_SRC (x)) == MEM)
+ GET_CODE (SET_DEST (x)) == MEM));
-
- case CONST_INT:
- if (outer == SET)
- {
- if ((unsigned HOST_WIDE_INT) INTVAL (x) < 256)
- return 0;
- if (thumb_shiftable_const (INTVAL (x)))
- return COSTS_N_INSNS (2);
- return COSTS_N_INSNS (3);
- }
+
+ case CONST_INT:
+ if (outer == SET)
+ {
+ if ((unsigned HOST_WIDE_INT) INTVAL (x) < 256)
+ return 0;
+ if (thumb_shiftable_const (INTVAL (x)))
+ return COSTS_N_INSNS (2);
+ return COSTS_N_INSNS (3);
+ }
else if ((outer == PLUS || outer == COMPARE)
- && INTVAL (x) < 256 && INTVAL (x) > -256)
+ && INTVAL (x) < 256 && INTVAL (x) > -256)
return 0;
else if (outer == AND
&& INTVAL (x) < 256 && INTVAL (x) >= -256)
return COSTS_N_INSNS (1);
- else if (outer == ASHIFT || outer == ASHIFTRT
- || outer == LSHIFTRT)
- return 0;
+ else if (outer == ASHIFT || outer == ASHIFTRT
+ || outer == LSHIFTRT)
+ return 0;
return COSTS_N_INSNS (2);
-
- case CONST:
- case CONST_DOUBLE:
- case LABEL_REF:
- case SYMBOL_REF:
+
+ case CONST:
+ case CONST_DOUBLE:
+ case LABEL_REF:
+ case SYMBOL_REF:
return COSTS_N_INSNS (3);
-
+
case UDIV:
case UMOD:
case DIV:
case AND:
case XOR:
- case IOR:
+ case IOR:
/* XXX guess. */
return 8;
case QImode:
return (1 + (mode == DImode ? 4 : 0)
+ (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0));
-
+
case HImode:
return (4 + (mode == DImode ? 4 : 0)
+ (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0));
-
+
case SImode:
return (1 + (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0));
-
+
default:
return 99;
}
-
+
default:
return 99;
}
case ASHIFT: case LSHIFTRT: case ASHIFTRT:
if (mode == DImode)
return (8 + (GET_CODE (XEXP (x, 1)) == CONST_INT ? 0 : 8)
- + ((GET_CODE (XEXP (x, 0)) == REG
+ + ((GET_CODE (XEXP (x, 0)) == REG
|| (GET_CODE (XEXP (x, 0)) == SUBREG
&& GET_CODE (SUBREG_REG (XEXP (x, 0))) == REG))
? 0 : 8));
return 1;
/* Fall through */
- case PLUS:
+ case PLUS:
if (GET_MODE_CLASS (mode) == MODE_FLOAT)
return (2 + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 8)
+ ((REG_OR_SUBREG_REG (XEXP (x, 1))
? 0 : 8));
/* Fall through */
- case AND: case XOR: case IOR:
+ case AND: case XOR: case IOR:
extra_cost = 0;
/* Normally the frame registers will be spilt into reg+const during
}
abort ();
- case CONST_INT:
- if (const_ok_for_arm (INTVAL (x)))
- return outer == SET ? 2 : -1;
- else if (outer == AND
- && const_ok_for_arm (~INTVAL (x)))
- return -1;
- else if ((outer == COMPARE
- || outer == PLUS || outer == MINUS)
- && const_ok_for_arm (-INTVAL (x)))
- return -1;
- else
+ case CONST_INT:
+ if (const_ok_for_arm (INTVAL (x)))
+ return outer == SET ? 2 : -1;
+ else if (outer == AND
+ && const_ok_for_arm (~INTVAL (x)))
+ return -1;
+ else if ((outer == COMPARE
+ || outer == PLUS || outer == MINUS)
+ && const_ok_for_arm (-INTVAL (x)))
+ return -1;
+ else
return 5;
-
- case CONST:
- case LABEL_REF:
- case SYMBOL_REF:
+
+ case CONST:
+ case LABEL_REF:
+ case SYMBOL_REF:
return 6;
-
- case CONST_DOUBLE:
+
+ case CONST_DOUBLE:
if (arm_const_double_rtx (x))
- return outer == SET ? 2 : -1;
- else if ((outer == COMPARE || outer == PLUS)
- && neg_const_double_rtx_ok_for_fpa (x))
- return -1;
+ return outer == SET ? 2 : -1;
+ else if ((outer == COMPARE || outer == PLUS)
+ && neg_const_double_rtx_ok_for_fpa (x))
+ return -1;
return 7;
-
+
default:
return 99;
}
switch (code)
{
case MEM:
- /* A memory access costs 1 insn if the mode is small, or the address is
+ /* A memory access costs 1 insn if the mode is small, or the address is
a single register, otherwise it costs one insn per word. */
if (REG_P (XEXP (x, 0)))
*total = COSTS_N_INSNS (1);
*total = COSTS_N_INSNS (ARM_NUM_REGS (mode));
return false;
- case PLUS:
+ case PLUS:
if (TARGET_HARD_FLOAT && GET_MODE_CLASS (mode) == MODE_FLOAT)
{
*total = COSTS_N_INSNS (1);
case HImode:
*total += COSTS_N_INSNS (arm_arch6 ? 1 : 2);
-
+
case SImode:
break;
return false;
- case CONST_INT:
- if (const_ok_for_arm (INTVAL (x)))
+ case CONST_INT:
+ if (const_ok_for_arm (INTVAL (x)))
*total = COSTS_N_INSNS (outer_code == SET ? 1 : 0);
else if (const_ok_for_arm (~INTVAL (x)))
*total = COSTS_N_INSNS (outer_code == AND ? 0 : 1);
else
*total = COSTS_N_INSNS (2);
return true;
-
- case CONST:
- case LABEL_REF:
- case SYMBOL_REF:
+
+ case CONST:
+ case LABEL_REF:
+ case SYMBOL_REF:
*total = COSTS_N_INSNS (2);
return true;
-
+
case CONST_DOUBLE:
*total = COSTS_N_INSNS (4);
return true;
*total = thumb_rtx_costs (x, code, outer_code);
return true;
}
-
+
switch (code)
{
case MULT:
int cost, const_ok = const_ok_for_arm (i);
int j, booth_unit_size;
- /* Tune as appropriate. */
+ /* Tune as appropriate. */
cost = const_ok ? 4 : 8;
booth_unit_size = 2;
for (j = 0; i && j < 32; j += booth_unit_size)
*total = 30 + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 4)
+ (REG_OR_SUBREG_REG (XEXP (x, 1)) ? 0 : 4);
return true;
-
+
default:
*total = arm_rtx_costs_1 (x, code, outer_code);
return true;
*total = thumb_rtx_costs (x, code, outer_code);
return true;
}
-
+
switch (code)
{
case MULT:
*total = 8;
return true;
}
-
+
if (GET_MODE_CLASS (mode) == MODE_FLOAT
|| mode == DImode)
int cost, const_ok = const_ok_for_arm (i);
int j, booth_unit_size;
- /* Tune as appropriate. */
+ /* Tune as appropriate. */
cost = const_ok ? 4 : 8;
booth_unit_size = 8;
for (j = 0; i && j < 32; j += booth_unit_size)
*total = 8 + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 4)
+ (REG_OR_SUBREG_REG (XEXP (x, 1)) ? 0 : 4);
return true;
-
+
default:
*total = arm_rtx_costs_1 (x, code, outer_code);
return true;
*total = thumb_rtx_costs (x, code, outer_code);
return true;
}
-
+
switch (code)
{
case MULT:
*total = 8;
return true;
}
-
+
if (GET_MODE_CLASS (mode) == MODE_FLOAT
|| mode == DImode)
*total = 8 + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 4)
+ (REG_OR_SUBREG_REG (XEXP (x, 1)) ? 0 : 4);
return true;
-
+
default:
*total = arm_rtx_costs_1 (x, code, outer_code);
return true;
enum machine_mode mode = GET_MODE (x);
int nonreg_cost;
int cost;
-
+
if (TARGET_THUMB)
{
switch (code)
case MULT:
*total = COSTS_N_INSNS (3);
return true;
-
+
default:
*total = thumb_rtx_costs (x, code, outer_code);
return true;
}
}
-
+
switch (code)
{
case MULT:
*total = 3;
return true;
}
-
+
if (GET_MODE_CLASS (mode) == MODE_FLOAT)
{
*total = cost + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : nonreg_cost)
+ (REG_OR_SUBREG_REG (XEXP (x, 1)) ? 0 : nonreg_cost);
return true;
-
+
default:
*total = arm_rtx_costs_1 (x, code, outer_code);
return true;
{
rtx shifted_operand;
int opno;
-
+
/* Get the shifted operand. */
extract_insn (insn);
shifted_operand = recog_data.operand[shift_opnum];
rtx src_mem = XEXP (SET_SRC (i_pat), 0);
/* This is a load after a store, there is no conflict if the load reads
from a cached area. Assume that loads from the stack, and from the
- constant pool are cached, and that others will miss. This is a
+ constant pool are cached, and that others will miss. This is a
hack. */
-
+
if ((GET_CODE (src_mem) == SYMBOL_REF && CONSTANT_POOL_ADDRESS_P (src_mem))
|| reg_mentioned_p (stack_pointer_rtx, src_mem)
|| reg_mentioned_p (frame_pointer_rtx, src_mem)
{
REAL_VALUE_TYPE r;
int i;
-
+
if (!fp_consts_inited)
init_fp_table ();
-
+
REAL_VALUE_FROM_CONST_DOUBLE (r, x);
if (REAL_VALUE_MINUS_ZERO (r))
return 0;
{
REAL_VALUE_TYPE r;
int i;
-
+
if (!fp_consts_inited)
init_fp_table ();
-
+
REAL_VALUE_FROM_CONST_DOUBLE (r, x);
r = REAL_VALUE_NEGATE (r);
if (REAL_VALUE_MINUS_ZERO (r))
return (GET_CODE (op) == REG
&& REGNO (op) < FIRST_PSEUDO_REGISTER);
}
-
+
/* An arm register operand. */
int
arm_general_register_operand (rtx op, enum machine_mode mode)
{
if (TARGET_THUMB)
return thumb_cmp_operand (op, mode);
-
+
return (s_register_operand (op, mode)
|| (GET_CODE (op) == CONST_INT
&& (const_ok_for_arm (INTVAL (op))
&& REGNO_POINTER_ALIGN (REGNO (reg)) >= 32);
}
-/* Similar to s_register_operand, but does not allow hard integer
+/* Similar to s_register_operand, but does not allow hard integer
registers. */
int
f_register_operand (rtx op, enum machine_mode mode)
if (GET_CODE (op) == SUBREG && CONSTANT_P (SUBREG_REG (op)))
return FALSE;
-
+
if (GET_CODE (op) == SUBREG)
op = SUBREG_REG (op);
-
+
switch (GET_CODE (op))
{
case CONST_DOUBLE:
if (mode == VOIDmode)
{
mode = GET_MODE (x);
-
+
if (GET_MODE_CLASS (mode) != MODE_CC)
return FALSE;
}
if (mode == VOIDmode)
{
mode = GET_MODE (x);
-
+
if (GET_MODE_CLASS (mode) != MODE_CC)
return FALSE;
}
return 1;
fmt = GET_RTX_FORMAT (GET_CODE (x));
-
+
for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
{
if (fmt[i] == 'E')
{
int val0 = 0, val1 = 0;
int reg0, reg1;
-
+
if (GET_CODE (XEXP (a, 0)) == PLUS)
{
reg0 = REGNO (XEXP (XEXP (a, 0), 0));
arith_adjacentmem pattern to output an overlong sequence. */
if (!const_ok_for_op (PLUS, val0) || !const_ok_for_op (PLUS, val1))
return 0;
-
+
return (reg0 == reg1) && ((val1 - val0) == 4 || (val0 - val1) == 4);
}
return 0;
: REGNO (SUBREG_REG (operands[i])));
order[0] = 0;
}
- else
+ else
{
if (base_reg != (int) REGNO (reg))
/* Not addressed from the same base register. */
scratch register (one of the result regs) and then doing a load
multiple actually becomes slower (and no smaller in code size).
That is the transformation
-
+
ldr rd1, [rbase + offset]
ldr rd2, [rbase + offset + 4]
-
+
to
-
+
add rd1, rbase, offset
ldmia rd1, {rd1, rd2}
-
+
produces worse code -- '3 cycles + any stalls on rd2' instead of
'2 cycles + any stalls on rd2'. On ARMs with only one cache
access per cycle, the first sequence could never complete in less
/* Can't do it without setting up the offset, only do this if it takes
no more than one insn. */
- return (const_ok_for_arm (unsorted_offsets[order[0]])
+ return (const_ok_for_arm (unsorted_offsets[order[0]])
|| const_ok_for_arm (-unsorted_offsets[order[0]])) ? 5 : 0;
}
abort ();
}
- sprintf (buf + strlen (buf), "%s%s, {%s%s", REGISTER_PREFIX,
+ sprintf (buf + strlen (buf), "%s%s, {%s%s", REGISTER_PREFIX,
reg_names[base_reg], REGISTER_PREFIX, reg_names[regs[0]]);
for (i = 1; i < nops; i++)
: REGNO (SUBREG_REG (operands[i])));
order[0] = 0;
}
- else
+ else
{
if (base_reg != (int) REGNO (reg))
/* Not addressed from the same base register. */
abort ();
}
- sprintf (buf + strlen (buf), "%s%s, {%s%s", REGISTER_PREFIX,
+ sprintf (buf + strlen (buf), "%s%s, {%s%s", REGISTER_PREFIX,
reg_names[base_reg], REGISTER_PREFIX, reg_names[regs[0]]);
for (i = 1; i < nops; i++)
if (arm_tune_xscale && count <= 2 && ! optimize_size)
{
rtx seq;
-
+
start_sequence ();
-
+
for (i = 0; i < count; i++)
{
addr = plus_constant (from, i * 4 * sign);
seq = get_insns ();
end_sequence ();
-
+
return seq;
}
if (arm_tune_xscale && count <= 2 && ! optimize_size)
{
rtx seq;
-
+
start_sequence ();
-
+
for (i = 0; i < count; i++)
{
addr = plus_constant (to, i * 4 * sign);
seq = get_insns ();
end_sequence ();
-
+
return seq;
}
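
  /* (The guard above reflects that on XScale an ldm/stm of only one or
     two registers is no faster than the discrete loads or stores, so
     unless we are optimizing for size we emit the simple sequence and
     let the scheduler do its work.)  */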
emit_insn (arm_gen_load_multiple (0, 4, src, TRUE, TRUE,
srcbase, &srcoffset));
else
- emit_insn (arm_gen_load_multiple (0, in_words_to_go, src, TRUE,
+ emit_insn (arm_gen_load_multiple (0, in_words_to_go, src, TRUE,
FALSE, srcbase, &srcoffset));
if (out_words_to_go)
dstbase, &dstoffset));
else if (out_words_to_go != 1)
emit_insn (arm_gen_store_multiple (0, out_words_to_go,
- dst, TRUE,
+ dst, TRUE,
(last_bytes == 0
? FALSE : TRUE),
dstbase, &dstoffset));
if (out_words_to_go)
{
rtx sreg;
-
+
mem = adjust_automodify_address (srcbase, SImode, src, srcoffset);
sreg = copy_to_reg (mem);
mem = adjust_automodify_address (dstbase, SImode, dst, dstoffset);
emit_move_insn (mem, sreg);
in_words_to_go--;
-
+
if (in_words_to_go) /* Sanity check */
abort ();
}
emit_insn (gen_lshrsi3 (tmp, part_bytes_reg,
GEN_INT (8 * (4 - last_bytes))));
part_bytes_reg = tmp;
-
+
while (last_bytes)
{
mem = adjust_automodify_address (dstbase, QImode,
part_bytes_reg = tmp;
}
}
-
+
}
else
{
dstoffset += 2;
}
}
-
+
if (last_bytes)
{
mem = adjust_automodify_address (dstbase, QImode, dst, dstoffset);
/* Select a dominance comparison mode if possible for a test of the general
form (OP (COND_OR (X) (Y)) (const_int 0)). We support three forms.
- COND_OR == DOM_CC_X_AND_Y => (X && Y)
+ COND_OR == DOM_CC_X_AND_Y => (X && Y)
COND_OR == DOM_CC_NX_OR_Y => ((! X) || Y)
- COND_OR == DOM_CC_X_OR_Y => (X || Y)
+ COND_OR == DOM_CC_X_OR_Y => (X || Y)
In all cases OP will be either EQ or NE, but we don't need to know which
- here. If we are unable to support a dominance comparison we return
+ here. If we are unable to support a dominance comparison we return
CC mode. This will then fail to match for the RTL expressions that
generate this call. */
enum machine_mode
/* If the comparisons are not equal, and one doesn't dominate the other,
then we can't do this. */
- if (cond1 != cond2
+ if (cond1 != cond2
&& !comparison_dominates_p (cond1, cond2)
&& (swapped = 1, !comparison_dominates_p (cond2, cond1)))
return CCmode;
if (cond2 == NE)
return CC_DNEmode;
break;
-
+
case LTU:
if (cond2 == LTU || cond_or == DOM_CC_X_AND_Y)
return CC_DLTUmode;
abort ();
}
}
-
+
/* A compare with a shifted operand. Because of canonicalization, the
comparison will have to be swapped when we emit the assembler. */
if (GET_MODE (y) == SImode && GET_CODE (y) == REG
|| GET_CODE (x) == ROTATERT))
return CC_SWPmode;
- /* This is a special case that is used by combine to allow a
+ /* This is a special case that is used by combine to allow a
comparison of a shifted byte load to be split into a zero-extend
followed by a comparison of the shifted integer (only valid for
equalities and unsigned inequalities). */
|| XEXP (x, 2) == const1_rtx)
&& COMPARISON_P (XEXP (x, 0))
&& COMPARISON_P (XEXP (x, 1)))
- return arm_select_dominance_cc_mode (XEXP (x, 0), XEXP (x, 1),
+ return arm_select_dominance_cc_mode (XEXP (x, 0), XEXP (x, 1),
INTVAL (XEXP (x, 2)));
/* Alternate canonicalizations of the above. These are somewhat cleaner. */
plus_constant (base,
offset))));
emit_insn (gen_zero_extendqisi2 (gen_rtx_SUBREG (SImode, operands[0], 0),
- gen_rtx_MEM (QImode,
+ gen_rtx_MEM (QImode,
plus_constant (base,
offset + 1))));
if (!BYTES_BIG_ENDIAN)
emit_insn (gen_rtx_SET (VOIDmode, gen_rtx_SUBREG (SImode, operands[0], 0),
- gen_rtx_IOR (SImode,
+ gen_rtx_IOR (SImode,
gen_rtx_ASHIFT
(SImode,
gen_rtx_SUBREG (SImode, operands[0], 0),
scratch)));
else
emit_insn (gen_rtx_SET (VOIDmode, gen_rtx_SUBREG (SImode, operands[0], 0),
- gen_rtx_IOR (SImode,
+ gen_rtx_IOR (SImode,
gen_rtx_ASHIFT (SImode, scratch,
GEN_INT (8)),
gen_rtx_SUBREG (SImode, operands[0],
if (BYTES_BIG_ENDIAN)
{
- emit_insn (gen_movqi (gen_rtx_MEM (QImode,
+ emit_insn (gen_movqi (gen_rtx_MEM (QImode,
plus_constant (base, offset + 1)),
gen_lowpart (QImode, outval)));
emit_insn (gen_lshrsi3 (scratch,
is_jump_table (rtx insn)
{
rtx table;
-
+
if (GET_CODE (insn) == JUMP_INSN
&& JUMP_LABEL (insn) != NULL
&& ((table = next_real_insn (JUMP_LABEL (insn)))
mp->next = max_mp;
mp->prev = max_mp->prev;
max_mp->prev = mp;
-
+
if (mp->prev != NULL)
mp->prev->next = mp;
else
Mnode * max_mp = NULL;
HOST_WIDE_INT max_address = fix->address + fix->forwards;
Mnode * mp;
-
+
/* If this fix's address is greater than the address of the first
entry, then we can't put the fix in this pool. We subtract the
size of the current fix to ensure that if the table is fully
}
return min_mp;
-}
+}
/* Add a constant to the minipool for a backward reference. Returns the
- node added or NULL if the constant will not fit in this pool.
+ node added or NULL if the constant will not fit in this pool.
Note that the code for insertion for a backwards reference can be
somewhat confusing because the calculated offsets for each fix do
&& rtx_equal_p (fix->value, mp->value)
/* Check that there is enough slack to move this entry to the
end of the table (this is conservative). */
- && (mp->max_address
- > (minipool_barrier->address
+ && (mp->max_address
+ > (minipool_barrier->address
+ minipool_vector_tail->offset
+ minipool_vector_tail->fix_size)))
{
mp->next = min_mp->next;
mp->prev = min_mp;
min_mp->next = mp;
-
+
if (mp->next != NULL)
mp->next->prev = mp;
else
for (mp = minipool_vector_head; mp != NULL; mp = mp->next)
{
mp->offset = offset;
-
+
if (mp->refcount > 0)
offset += mp->fix_size;
}
{
if (dump_file)
{
- fprintf (dump_file,
+ fprintf (dump_file,
";; Offset %u, min %ld, max %ld ",
(unsigned) mp->offset, (unsigned long) mp->min_address,
(unsigned long) mp->max_address);
case CODE_LABEL:
/* It will always be better to place the table before the label, rather
than after it. */
- return 50;
+ return 50;
case INSN:
case CALL_INSN:
}
new_cost = arm_barrier_cost (from);
-
+
if (count < max_count && new_cost <= selected_cost)
{
selected = from;
fprintf (dump_file,
";; %smode fixup for i%d; addr %lu, range (%ld,%ld): ",
GET_MODE_NAME (mode),
- INSN_UID (insn), (unsigned long) address,
+ INSN_UID (insn), (unsigned long) address,
-1 * (long)fix->backwards, (long)fix->forwards);
arm_print_value (dump_file, fix->value);
fprintf (dump_file, "\n");
/* Add it to the chain of fixes. */
fix->next = NULL;
-
+
if (minipool_fix_head != NULL)
minipool_fix_tail->next = fix;
else
/* Casting the address of something to a mode narrower
than a word can cause avoid_constant_pool_reference()
to return the pool reference itself. That's no good to
- us here. Lets just hope that we can use the
+ us here. Let's just hope that we can use the
constant pool value directly. */
if (op == cop)
cop = get_pool_constant (XEXP (op, 0));
}
fix = minipool_fix_head;
-
+
/* Now scan the fixups and perform the required changes. */
while (fix)
{
the next mini-pool. */
if (last_barrier != NULL)
{
- /* Reduce the refcount for those fixes that won't go into this
+ /* Reduce the refcount for those fixes that won't go into this
pool after all. */
for (fdel = last_barrier->next;
fdel && fdel != ftmp;
if (GET_CODE (this_fix->insn) != BARRIER)
{
rtx addr
- = plus_constant (gen_rtx_LABEL_REF (VOIDmode,
+ = plus_constant (gen_rtx_LABEL_REF (VOIDmode,
minipool_vector_label),
this_fix->minipool->offset);
*this_fix->loc = gen_rtx_MEM (this_fix->mode, addr);
{
REAL_VALUE_TYPE r;
int i;
-
+
if (!fp_consts_inited)
init_fp_table ();
-
+
REAL_VALUE_FROM_CONST_DOUBLE (r, x);
for (i = 0; i < 8; i++)
if (REAL_VALUES_EQUAL (r, values_fp[i]))
fputc ('\t', stream);
asm_fprintf (stream, instr, reg);
fputs (", {", stream);
-
+
for (i = 0; i <= LAST_ARM_REGNUM; i++)
if (mask & (1 << i))
{
if (not_first)
fprintf (stream, ", ");
-
+
asm_fprintf (stream, "%r", i);
not_first = TRUE;
}
operands[0] = gen_rtx_REG (SImode, IP_REGNUM);
output_asm_insn ("mov%?\t%0, %|lr", operands);
}
-
+
output_asm_insn ("mov%?\t%|lr, %|pc", operands);
-
+
if (TARGET_INTERWORK || arm_arch4t)
output_asm_insn ("bx%?\t%0", operands);
else
output_asm_insn ("mov%?\t%|pc, %0", operands);
-
+
return "";
}
ops[0] = gen_rtx_REG (SImode, arm_reg0);
ops[1] = gen_rtx_REG (SImode, 1 + arm_reg0);
ops[2] = gen_rtx_REG (SImode, 2 + arm_reg0);
-
+
output_asm_insn ("stm%?fd\t%|sp!, {%0, %1, %2}", ops);
output_asm_insn ("ldf%?e\t%0, [%|sp], #12", operands);
-
+
return "";
}
if (arm_reg0 == IP_REGNUM)
abort ();
-
+
ops[0] = gen_rtx_REG (SImode, arm_reg0);
ops[1] = gen_rtx_REG (SImode, 1 + arm_reg0);
output_asm_insn ("stm%?fd\t%|sp!, {%0, %1}", ops);
int reg0 = REGNO (operands[0]);
otherops[0] = gen_rtx_REG (SImode, 1 + reg0);
-
+
if (code1 == REG)
{
int reg1 = REGNO (operands[1]);
operands[1] = GEN_INT (hint);
break;
-
+
default:
abort ();
}
otherops[1] = GEN_INT (CONST_DOUBLE_HIGH (operands[1]));
operands[1] = GEN_INT (CONST_DOUBLE_LOW (operands[1]));
}
-
+
output_mov_immediate (operands);
output_mov_immediate (otherops);
}
avoid a conflict. */
otherops[1] = XEXP (XEXP (operands[1], 0), 1);
otherops[2] = XEXP (XEXP (operands[1], 0), 0);
-
+
}
/* If both registers conflict, it will usually
have been fixed by a splitter. */
}
else
output_asm_insn ("sub%?\t%0, %1, %2", otherops);
-
+
return "ldm%?ia\t%0, %M0";
}
else
}
}
}
-
+
return "";
}
shift. >=32 is not a valid shift for "asl", so we must try and
output a shift that produces the correct arithmetical result.
Using lsr #32 is identical except for the fact that the carry bit
- is not set correctly if we set the flags; but we never use the
+ is not set correctly if we set the flags; but we never use the
carry bit from such an operation, so we can ignore that. */
if (code == ROTATERT)
/* Rotate is just modulo 32. */
/* Shifts of 0 are no-ops. */
if (*amountp == 0)
return NULL;
- }
+ }
return mnem;
}
int len_so_far = 0;
fputs ("\t.ascii\t\"", stream);
-
+
for (i = 0; i < len; i++)
{
int c = p[i];
switch (c)
{
- case TARGET_TAB:
+ case TARGET_TAB:
fputs ("\\t", stream);
- len_so_far += 2;
+ len_so_far += 2;
break;
-
+
case TARGET_FF:
fputs ("\\f", stream);
len_so_far += 2;
break;
-
+
case TARGET_BS:
fputs ("\\b", stream);
len_so_far += 2;
break;
-
+
case TARGET_CR:
fputs ("\\r", stream);
len_so_far += 2;
break;
-
+
case TARGET_NEWLINE:
fputs ("\\n", stream);
c = p [i + 1];
else
len_so_far += 2;
break;
-
+
case '\"':
case '\\':
putc ('\\', stream);
max_reg = 7;
else
max_reg = 12;
-
+
for (reg = 0; reg <= max_reg; reg++)
if (regs_ever_live[reg]
|| (! current_function_is_leaf && call_used_regs [reg]))
/* If we aren't loading the PIC register,
don't stack it even though it may be live. */
if (flag_pic
- && ! TARGET_SINGLE_PIC_BASE
+ && ! TARGET_SINGLE_PIC_BASE
&& regs_ever_live[PIC_OFFSET_TABLE_REGNUM])
save_reg_mask |= 1 << PIC_OFFSET_TABLE_REGNUM;
}
if (current_function_calls_eh_return)
{
unsigned int i;
-
+
for (i = 0; ; i++)
{
reg = EH_RETURN_DATA_REGNO (i);
if (really_return)
{
rtx ops[2];
-
+
/* Otherwise, trap an attempted return by aborting. */
ops[0] = operand;
- ops[1] = gen_rtx_SYMBOL_REF (Pmode, NEED_PLT_RELOC ? "abort(PLT)"
+ ops[1] = gen_rtx_SYMBOL_REF (Pmode, NEED_PLT_RELOC ? "abort(PLT)"
: "abort");
assemble_external_libcall (ops[1]);
output_asm_insn (reverse ? "bl%D0\t%a1" : "bl%d0\t%a1", ops);
}
-
+
return "";
}
{
const char * return_reg;
- /* If we do not have any special requirements for function exit
- (eg interworking, or ISR) then we can load the return address
+ /* If we do not have any special requirements for function exit
+ (e.g. interworking or an ISR) then we can load the return address
directly into the PC. Otherwise we must load it into LR. */
if (really_return
&& ! TARGET_INTERWORK)
}
if (reg <= LAST_ARM_REGNUM
&& (reg != LR_REGNUM
- || ! really_return
+ || ! really_return
|| ! IS_INTERRUPT (func_type)))
{
- sprintf (instr, "ldr%s\t%%|%s, [%%|sp], #4", conditional,
+ sprintf (instr, "ldr%s\t%%|%s, [%%|sp], #4", conditional,
(reg == LR_REGNUM) ? return_reg : reg_names[reg]);
}
else
memcpy (p + 2, reg_names[reg], l);
p += l + 2;
}
-
+
if (live_regs_mask & (1 << LR_REGNUM))
{
sprintf (p, "%s%%|%s}", first ? "" : ", ", return_reg);
default:
/* Use bx if it's available. */
if (arm_arch5 || arm_arch4t)
- sprintf (instr, "bx%s\t%%|lr", conditional);
+ sprintf (instr, "bx%s\t%%|lr", conditional);
else
sprintf (instr, "mov%s\t%%|pc, %%|lr", conditional);
break;
length = strlen (name) + 1;
alignlength = ROUND_UP_WORD (length);
-
+
ASM_OUTPUT_ASCII (stream, name, length);
ASM_OUTPUT_ALIGN (stream, 2);
x = GEN_INT ((unsigned HOST_WIDE_INT) 0xff000000 + alignlength);
thumb_output_function_prologue (f, frame_size);
return;
}
-
+
/* Sanity check. */
if (arm_ccfsm_state || arm_target_insn)
abort ();
func_type = arm_current_func_type ();
-
+
switch ((int) ARM_FUNC_TYPE (func_type))
{
default:
asm_fprintf (f, "\t%@ ARM Exception Handler.\n");
break;
}
-
+
if (IS_NAKED (func_type))
asm_fprintf (f, "\t%@ Naked Function: prologue and epilogue provided by programmer.\n");
if (IS_NESTED (func_type))
asm_fprintf (f, "\t%@ Nested: function declared inside another function.\n");
-
+
asm_fprintf (f, "\t%@ args = %d, pretend = %d, frame = %wd\n",
current_function_args_size,
current_function_pretend_args_size, frame_size);
asm_fprintf (f, "\tmov\t%r, %r\n", IP_REGNUM, PIC_OFFSET_TABLE_REGNUM);
#endif
- return_used_this_function = 0;
+ return_used_this_function = 0;
}
const char *
int reg;
unsigned long saved_regs_mask;
unsigned long func_type;
- /* Floats_offset is the offset from the "virtual" frame. In an APCS
+ /* Floats_offset is the offset from the "virtual" frame. In an APCS
frame that is $fp + 4 for a non-variadic function. */
int floats_offset = 0;
rtx operands[3];
if (IS_VOLATILE (func_type) && TARGET_ABORT_NORETURN)
{
rtx op;
-
+
/* A volatile function should never return. Call abort. */
op = gen_rtx_SYMBOL_REF (Pmode, NEED_PLT_RELOC ? "abort(PLT)" : "abort");
assemble_external_libcall (op);
output_asm_insn ("bl\t%a0", &op);
-
+
return "";
}
/* If we are throwing an exception, then we really must
be doing a return, so we can't tail-call. */
abort ();
-
+
offsets = arm_get_frame_offsets ();
saved_regs_mask = arm_compute_save_reg_mask ();
for (reg = 0; reg <= LAST_ARM_REGNUM; reg++)
if (saved_regs_mask & (1 << reg))
floats_offset += 4;
-
+
if (frame_pointer_needed)
{
/* This variable is for the Virtual Frame Pointer, not VFP regs. */
if (regs_ever_live[reg] && !call_used_regs[reg])
{
floats_offset += 12;
- asm_fprintf (f, "\tldfe\t%r, [%r, #-%d]\n",
+ asm_fprintf (f, "\tldfe\t%r, [%r, #-%d]\n",
reg, FP_REGNUM, floats_offset - vfp_offset);
}
}
if (regs_ever_live[reg] && !call_used_regs[reg])
{
floats_offset += 12;
-
+
/* We can't unstack more than four registers at once. */
if (start_reg - reg == 3)
{
We can ignore floats_offset since that was already included in
the live_regs_mask. */
lrm_count += (lrm_count % 2 ? 2 : 1);
-
+
for (reg = LAST_IWMMXT_REGNUM; reg >= FIRST_IWMMXT_REGNUM; reg--)
if (regs_ever_live[reg] && !call_used_regs[reg])
{
- asm_fprintf (f, "\twldrd\t%r, [%r, #-%d]\n",
+ asm_fprintf (f, "\twldrd\t%r, [%r, #-%d]\n",
reg, FP_REGNUM, lrm_count * 4);
- lrm_count += 2;
+ lrm_count += 2;
}
}
asm_fprintf (f, "\tlfmfd\t%r, %d, [%r]!\n",
start_reg, reg - start_reg,
SP_REGNUM);
-
+
start_reg = reg + 1;
}
}
/* Stack adjustment for exception handler. */
if (current_function_calls_eh_return)
- asm_fprintf (f, "\tadd\t%r, %r, %r\n", SP_REGNUM, SP_REGNUM,
+ asm_fprintf (f, "\tadd\t%r, %r, %r\n", SP_REGNUM, SP_REGNUM,
ARM_EH_STACKADJ_REGNUM);
/* Generate the return instruction. */
by the push_multi pattern in the arm.md file. The insn looks
something like this:
- (parallel [
+ (parallel [
(set (mem:BLK (pre_dec:BLK (reg:SI sp)))
(unspec:BLK [(reg:SI r4)] UNSPEC_PUSH_MULT))
(use (reg:SI 11 fp))
stack decrement per instruction. The RTL we generate for the note looks
something like this:
- (sequence [
+ (sequence [
(set (reg:SI sp) (plus:SI (reg:SI sp) (const_int -20)))
(set (mem:SI (reg:SI sp)) (reg:SI r4))
(set (mem:SI (plus:SI (reg:SI sp) (const_int 4))) (reg:SI fp))
This sequence is used both by the code to support stack unwinding for
exceptions handlers and the code to generate dwarf2 frame debugging. */
-
+
par = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (num_regs));
dwarf = gen_rtx_SEQUENCE (VOIDmode, rtvec_alloc (num_dwarf_regs + 1));
dwarf_par_index = 1;
}
par = emit_insn (par);
-
+
tmp = gen_rtx_SET (SImode,
stack_pointer_rtx,
gen_rtx_PLUS (SImode,
GEN_INT (-4 * num_regs)));
RTX_FRAME_RELATED_P (tmp) = 1;
XVECEXP (dwarf, 0, 0) = tmp;
-
+
REG_NOTES (par) = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR, dwarf,
REG_NOTES (par));
return par;
reg = gen_rtx_REG (XFmode, base_reg++);
XVECEXP (par, 0, 0)
- = gen_rtx_SET (VOIDmode,
+ = gen_rtx_SET (VOIDmode,
gen_rtx_MEM (BLKmode,
gen_rtx_PRE_DEC (BLKmode, stack_pointer_rtx)),
gen_rtx_UNSPEC (BLKmode,
gen_rtvec (1, reg),
UNSPEC_PUSH_MULT));
- tmp = gen_rtx_SET (VOIDmode,
+ tmp = gen_rtx_SET (VOIDmode,
gen_rtx_MEM (XFmode, stack_pointer_rtx), reg);
RTX_FRAME_RELATED_P (tmp) = 1;
- XVECEXP (dwarf, 0, 1) = tmp;
-
+ XVECEXP (dwarf, 0, 1) = tmp;
+
for (i = 1; i < count; i++)
{
reg = gen_rtx_REG (XFmode, base_reg++);
XVECEXP (par, 0, i) = gen_rtx_USE (VOIDmode, reg);
- tmp = gen_rtx_SET (VOIDmode,
+ tmp = gen_rtx_SET (VOIDmode,
gen_rtx_MEM (XFmode,
plus_constant (stack_pointer_rtx,
i * 12)),
reg);
RTX_FRAME_RELATED_P (tmp) = 1;
- XVECEXP (dwarf, 0, i + 1) = tmp;
+ XVECEXP (dwarf, 0, i + 1) = tmp;
}
tmp = gen_rtx_SET (VOIDmode,
HOST_WIDE_INT frame_size;
offsets = &cfun->machine->stack_offsets;
-
+
/* We need to know if we are a leaf function. Unfortunately, it
is possible to be called after start_sequence has been called,
which causes get_insns to return the insns for the sequence,
/* Make a copy of c_f_p_a_s as we may need to modify it locally. */
args_to_push = current_function_pretend_args_size;
-
+
/* Compute which register we will have to save onto the stack. */
live_regs_mask = arm_compute_save_reg_mask ();
To get around this need to find somewhere to store IP
whilst the frame is being created. We try the following
places in order:
-
+
1. The last argument register.
2. A slot on the stack above the frame. (This only
works if the function is not a varargs function).
((0xf0 >> (args_to_push / 4)) & 0xf);
else
insn = emit_insn
- (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx,
+ (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx,
GEN_INT (- args_to_push)));
RTX_FRAME_RELATED_P (insn) = 1;
}
else
insn = gen_movsi (ip_rtx, stack_pointer_rtx);
-
+
insn = emit_insn (insn);
RTX_FRAME_RELATED_P (insn) = 1;
}
((0xf0 >> (args_to_push / 4)) & 0xf);
else
insn = emit_insn
- (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx,
+ (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx,
GEN_INT (- args_to_push)));
RTX_FRAME_RELATED_P (insn) = 1;
}
if ((func_type == ARM_FT_ISR || func_type == ARM_FT_FIQ)
&& (live_regs_mask & (1 << LR_REGNUM)) != 0
&& ! frame_pointer_needed)
- emit_insn (gen_rtx_SET (SImode,
+ emit_insn (gen_rtx_SET (SImode,
gen_rtx_REG (SImode, LR_REGNUM),
gen_rtx_PLUS (SImode,
gen_rtx_REG (SImode, LR_REGNUM),
insn = GEN_INT (-(4 + args_to_push + fp_offset));
insn = emit_insn (gen_addsi3 (hard_frame_pointer_rtx, ip_rtx, insn));
RTX_FRAME_RELATED_P (insn) = 1;
-
+
if (IS_NESTED (func_type))
{
/* Recover the static chain register. */
case '_':
fputs (user_label_prefix, stream);
return;
-
+
case '|':
fputs (REGISTER_PREFIX, stream);
return;
return;
/* An explanation of the 'Q', 'R' and 'H' register operands:
-
+
In a pair of registers containing a DI or DF value the 'Q'
operand returns the register number of the register containing
the least significant part of the value. The 'R' operand returns
the register number of the register containing the most
significant part of the value.
-
+
The 'H' operand returns the higher of the two register numbers.
On a run where WORDS_BIG_ENDIAN is true the 'H' operand is the
same as the 'Q' operand, since the most significant part of the
value is held in the lower number register. The reverse is true
on systems where WORDS_BIG_ENDIAN is false.
-
+
The purpose of these operands is to distinguish between cases
where the endian-ness of the values is important (for example
when they are added together), and cases where the endian-ness
return;
case 'm':
- asm_fprintf (stream, "%r",
+ asm_fprintf (stream, "%r",
GET_CODE (XEXP (x, 0)) == REG
? REGNO (XEXP (x, 0)) : REGNO (XEXP (XEXP (x, 0), 0)));
return;
/* CONST_TRUE_RTX means always -- that's the default. */
if (x == const_true_rtx)
return;
-
+
fputs (arm_condition_codes[get_arm_condition_code (x)],
stream);
return;
"wCGR0", "wCGR1", "wCGR2", "wCGR3",
"wC12", "wC13", "wC14", "wC15"
};
-
+
fprintf (stream, wc_reg_names [INTVAL (x)]);
}
return;
return true;
}
- if (VECTOR_MODE_SUPPORTED_P (GET_MODE (x)))
+ if (arm_vector_mode_supported_p (GET_MODE (x)))
{
int i, units;
case GEU: return ARM_CC;
default: abort ();
}
-
+
case CCmode:
switch (comp_code)
{
means that we have to grub around within the jump expression to find
out what the conditions are when the jump isn't taken. */
int jump_clobbers = 0;
-
+
/* If we start with a return insn, we only succeed if we find another one. */
int seeking_return = 0;
-
+
/* START_INSN will hold the insn from where we start looking. This is the
first insn after the following code_label if REVERSE is true. */
rtx start_insn = insn;
if (GET_CODE (insn) != JUMP_INSN)
return;
- /* This jump might be paralleled with a clobber of the condition codes
+ /* This jump might be paralleled with a clobber of the condition codes;
 the jump should always come first. */
if (GET_CODE (body) == PARALLEL && XVECLEN (body, 0) > 0)
body = XVECEXP (body, 0, 0);
int then_not_else = TRUE;
rtx this_insn = start_insn, label = 0;
- /* If the jump cannot be done with one instruction, we cannot
+ /* If the jump cannot be done with one instruction, we cannot
conditionally execute the instruction in the inverse case. */
if (get_attr_conds (insn) == CONDS_JUMP_CLOB)
{
jump_clobbers = 1;
return;
}
-
+
/* Register the insn jumped to. */
if (reverse)
{
case BARRIER:
/* Succeed if the following insn is the target label.
- Otherwise fail.
- If return insns are used then the last insn in a function
+ Otherwise fail.
+ If return insns are used then the last insn in a function
will be a barrier. */
this_insn = next_nonnote_insn (this_insn);
if (this_insn && this_insn == label)
{
if (reverse)
abort ();
- arm_current_cc =
+ arm_current_cc =
get_arm_condition_code (XEXP (XEXP (XEXP (SET_SRC (body),
0), 0), 1));
if (GET_CODE (XEXP (XEXP (SET_SRC (body), 0), 0)) == AND)
if (reverse || then_not_else)
arm_current_cc = ARM_INVERSE_CONDITION_CODE (arm_current_cc);
}
-
+
/* Restore recog_data (getting the attributes of other insns can
destroy this array, but final.c assumes that it remains intact
across this call; since the insn has been recognized already we
{
if (GET_MODE_CLASS (mode) == MODE_CC)
return regno == CC_REGNUM || regno == VFPCC_REGNUM;
-
+
if (TARGET_THUMB)
/* For the Thumb we only allow values bigger than SImode in
registers 0 - 6, so that there is always a second low
|| regno == FRAME_POINTER_REGNUM
|| regno == ARG_POINTER_REGNUM)
return GENERAL_REGS;
-
+
if (regno == CC_REGNUM || regno == VFPCC_REGNUM)
return NO_REGS;
an offset of 0 is correct. */
if (REGNO (addr) == (unsigned) HARD_FRAME_POINTER_REGNUM)
return 0;
-
+
/* If we are using the stack pointer to point at the
argument, then an offset of 0 is correct. */
if ((TARGET_THUMB || !frame_pointer_needed)
&& REGNO (addr) == SP_REGNUM)
return 0;
-
+
/* Oh dear. The argument is pointed to by a register rather
than being held in a register, or being stored at a known
offset from the frame pointer. Since GDB only understands
looking to see where this register gets its value. If the
register is initialized from the frame pointer plus an offset
then we are in luck and we can continue, otherwise we give up.
-
+
This code is exercised by producing debugging information
for a function with arguments like this:
-
+
double func (double a, double b, int c, double d) {return d;}
-
+
Without this code the stab for parameter 'd' will be set to
an offset of 0 from the frame pointer, rather than 8. */
a constant integer
then... */
-
+
for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
{
- if ( GET_CODE (insn) == INSN
+ if ( GET_CODE (insn) == INSN
&& GET_CODE (PATTERN (insn)) == SET
&& REGNO (XEXP (PATTERN (insn), 0)) == REGNO (addr)
&& GET_CODE (XEXP (PATTERN (insn), 1)) == PLUS
)
{
value = INTVAL (XEXP (XEXP (PATTERN (insn), 1), 1));
-
+
break;
}
}
-
+
if (value == 0)
{
debug_rtx (addr);
#define IWMMXT_BUILTIN2(code, builtin) \
{ FL_IWMMXT, CODE_FOR_##code, NULL, ARM_BUILTIN_##builtin, 0, 0 },
-
+
IWMMXT_BUILTIN2 (iwmmxt_wpackhss, WPACKHSS)
IWMMXT_BUILTIN2 (iwmmxt_wpackwss, WPACKWSS)
IWMMXT_BUILTIN2 (iwmmxt_wpackdss, WPACKDSS)
return 0;
emit_insn (pat);
return target;
-
+
case ARM_BUILTIN_WZERO:
target = gen_reg_rtx (DImode);
emit_insn (gen_iwmmxt_clrdi (target));
for (; block; block = BLOCK_CHAIN (block))
{
tree sym;
-
+
if (!TREE_USED (block))
continue;
SET_DECL_RTL (sym, new);
}
-
+
replace_symbols_in_block (BLOCK_SUBBLOCKS (block), orig, new);
}
}
/* If we have any popping registers left over, remove them. */
if (available > 0)
regs_available_for_popping &= ~available;
-
+
/* Otherwise if we need another popping register we can use
the fourth argument register. */
else if (pops_needed)
/* Register a4 is being used to hold part of the return value,
but we have dire need of a free, low register. */
restore_a4 = TRUE;
-
+
asm_fprintf (f, "\tmov\t%r, %r\n",IP_REGNUM, LAST_ARG_REGNUM);
}
-
+
if (reg_containing_return_addr != LAST_ARG_REGNUM)
{
/* The fourth argument register is available. */
regs_available_for_popping |= 1 << LAST_ARG_REGNUM;
-
+
--pops_needed;
}
}
{
/* The return address was popped into the lowest numbered register. */
regs_to_pop &= ~(1 << LR_REGNUM);
-
+
reg_containing_return_addr =
number_of_first_bit_set (regs_available_for_popping);
if (regs_available_for_popping)
{
int frame_pointer;
-
+
/* Work out which register currently contains the frame pointer. */
frame_pointer = number_of_first_bit_set (regs_available_for_popping);
/* (Temporarily) remove it from the mask of popped registers. */
regs_available_for_popping &= ~(1 << frame_pointer);
regs_to_pop &= ~(1 << ARM_HARD_FRAME_POINTER_REGNUM);
-
+
if (regs_available_for_popping)
{
int stack_pointer;
-
+
/* We popped the stack pointer as well,
find the register that contains it. */
stack_pointer = number_of_first_bit_set (regs_available_for_popping);
/* Move it into the stack register. */
asm_fprintf (f, "\tmov\t%r, %r\n", SP_REGNUM, stack_pointer);
-
+
/* At this point we have popped all necessary registers, so
do not worry about restoring regs_available_for_popping
to its correct value:
regs_available_for_popping |= (1 << frame_pointer);
}
}
-
+
/* If we still have registers left on the stack, but we no longer have
any registers into which we can pop them, then we must move the return
address into the link register and make available the register that
if (regs_available_for_popping == 0 && pops_needed > 0)
{
regs_available_for_popping |= 1 << reg_containing_return_addr;
-
+
asm_fprintf (f, "\tmov\t%r, %r\n", LR_REGNUM,
reg_containing_return_addr);
-
+
reg_containing_return_addr = LR_REGNUM;
}
{
int popped_into;
int move_to;
-
+
thumb_pushpop (f, regs_available_for_popping, FALSE, NULL,
regs_available_for_popping);
--pops_needed;
}
-
+
/* If we still have not popped everything then we must have only
had one register available to us and we are now popping the SP. */
if (pops_needed > 0)
{
int popped_into;
-
+
thumb_pushpop (f, regs_available_for_popping, FALSE, NULL,
regs_available_for_popping);
asm_fprintf (f, "\tmov\t%r, %r\n", LR_REGNUM, LAST_ARG_REGNUM);
reg_containing_return_addr = LR_REGNUM;
}
-
+
asm_fprintf (f, "\tmov\t%r, %r\n", LAST_ARG_REGNUM, IP_REGNUM);
}
thumb_exit (f, -1);
return;
}
-
+
fprintf (f, "\t%s\t{", push ? "push" : "pop");
/* Look at the low registers first. */
if (lo_mask & 1)
{
asm_fprintf (f, "%r", regno);
-
+
if ((lo_mask & ~1) != 0)
fprintf (f, ", ");
pushed_words++;
}
}
-
+
if (push && (mask & (1 << LR_REGNUM)))
{
/* Catch pushing the LR. */
if (mask & 0xFF)
fprintf (f, ", ");
-
+
asm_fprintf (f, "%r", LR_REGNUM);
pushed_words++;
{
if (mask & 0xFF)
fprintf (f, ", ");
-
+
asm_fprintf (f, "%r", PC_REGNUM);
}
}
-
+
fprintf (f, "}\n");
if (push && pushed_words && dwarf2out_do_frame ())
if (val == 0) /* XXX */
return 0;
-
+
for (i = 0; i < 25; i++)
if ((val & (mask << i)) == val)
return 1;
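
  /* I.e. accept any constant that is an 8-bit value shifted left by
     0 .. 24 bits (assuming mask is 0xFF here, as in
     thumb_shiftable_const): 0x000FF000 == 0xFF << 12 passes, while
     0x00100001 has set bits more than 8 bits apart and fails for
     every i.  */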
/* This test is only important for leaf functions. */
/* assert (!leaf_function_p ()); */
-
+
/* If we have already decided that far jumps may be used,
do not bother checking again, and always return true even if
it turns out that they are not being used. Once we have made
return 1;
}
}
-
+
return 0;
}
if (TARGET_CALLEE_INTERWORKING && TREE_PUBLIC (func))
return TRUE;
-#ifdef ARM_PE
+#ifdef ARM_PE
return lookup_attribute ("interfacearm", DECL_ATTRIBUTES (func)) != NULL_TREE;
#else
return FALSE;
mov r6, r8
push {r6, r7}
as part of the prolog. We have to undo that pushing here. */
-
+
if (high_regs_pushed)
{
int mask = live_regs_mask & 0xff;
high registers! */
internal_error
("no low registers available for popping high registers");
-
+
for (next_hi_reg = 8; next_hi_reg < 13; next_hi_reg++)
if (live_regs_mask & (1 << next_hi_reg))
break;
{
asm_fprintf (asm_out_file, "\tmov\t%r, %r\n", next_hi_reg,
regno);
-
+
for (next_hi_reg++; next_hi_reg < 13; next_hi_reg++)
if (live_regs_mask & (1 << next_hi_reg))
break;
if (current_function_pretend_args_size == 0 || TARGET_BACKTRACE)
{
- /* Pop the return address into the PC. */
+ /* Pop the return address into the PC. */
if (had_to_push_lr)
live_regs_mask |= 1 << PC_REGNUM;
}
else
regno = LR_REGNUM;
-
+
/* Remove the argument registers that were pushed onto the stack. */
asm_fprintf (asm_out_file, "\tadd\t%r, %r, #%d\n",
SP_REGNUM, SP_REGNUM,
current_function_pretend_args_size);
-
+
thumb_exit (asm_out_file, regno);
}
struct machine_function *machine;
machine = (machine_function *) ggc_alloc_cleared (sizeof (machine_function));
-#if ARM_FT_UNKNOWN != 0
+#if ARM_FT_UNKNOWN != 0
machine->func_type = ARM_FT_UNKNOWN;
#endif
return machine;
unsigned long live_regs_mask;
func_type = arm_current_func_type ();
-
+
/* Naked functions don't have prologues. */
if (IS_NAKED (func_type))
return;
/* Restore the low register's original value. */
emit_insn (gen_movsi (reg, spare));
-
+
/* Emit a USE of the restored scratch register, so that flow
analysis will not consider the restore redundant. The
register won't be used again in this function and isn't
emit_insn (gen_stack_tie (stack_pointer_rtx,
hard_frame_pointer_rtx));
}
-
+
if (current_function_profile || TARGET_NO_SCHED_PRO)
emit_insn (gen_blockage ());
emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx, reg));
}
}
-
+
/* Emit a USE (stack_pointer_rtx), so that
the stack adjustment will not be deleted. */
emit_insn (gen_prologue_use (stack_pointer_rtx));
if (GET_CODE (XEXP (DECL_RTL (current_function_decl), 0)) != SYMBOL_REF)
abort ();
name = XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0);
-
+
/* Generate code sequence to switch us into Thumb mode. */
/* The .code 32 directive has already been emitted by
ASM_DECLARE_FUNCTION_NAME. */
is called from a Thumb encoded function elsewhere in the
same file. Hence the definition of STUB_NAME here must
agree with the definition in gas/config/tc-arm.c. */
-
+
#define STUB_NAME ".real_start_of"
-
+
fprintf (f, "\t.code\t16\n");
#ifdef ARM_PE
if (arm_dllexport_name_p (name))
name = arm_strip_name_encoding (name);
-#endif
+#endif
asm_fprintf (f, "\t.globl %s%U%s\n", STUB_NAME, name);
fprintf (f, "\t.thumb_func\n");
asm_fprintf (f, "%s%U%s:\n", STUB_NAME, name);
}
-
+
if (current_function_pretend_args_size)
{
if (cfun->machine->uses_anonymous_args)
{
int num_pushes;
-
+
fprintf (f, "\tpush\t{");
num_pushes = ARM_NUM_INTS (current_function_pretend_args_size);
-
+
for (regno = LAST_ARG_REGNUM + 1 - num_pushes;
regno <= LAST_ARG_REGNUM;
regno++)
fprintf (f, "}\n");
}
else
- asm_fprintf (f, "\tsub\t%r, %r, #%d\n",
+ asm_fprintf (f, "\tsub\t%r, %r, #%d\n",
SP_REGNUM, SP_REGNUM,
current_function_pretend_args_size);
{
int offset;
int work_register;
-
+
/* We have been asked to create a stack backtrace structure.
The code looks like this:
-
+
0 .align 2
0 func:
0 sub SP, #16 Reserve space for 4 registers.
22 mov FP, R7 Put this value into the frame pointer. */
work_register = thumb_find_work_register (live_regs_mask);
-
+
asm_fprintf
(f, "\tsub\t%r, %r, #16\t%@ Create stack backtrace structure\n",
SP_REGNUM, SP_REGNUM);
}
else
offset = 0;
-
+
asm_fprintf (f, "\tadd\t%r, %r, #%d\n", work_register, SP_REGNUM,
offset + 16 + current_function_pretend_args_size);
-
+
asm_fprintf (f, "\tstr\t%r, [%r, #%d]\n", work_register, SP_REGNUM,
offset + 4);
asm_fprintf (f, "\tstr\t%r, [%r, #%d]\n", work_register, SP_REGNUM,
offset + 12);
}
-
+
asm_fprintf (f, "\tmov\t%r, %r\n", work_register, LR_REGNUM);
asm_fprintf (f, "\tstr\t%r, [%r, #%d]\n", work_register, SP_REGNUM,
offset + 8);
if (pushable_regs & (1 << regno))
{
asm_fprintf (f, "\tmov\t%r, %r\n", regno, next_hi_reg);
-
+
high_regs_pushed--;
real_regs_mask |= (1 << next_hi_reg);
-
+
if (high_regs_pushed)
{
for (next_hi_reg--; next_hi_reg > LAST_LO_REGNUM;
rtx offset;
rtx arg1;
rtx arg2;
-
+
if (GET_CODE (operands[0]) != REG)
abort ();
-
+
if (GET_CODE (operands[1]) != MEM)
abort ();
/* Get the memory address. */
addr = XEXP (operands[1], 0);
-
+
/* Work out how the memory address is computed. */
switch (GET_CODE (addr))
{
output_asm_insn ("ldr\t%H0, %2", operands);
}
break;
-
+
case CONST:
/* Compute <address> + 4 for the high order load. */
operands[2] = gen_rtx_MEM (SImode,
plus_constant (XEXP (operands[1], 0), 4));
-
+
output_asm_insn ("ldr\t%0, %1", operands);
output_asm_insn ("ldr\t%H0, %2", operands);
break;
-
+
case PLUS:
arg1 = XEXP (addr, 0);
arg2 = XEXP (addr, 1);
-
+
if (CONSTANT_P (arg1))
base = arg2, offset = arg1;
else
base = arg1, offset = arg2;
-
+
if (GET_CODE (base) != REG)
abort ();
int reg_offset = REGNO (offset);
int reg_base = REGNO (base);
int reg_dest = REGNO (operands[0]);
-
+
/* Add the base and offset registers together into the
higher destination register. */
asm_fprintf (asm_out_file, "\tadd\t%r, %r, %r",
reg_dest + 1, reg_base, reg_offset);
-
+
/* Load the lower destination register from the address in
the higher destination register. */
asm_fprintf (asm_out_file, "\tldr\t%r, [%r, #0]",
reg_dest, reg_dest + 1);
-
+
/* Load the higher destination register from its own address
plus 4. */
asm_fprintf (asm_out_file, "\tldr\t%r, [%r, #4]",
/* Compute <address> + 4 for the high order load. */
operands[2] = gen_rtx_MEM (SImode,
plus_constant (XEXP (operands[1], 0), 4));
-
+
/* If the computed address is held in the low order register
then load the high order register first, otherwise always
load the low order register first. */
directly. */
operands[2] = gen_rtx_MEM (SImode,
plus_constant (XEXP (operands[1], 0), 4));
-
+
output_asm_insn ("ldr\t%H0, %2", operands);
output_asm_insn ("ldr\t%0, %1", operands);
break;
-
+
default:
abort ();
break;
}
-
+
return "";
}
operands[4] = operands[5];
operands[5] = tmp;
}
-
+
output_asm_insn ("ldmia\t%1!, {%4, %5, %6}", operands);
output_asm_insn ("stmia\t%0!, {%4, %5, %6}", operands);
break;
emit_insn (gen_movmem12b (out, in, out, in));
len -= 12;
}
-
+
if (len >= 8)
{
emit_insn (gen_movmem8b (out, in, out, in));
len -= 8;
}
-
+
if (len >= 4)
{
rtx reg = gen_reg_rtx (SImode);
len -= 4;
offset += 4;
}
-
+
if (len >= 2)
{
rtx reg = gen_reg_rtx (HImode);
- emit_insn (gen_movhi (reg, gen_rtx_MEM (HImode,
+ emit_insn (gen_movhi (reg, gen_rtx_MEM (HImode,
plus_constant (in, offset))));
emit_insn (gen_movhi (gen_rtx_MEM (HImode, plus_constant (out, offset)),
reg));
len -= 2;
offset += 2;
}
-
+
if (len)
{
rtx reg = gen_reg_rtx (QImode);
&& memory_operand (op, mode)));
}
-/* Handle storing a half-word to memory during reload. */
+/* Handle storing a half-word to memory during reload. */
void
thumb_reload_out_hi (rtx *operands)
{
emit_insn (gen_thumb_movhi_clobber (operands[0], operands[1], operands[2]));
}
-/* Handle reading a half-word from memory during reload. */
+/* Handle reading a half-word from memory during reload. */
void
thumb_reload_in_hi (rtx *operands ATTRIBUTE_UNUSED)
{
switch (c)
{
ARM_NAME_ENCODING_LENGTHS
- default: return 0;
+ default: return 0;
}
}
arm_strip_name_encoding (const char *name)
{
int skip;
-
+
while ((skip = arm_get_strip_length (* name)))
name += skip;
PIC_OFFSET_TABLE_REGNUM,
PIC_OFFSET_TABLE_REGNUM);
fputs ("|x$adcons|\n", f);
-
+
for (chain = aof_pic_chain; chain; chain = chain->next)
{
fputs ("\tDCD\t", f);
rtx offset;
rtx wcgr;
rtx sum;
-
+
if (GET_CODE (operands [1]) != MEM
|| GET_CODE (sum = XEXP (operands [1], 0)) != PLUS
|| GET_CODE (reg = XEXP (sum, 0)) != REG
|| GET_CODE (offset = XEXP (sum, 1)) != CONST_INT
|| ((INTVAL (offset) < 1024) && (INTVAL (offset) > -1024)))
return "wldrw%?\t%0, %1";
-
- /* Fix up an out-of-range load of a GR register. */
+
+ /* Fix up an out-of-range load of a GR register. */
output_asm_insn ("str%?\t%0, [sp, #-4]!\t@ Start of GR load expansion", & reg);
wcgr = operands[0];
operands[0] = reg;
if (GET_CODE (addr) == PARALLEL)
addr = XVECEXP (addr, 0, 0);
addr = XEXP (addr, 0);
-
+
return !reg_overlap_mentioned_p (value, addr);
}
if (GET_CODE (op) == PARALLEL)
op = XVECEXP (op, 0, 0);
op = XEXP (op, 1);
-
+
early_op = XEXP (op, 0);
/* This is either an actual independent shift, or a shift applied to
the first operand of another operation. We want the whole shift
if (GET_CODE (op) == PARALLEL)
op = XVECEXP (op, 0, 0);
op = XEXP (op, 1);
-
+
early_op = XEXP (op, 0);
/* This is either an actual independent shift, or a shift applied to
shifted, in either case. */
if (GET_CODE (early_op) != REG)
early_op = XEXP (early_op, 0);
-
+
return !reg_overlap_mentioned_p (value, early_op);
}
if (GET_CODE (op) == PARALLEL)
op = XVECEXP (op, 0, 0);
op = XEXP (op, 1);
-
+
return (GET_CODE (op) == PLUS
&& !reg_overlap_mentioned_p (value, XEXP (op, 0)));
}
offsets = arm_get_frame_offsets ();
delta = offsets->outgoing_args - (offsets->frame + 4);
-
+
if (delta >= 4096)
{
emit_insn (gen_addsi3 (scratch, stack_pointer_rtx,
delta -= 16;
/* The link register is always the first saved register. */
delta -= 4;
-
+
/* Construct the address. */
addr = gen_rtx_REG (SImode, reg);
if ((reg != SP_REGNUM && delta >= 128)
emit_move_insn (gen_rtx_REG (Pmode, LR_REGNUM), source);
}
+/* Implements target hook vector_mode_supported_p. */
+bool
+arm_vector_mode_supported_p (enum machine_mode mode)
+{
+ return (mode == V2SImode
+         || mode == V4HImode
+         || mode == V8QImode);
+}
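+
+/* A minimal sketch of how generic code is now expected to ask about
+   vector support through the hook rather than the removed
+   VECTOR_MODE_SUPPORTED_P macro (the mode below is just an example):
+
+     if (targetm.vector_mode_supported_p (V4HImode))
+       ... emit iWMMXt vector code for this mode ...
+
+   On ARM the hook accepts exactly the iWMMXt modes V2SI, V4HI and
+   V8QI listed above.  */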
/* Nonzero if we need to protect the prolog from scheduling */
#define ARM_FLAG_NO_SCHED_PRO (1 << 12)
-/* Nonzero if a call to abort should be generated if a noreturn
+/* Nonzero if a call to abort should be generated if a noreturn
function tries to return. */
#define ARM_FLAG_ABORT_NORETURN (1 << 13)
/* Nonzero if all call instructions should be indirect. */
#define ARM_FLAG_LONG_CALLS (1 << 15)
-
+
/* Nonzero means that the target ISA is the THUMB, not the ARM. */
#define ARM_FLAG_THUMB (1 << 16)
etc., in addition to just the AAPCS calling conventions. */
#ifndef TARGET_BPABI
#define TARGET_BPABI false
-#endif
+#endif
/* SUBTARGET_SWITCHES is used to add flags on a per-config basis. */
#ifndef SUBTARGET_SWITCHES
extern int arm_is_6_or_7;
/* Nonzero if we should define __THUMB_INTERWORK__ in the
- preprocessor.
+ preprocessor.
XXX This is a bit of a hack, it's intended to help work around
problems in GLD which doesn't understand that armv5t code is
interworking clean. */
/* Nonzero if we need to refer to the GOT with a PC-relative
offset. In other words, generate
- .word _GLOBAL_OFFSET_TABLE_ - [. - (.Lxx + 8)]
+ .word _GLOBAL_OFFSET_TABLE_ - [. - (.Lxx + 8)]
rather than
.word _GLOBAL_OFFSET_TABLE_ - (.Lxx + 8)
- The default is true, which matches NetBSD. Subtargets can
+ The default is true, which matches NetBSD. Subtargets can
override this if required. */
#ifndef GOT_PCREL
#define GOT_PCREL 1
in instructions that operate on numbered bit-fields. */
#define BITS_BIG_ENDIAN 0
-/* Define this if most significant byte of a word is the lowest numbered.
+/* Define this if the most significant byte of a word is the lowest numbered.
Most ARM processors are run in little endian mode, so that is the default.
If you want to have it run-time selectable, change the definition in a
cover file to be TARGET_BIG_ENDIAN. */
/* Make strings word-aligned so strcpy from constants will be faster. */
#define CONSTANT_ALIGNMENT_FACTOR (TARGET_THUMB || ! arm_tune_xscale ? 1 : 2)
-
+
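/* For example: when tuning for XScale in ARM mode the factor is 2, so
   CONSTANT_ALIGNMENT below raises string constants to a 64-bit
   (2 * BITS_PER_WORD) boundary; for Thumb, or when not tuning for
   XScale, they are simply word-aligned.  */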
#define CONSTANT_ALIGNMENT(EXP, ALIGN) \
((TREE_CODE (EXP) == STRING_CST \
&& (ALIGN) < BITS_PER_WORD * CONSTANT_ALIGNMENT_FACTOR) \
r4-r8 S register variable
r9 S (rfp) register variable (real frame pointer)
-
+
r10 F S (sl) stack limit (used by -mapcs-stack-check)
r11 F S (fp) argument pointer
r12 (ip) temp workspace
The latter must include the registers where values are returned
and the register where structure-value addresses are passed.
Aside from that, you can include as many other registers as you like.
- The CC is not preserved over function calls on the ARM 6, so it is
+ The CC is not preserved over function calls on the ARM 6, so it is
easier to assume this for all. SFP is preserved, since FP is. */
#define CALL_USED_REGISTERS \
{ \
} \
SUBTARGET_CONDITIONAL_REGISTER_USAGE \
}
-
+
/* These are a couple of extensions to the formats accepted
by asm_fprintf:
%@ prints out ASM_COMMENT_START
#define MUST_USE_SJLJ_EXCEPTIONS 1
/* We can generate DWARF2 Unwind info, even though we don't use it. */
#define DWARF2_UNWIND_INFO 1
-
+
/* Use r0 and r1 to pass exception handling information. */
#define EH_RETURN_DATA_REGNO(N) (((N) < 2) ? N : INVALID_REGNUM)
/* Value should be nonzero if functions must have frame pointers.
Zero means the frame pointer need not be set up (and parms may be accessed
- via the stack pointer) in functions that seem suitable.
+ via the stack pointer) in functions that seem suitable.
If we have to have a frame pointer we might as well make use of it.
APCS says that the frame pointer does not need to be pushed in leaf
functions, or simple tail call functions. */
#define MODES_TIEABLE_P(MODE1, MODE2) \
(GET_MODE_CLASS (MODE1) == GET_MODE_CLASS (MODE2))
-#define VECTOR_MODE_SUPPORTED_P(MODE) \
- ((MODE) == V2SImode || (MODE) == V4HImode || (MODE) == V8QImode)
-
#define VALID_IWMMXT_REG_MODE(MODE) \
- (VECTOR_MODE_SUPPORTED_P (MODE) || (MODE) == DImode)
+ (arm_vector_mode_supported_p (MODE) || (MODE) == DImode)
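
/* That is, an iWMMXt register can hold DImode as well as any of the
   vector modes arm_vector_mode_supported_p accepts: V2SI, V4HI and
   V8QI.  */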
/* The order in which register should be allocated. It is good to use ip
since no saving is required (though calls clobber it) and it never contains
function parameters. It is quite good to use lr since other calls may
- clobber it anyway. Allocate r0 through r3 in reverse order since r3 is
+ clobber it anyway. Allocate r0 through r3 in reverse order since r3 is
least likely to contain a function parameter; in addition results are
returned in r0. */
#define CLASS_LIKELY_SPILLED_P(CLASS) \
((TARGET_THUMB && (CLASS) == LO_REGS) \
|| (CLASS) == CC_REG)
-
+
/* The class value for index registers, and the one for base regs. */
#define INDEX_REG_CLASS (TARGET_THUMB ? LO_REGS : GENERAL_REGS)
#define BASE_REG_CLASS (TARGET_THUMB ? LO_REGS : GENERAL_REGS)
C is the letter, and VALUE is a constant value.
Return 1 if VALUE is in the range specified by C.
I: immediate arithmetic operand (i.e. 8 bits shifted as required).
- J: valid indexing constants.
+ J: valid indexing constants.
K: ~value ok in rhs argument of data operand.
- L: -value ok in rhs argument of data operand.
+ L: -value ok in rhs argument of data operand.
M: 0..32, or a power of 2 (for shifts, or mult done by shift). */
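/* For example: 0x3FC == 0xFF << 2 satisfies 'I' (an 8-bit value
   rotated by an even amount); 0xFFFFFF00 satisfies 'K' because
   ~value == 0xFF is itself a valid immediate; and 16 satisfies 'M'
   both as 0..32 and as a power of 2.  */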
#define CONST_OK_FOR_ARM_LETTER(VALUE, C) \
((C) == 'I' ? const_ok_for_arm (VALUE) : \
#define CONST_OK_FOR_LETTER_P(VALUE, C) \
(TARGET_ARM ? \
CONST_OK_FOR_ARM_LETTER (VALUE, C) : CONST_OK_FOR_THUMB_LETTER (VALUE, C))
-
+
/* Constant letter 'G' for the FP immediate constants.
'H' means the same constant negated. */
#define CONST_DOUBLE_OK_FOR_ARM_LETTER(X, C) \
CONST_DOUBLE_OK_FOR_ARM_LETTER (X, C) : 0)
/* For the ARM, `Q' means that this is a memory operand that is just
- an offset from a register.
+ an offset from a register.
`S' means any symbol that has the SYMBOL_REF_FLAG set or a CONSTANT_POOL
address. This means that the symbol is in the text segment and can be
accessed without using a load.
'U' Prefixes an extended memory constraint where:
- 'Uv' is an address valid for VFP load/store insns.
- 'Uy' is an address valid for iwmmxt load/store insns.
+ 'Uv' is an address valid for VFP load/store insns.
+ 'Uy' is an address valid for iwmmxt load/store insns.
'Uq' is an address valid for ldrsb. */
#define EXTRA_CONSTRAINT_STR_ARM(OP, C, STR) \
? (((MODE) == HImode && ! arm_arch4 && true_regnum (X) == -1) \
? GENERAL_REGS : NO_REGS) \
: THUMB_SECONDARY_OUTPUT_RELOAD_CLASS (CLASS, MODE, X))
-
+
/* If we need to load shorts byte-at-a-time, then we need a scratch. */
#define SECONDARY_INPUT_RELOAD_CLASS(CLASS, MODE, X) \
/* Restrict which direct reloads are allowed for VFP regs. */ \
ARM_LEGITIMIZE_RELOAD_ADDRESS (X, MODE, OPNUM, TYPE, IND_LEVELS, WIN); \
else \
THUMB_LEGITIMIZE_RELOAD_ADDRESS (X, MODE, OPNUM, TYPE, IND_LEVELS, WIN)
-
+
/* Return the maximum number of consecutive registers
needed to represent mode MODE in a register of class CLASS.
ARM regs are UNITS_PER_WORD bits while FPA regs can hold any FP mode */
: TARGET_ARM && TARGET_HARD_FLOAT && TARGET_MAVERICK \
&& GET_MODE_CLASS (MODE) == MODE_FLOAT \
? gen_rtx_REG (MODE, FIRST_CIRRUS_FP_REGNUM) \
- : TARGET_IWMMXT_ABI && VECTOR_MODE_SUPPORTED_P (MODE) \
+ : TARGET_IWMMXT_ABI && arm_vector_mode_supported_p (MODE) \
? gen_rtx_REG (MODE, FIRST_IWMMXT_REGNUM) \
: gen_rtx_REG (MODE, ARG_REGISTER (1)))
/* For an arg passed partly in registers and partly in memory,
this is the number of registers used.
For args passed entirely in registers or entirely in memory, zero. */
-#define FUNCTION_ARG_PARTIAL_NREGS(CUM, MODE, TYPE, NAMED) \
- (VECTOR_MODE_SUPPORTED_P (MODE) ? 0 : \
- NUM_ARG_REGS > (CUM).nregs \
+#define FUNCTION_ARG_PARTIAL_NREGS(CUM, MODE, TYPE, NAMED) \
+ (arm_vector_mode_supported_p (MODE) ? 0 : \
+ NUM_ARG_REGS > (CUM).nregs \
&& (NUM_ARG_REGS < ((CUM).nregs + ARM_NUM_REGS2 (MODE, TYPE)) \
- && (CUM).can_split) \
+ && (CUM).can_split) \
? NUM_ARG_REGS - (CUM).nregs : 0)
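
/* A worked example, assuming the usual four ARM argument registers:
   a DImode argument with (CUM).nregs == 3 and (CUM).can_split set has
   ARM_NUM_REGS2 == 2, so 4 < 3 + 2 holds and the macro yields
   4 - 3 = 1; one word travels in r3 and the other goes on the stack.
   iWMMXt vector arguments are never split, hence the 0 above.  */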
/* Initialize a variable CUM of type CUMULATIVE_ARGS
(TYPE is null for libcalls where that information may not be available.) */
#define FUNCTION_ARG_ADVANCE(CUM, MODE, TYPE, NAMED) \
(CUM).nargs += 1; \
- if (VECTOR_MODE_SUPPORTED_P (MODE) \
+ if (arm_vector_mode_supported_p (MODE) \
&& (CUM).named_count > (CUM).nargs) \
(CUM).iwmmxt_nregs += 1; \
else \
/* Special case handling of the location of arguments passed on the stack. */
#define DEBUGGER_ARG_OFFSET(value, addr) value ? value : arm_debugger_arg_offset (value, addr)
-
+
/* Initialize data used by insn expanders. This is called from insn_emit,
once for every function before code is generated. */
#define INIT_EXPANDERS arm_init_expanders ()
ARM_TRAMPOLINE_TEMPLATE (FILE) \
else \
THUMB_TRAMPOLINE_TEMPLATE (FILE)
-
+
/* Length in units of the trampoline for entering a nested function. */
#define TRAMPOLINE_SIZE (TARGET_ARM ? 16 : 24)
On the ARM, allow any integer (invalid ones are removed later by insn
patterns), nice doubles and symbol_refs which refer to the function's
constant pool XXX.
-
+
When generating pic allow anything. */
#define ARM_LEGITIMATE_CONSTANT_P(X) (flag_pic || ! label_mentioned_p (X))
case SHORT_CALL_FLAG_CHAR: return 1; \
case LONG_CALL_FLAG_CHAR: return 1; \
case '*': return 1; \
- SUBTARGET_NAME_ENCODING_LENGTHS
+ SUBTARGET_NAME_ENCODING_LENGTHS
/* This is how to output a reference to a user-level label named NAME.
`assemble_name' uses this. */
that is a valid memory address for an instruction.
The MODE argument is the machine mode for the MEM expression
that wants to use this address. */
-
+
#define ARM_BASE_REGISTER_RTX_P(X) \
(GET_CODE (X) == REG && ARM_REG_OK_FOR_BASE_P (X))
if (TARGET_ARM) \
ARM_GO_IF_LEGITIMATE_ADDRESS (MODE, X, WIN) \
else /* if (TARGET_THUMB) */ \
- THUMB_GO_IF_LEGITIMATE_ADDRESS (MODE, X, WIN)
+ THUMB_GO_IF_LEGITIMATE_ADDRESS (MODE, X, WIN)
\f
/* Try machine-dependent ways of modifying an illegitimate address
if (memory_address_p (MODE, X)) \
goto WIN; \
} while (0)
-
+
/* Go to LABEL if ADDR (a legitimate address expression)
has an effect that depends on the machine mode it is used for. */
#define ARM_GO_IF_MODE_DEPENDENT_ADDRESS(ADDR, LABEL) \
/* Nothing helpful to do for the Thumb */
#define GO_IF_MODE_DEPENDENT_ADDRESS(ADDR, LABEL) \
if (TARGET_ARM) \
- ARM_GO_IF_MODE_DEPENDENT_ADDRESS (ADDR, LABEL)
+ ARM_GO_IF_MODE_DEPENDENT_ADDRESS (ADDR, LABEL)
\f
/* Specify the machine mode that this machine uses
#define SLOW_BYTE_ACCESS 0
#define SLOW_UNALIGNED_ACCESS(MODE, ALIGN) 1
-
+
/* Immediate shift counts are truncated by the output routines (or was it
the assembler?). Shift counts in a register are truncated by ARM. Note
that the native compiler puts too large (> 32) immediate shift counts
(TARGET_ARM ? 10 : \
((GET_MODE_SIZE (M) < 4 ? 8 : 2 * GET_MODE_SIZE (M)) \
* (CLASS == LO_REGS ? 1 : 2)))
-
+
/* Try to generate sequences that don't involve branches; we can then use
   conditional instructions.  */
#define BRANCH_COST \
#define RETURN_ADDR_RTX(COUNT, FRAME) \
arm_return_addr (COUNT, FRAME)
-/* Mask of the bits in the PC that contain the real return address
+/* Mask of the bits in the PC that contain the real return address
when running in 26-bit mode. */
#define RETURN_ADDR_MASK26 (0x03fffffc)
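
/* In 26-bit mode the PC also carries processor status: the condition
   flags live in bits 31..26 and the mode bits in bits 1..0, so this
   mask selects just the word-aligned return address in bits 25..2.  */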
static void ix86_setup_incoming_varargs (CUMULATIVE_ARGS *, enum machine_mode,
tree, int *, int);
static tree ix86_gimplify_va_arg (tree, tree, tree *, tree *);
+static bool ix86_vector_mode_supported_p (enum machine_mode);
static int ix86_address_cost (rtx);
static bool ix86_cannot_force_const_mem (rtx);
#undef TARGET_GIMPLIFY_VA_ARG_EXPR
#define TARGET_GIMPLIFY_VA_ARG_EXPR ix86_gimplify_va_arg
+#undef TARGET_VECTOR_MODE_SUPPORTED_P
+#define TARGET_VECTOR_MODE_SUPPORTED_P ix86_vector_mode_supported_p
+
#ifdef SUBTARGET_INSERT_ATTRIBUTES
#undef TARGET_INSERT_ATTRIBUTES
#define TARGET_INSERT_ATTRIBUTES SUBTARGET_INSERT_ATTRIBUTES
return regparm;
}
-/* Return true if EAX is live at the start of the function. Used by
+/* Return true if EAX is live at the start of the function. Used by
ix86_expand_prologue to determine if we need special help before
calling allocate_stack_worker. */
t = build1 (ADDR_EXPR, build_pointer_type (type), temp);
t = build2 (MODIFY_EXPR, void_type_node, addr, t);
gimplify_and_add (t, pre_p);
-
+
for (i = 0; i < XVECLEN (container, 0); i++)
{
rtx slot = XVECEXP (container, 0, i);
GEN_INT ((count >> (size == 4 ? 2 : 3))
& (TARGET_64BIT ? -1 : 0x3fffffff)));
countreg = ix86_zero_extend_to_Pmode (countreg);
-
+
destexp = gen_rtx_ASHIFT (Pmode, countreg,
GEN_INT (size == 4 ? 2 : 3));
srcexp = gen_rtx_PLUS (Pmode, destexp, srcreg);
if (is_mulwiden)
op0 = XEXP (op0, 0), mode = GET_MODE (op0);
}
-
+
*total = COSTS_N_INSNS (ix86_cost->mult_init[MODE_INDEX (mode)]
+ nbits * ix86_cost->mult_bit)
+ rtx_cost (op0, outer_code) + rtx_cost (op1, outer_code);
if (TARGET_MACHO)
{
rtx sym_ref = XEXP (DECL_RTL (function), 0);
- tmp = (gen_rtx_SYMBOL_REF
- (Pmode,
+ tmp = (gen_rtx_SYMBOL_REF
+ (Pmode,
machopic_indirection_name (sym_ref, /*stub_p=*/true)));
tmp = gen_rtx_MEM (QImode, tmp);
xops[0] = tmp;
int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
int n_elts = (GET_MODE_SIZE (mode) / elt_size);
int i;
-
+
for (i = n_elts - 1; i >= 0; i--)
if (GET_CODE (XVECEXP (vals, 0, i)) != CONST_INT
&& GET_CODE (XVECEXP (vals, 0, i)) != CONST_DOUBLE)
break;
- /* Few special cases first...
+ /* A few special cases first...
... constants are best loaded from constant pool. */
if (i < 0)
{
}
}
+/* Implements target hook vector_mode_supported_p. */
+static bool
+ix86_vector_mode_supported_p (enum machine_mode mode)
+{
+ if (TARGET_SSE && VALID_SSE_REG_MODE (mode))
+   return true;
+
+ if (TARGET_MMX && VALID_MMX_REG_MODE (mode))
+   return true;
+
+ if (TARGET_3DNOW && VALID_MMX_REG_MODE_3DNOW (mode))
+   return true;
+
+ return false;
+}
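+
+/* As a rough sketch of the resulting behavior (the precise mode sets
+   come from the VALID_*_REG_MODE macros in i386.h):
+
+     V4SFmode -> supported when TARGET_SSE
+     V8QImode -> supported when TARGET_MMX
+     V2SFmode -> supported when TARGET_3DNOW (assuming V2SF is what
+                 VALID_MMX_REG_MODE_3DNOW accepts)
+
+   These are the same answers the old VECTOR_MODE_SUPPORTED_P macro
+   gave; only the mechanism has changed.  */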
+
/* Worker function for TARGET_MD_ASM_CLOBBERS.
We do this in the new i386 backend to maintain source compatibility
static tree
ix86_md_asm_clobbers (tree clobbers)
{
- clobbers = tree_cons (NULL_TREE, build_string (5, "flags"),
- clobbers);
- clobbers = tree_cons (NULL_TREE, build_string (4, "fpsr"),
- clobbers);
- clobbers = tree_cons (NULL_TREE, build_string (7, "dirflag"),
- clobbers);
+ clobbers = tree_cons (NULL_TREE, build_string (5, "flags"),
+ clobbers);
+ clobbers = tree_cons (NULL_TREE, build_string (4, "fpsr"),
+ clobbers);
+ clobbers = tree_cons (NULL_TREE, build_string (7, "dirflag"),
+ clobbers);
return clobbers;
}
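
/* The upshot: every ix86 inline asm is treated as if these clobbers
   had been written out by hand, e.g. (operands elided, illustrative
   only):

     asm ("cld; rep movsb" : ... : ... : "flags", "fpsr", "dirflag");

   so old asm statements that silently trashed the flags, the FPU
   status word or the direction flag keep working.  */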
{
emit_insn (gen_x86_sahf_1 (reg));
- temp = gen_rtx_REG (CCmode, FLAGS_REG);
+ temp = gen_rtx_REG (CCmode, FLAGS_REG);
temp = gen_rtx_UNORDERED (VOIDmode, temp, const0_rtx);
}
else
{
emit_insn (gen_testqi_ext_ccno_0 (reg, GEN_INT (0x04)));
- temp = gen_rtx_REG (CCNOmode, FLAGS_REG);
+ temp = gen_rtx_REG (CCNOmode, FLAGS_REG);
temp = gen_rtx_NE (VOIDmode, temp, const0_rtx);
}
-
+
temp = gen_rtx_IF_THEN_ELSE (VOIDmode, temp,
gen_rtx_LABEL_REF (VOIDmode, label),
pc_rtx);
emit_label (label2);
}
-
+
#include "gt-i386.h"
const int lea; /* cost of a lea instruction */
const int shift_var; /* variable shift costs */
const int shift_const; /* constant shift costs */
- const int mult_init[5]; /* cost of starting a multiply
+ const int mult_init[5]; /* cost of starting a multiply
in QImode, HImode, SImode, DImode, TImode*/
const int mult_bit; /* cost of multiply per each bit set */
- const int divide[5]; /* cost of a divide/mod
+ const int divide[5]; /* cost of a divide/mod
in QImode, HImode, SImode, DImode, TImode*/
int movsx; /* The cost of movsx operation. */
int movzx; /* The cost of movzx operation. */
((MODE) == DImode || (MODE) == V8QImode || (MODE) == V4HImode \
|| (MODE) == V2SImode || (MODE) == SImode)
-#define VECTOR_MODE_SUPPORTED_P(MODE) \
- (VALID_SSE_REG_MODE (MODE) && TARGET_SSE ? 1 \
- : VALID_MMX_REG_MODE (MODE) && TARGET_MMX ? 1 \
- : VALID_MMX_REG_MODE_3DNOW (MODE) && TARGET_3DNOW ? 1 : 0)
-
#define UNITS_PER_SIMD_WORD \
(TARGET_SSE ? 16 : TARGET_MMX || TARGET_3DNOW ? 8 : 0)
|| ((CLASS) == FP_SECOND_REG))
/* Return a class of registers that cannot change FROM mode to TO mode.
-
+
   x87 registers can't do subreg, as all values are reformatted to
   extended precision.  XMM registers do not support subregs with the
   nonzero offsets 4, 8 and 12 that are otherwise valid for integer
   registers.  Since we can't
/* Subroutines used for code generation on IBM RS/6000.
- Copyright (C) 1991, 1993, 1994, 1995, 1996, 1997, 1998, 1999,
+ Copyright (C) 1991, 1993, 1994, 1995, 1996, 1997, 1998, 1999,
2000, 2001, 2002, 2003, 2004 Free Software Foundation, Inc.
Contributed by Richard Kenner (kenner@vlsi1.ultra.nyu.edu)
/* Schedule instructions for group formation. */
static GTY(()) bool rs6000_sched_groups;
-/* Support adjust_priority scheduler hook
+/* Support adjust_priority scheduler hook
and -mprioritize-restricted-insns= option. */
const char *rs6000_sched_restricted_insns_priority_str;
int rs6000_sched_restricted_insns_priority;
static rtx altivec_expand_st_builtin (tree, rtx, bool *);
static rtx altivec_expand_dst_builtin (tree, rtx, bool *);
static rtx altivec_expand_abs_builtin (enum insn_code, tree, rtx);
-static rtx altivec_expand_predicate_builtin (enum insn_code,
+static rtx altivec_expand_predicate_builtin (enum insn_code,
const char *, tree, rtx);
static rtx altivec_expand_lv_builtin (enum insn_code, tree, rtx);
static rtx altivec_expand_stv_builtin (enum insn_code, tree);
static tree rs6000_build_builtin_va_list (void);
static tree rs6000_gimplify_va_arg (tree, tree, tree *, tree *);
static bool rs6000_must_pass_in_stack (enum machine_mode, tree);
+static bool rs6000_vector_mode_supported_p (enum machine_mode);
static enum machine_mode rs6000_eh_return_filter_mode (void);
#define TARGET_SCHED_ADJUST_COST rs6000_adjust_cost
#undef TARGET_SCHED_ADJUST_PRIORITY
#define TARGET_SCHED_ADJUST_PRIORITY rs6000_adjust_priority
-#undef TARGET_SCHED_IS_COSTLY_DEPENDENCE
+#undef TARGET_SCHED_IS_COSTLY_DEPENDENCE
#define TARGET_SCHED_IS_COSTLY_DEPENDENCE rs6000_is_costly_dependence
#undef TARGET_SCHED_FINISH
#define TARGET_SCHED_FINISH rs6000_sched_finish
#undef TARGET_EH_RETURN_FILTER_MODE
#define TARGET_EH_RETURN_FILTER_MODE rs6000_eh_return_filter_mode
+#undef TARGET_VECTOR_MODE_SUPPORTED_P
+#define TARGET_VECTOR_MODE_SUPPORTED_P rs6000_vector_mode_supported_p
+
struct gcc_target targetm = TARGET_INITIALIZER;
\f
/* This table occasionally claims that a processor does not support
a particular feature even though it does, but the feature is slower
than the alternative. Thus, it shouldn't be relied on as a
- complete description of the processor's support.
+ complete description of the processor's support.
Please keep this list in order, and don't forget to update the
documentation in invoke.texi when adding a new processor or
enum {
POWER_MASKS = MASK_POWER | MASK_POWER2 | MASK_MULTIPLE | MASK_STRING,
- POWERPC_MASKS = (POWERPC_BASE_MASK | MASK_PPC_GPOPT
+ POWERPC_MASKS = (POWERPC_BASE_MASK | MASK_PPC_GPOPT
| MASK_PPC_GFXOPT | MASK_POWERPC64 | MASK_ALTIVEC
| MASK_MFCRF)
};
= (rs6000_sched_groups ? store_to_load_dep_costly : no_dep_costly);
if (rs6000_sched_costly_dep_str)
{
- if (! strcmp (rs6000_sched_costly_dep_str, "no"))
+ if (! strcmp (rs6000_sched_costly_dep_str, "no"))
rs6000_sched_costly_dep = no_dep_costly;
else if (! strcmp (rs6000_sched_costly_dep_str, "all"))
rs6000_sched_costly_dep = all_deps_costly;
rs6000_sched_costly_dep = true_store_to_load_dep_costly;
else if (! strcmp (rs6000_sched_costly_dep_str, "store_to_load"))
rs6000_sched_costly_dep = store_to_load_dep_costly;
- else
+ else
rs6000_sched_costly_dep = atoi (rs6000_sched_costly_dep_str);
}
/* Allocate an alias set for register saves & restores from stack. */
rs6000_sr_alias_set = new_alias_set ();
- if (TARGET_TOC)
+ if (TARGET_TOC)
ASM_GENERATE_INTERNAL_LABEL (toc_label_name, "LCTOC", 1);
/* We can only guarantee the availability of DI pseudo-ops when
if (!TARGET_SPE_ABI)
error ("not configured for ABI: '%s'", rs6000_abi_string);
}
-
+
else if (! strcmp (rs6000_abi_string, "no-spe"))
rs6000_spe_abi = 0;
else
/* Returns 1 always. */
int
-any_operand (rtx op ATTRIBUTE_UNUSED,
+any_operand (rtx op ATTRIBUTE_UNUSED,
enum machine_mode mode ATTRIBUTE_UNUSED)
{
return 1;
/* Returns 1 always. */
int
-any_parallel_operand (rtx op ATTRIBUTE_UNUSED,
+any_parallel_operand (rtx op ATTRIBUTE_UNUSED,
enum machine_mode mode ATTRIBUTE_UNUSED)
{
return 1;
int
altivec_register_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
{
-
+
return (register_operand (op, mode)
&& (GET_CODE (op) != REG
|| REGNO (op) > FIRST_PSEUDO_REGISTER
{
return (register_operand (op, mode)
&& (GET_CODE (op) != REG
- || (REGNO (op) >= ARG_POINTER_REGNUM
+ || (REGNO (op) >= ARG_POINTER_REGNUM
&& !XER_REGNO_P (REGNO (op)))
|| REGNO (op) < MQ_REGNO));
}
return CONST_DOUBLE_HIGH (op) == 0;
}
- else
+ else
return gpc_reg_operand (op, mode);
}
static int
easy_vector_splat_const (int cst, enum machine_mode mode)
{
- switch (mode)
+ switch (mode)
{
case V4SImode:
- if (EASY_VECTOR_15 (cst)
- || EASY_VECTOR_15_ADD_SELF (cst))
+ if (EASY_VECTOR_15 (cst)
+ || EASY_VECTOR_15_ADD_SELF (cst))
return cst;
if ((cst & 0xffff) != ((cst >> 16) & 0xffff))
break;
cst = cst >> 16;
case V8HImode:
- if (EASY_VECTOR_15 (cst)
- || EASY_VECTOR_15_ADD_SELF (cst))
+ if (EASY_VECTOR_15 (cst)
+ || EASY_VECTOR_15_ADD_SELF (cst))
return cst;
if ((cst & 0xff) != ((cst >> 8) & 0xff))
break;
cst = cst >> 8;
case V16QImode:
- if (EASY_VECTOR_15 (cst)
- || EASY_VECTOR_15_ADD_SELF (cst))
+ if (EASY_VECTOR_15 (cst)
+ || EASY_VECTOR_15_ADD_SELF (cst))
return cst;
- default:
+ default:
break;
}
return 0;
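
(Aside, not part of the patch: the fall-through chain in easy_vector_splat_const above narrows the splat constant one element width at a time.  A minimal standalone C sketch of that narrowing, assuming EASY_VECTOR_15 models the 5-bit signed vspltis* immediate range of -16..15; the ADD_SELF variant is omitted.)

#include <stdio.h>

/* Assumed: the AltiVec vspltis{b,h,w} immediate range is -16..15.  */
#define EASY_VECTOR_15(n) ((n) >= -16 && (n) <= 15)

/* Mirror of the narrowing in easy_vector_splat_const: a 32-bit splat
   value is retried at 16 bits only if its two 16-bit halves agree,
   and at 8 bits only if the two 8-bit halves of that agree.  Shifts
   on negative values are arithmetic, as in the patched code.  */
static int
splat_is_easy (int cst)
{
  if (EASY_VECTOR_15 (cst))		/* V4SImode case */
    return 1;
  if ((cst & 0xffff) != ((cst >> 16) & 0xffff))
    return 0;
  cst >>= 16;
  if (EASY_VECTOR_15 (cst))		/* V8HImode case */
    return 1;
  if ((cst & 0xff) != ((cst >> 8) & 0xff))
    return 0;
  cst >>= 8;
  return EASY_VECTOR_15 (cst);		/* V16QImode case */
}

int
main (void)
{
  printf ("%d\n", splat_is_easy (0x000a000a));	/* 1: narrows to 10 */
  printf ("%d\n", splat_is_easy (0x12345678));	/* 0: halves differ */
  return 0;
}
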
&& cst2 >= -0x7fff && cst2 <= 0x7fff)
return 1;
- if (TARGET_ALTIVEC
+ if (TARGET_ALTIVEC
&& easy_vector_same (op, mode))
{
cst = easy_vector_splat_const (cst, mode);
- if (EASY_VECTOR_15_ADD_SELF (cst)
+ if (EASY_VECTOR_15_ADD_SELF (cst)
|| EASY_VECTOR_15 (cst))
return 1;
- }
+ }
return 0;
}
{
cst = easy_vector_splat_const (INTVAL (CONST_VECTOR_ELT (op, 0)), mode);
if (EASY_VECTOR_15_ADD_SELF (cst))
- return 1;
+ return 1;
}
return 0;
}
/* Generate easy_vector_constant out of an easy_vector_constant_add_self. */
-rtx
+rtx
gen_easy_vector_constant_add_self (rtx op)
{
int i, units;
v = rtvec_alloc (units);
for (i = 0; i < units; i++)
- RTVEC_ELT (v, i) =
+ RTVEC_ELT (v, i) =
GEN_INT (INTVAL (CONST_VECTOR_ELT (op, i)) >> 1);
return gen_rtx_raw_CONST_VECTOR (GET_MODE (op), v);
}
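
(Aside, not part of the patch: gen_easy_vector_constant_add_self above implements the add-self trick, where a splat of 2*c with c in the vspltis range is built as splat(c) followed by an add of the register to itself, so each element is emitted shifted right by one.  A toy standalone model:)

#include <stdio.h>

/* Each CONST_VECTOR element is halved, matching the
   INTVAL (CONST_VECTOR_ELT (op, i)) >> 1 in the hunk above;
   the paired add-self insn then restores the original value.  */
static void
halve_elements (int *elt, int n)
{
  int i;
  for (i = 0; i < n; i++)
    elt[i] >>= 1;
}

int
main (void)
{
  int v[4] = { 20, 20, 20, 20 };   /* 20 is 2 * 10; 10 fits vspltisw */
  halve_elements (v, 4);
  printf ("vspltisw of %d, then add to itself: %d\n", v[0], v[0] + v[0]);
  return 0;
}
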
if (reload_completed && GET_CODE (inner) == SUBREG)
inner = SUBREG_REG (inner);
-
+
return gpc_reg_operand (inner, mode)
|| (memory_operand (inner, mode)
&& GET_CODE (XEXP (inner, 0)) != PRE_INC
this file. */
int
-current_file_function_operand (rtx op,
+current_file_function_operand (rtx op,
enum machine_mode mode ATTRIBUTE_UNUSED)
{
return (GET_CODE (op) == SYMBOL_REF
/* Return 1 for an operand in small memory on V.4/eabi. */
int
-small_data_operand (rtx op ATTRIBUTE_UNUSED,
+small_data_operand (rtx op ATTRIBUTE_UNUSED,
enum machine_mode mode ATTRIBUTE_UNUSED)
{
#if TARGET_ELF
\f
/* Subroutines of rs6000_legitimize_address and rs6000_legitimate_address. */
-static int
-constant_pool_expr_1 (rtx op, int *have_sym, int *have_toc)
+static int
+constant_pool_expr_1 (rtx op, int *have_sym, int *have_toc)
{
- switch (GET_CODE(op))
+ switch (GET_CODE(op))
{
case SYMBOL_REF:
if (RS6000_SYMBOL_REF_TLS_P (op))
return rs6000_legitimize_tls_address (x, model);
}
- if (GET_CODE (x) == PLUS
+ if (GET_CODE (x) == PLUS
&& GET_CODE (XEXP (x, 0)) == REG
&& GET_CODE (XEXP (x, 1)) == CONST_INT
&& (unsigned HOST_WIDE_INT) (INTVAL (XEXP (x, 1)) + 0x8000) >= 0x10000)
- {
+ {
HOST_WIDE_INT high_int, low_int;
rtx sum;
low_int = ((INTVAL (XEXP (x, 1)) & 0xffff) ^ 0x8000) - 0x8000;
GEN_INT (high_int)), 0);
return gen_rtx_PLUS (Pmode, sum, GEN_INT (low_int));
}
- else if (GET_CODE (x) == PLUS
+ else if (GET_CODE (x) == PLUS
&& GET_CODE (XEXP (x, 0)) == REG
&& GET_CODE (XEXP (x, 1)) != CONST_INT
&& GET_MODE_NUNITS (mode) == 1
&& TARGET_NO_TOC
&& ! flag_pic
&& GET_CODE (x) != CONST_INT
- && GET_CODE (x) != CONST_DOUBLE
+ && GET_CODE (x) != CONST_DOUBLE
&& CONSTANT_P (x)
&& GET_MODE_NUNITS (mode) == 1
&& (GET_MODE_BITSIZE (mode) <= 32
&& ! MACHO_DYNAMIC_NO_PIC_P
#endif
&& GET_CODE (x) != CONST_INT
- && GET_CODE (x) != CONST_DOUBLE
+ && GET_CODE (x) != CONST_DOUBLE
&& CONSTANT_P (x)
&& ((TARGET_HARD_FLOAT && TARGET_FPRS) || mode != DFmode)
- && mode != DImode
+ && mode != DImode
&& mode != TImode)
{
rtx reg = gen_reg_rtx (Pmode);
emit_insn (gen_macho_high (reg, x));
return gen_rtx_LO_SUM (Pmode, reg, x);
}
- else if (TARGET_TOC
+ else if (TARGET_TOC
&& constant_pool_expr_p (x)
&& ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (get_pool_constant (x), Pmode))
{
rs6000_got_symbol = gen_rtx_SYMBOL_REF (Pmode, "_GLOBAL_OFFSET_TABLE_");
SYMBOL_REF_FLAGS (rs6000_got_symbol) |= SYMBOL_FLAG_LOCAL;
SYMBOL_REF_FLAGS (rs6000_got_symbol) |= SYMBOL_FLAG_EXTERNAL;
- }
+ }
return rs6000_got_symbol;
}
The Darwin code is inside #if TARGET_MACHO because only then is
machopic_function_base_name() defined. */
rtx
-rs6000_legitimize_reload_address (rtx x, enum machine_mode mode,
+rs6000_legitimize_reload_address (rtx x, enum machine_mode mode,
int opnum, int type, int ind_levels ATTRIBUTE_UNUSED, int *win)
{
- /* We must recognize output that we have already generated ourselves. */
+ /* We must recognize output that we have already generated ourselves. */
if (GET_CODE (x) == PLUS
&& GET_CODE (XEXP (x, 0)) == PLUS
&& GET_CODE (XEXP (XEXP (x, 0), 0)) == REG
}
*win = 0;
return x;
-}
+}
/* GO_IF_LEGITIMATE_ADDRESS recognizes an RTL expression
that is a valid memory address for an instruction.
??? Except that due to conceptual problems in offsettable_address_p
we can't really report the problems of integral offsets. So leave
- this assuming that the adjustable offset must be valid for the
+ this assuming that the adjustable offset must be valid for the
sub-words of a TFmode operand, which is what we had before. */
bool
   insns, zero is returned and no insns are emitted.  */
rtx
-rs6000_emit_set_const (rtx dest, enum machine_mode mode,
+rs6000_emit_set_const (rtx dest, enum machine_mode mode,
rtx source, int n ATTRIBUTE_UNUSED)
{
rtx result, insn, set;
ud3 = c2 & 0xffff;
ud4 = (c2 & 0xffff0000) >> 16;
- if ((ud4 == 0xffff && ud3 == 0xffff && ud2 == 0xffff && (ud1 & 0x8000))
+ if ((ud4 == 0xffff && ud3 == 0xffff && ud2 == 0xffff && (ud1 & 0x8000))
|| (ud4 == 0 && ud3 == 0 && ud2 == 0 && ! (ud1 & 0x8000)))
{
if (ud1 & 0x8000)
emit_move_insn (dest, GEN_INT (ud1));
}
- else if ((ud4 == 0xffff && ud3 == 0xffff && (ud2 & 0x8000))
+ else if ((ud4 == 0xffff && ud3 == 0xffff && (ud2 & 0x8000))
|| (ud4 == 0 && ud3 == 0 && ! (ud2 & 0x8000)))
{
if (ud2 & 0x8000)
- emit_move_insn (dest, GEN_INT (((ud2 << 16) ^ 0x80000000)
+ emit_move_insn (dest, GEN_INT (((ud2 << 16) ^ 0x80000000)
- 0x80000000));
else
emit_move_insn (dest, GEN_INT (ud2 << 16));
if (ud1 != 0)
emit_move_insn (dest, gen_rtx_IOR (DImode, dest, GEN_INT (ud1)));
}
- else if ((ud4 == 0xffff && (ud3 & 0x8000))
+ else if ((ud4 == 0xffff && (ud3 & 0x8000))
|| (ud4 == 0 && ! (ud3 & 0x8000)))
{
if (ud3 & 0x8000)
- emit_move_insn (dest, GEN_INT (((ud3 << 16) ^ 0x80000000)
+ emit_move_insn (dest, GEN_INT (((ud3 << 16) ^ 0x80000000)
- 0x80000000));
else
emit_move_insn (dest, GEN_INT (ud3 << 16));
if (ud1 != 0)
emit_move_insn (dest, gen_rtx_IOR (DImode, dest, GEN_INT (ud1)));
}
- else
+ else
{
if (ud4 & 0x8000)
- emit_move_insn (dest, GEN_INT (((ud4 << 16) ^ 0x80000000)
+ emit_move_insn (dest, GEN_INT (((ud4 << 16) ^ 0x80000000)
- 0x80000000));
else
emit_move_insn (dest, GEN_INT (ud4 << 16));
emit_move_insn (dest, gen_rtx_ASHIFT (DImode, dest, GEN_INT (32)));
if (ud2 != 0)
- emit_move_insn (dest, gen_rtx_IOR (DImode, dest,
- GEN_INT (ud2 << 16)));
+ emit_move_insn (dest, gen_rtx_IOR (DImode, dest,
+ GEN_INT (ud2 << 16)));
if (ud1 != 0)
emit_move_insn (dest, gen_rtx_IOR (DImode, dest, GEN_INT (ud1)));
}
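
(Aside, not part of the patch: the hunk above is the worst-case branch of rs6000_emit_set_const's DImode path, where all four 16-bit chunks ud1..ud4 of the constant are needed; the earlier branches handle constants whose sign-extended 16- or 32-bit forms suffice.  A standalone sketch of the rebuild, with the emitted lis/ori/sldi/oris/ori sequence replaced by plain unsigned arithmetic:)

#include <stdio.h>

/* Sign-extend bit 31 using the same ((x ^ 0x80000000) - 0x80000000)
   idiom as the patched code; unsigned wraparound gives the result
   modulo 2^64 without implementation-defined behavior.  */
static unsigned long long
sext32 (unsigned long long x)
{
  return (x ^ 0x80000000ULL) - 0x80000000ULL;
}

static unsigned long long
build_const (unsigned long long c)
{
  unsigned long long ud1 = c & 0xffff;
  unsigned long long ud2 = (c >> 16) & 0xffff;
  unsigned long long ud3 = (c >> 32) & 0xffff;
  unsigned long long ud4 = (c >> 48) & 0xffff;

  unsigned long long r = sext32 (ud4 << 16);	/* lis  r,ud4   */
  if (ud3 != 0)
    r |= ud3;					/* ori  r,r,ud3 */
  r <<= 32;					/* sldi r,r,32  */
  if (ud2 != 0)
    r |= ud2 << 16;				/* oris r,r,ud2 */
  if (ud1 != 0)
    r |= ud1;					/* ori  r,r,ud1 */
  return r;
}

int
main (void)
{
  unsigned long long c = 0x123456789abcdef0ULL;
  printf ("%d\n", build_const (c) == c);	/* prints 1 */
  return 0;
}
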
rtx operands[2];
operands[0] = dest;
operands[1] = source;
-
+
/* Sanity checks. Check that we get CONST_DOUBLE only when we should. */
if (GET_CODE (operands[1]) == CONST_DOUBLE
&& ! FLOAT_MODE_P (mode)
&& ! (SLOW_UNALIGNED_ACCESS (SImode, (MEM_ALIGN (operands[0]) > 32
? 32 : MEM_ALIGN (operands[0])))
|| SLOW_UNALIGNED_ACCESS (SImode, (MEM_ALIGN (operands[1]) > 32
- ? 32
+ ? 32
: MEM_ALIGN (operands[1]))))
&& ! MEM_VOLATILE_P (operands [0])
&& ! MEM_VOLATILE_P (operands [1]))
regnum = REGNO (operands[1]);
else
regnum = -1;
-
+
/* If operands[1] is a register, on POWER it may have
double-precision data in it, so truncate it to single
precision. */
case DFmode:
case SFmode:
- if (CONSTANT_P (operands[1])
+ if (CONSTANT_P (operands[1])
&& ! easy_fp_constant (operands[1], mode))
operands[1] = force_const_mem (mode, operands[1]);
break;
-
+
case V16QImode:
case V8HImode:
case V4SFmode:
&& !easy_vector_constant (operands[1], mode))
operands[1] = force_const_mem (mode, operands[1]);
break;
-
+
case SImode:
case DImode:
      /* Use the default pattern for the address of ELF small data.  */
if (TARGET_ELF
&& mode == Pmode
&& DEFAULT_ABI == ABI_V4
- && (GET_CODE (operands[1]) == SYMBOL_REF
+ && (GET_CODE (operands[1]) == SYMBOL_REF
|| GET_CODE (operands[1]) == CONST)
&& small_data_operand (operands[1], mode))
{
operands[1] = force_const_mem (mode, operands[1]);
- if (TARGET_TOC
+ if (TARGET_TOC
&& constant_pool_expr_p (XEXP (operands[1], 0))
&& ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (
get_pool_constant (XEXP (operands[1], 0)),
so we never return a PARALLEL. */
void
-init_cumulative_args (CUMULATIVE_ARGS *cum, tree fntype,
+init_cumulative_args (CUMULATIVE_ARGS *cum, tree fntype,
rtx libname ATTRIBUTE_UNUSED, int incoming,
int libcall, int n_named_args)
{
fprintf (stderr, " proto = %d, nargs = %d\n",
cum->prototype, cum->nargs_prototype);
}
-
- if (fntype
- && !TARGET_ALTIVEC
+
+ if (fntype
+ && !TARGET_ALTIVEC
&& TARGET_ALTIVEC_ABI
&& ALTIVEC_VECTOR_MODE (TYPE_MODE (TREE_TYPE (fntype))))
{
}
/* If defined, a C expression that gives the alignment boundary, in bits,
- of an argument with the specified mode and type. If it is not defined,
+ of an argument with the specified mode and type. If it is not defined,
PARM_BOUNDARY is used for all arguments.
-
+
V.4 wants long longs to be double word aligned. */
int
itself. */
void
-function_arg_advance (CUMULATIVE_ARGS *cum, enum machine_mode mode,
+function_arg_advance (CUMULATIVE_ARGS *cum, enum machine_mode mode,
tree type, int named)
{
cum->nargs_prototype--;
" to enable them.");
/* PowerPC64 Linux and AIX allocate GPRs for a vector argument
- even if it is going to be passed in a vector register.
+ even if it is going to be passed in a vector register.
Darwin does the same for variable-argument functions. */
if ((DEFAULT_ABI == ABI_AIX && TARGET_64BIT)
|| (cum->stdarg && DEFAULT_ABI != ABI_V4))
if (stack)
{
int align;
-
+
/* Vector parameters must be 16-byte aligned. This places
them at 2 mod 4 in terms of words in 32-bit mode, since
the parameter save area starts at offset 24 from the
else
align = cum->words & 1;
cum->words += align + rs6000_arg_size (mode, type);
-
+
if (TARGET_DEBUG_ARG)
{
- fprintf (stderr, "function_adv: words = %2d, align=%d, ",
+ fprintf (stderr, "function_adv: words = %2d, align=%d, ",
cum->words, align);
fprintf (stderr, "nargs = %4d, proto = %d, mode = %4s\n",
- cum->nargs_prototype, cum->prototype,
+ cum->nargs_prototype, cum->prototype,
GET_MODE_NAME (mode));
}
}
/* Determine where to put a SIMD argument on the SPE. */
static rtx
-rs6000_spe_function_arg (CUMULATIVE_ARGS *cum, enum machine_mode mode,
+rs6000_spe_function_arg (CUMULATIVE_ARGS *cum, enum machine_mode mode,
tree type)
{
if (cum->stdarg)
itself. */
struct rtx_def *
-function_arg (CUMULATIVE_ARGS *cum, enum machine_mode mode,
+function_arg (CUMULATIVE_ARGS *cum, enum machine_mode mode,
tree type, int named)
{
enum rs6000_abi abi = DEFAULT_ABI;
number of registers used by the first element of the PARALLEL. */
int
-function_arg_partial_nregs (CUMULATIVE_ARGS *cum, enum machine_mode mode,
+function_arg_partial_nregs (CUMULATIVE_ARGS *cum, enum machine_mode mode,
tree type, int named)
{
int ret = 0;
reference. */
static bool
-rs6000_pass_by_reference (CUMULATIVE_ARGS *cum ATTRIBUTE_UNUSED,
- enum machine_mode mode ATTRIBUTE_UNUSED,
+rs6000_pass_by_reference (CUMULATIVE_ARGS *cum ATTRIBUTE_UNUSED,
+ enum machine_mode mode ATTRIBUTE_UNUSED,
tree type, bool named ATTRIBUTE_UNUSED)
{
if ((DEFAULT_ABI == ABI_V4
if (! strict_memory_address_p (reg_mode, XEXP (tem, 0)))
tem = NULL_RTX;
else
- tem = simplify_gen_subreg (reg_mode, x, BLKmode,
+ tem = simplify_gen_subreg (reg_mode, x, BLKmode,
i * GET_MODE_SIZE(reg_mode));
}
else
\f
/* Perform any actions needed for a function that is receiving a
- variable number of arguments.
+ variable number of arguments.
CUM is as above.
stack and set PRETEND_SIZE to the length of the registers pushed. */
static void
-setup_incoming_varargs (CUMULATIVE_ARGS *cum, enum machine_mode mode,
+setup_incoming_varargs (CUMULATIVE_ARGS *cum, enum machine_mode mode,
tree type, int *pretend_size ATTRIBUTE_UNUSED, int no_rtl)
{
CUMULATIVE_ARGS next_cum;
set_mem_alias_set (mem, set);
set_mem_align (mem, BITS_PER_WORD);
- rs6000_move_block_from_reg (GP_ARG_MIN_REG + first_reg_offset, mem,
+ rs6000_move_block_from_reg (GP_ARG_MIN_REG + first_reg_offset, mem,
GP_ARG_NUM_REG - first_reg_offset);
}
record = (*lang_hooks.types.make_type) (RECORD_TYPE);
type_decl = build_decl (TYPE_DECL, get_identifier ("__va_list_tag"), record);
- f_gpr = build_decl (FIELD_DECL, get_identifier ("gpr"),
+ f_gpr = build_decl (FIELD_DECL, get_identifier ("gpr"),
unsigned_char_type_node);
- f_fpr = build_decl (FIELD_DECL, get_identifier ("fpr"),
+ f_fpr = build_decl (FIELD_DECL, get_identifier ("fpr"),
unsigned_char_type_node);
/* Give the two bytes of padding a name, so that -Wpadded won't warn on
every user file. */
{ MASK_ALTIVEC, CODE_FOR_altivec_vmsumshm, "__builtin_altivec_vmsumshm", ALTIVEC_BUILTIN_VMSUMSHM },
{ MASK_ALTIVEC, CODE_FOR_altivec_vmsumuhs, "__builtin_altivec_vmsumuhs", ALTIVEC_BUILTIN_VMSUMUHS },
{ MASK_ALTIVEC, CODE_FOR_altivec_vmsumshs, "__builtin_altivec_vmsumshs", ALTIVEC_BUILTIN_VMSUMSHS },
- { MASK_ALTIVEC, CODE_FOR_altivec_vnmsubfp, "__builtin_altivec_vnmsubfp", ALTIVEC_BUILTIN_VNMSUBFP },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vnmsubfp, "__builtin_altivec_vnmsubfp", ALTIVEC_BUILTIN_VNMSUBFP },
{ MASK_ALTIVEC, CODE_FOR_altivec_vperm_4sf, "__builtin_altivec_vperm_4sf", ALTIVEC_BUILTIN_VPERM_4SF },
{ MASK_ALTIVEC, CODE_FOR_altivec_vperm_4si, "__builtin_altivec_vperm_4si", ALTIVEC_BUILTIN_VPERM_4SI },
{ MASK_ALTIVEC, CODE_FOR_altivec_vperm_8hi, "__builtin_altivec_vperm_8hi", ALTIVEC_BUILTIN_VPERM_8HI },
}
static rtx
-altivec_expand_predicate_builtin (enum insn_code icode, const char *opcode,
+altivec_expand_predicate_builtin (enum insn_code icode, const char *opcode,
tree arglist, rtx target)
{
rtx pat, scratch;
|| ! (*insn_data[icode].operand[0].predicate) (target, tmode))
target = gen_reg_rtx (tmode);
- op1 = copy_to_mode_reg (mode1, op1);
+ op1 = copy_to_mode_reg (mode1, op1);
if (op0 == const0_rtx)
{
if (! (*insn_data[icode].operand[1].predicate) (op0, tmode))
op0 = copy_to_mode_reg (tmode, op0);
- op2 = copy_to_mode_reg (mode2, op2);
+ op2 = copy_to_mode_reg (mode2, op2);
if (op1 == const0_rtx)
{
/* Expand the stvx builtins. */
static rtx
-altivec_expand_st_builtin (tree exp, rtx target ATTRIBUTE_UNUSED,
+altivec_expand_st_builtin (tree exp, rtx target ATTRIBUTE_UNUSED,
bool *expandedp)
{
tree fndecl = TREE_OPERAND (TREE_OPERAND (exp, 0), 0);
/* Expand the dst builtins. */
static rtx
-altivec_expand_dst_builtin (tree exp, rtx target ATTRIBUTE_UNUSED,
+altivec_expand_dst_builtin (tree exp, rtx target ATTRIBUTE_UNUSED,
bool *expandedp)
{
tree fndecl = TREE_OPERAND (TREE_OPERAND (exp, 0), 0);
|| GET_MODE (target) != tmode
|| ! (*insn_data[icode].operand[0].predicate) (target, tmode))
target = gen_reg_rtx (tmode);
-
+
pat = GEN_FCN (icode) (target);
if (! pat)
return 0;
emit_insn (gen_altivec_dss (op0));
return NULL_RTX;
-
+
case ALTIVEC_BUILTIN_COMPILETIME_ERROR:
arg0 = TREE_VALUE (arglist);
while (TREE_CODE (arg0) == NOP_EXPR || TREE_CODE (arg0) == ADDR_EXPR)
|| GET_MODE (target) != tmode
|| ! (*insn_data[icode].operand[0].predicate) (target, tmode))
target = gen_reg_rtx (tmode);
-
+
pat = GEN_FCN (icode) (target);
if (! pat)
return 0;
static rtx
rs6000_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED,
- enum machine_mode mode ATTRIBUTE_UNUSED,
+ enum machine_mode mode ATTRIBUTE_UNUSED,
int ignore ATTRIBUTE_UNUSED)
{
tree fndecl = TREE_OPERAND (TREE_OPERAND (exp, 0), 0);
size_t i;
rtx ret;
bool success;
-
+
if (TARGET_ALTIVEC)
{
ret = altivec_expand_builtin (exp, target, &success);
END is the builtin enum at which to end. */
static void
enable_mask_for_builtins (struct builtin_description *desc, int size,
- enum rs6000_builtins start,
+ enum rs6000_builtins start,
enum rs6000_builtins end)
{
int i;
opaque_V2SI_type_node));
/* Initialize irregular SPE builtins. */
-
+
def_builtin (target_flags, "__builtin_spe_mtspefscr", void_ftype_int, SPE_BUILTIN_MTSPEFSCR);
def_builtin (target_flags, "__builtin_spe_mfspefscr", int_ftype_void, SPE_BUILTIN_MFSPEFSCR);
def_builtin (target_flags, "__builtin_spe_evstddx", void_ftype_v2si_pv2si_int, SPE_BUILTIN_EVSTDDX);
default:
abort ();
}
-
+
def_builtin (dp->mask, dp->name, type, dp->code);
}
default:
abort ();
}
-
+
def_builtin (d->mask, d->name, type, d->code);
}
}
tree v2sf_ftype_v2sf
= build_function_type_list (opaque_V2SF_type_node,
opaque_V2SF_type_node, NULL_TREE);
-
+
tree v2sf_ftype_v2si
= build_function_type_list (opaque_V2SF_type_node,
opaque_V2SI_type_node, NULL_TREE);
= build_function_type_list (V4SF_type_node,
V4SF_type_node, V4SF_type_node,
V4SF_type_node, NULL_TREE);
- tree v4si_ftype_v4si_v4si_v4si
+ tree v4si_ftype_v4si_v4si_v4si
= build_function_type_list (V4SI_type_node,
V4SI_type_node, V4SI_type_node,
V4SI_type_node, NULL_TREE);
d = (struct builtin_description *) bdesc_3arg;
for (i = 0; i < ARRAY_SIZE (bdesc_3arg); i++, d++)
{
-
+
enum machine_mode mode0, mode1, mode2, mode3;
tree type;
if (d->name == 0 || d->icode == CODE_FOR_nothing)
continue;
-
+
mode0 = insn_data[d->icode].operand[0].mode;
mode1 = insn_data[d->icode].operand[1].mode;
mode2 = insn_data[d->icode].operand[2].mode;
mode3 = insn_data[d->icode].operand[3].mode;
-
+
/* When all four are of the same mode. */
if (mode0 == mode1 && mode1 == mode2 && mode2 == mode3)
{
break;
case V8HImode:
type = v8hi_ftype_v8hi_v8hi_v8hi;
- break;
+ break;
case V16QImode:
type = v16qi_ftype_v16qi_v16qi_v16qi;
- break;
+ break;
default:
- abort();
+ abort();
}
}
else if (mode0 == mode1 && mode1 == mode2 && mode3 == V16QImode)
break;
case V8HImode:
type = v8hi_ftype_v8hi_v8hi_v16qi;
- break;
+ break;
case V16QImode:
type = v16qi_ftype_v16qi_v16qi_v16qi;
- break;
+ break;
default:
- abort();
+ abort();
}
}
- else if (mode0 == V4SImode && mode1 == V16QImode && mode2 == V16QImode
+ else if (mode0 == V4SImode && mode1 == V16QImode && mode2 == V16QImode
&& mode3 == V4SImode)
type = v4si_ftype_v16qi_v16qi_v4si;
- else if (mode0 == V4SImode && mode1 == V8HImode && mode2 == V8HImode
+ else if (mode0 == V4SImode && mode1 == V8HImode && mode2 == V8HImode
&& mode3 == V4SImode)
type = v4si_ftype_v8hi_v8hi_v4si;
- else if (mode0 == V4SFmode && mode1 == V4SFmode && mode2 == V4SFmode
+ else if (mode0 == V4SFmode && mode1 == V4SFmode && mode2 == V4SFmode
&& mode3 == V4SImode)
type = v4sf_ftype_v4sf_v4sf_v4si;
if (d->name == 0 || d->icode == CODE_FOR_nothing)
continue;
-
+
mode0 = insn_data[d->icode].operand[0].mode;
mode1 = insn_data[d->icode].operand[1].mode;
mode2 = insn_data[d->icode].operand[2].mode;
/* vint, vshort, vint. */
else if (mode0 == V4SImode && mode1 == V8HImode && mode2 == V4SImode)
type = v4si_ftype_v8hi_v4si;
-
+
/* vint, vint, 5 bit literal. */
else if (mode0 == V4SImode && mode1 == V4SImode && mode2 == QImode)
type = v4si_ftype_v4si_int;
-
+
/* vshort, vshort, 5 bit literal. */
else if (mode0 == V8HImode && mode1 == V8HImode && mode2 == QImode)
type = v8hi_ftype_v8hi_int;
-
+
/* vchar, vchar, 5 bit literal. */
else if (mode0 == V16QImode && mode1 == V16QImode && mode2 == QImode)
type = v16qi_ftype_v16qi_int;
/* vfloat, vint, 5 bit literal. */
else if (mode0 == V4SFmode && mode1 == V4SImode && mode2 == QImode)
type = v4sf_ftype_v4si_int;
-
+
/* vint, vfloat, 5 bit literal. */
else if (mode0 == V4SImode && mode1 == V4SFmode && mode2 == QImode)
type = v4si_ftype_v4sf_int;
if (d->name == 0 || d->icode == CODE_FOR_nothing)
continue;
-
+
mode0 = insn_data[d->icode].operand[0].mode;
mode1 = insn_data[d->icode].operand[1].mode;
rtx (*mov) (rtx, rtx);
enum machine_mode mode = BLKmode;
rtx dest;
-
+
if (bytes >= 8 && TARGET_POWERPC64
/* 64-bit loads and stores require word-aligned
displacements. */
mode = QImode;
mov = gen_movqi;
}
-
+
dest = adjust_address (orig_dest, mode, offset);
-
+
emit_insn ((*mov) (dest, const0_rtx));
}
return 1;
/* store_one_arg depends on expand_block_move to handle at least the size of
- reg_parm_stack_space. */
+ reg_parm_stack_space. */
if (bytes > (TARGET_POWERPC64 ? 64 : 32))
return 0;
} gen_func;
enum machine_mode mode = BLKmode;
rtx src, dest;
-
+
if (TARGET_STRING
&& bytes > 24 /* move up to 32 bytes at a time */
&& ! fixed_regs[5]
mode = QImode;
gen_func.mov = gen_movqi;
}
-
+
src = adjust_address (orig_src, mode, offset);
dest = adjust_address (orig_dest, mode, offset);
-
- if (mode != BLKmode)
+
+ if (mode != BLKmode)
{
rtx tmp_reg = gen_reg_rtx (mode);
-
+
emit_insn ((*gen_func.mov) (tmp_reg, src));
stores[num_reg++] = (*gen_func.mov) (dest, tmp_reg);
}
src = replace_equiv_address (src, src_reg);
}
set_mem_size (src, GEN_INT (move_bytes));
-
+
if (!REG_P (XEXP (dest, 0)))
{
rtx dest_reg = copy_addr_to_reg (XEXP (dest, 0));
dest = replace_equiv_address (dest, dest_reg);
}
set_mem_size (dest, GEN_INT (move_bytes));
-
+
emit_insn ((*gen_func.movmemsi) (dest, src,
GEN_INT (move_bytes & 31),
align_rtx));
|| XVECLEN (SET_SRC (XVECEXP (op, 0, 0)), 0) != 2)
return 0;
src_reg = XVECEXP (SET_SRC (XVECEXP (op, 0, 0)), 0, 0);
-
+
if (GET_CODE (src_reg) != REG
|| GET_MODE (src_reg) != SImode
|| ! INT_REGNO_P (REGNO (src_reg)))
rtx exp = XVECEXP (op, 0, i);
rtx unspec;
int maskval;
-
+
if (GET_CODE (exp) != SET
|| GET_CODE (SET_DEST (exp)) != REG
|| GET_MODE (SET_DEST (exp)) != CCmode
return 0;
unspec = SET_SRC (exp);
maskval = 1 << (MAX_CR_REGNO - REGNO (SET_DEST (exp)));
-
+
if (GET_CODE (unspec) != UNSPEC
|| XINT (unspec, 1) != UNSPEC_MOVESI_TO_CR
|| XVECLEN (unspec, 0) != 2
|| code == UNGT || code == UNLT
|| code == UNGE || code == UNLE))
abort ();
-
- /* These should never be generated except for
+
+ /* These should never be generated except for
flag_finite_math_only. */
if (mode == CCFPmode
&& ! flag_finite_math_only
abort ();
/* These are invalid; the information is not there. */
- if (mode == CCEQmode
+ if (mode == CCEQmode
&& code != EQ && code != NE)
abort ();
}
registers_ok_for_quad_peep (rtx reg1, rtx reg2)
{
/* We might have been passed a SUBREG. */
- if (GET_CODE (reg1) != REG || GET_CODE (reg2) != REG)
+ if (GET_CODE (reg1) != REG || GET_CODE (reg2) != REG)
return 0;
-
+
  /* We might have been passed non-floating-point registers.  */
if (!FP_REGNO_P (REGNO (reg1))
|| !FP_REGNO_P (REGNO (reg2)))
/* The mems cannot be volatile. */
if (MEM_VOLATILE_P (mem1) || MEM_VOLATILE_P (mem2))
return 0;
-
+
addr1 = XEXP (mem1, 0);
addr2 = XEXP (mem2, 0);
}
/* Make sure the second address is a (mem (plus (reg) (const_int)))
- or if it is (mem (reg)) then make sure that offset1 is -8 and the same
+ or if it is (mem (reg)) then make sure that offset1 is -8 and addr2 is the same
register as addr1. */
if (offset1 == -8 && GET_CODE (addr2) == REG && reg1 == REGNO (addr2))
return 1;
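
(Aside, not part of the patch: the predicate above accepts a pair of DFmode MEMs for the quad peephole only when both use the same base register and the second address sits exactly 8 bytes above the first, with a bare (mem (reg)) treated as offset 0, which is how the offset1 == -8 special case arises.  A simplified standalone model; the real code also rejects volatile MEMs and non-FP registers, as the surrounding hunks show:)

#include <stdio.h>

/* Addresses modeled as (base register, byte offset); a bare (reg)
   address is represented with offset 0.  */
struct addr { int base_reg; long offset; };

/* Two 8-byte accesses merge into one quad access when they share a
   base register and are exactly 8 bytes apart.  */
static int
quad_mergeable (struct addr a1, struct addr a2)
{
  return a1.base_reg == a2.base_reg && a2.offset == a1.offset + 8;
}

int
main (void)
{
  struct addr first = { 9, -8 };   /* (plus (reg 9) (const_int -8)) */
  struct addr second = { 9, 0 };   /* (reg 9): the -8 special case  */
  printf ("%d\n", quad_mergeable (first, second));   /* prints 1 */
  return 0;
}
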
NO_REGS is returned. */
enum reg_class
-secondary_reload_class (enum reg_class class,
+secondary_reload_class (enum reg_class class,
enum machine_mode mode ATTRIBUTE_UNUSED,
rtx in)
{
other than BASE_REGS for TARGET_ELF. So indicate that a
register from BASE_REGS is needed as an intermediate
register.
-
+
On Darwin, pic addresses require a load from memory, which
needs a base register. */
if (class != BASE_REGS
}
\f
/* Given a comparison operation, return the bit number in CCR to test. We
- know this is a valid comparison.
+ know this is a valid comparison.
SCC_P is 1 if this is for an scc. That means that %D will have been
used instead of %C, so the bits will be in different places.
if (scc_p && code != EQ && code != GT && code != LT && code != UNORDERED
&& code != GTU && code != LTU)
abort ();
-
+
switch (code)
{
case NE:
reg_names[SMALL_DATA_REG]);
}
return;
-
+
case 'm':
/* MB value for a mask operand. */
if (! mask_operand (x, SImode))
else
s = t[1];
}
-
+
fputs (s, file);
}
return;
if (! INT_P (x))
output_operand_lossage ("invalid %%u value");
else
- fprintf (file, HOST_WIDE_INT_PRINT_HEX,
+ fprintf (file, HOST_WIDE_INT_PRINT_HEX,
(INT_LOWPART (x) >> 16) & 0xffff);
return;
/* If constant, low-order 16 bits of constant, signed. Otherwise, write
normally. */
if (INT_P (x))
- fprintf (file, HOST_WIDE_INT_PRINT_DEC,
+ fprintf (file, HOST_WIDE_INT_PRINT_DEC,
((INT_LOWPART (x) & 0xffff) ^ 0x8000) - 0x8000);
else
print_operand (file, x, 0);
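
(Aside, not part of the patch: the ((INT_LOWPART (x) & 0xffff) ^ 0x8000) - 0x8000 expression above, which also appears in rs6000_legitimize_address and rs6000_emit_set_const earlier in this diff, is the portable idiom for reading the low 16 bits of a constant as a signed value.  A worked standalone example:)

#include <stdio.h>

/* The mask keeps bits 0..15, XOR 0x8000 flips the sign bit, and
   subtracting 0x8000 shifts the range 0..0xffff down to
   -0x8000..0x7fff, all without implementation-defined shifts.  */
static long
low16_signed (long x)
{
  return ((x & 0xffff) ^ 0x8000) - 0x8000;
}

int
main (void)
{
  printf ("%ld\n", low16_signed (0x12345678L));	/* 22136 (0x5678) */
  printf ("%ld\n", low16_signed (0x1234f000L));	/* -4096 (0xf000) */
  return 0;
}
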
reg_names[SMALL_DATA_REG]);
}
return;
-
+
case 'z':
/* X is a SYMBOL_REF. Write out the name preceded by a
period and without any trailing data in brackets. Used for function
abort ();
break;
}
-
+
case 0:
if (GET_CODE (x) == REG)
fprintf (file, "%s", reg_names[REGNO (x)]);
rtx contains_minus = XEXP (x, 1);
rtx minus, symref;
const char *name;
-
+
/* Find the (minus (sym) (toc)) buried in X, and temporarily
turn it into (sym) for output_addr_const. */
while (GET_CODE (XEXP (contains_minus, 0)) != MINUS)
{
extern int in_toc_section (void);
static int recurse = 0;
-
+
/* For -mrelocatable, we mark all addresses that need to be fixed up
in the .fixup section. */
if (TARGET_RELOCATABLE
{
	/* Reversal of FP compares takes care of itself -- an ordered compare
becomes an unordered compare and vice versa. */
- if (mode == CCFPmode
+ if (mode == CCFPmode
&& (!flag_finite_math_only
|| code == UNLT || code == UNLE || code == UNGT || code == UNGE
|| code == UNEQ || code == LTGT))
else
emit_insn (gen_rtx_SET (VOIDmode, compare_result,
gen_rtx_COMPARE (comp_mode,
- rs6000_compare_op0,
+ rs6000_compare_op0,
rs6000_compare_op1)));
-
+
/* Some kinds of FP comparisons need an OR operation;
under flag_finite_math_only we don't bother. */
if (rs6000_compare_fp_p
enum rtx_code or1, or2;
rtx or1_rtx, or2_rtx, compare2_rtx;
rtx or_result = gen_reg_rtx (CCEQmode);
-
+
switch (code)
{
case LE: or1 = LT; or2 = EQ; break;
}
validate_condition_mode (code, GET_MODE (compare_result));
-
+
return gen_rtx_fmt_ee (code, VOIDmode, compare_result, const0_rtx);
}
rtx not_result = gen_reg_rtx (CCEQmode);
rtx not_op, rev_cond_rtx;
enum machine_mode cc_mode;
-
+
cc_mode = GET_MODE (XEXP (condition_rtx, 0));
rev_cond_rtx = gen_rtx_fmt_ee (rs6000_reverse_condition (cc_mode, cond_code),
/* Return the string to output a conditional branch to LABEL, which is
the operand number of the label, or -1 if the branch is really a
- conditional return.
+ conditional return.
OP is the conditional expression. XEXP (OP, 0) is assumed to be a
condition code register and its mode specifies what kind of
ccode = "ne"; break;
case EQ: case UNEQ:
ccode = "eq"; break;
- case GE: case GEU:
+ case GE: case GEU:
ccode = "ge"; break;
- case GT: case GTU: case UNGT:
+ case GT: case GTU: case UNGT:
ccode = "gt"; break;
- case LE: case LEU:
+ case LE: case LEU:
ccode = "le"; break;
- case LT: case LTU: case UNLT:
+ case LT: case LTU: case UNLT:
ccode = "lt"; break;
case UNORDERED: ccode = "un"; break;
case ORDERED: ccode = "nu"; break;
default:
abort ();
}
-
- /* Maybe we have a guess as to how likely the branch is.
+
+ /* Maybe we have a guess as to how likely the branch is.
The old mnemonics don't have a way to specify this information. */
pred = "";
note = find_reg_note (insn, REG_BR_PROB, NULL_RTX);
prediction. For older cpus we may as well always hint, but
assume not taken for branches that are very close to 50% as a
mispredicted taken branch is more expensive than a
- mispredicted not-taken branch. */
+ mispredicted not-taken branch. */
if (rs6000_always_hint
|| abs (prob) > REG_BR_PROB_BASE / 100 * 48)
{
it'll probably be faster to use a branch here too. */
if (code == UNEQ && HONOR_NANS (compare_mode))
return 0;
-
+
if (GET_CODE (op1) == CONST_DOUBLE)
REAL_VALUE_FROM_CONST_DOUBLE (c1, op1);
-
+
/* We're going to try to implement comparisons by performing
a subtract, then comparing against zero. Unfortunately,
Inf - Inf is NaN which is not zero, and so if we don't
&& (GET_CODE (op1) != CONST_DOUBLE || real_isinf (&c1))
/* Constructs of the form (a OP b ? a : b) are safe. */
&& ((! rtx_equal_p (op0, false_cond) && ! rtx_equal_p (op1, false_cond))
- || (! rtx_equal_p (op0, true_cond)
+ || (! rtx_equal_p (op0, true_cond)
&& ! rtx_equal_p (op1, true_cond))))
return 0;
/* At this point we know we can use fsel. */
case EQ:
temp = gen_reg_rtx (compare_mode);
- emit_insn (gen_rtx_SET (VOIDmode, temp,
+ emit_insn (gen_rtx_SET (VOIDmode, temp,
gen_rtx_NEG (compare_mode,
gen_rtx_ABS (compare_mode, op0))));
op0 = temp;
/* a GT 0 <-> (a GE 0 && -a UNLT 0) */
temp = gen_reg_rtx (result_mode);
emit_insn (gen_rtx_SET (VOIDmode, temp,
- gen_rtx_IF_THEN_ELSE (result_mode,
+ gen_rtx_IF_THEN_ELSE (result_mode,
gen_rtx_GE (VOIDmode,
op0, op1),
true_cond, false_cond)));
c = GEU;
if (code == SMAX || code == UMAX)
- target = emit_conditional_move (dest, c, op0, op1, mode,
+ target = emit_conditional_move (dest, c, op0, op1, mode,
op0, op1, mode, 0);
else
- target = emit_conditional_move (dest, c, op0, op1, mode,
+ target = emit_conditional_move (dest, c, op0, op1, mode,
op1, op0, mode, 0);
if (target == NULL_RTX)
abort ();
else
reg_mode = word_mode;
reg_mode_size = GET_MODE_SIZE (reg_mode);
-
+
if (reg_mode_size * nregs != GET_MODE_SIZE (mode))
abort ();
-
+
if (REG_P (src) && REG_P (dst) && (REGNO (src) < REGNO (dst)))
{
/* Move register range backwards, if we might have destructive
overlap. */
int i;
for (i = nregs - 1; i >= 0; i--)
- emit_insn (gen_rtx_SET (VOIDmode,
+ emit_insn (gen_rtx_SET (VOIDmode,
simplify_gen_subreg (reg_mode, dst, mode,
i * reg_mode_size),
simplify_gen_subreg (reg_mode, src, mode,
{
rtx delta_rtx;
breg = XEXP (XEXP (src, 0), 0);
- delta_rtx = GET_CODE (XEXP (src, 0)) == PRE_INC
- ? GEN_INT (GET_MODE_SIZE (GET_MODE (src)))
- : GEN_INT (-GET_MODE_SIZE (GET_MODE (src)));
+ delta_rtx = GET_CODE (XEXP (src, 0)) == PRE_INC
+ ? GEN_INT (GET_MODE_SIZE (GET_MODE (src)))
+ : GEN_INT (-GET_MODE_SIZE (GET_MODE (src)));
emit_insn (TARGET_32BIT
? gen_addsi3 (breg, breg, delta_rtx)
: gen_adddi3 (breg, breg, delta_rtx));
}
	  /* We now have an address involving a base register only.
- If we use one of the registers to address memory,
+ If we use one of the registers to address memory,
	     we have to change that register last.  */
breg = (GET_CODE (XEXP (src, 0)) == PLUS
if (!REG_P (breg))
abort();
- if (REGNO (breg) >= REGNO (dst)
+ if (REGNO (breg) >= REGNO (dst)
&& REGNO (breg) < REGNO (dst) + nregs)
j = REGNO (breg) - REGNO (dst);
}
{
rtx delta_rtx;
breg = XEXP (XEXP (dst, 0), 0);
- delta_rtx = GET_CODE (XEXP (dst, 0)) == PRE_INC
- ? GEN_INT (GET_MODE_SIZE (GET_MODE (dst)))
- : GEN_INT (-GET_MODE_SIZE (GET_MODE (dst)));
+ delta_rtx = GET_CODE (XEXP (dst, 0)) == PRE_INC
+ ? GEN_INT (GET_MODE_SIZE (GET_MODE (dst)))
+ : GEN_INT (-GET_MODE_SIZE (GET_MODE (dst)));
/* We have to update the breg before doing the store.
Use store with update, if available. */
}
for (i = 0; i < nregs; i++)
- {
+ {
/* Calculate index to next subword. */
++j;
- if (j == nregs)
+ if (j == nregs)
j = 0;
- /* If compiler already emited move of first word by
+	  /* If the compiler already emitted a move of the first word by
store with update, no need to do anything. */
if (j == 0 && used_update)
continue;
-
+
emit_insn (gen_rtx_SET (VOIDmode,
simplify_gen_subreg (reg_mode, dst, mode,
j * reg_mode_size),
/* Find lowest numbered live register. */
for (first_reg = 13; first_reg <= 31; first_reg++)
- if (regs_ever_live[first_reg]
+ if (regs_ever_live[first_reg]
&& (! call_used_regs[first_reg]
|| (first_reg == RS6000_PIC_OFFSET_TABLE_REGNUM
&& ((DEFAULT_ABI == ABI_V4 && flag_pic != 0)
&& info_ptr->first_gp_reg_save == FIRST_SAVED_GP_REGNO
&& info_ptr->first_altivec_reg_save == FIRST_SAVED_ALTIVEC_REGNO
&& info_ptr->cr_save_p;
-
+
/* This will not work in conjunction with sibcalls. Make sure there
are none. (This check is expensive, but seldom executed.) */
if ( info_ptr->world_save_p )
- {
+ {
rtx insn;
for ( insn = get_last_insn_anywhere (); insn; insn = PREV_INSN (insn))
if ( GET_CODE (insn) == CALL_INSN
&& SIBLING_CALL_P (insn))
- {
+ {
info_ptr->world_save_p = 0;
break;
}
}
-
+
if (info_ptr->world_save_p)
{
/* Even if we're not touching VRsave, make sure there's room on the
/* Because the Darwin register save/restore routines only handle
     F14 .. F31 and V20 .. V31 as per the ABI, perform a consistency
     check and abort if there's something wrong.  */
- if (info_ptr->first_fp_reg_save < FIRST_SAVED_FP_REGNO
+ if (info_ptr->first_fp_reg_save < FIRST_SAVED_FP_REGNO
|| info_ptr->first_altivec_reg_save < FIRST_SAVED_ALTIVEC_REGNO)
abort ();
}
- return;
+ return;
}
| Parameter save area (P) | 8
+---------------------------------------+
| Alloca space (A) | 8+P
- +---------------------------------------+
+ +---------------------------------------+
| Varargs save area (V) | 8+P+A
- +---------------------------------------+
+ +---------------------------------------+
| Local variable space (L) | 8+P+A+V
- +---------------------------------------+
+ +---------------------------------------+
| Float/int conversion temporary (X) | 8+P+A+V+L
+---------------------------------------+
| Save area for AltiVec registers (W) | 8+P+A+V+L+X
| SPE alignment padding |
+---------------------------------------+
| saved CR (C) | 8+P+A+V+L+X+W+Y+Z
- +---------------------------------------+
+ +---------------------------------------+
| Save area for GP registers (G) | 8+P+A+V+L+X+W+Y+Z+C
- +---------------------------------------+
+ +---------------------------------------+
| Save area for FP registers (F) | 8+P+A+V+L+X+W+Y+Z+C+G
+---------------------------------------+
old SP->| back chain to caller's caller |
/* Calculate which registers need to be saved & save area size. */
info_ptr->first_gp_reg_save = first_reg_to_save ();
- /* Assume that we will have to save RS6000_PIC_OFFSET_TABLE_REGNUM,
+ /* Assume that we will have to save RS6000_PIC_OFFSET_TABLE_REGNUM,
even if it currently looks like we won't. */
if (((TARGET_TOC && TARGET_MINIMAL_TOC)
|| (flag_pic == 1 && DEFAULT_ABI == ABI_V4)
}
/* Determine if we need to save the condition code registers. */
- if (regs_ever_live[CR2_REGNO]
+ if (regs_ever_live[CR2_REGNO]
|| regs_ever_live[CR3_REGNO]
|| regs_ever_live[CR4_REGNO])
{
When we're called from the epilogue, we need to avoid counting
this as a store. */
-
+
push_topmost_sequence ();
top = get_insns ();
pop_topmost_sequence ();
{
if (FIND_REG_INC_NOTE (insn, reg))
return 1;
- else if (GET_CODE (insn) == CALL_INSN
+ else if (GET_CODE (insn) == CALL_INSN
&& !SIBLING_CALL_P (insn))
return 1;
else if (set_of (reg, insn) != NULL_RTX
static GTY(()) int set = -1;
-int
+int
get_TOC_alias_set (void)
{
if (set == -1)
set = new_alias_set ();
return set;
-}
+}
/* This returns nonzero if the current function uses the TOC. This is
determined by the presence of (use (unspec ... UNSPEC_TOC)), which
is generated by the ABI_V4 load_toc_* patterns. */
#if TARGET_ELF
static int
-uses_TOC (void)
+uses_TOC (void)
{
rtx insn;
rtx pat = PATTERN (insn);
int i;
- if (GET_CODE (pat) == PARALLEL)
+ if (GET_CODE (pat) == PARALLEL)
for (i = 0; i < XVECLEN (pat, 0); i++)
{
rtx sub = XVECEXP (pat, 0, i);
#endif
rtx
-create_TOC_reference (rtx symbol)
+create_TOC_reference (rtx symbol)
{
- return gen_rtx_PLUS (Pmode,
+ return gen_rtx_PLUS (Pmode,
gen_rtx_REG (Pmode, TOC_REGISTER),
- gen_rtx_CONST (Pmode,
- gen_rtx_MINUS (Pmode, symbol,
+ gen_rtx_CONST (Pmode,
+ gen_rtx_MINUS (Pmode, symbol,
gen_rtx_SYMBOL_REF (Pmode, toc_label_name))));
}
if (current_function_limit_stack)
{
if (REG_P (stack_limit_rtx)
- && REGNO (stack_limit_rtx) > 1
+ && REGNO (stack_limit_rtx) > 1
&& REGNO (stack_limit_rtx) <= 31)
{
emit_insn (TARGET_32BIT
&& DEFAULT_ABI == ABI_V4)
{
rtx toload = gen_rtx_CONST (VOIDmode,
- gen_rtx_PLUS (Pmode,
- stack_limit_rtx,
+ gen_rtx_PLUS (Pmode,
+ stack_limit_rtx,
GEN_INT (size)));
emit_insn (gen_elf_high (tmp_reg, toload));
insn = emit_insn (TARGET_32BIT
? gen_movsi_update (stack_reg, stack_reg,
todec, stack_reg)
- : gen_movdi_update (stack_reg, stack_reg,
+ : gen_movdi_update (stack_reg, stack_reg,
todec, stack_reg));
}
else
emit_move_insn (gen_rtx_MEM (Pmode, stack_reg),
gen_rtx_REG (Pmode, 12));
}
-
+
RTX_FRAME_RELATED_P (insn) = 1;
- REG_NOTES (insn) =
+ REG_NOTES (insn) =
gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
- gen_rtx_SET (VOIDmode, stack_reg,
+ gen_rtx_SET (VOIDmode, stack_reg,
gen_rtx_PLUS (Pmode, stack_reg,
GEN_INT (-size))),
REG_NOTES (insn));
its hand so much. */
static void
-rs6000_frame_related (rtx insn, rtx reg, HOST_WIDE_INT val,
+rs6000_frame_related (rtx insn, rtx reg, HOST_WIDE_INT val,
rtx reg2, rtx rreg)
{
rtx real, temp;
if (reg2 != NULL_RTX)
real = replace_rtx (real, reg2, rreg);
-
- real = replace_rtx (real, reg,
+
+ real = replace_rtx (real, reg,
gen_rtx_PLUS (Pmode, gen_rtx_REG (Pmode,
STACK_POINTER_REGNUM),
GEN_INT (val)));
-
+
/* We expect that 'real' is either a SET or a PARALLEL containing
SETs (and possibly other stuff). In a PARALLEL, all the SETs
are important so they all have to be marked RTX_FRAME_RELATED_P. */
if (GET_CODE (real) == SET)
{
rtx set = real;
-
+
temp = simplify_rtx (SET_SRC (set));
if (temp)
SET_SRC (set) = temp;
if (GET_CODE (XVECEXP (real, 0, i)) == SET)
{
rtx set = XVECEXP (real, 0, i);
-
+
temp = simplify_rtx (SET_SRC (set));
if (temp)
SET_SRC (set) = temp;
Save REGNO into [FRAME_REG + OFFSET] in mode MODE. */
static void
-emit_frame_save (rtx frame_reg, rtx frame_ptr, enum machine_mode mode,
+emit_frame_save (rtx frame_reg, rtx frame_ptr, enum machine_mode mode,
unsigned int regno, int offset, HOST_WIDE_INT total_size)
{
rtx reg, offset_rtx, insn, mem, addr, int_rtx;
int saving_FPRs_inline;
int using_store_multiple;
HOST_WIDE_INT sp_offset = 0;
-
+
if (TARGET_FIX_AND_CONTINUE)
{
/* gdb on darwin arranges to forward a function from the old
sp_offset = info->total_size;
else
frame_reg_rtx = frame_ptr_rtx;
- rs6000_emit_allocate_stack (info->total_size,
+ rs6000_emit_allocate_stack (info->total_size,
(frame_reg_rtx != sp_reg_rtx
&& (info->cr_save_p
|| info->lr_save_p
/* AltiVec addressing mode is [reg+reg]. */
mem = gen_rtx_MEM (V4SImode,
gen_rtx_PLUS (Pmode, frame_reg_rtx, areg));
-
+
set_mem_alias_set (mem, rs6000_sr_alias_set);
insn = emit_move_insn (mem, savereg);
if (! info->world_save_p && info->cr_save_p && frame_reg_rtx != frame_ptr_rtx)
{
rtx set;
-
+
cr_save_rtx = gen_rtx_REG (SImode, 12);
insn = emit_insn (gen_movesi_from_cr (cr_save_rtx));
RTX_FRAME_RELATED_P (insn) = 1;
{
int i;
for (i = 0; i < 64 - info->first_fp_reg_save; i++)
- if ((regs_ever_live[info->first_fp_reg_save+i]
+ if ((regs_ever_live[info->first_fp_reg_save+i]
&& ! call_used_regs[info->first_fp_reg_save+i]))
emit_frame_save (frame_reg_rtx, frame_ptr_rtx, DFmode,
info->first_fp_reg_save + i,
const char *alloc_rname;
rtvec p;
p = rtvec_alloc (2 + 64 - info->first_fp_reg_save);
-
- RTVEC_ELT (p, 0) = gen_rtx_CLOBBER (VOIDmode,
- gen_rtx_REG (Pmode,
+
+ RTVEC_ELT (p, 0) = gen_rtx_CLOBBER (VOIDmode,
+ gen_rtx_REG (Pmode,
LINK_REGISTER_REGNUM));
sprintf (rname, "%s%d%s", SAVE_FP_PREFIX,
info->first_fp_reg_save - 32, SAVE_FP_SUFFIX);
rtx addr, reg, mem;
reg = gen_rtx_REG (DFmode, info->first_fp_reg_save + i);
addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
- GEN_INT (info->fp_save_offset
+ GEN_INT (info->fp_save_offset
+ sp_offset + 8*i));
mem = gen_rtx_MEM (DFmode, addr);
set_mem_alias_set (mem, rs6000_sr_alias_set);