/* Return nonzero if there is a bypass for the output of
OUT_INSN and the fp store IN_INSN. */
int
-hppa_fpstore_bypass_p (rtx out_insn, rtx in_insn)
+pa_fpstore_bypass_p (rtx out_insn, rtx in_insn)
{
enum machine_mode store_mode;
enum machine_mode other_mode;
static int hppa_register_move_cost (enum machine_mode mode, reg_class_t,
reg_class_t);
static int hppa_address_cost (rtx, bool);
-static bool hppa_rtx_costs (rtx, int, int, int *, bool);
+static bool hppa_rtx_costs (rtx, int, int, int, int *, bool);
static inline rtx force_mode (enum machine_mode, rtx);
static void pa_reorg (void);
static void pa_combine_instructions (void);
static int pa_can_combine_p (rtx, rtx, rtx, int, rtx, rtx, rtx);
static bool forward_branch_p (rtx);
static void compute_zdepwi_operands (unsigned HOST_WIDE_INT, unsigned *);
+static void compute_zdepdi_operands (unsigned HOST_WIDE_INT, unsigned *);
static int compute_movmem_length (rtx);
static int compute_clrmem_length (rtx);
static bool pa_assemble_integer (rtx, unsigned int, int);
#ifdef ASM_OUTPUT_EXTERNAL_REAL
static void pa_hpux_file_end (void);
#endif
-#if HPUX_LONG_DOUBLE_LIBRARY
-static void pa_hpux_init_libfuncs (void);
-#endif
+static void pa_init_libfuncs (void);
static rtx pa_struct_value_rtx (tree, int);
static bool pa_pass_by_reference (cumulative_args_t, enum machine_mode,
const_tree, bool);
#undef TARGET_MACHINE_DEPENDENT_REORG
#define TARGET_MACHINE_DEPENDENT_REORG pa_reorg
-#if HPUX_LONG_DOUBLE_LIBRARY
#undef TARGET_INIT_LIBFUNCS
-#define TARGET_INIT_LIBFUNCS pa_hpux_init_libfuncs
-#endif
+#define TARGET_INIT_LIBFUNCS pa_init_libfuncs
#undef TARGET_PROMOTE_FUNCTION_MODE
#define TARGET_PROMOTE_FUNCTION_MODE pa_promote_function_mode
pa_init_builtins (void)
{
#ifdef DONT_HAVE_FPUTC_UNLOCKED
- built_in_decls[(int) BUILT_IN_FPUTC_UNLOCKED] =
- built_in_decls[(int) BUILT_IN_PUTC_UNLOCKED];
- implicit_built_in_decls[(int) BUILT_IN_FPUTC_UNLOCKED]
- = implicit_built_in_decls[(int) BUILT_IN_PUTC_UNLOCKED];
+ {
+ tree decl = builtin_decl_explicit (BUILT_IN_PUTC_UNLOCKED);
+ set_builtin_decl (BUILT_IN_FPUTC_UNLOCKED, decl,
+ builtin_decl_implicit_p (BUILT_IN_PUTC_UNLOCKED));
+ }
#endif
#if TARGET_HPUX_11
- if (built_in_decls [BUILT_IN_FINITE])
- set_user_assembler_name (built_in_decls [BUILT_IN_FINITE], "_Isfinite");
- if (built_in_decls [BUILT_IN_FINITEF])
- set_user_assembler_name (built_in_decls [BUILT_IN_FINITEF], "_Isfinitef");
+ {
+ tree decl;
+
+ if ((decl = builtin_decl_explicit (BUILT_IN_FINITE)) != NULL_TREE)
+ set_user_assembler_name (decl, "_Isfinite");
+ if ((decl = builtin_decl_explicit (BUILT_IN_FINITEF)) != NULL_TREE)
+ set_user_assembler_name (decl, "_Isfinitef");
+ }
#endif
if (HPUX_LONG_DOUBLE_LIBRARY)
expressions will have one of a few well-defined forms, so
we need only check those forms. */
int
-symbolic_expression_p (rtx x)
+pa_symbolic_expression_p (rtx x)
{
/* Strip off any HIGH. */
/* Accept any constant that can be moved in one instruction into a
general register. */
int
-cint_ok_for_move (HOST_WIDE_INT ival)
+pa_cint_ok_for_move (HOST_WIDE_INT ival)
{
/* OK if ldo, ldil, or zdepi can be used. */
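+ /* For example (illustrative values):
+      8191       -> ldo   (fits in 14 signed bits)
+      0x12345800 -> ldil  (low 11 bits clear)
+      0x1e000    -> zdepi (a contiguous run of ones)
+      0x12345678 -> none of the three, so not a one-insn move.  */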
return (VAL_14_BITS_P (ival)
- || ldil_cint_p (ival)
- || zdepi_cint_p (ival));
+ || pa_ldil_cint_p (ival)
+ || pa_zdepi_cint_p (ival));
}
\f
/* True iff ldil can be used to load this CONST_INT. The least
significant 11 bits of the value must be zero and the value must
not change sign when extended from 32 to 64 bits. */
int
-ldil_cint_p (HOST_WIDE_INT ival)
+pa_ldil_cint_p (HOST_WIDE_INT ival)
{
HOST_WIDE_INT x = ival & (((HOST_WIDE_INT) -1 << 31) | 0x7ff);
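+
+ /* The mask keeps the low 11 bits and every bit from 31 upward, i.e.
+ the bits an ldil immediate cannot supply; IVAL qualifies only when
+ they are all zero, or form the sign extension of a negative 32-bit
+ value whose low 11 bits are clear. */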
zdepi first sign-extends a 5-bit signed number to a given field
length, then places this field anywhere in a word of zeros. */
int
-zdepi_cint_p (unsigned HOST_WIDE_INT x)
+pa_zdepi_cint_p (unsigned HOST_WIDE_INT x)
{
unsigned HOST_WIDE_INT lsb_mask, t;
1....10....0
1..10..01..1 */
int
-and_mask_p (unsigned HOST_WIDE_INT mask)
+pa_and_mask_p (unsigned HOST_WIDE_INT mask)
{
mask = ~mask;
mask += mask & -mask;
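+ /* Complementing turns the embedded field of zeros into a field of
+ ones, and adding the lowest set bit collapses a contiguous field
+ into a single carry bit, so the final power-of-two test succeeds
+ exactly when the original zeros were contiguous. */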
/* True iff depi can be used to compute (reg | MASK). */
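+/* That is, MASK must be a single contiguous field of ones, which depi
+ can deposit in one instruction; adding the lowest set bit collapses
+ such a field so the power-of-two test can verify it. */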
int
-ior_mask_p (unsigned HOST_WIDE_INT mask)
+pa_ior_mask_p (unsigned HOST_WIDE_INT mask)
{
mask += mask & -mask;
return (mask & (mask - 1)) == 0;
position-independent addresses go to REG. If we need more
than one register, we lose. */
-rtx
+static rtx
legitimize_pic_address (rtx orig, enum machine_mode mode, rtx reg)
{
rtx pic_ref = orig;
if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 0)) == MULT
&& GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
- && shadd_constant_p (INTVAL (XEXP (XEXP (x, 0), 1)))
+ && pa_shadd_constant_p (INTVAL (XEXP (XEXP (x, 0), 1)))
&& (OBJECT_P (XEXP (x, 1))
|| GET_CODE (XEXP (x, 1)) == SUBREG)
&& GET_CODE (XEXP (x, 1)) != CONST)
&& GET_CODE (XEXP (x, 0)) == PLUS
&& GET_CODE (XEXP (XEXP (x, 0), 0)) == MULT
&& GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 1)) == CONST_INT
- && shadd_constant_p (INTVAL (XEXP (XEXP (XEXP (x, 0), 0), 1)))
+ && pa_shadd_constant_p (INTVAL (XEXP (XEXP (XEXP (x, 0), 0), 1)))
&& (mode == SFmode || mode == DFmode))
{
idx = NULL_RTX;
/* Make sure they're both regs. If one was a SYMBOL_REF [+ const],
- then emit_move_sequence will turn on REG_POINTER so we'll know
+ then pa_emit_move_sequence will turn on REG_POINTER so we'll know
it's a base register below. */
if (GET_CODE (reg1) != REG)
reg1 = force_reg (Pmode, force_operand (reg1, 0));
do the optimization for floating point modes. */
if (GET_CODE (x) == PLUS
- && symbolic_expression_p (XEXP (x, 1)))
+ && pa_symbolic_expression_p (XEXP (x, 1)))
{
/* Ugly. We modify things here so that the address offset specified
by the index expression is computed first, then added to x to form
&& INTVAL (XEXP (y, 1)) >= -4096
&& INTVAL (XEXP (y, 1)) <= 4095
&& GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
- && shadd_constant_p (INTVAL (XEXP (XEXP (x, 0), 1))))
+ && pa_shadd_constant_p (INTVAL (XEXP (XEXP (x, 0), 1))))
{
int val = INTVAL (XEXP (XEXP (x, 0), 1));
rtx reg1, reg2;
&& GET_CODE (XEXP (y, 1)) == CONST_INT
&& INTVAL (XEXP (y, 1)) % INTVAL (XEXP (XEXP (x, 0), 1)) == 0
&& GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
- && shadd_constant_p (INTVAL (XEXP (XEXP (x, 0), 1))))
+ && pa_shadd_constant_p (INTVAL (XEXP (XEXP (x, 0), 1))))
{
regx1
= force_reg (Pmode, GEN_INT (INTVAL (XEXP (y, 1))
scanned. In either case, *TOTAL contains the cost result. */
static bool
-hppa_rtx_costs (rtx x, int code, int outer_code, int *total,
- bool speed ATTRIBUTE_UNUSED)
+hppa_rtx_costs (rtx x, int code, int outer_code, int opno ATTRIBUTE_UNUSED,
+ int *total, bool speed ATTRIBUTE_UNUSED)
{
switch (code)
{
of SCRATCH_REG in the proper mode. */
int
-emit_move_sequence (rtx *operands, enum machine_mode mode, rtx scratch_reg)
+pa_emit_move_sequence (rtx *operands, enum machine_mode mode, rtx scratch_reg)
{
register rtx operand0 = operands[0];
register rtx operand1 = operands[1];
const_mem = force_const_mem (mode, operand1);
xoperands[0] = scratch_reg;
xoperands[1] = XEXP (const_mem, 0);
- emit_move_sequence (xoperands, Pmode, 0);
+ pa_emit_move_sequence (xoperands, Pmode, 0);
/* Now load the destination register. */
emit_insn (gen_rtx_SET (mode, operand0,
/* Handle the most common case: storing into a register. */
else if (register_operand (operand0, mode))
{
+ /* Legitimize TLS symbol references. This happens for references
+ that aren't a legitimate constant. */
+ if (PA_SYMBOL_REF_TLS_P (operand1))
+ operand1 = legitimize_tls_address (operand1);
+
if (register_operand (operand1, mode)
|| (GET_CODE (operand1) == CONST_INT
- && cint_ok_for_move (INTVAL (operand1)))
+ && pa_cint_ok_for_move (INTVAL (operand1)))
|| (operand1 == CONST0_RTX (mode))
|| (GET_CODE (operand1) == HIGH
&& !symbolic_operand (XEXP (operand1, 0), VOIDmode))
/* Put the address of the memory location into our destination
register. */
operands[1] = temp;
- emit_move_sequence (operands, mode, scratch_reg);
+ pa_emit_move_sequence (operands, mode, scratch_reg);
/* Now load from the memory location into our destination
register. */
operands[1] = gen_rtx_MEM (Pmode, operands[0]);
- emit_move_sequence (operands, mode, scratch_reg);
+ pa_emit_move_sequence (operands, mode, scratch_reg);
/* And add back in the constant part. */
if (const_part != NULL_RTX)
operands[1] = legitimize_pic_address (XEXP (const_mem, 0),
mode, temp);
operands[1] = replace_equiv_address (const_mem, operands[1]);
- emit_move_sequence (operands, mode, temp);
+ pa_emit_move_sequence (operands, mode, temp);
}
else
{
operands[1] = tmp;
}
else if (GET_CODE (operand1) != CONST_INT
- || !cint_ok_for_move (INTVAL (operand1)))
+ || !pa_cint_ok_for_move (INTVAL (operand1)))
{
rtx insn, temp;
rtx op1 = operand1;
it will need a link/runtime reloc). */
int
-reloc_needed (tree exp)
+pa_reloc_needed (tree exp)
{
int reloc = 0;
case POINTER_PLUS_EXPR:
case PLUS_EXPR:
case MINUS_EXPR:
- reloc = reloc_needed (TREE_OPERAND (exp, 0));
- reloc |= reloc_needed (TREE_OPERAND (exp, 1));
+ reloc = pa_reloc_needed (TREE_OPERAND (exp, 0));
+ reloc |= pa_reloc_needed (TREE_OPERAND (exp, 1));
break;
CASE_CONVERT:
case NON_LVALUE_EXPR:
- reloc = reloc_needed (TREE_OPERAND (exp, 0));
+ reloc = pa_reloc_needed (TREE_OPERAND (exp, 0));
break;
case CONSTRUCTOR:
FOR_EACH_CONSTRUCTOR_VALUE (CONSTRUCTOR_ELTS (exp), ix, value)
if (value)
- reloc |= reloc_needed (value);
+ reloc |= pa_reloc_needed (value);
}
break;
/* Return the best assembler insn template
for moving operands[1] into operands[0] as a fullword. */
const char *
-singlemove_string (rtx *operands)
+pa_singlemove_string (rtx *operands)
{
HOST_WIDE_INT intval;
return "ldi %1,%0";
else if ((intval & 0x7ff) == 0)
return "ldil L'%1,%0";
- else if (zdepi_cint_p (intval))
+ else if (pa_zdepi_cint_p (intval))
return "{zdepi %Z1,%0|depwi,z %Z1,%0}";
else
return "ldil L'%1,%0\n\tldo R'%1(%0),%0";
/* Compute position (in OP[1]) and width (in OP[2])
useful for copying IMM to a register using the depdi,z
instructions. Store the immediate value to insert in OP[0]. */
-void
+
+static void
compute_zdepdi_operands (unsigned HOST_WIDE_INT imm, unsigned *op)
{
int lsb, len, maxlen;
with operands OPERANDS. */
const char *
-output_move_double (rtx *operands)
+pa_output_move_double (rtx *operands)
{
enum { REGOP, OFFSOP, MEMOP, CNSTOP, RNDOP } optype0, optype1;
rtx latehalf[2];
/* Do the late half first. */
if (addreg1)
output_asm_insn ("ldo 4(%0),%0", &addreg1);
- output_asm_insn (singlemove_string (latehalf), latehalf);
+ output_asm_insn (pa_singlemove_string (latehalf), latehalf);
/* Then clobber. */
if (addreg1)
output_asm_insn ("ldo -4(%0),%0", &addreg1);
- return singlemove_string (operands);
+ return pa_singlemove_string (operands);
}
/* Now handle register -> register case. */
if (optype0 == REGOP && optype1 == REGOP
&& REGNO (operands[0]) == REGNO (operands[1]) + 1)
{
- output_asm_insn (singlemove_string (latehalf), latehalf);
- return singlemove_string (operands);
+ output_asm_insn (pa_singlemove_string (latehalf), latehalf);
+ return pa_singlemove_string (operands);
}
/* Normal case: do the two words, low-numbered first. */
- output_asm_insn (singlemove_string (operands), operands);
+ output_asm_insn (pa_singlemove_string (operands), operands);
/* Make any unoffsettable addresses point at high-numbered word. */
if (addreg0)
output_asm_insn ("ldo 4(%0),%0", &addreg1);
/* Do that word. */
- output_asm_insn (singlemove_string (latehalf), latehalf);
+ output_asm_insn (pa_singlemove_string (latehalf), latehalf);
/* Undo the adds we just did. */
if (addreg0)
}
\f
const char *
-output_fp_move_double (rtx *operands)
+pa_output_fp_move_double (rtx *operands)
{
if (FP_REG_P (operands[0]))
{
OPERANDS[6] is another temporary register. */
const char *
-output_block_move (rtx *operands, int size_is_constant ATTRIBUTE_UNUSED)
+pa_output_block_move (rtx *operands, int size_is_constant ATTRIBUTE_UNUSED)
{
int align = INTVAL (operands[5]);
unsigned long n_bytes = INTVAL (operands[4]);
OPERANDS[3] is the alignment safe to use, as a CONST_INT. */
const char *
-output_block_clear (rtx *operands, int size_is_constant ATTRIBUTE_UNUSED)
+pa_output_block_clear (rtx *operands, int size_is_constant ATTRIBUTE_UNUSED)
{
int align = INTVAL (operands[3]);
unsigned long n_bytes = INTVAL (operands[2]);
\f
const char *
-output_and (rtx *operands)
+pa_output_and (rtx *operands)
{
if (GET_CODE (operands[2]) == CONST_INT && INTVAL (operands[2]) != 0)
{
/* Return a string to perform a bitwise-and of operands[1] with operands[2]
storing the result in operands[0]. */
const char *
-output_64bit_and (rtx *operands)
+pa_output_64bit_and (rtx *operands)
{
if (GET_CODE (operands[2]) == CONST_INT && INTVAL (operands[2]) != 0)
{
}
const char *
-output_ior (rtx *operands)
+pa_output_ior (rtx *operands)
{
unsigned HOST_WIDE_INT mask = INTVAL (operands[2]);
int bs0, bs1, p, len;
/* Return a string to perform a bitwise-or of operands[1] with operands[2]
storing the result in operands[0]. */
const char *
-output_64bit_ior (rtx *operands)
+pa_output_64bit_ior (rtx *operands)
{
unsigned HOST_WIDE_INT mask = INTVAL (operands[2]);
int bs0, bs1, p, len;
\f
/* Output an ascii string. */
void
-output_ascii (FILE *file, const char *p, int size)
+pa_output_ascii (FILE *file, const char *p, int size)
{
int i;
int chars_output;
}
HOST_WIDE_INT
-compute_frame_size (HOST_WIDE_INT size, int *fregs_live)
+pa_compute_frame_size (HOST_WIDE_INT size, int *fregs_live)
{
int freg_saved = 0;
int i, j;
- /* The code in hppa_expand_prologue and hppa_expand_epilogue must
+ /* The code in pa_expand_prologue and pa_expand_epilogue must
be consistent with the rounding and size calculation done here.
Change them at the same time. */
ASM_OUTPUT_LABEL (file, XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0));
fputs ("\t.PROC\n", file);
- /* hppa_expand_prologue does the dirty work now. We just need
+ /* pa_expand_prologue does the dirty work now. We just need
to output the assembler directives which denote the start
of a function. */
fprintf (file, "\t.CALLINFO FRAME=" HOST_WIDE_INT_PRINT_DEC, actual_fsize);
}
void
-hppa_expand_prologue (void)
+pa_expand_prologue (void)
{
int merge_sp_adjust_with_store = 0;
HOST_WIDE_INT size = get_frame_size ();
save_fregs = 0;
/* Compute total size for frame pointer, filler, locals and rounding to
- the next word boundary. Similar code appears in compute_frame_size
+ the next word boundary. Similar code appears in pa_compute_frame_size
and must be changed in tandem with this code. */
local_fsize = (size + UNITS_PER_WORD - 1) & ~(UNITS_PER_WORD - 1);
if (local_fsize || frame_pointer_needed)
local_fsize += STARTING_FRAME_OFFSET;
- actual_fsize = compute_frame_size (size, &save_fregs);
+ actual_fsize = pa_compute_frame_size (size, &save_fregs);
if (flag_stack_usage_info)
current_function_static_stack_size = actual_fsize;
last_address = 0;
- /* hppa_expand_epilogue does the dirty work now. We just need
+ /* pa_expand_epilogue does the dirty work now. We just need
to output the assembler directives which denote the end
of a function.
}
void
-hppa_expand_epilogue (void)
+pa_expand_epilogue (void)
{
rtx tmpreg;
HOST_WIDE_INT offset;
}
}
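+/* Return true when a simple "return" instruction suffices for the
+ current function: reload has completed, no stack frame is needed,
+ the return pointer (%r2) is never touched, and no profiling call
+ has to be emitted. */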
+bool
+pa_can_use_return_insn (void)
+{
+ if (!reload_completed)
+ return false;
+
+ if (frame_pointer_needed)
+ return false;
+
+ if (df_regs_ever_live_p (2))
+ return false;
+
+ if (crtl->profile)
+ return false;
+
+ return pa_compute_frame_size (get_frame_size (), 0) == 0;
+}
+
rtx
hppa_pic_save_rtx (void)
{
return location is in a shared library. */
rtx
-return_addr_rtx (int count, rtx frameaddr)
+pa_return_addr_rtx (int count, rtx frameaddr)
{
rtx label;
rtx rp;
}
void
-emit_bcond_fp (rtx operands[])
+pa_emit_bcond_fp (rtx operands[])
{
enum rtx_code code = GET_CODE (operands[0]);
rtx operand0 = operands[1];
For `%' followed by punctuation, CODE is the punctuation and X is null. */
void
-print_operand (FILE *file, rtx x, int code)
+pa_print_operand (FILE *file, rtx x, int code)
{
switch (code)
{
xoperands[0] = XEXP (XEXP (x, 0), 0);
xoperands[1] = XVECEXP (XEXP (XEXP (x, 0), 1), 0, 0);
- output_global_address (file, xoperands[1], 0);
+ pa_output_global_address (file, xoperands[1], 0);
fprintf (file, "(%s)", reg_names [REGNO (xoperands[0])]);
return;
}
}
return;
case 'G':
- output_global_address (file, x, 0);
+ pa_output_global_address (file, x, 0);
return;
case 'H':
- output_global_address (file, x, 1);
+ pa_output_global_address (file, x, 1);
return;
case 0: /* Don't do anything special */
break;
/* Output a SYMBOL_REF or a CONST expression involving a SYMBOL_REF. */
void
-output_global_address (FILE *file, rtx x, int round_constant)
+pa_output_global_address (FILE *file, rtx x, int round_constant)
{
/* Imagine (high (const (plus ...))). */
label. If an entry for SYMBOL is not found, a new entry is created. */
rtx
-get_deferred_plabel (rtx symbol)
+pa_get_deferred_plabel (rtx symbol)
{
const char *fname = XSTR (symbol, 0);
size_t i;
}
}
-#if HPUX_LONG_DOUBLE_LIBRARY
-/* Initialize optabs to point to HPUX long double emulation routines. */
+/* Initialize optabs to point to emulation routines. */
+
static void
-pa_hpux_init_libfuncs (void)
-{
- set_optab_libfunc (add_optab, TFmode, "_U_Qfadd");
- set_optab_libfunc (sub_optab, TFmode, "_U_Qfsub");
- set_optab_libfunc (smul_optab, TFmode, "_U_Qfmpy");
- set_optab_libfunc (sdiv_optab, TFmode, "_U_Qfdiv");
- set_optab_libfunc (smin_optab, TFmode, "_U_Qmin");
- set_optab_libfunc (smax_optab, TFmode, "_U_Qfmax");
- set_optab_libfunc (sqrt_optab, TFmode, "_U_Qfsqrt");
- set_optab_libfunc (abs_optab, TFmode, "_U_Qfabs");
- set_optab_libfunc (neg_optab, TFmode, "_U_Qfneg");
-
- set_optab_libfunc (eq_optab, TFmode, "_U_Qfeq");
- set_optab_libfunc (ne_optab, TFmode, "_U_Qfne");
- set_optab_libfunc (gt_optab, TFmode, "_U_Qfgt");
- set_optab_libfunc (ge_optab, TFmode, "_U_Qfge");
- set_optab_libfunc (lt_optab, TFmode, "_U_Qflt");
- set_optab_libfunc (le_optab, TFmode, "_U_Qfle");
- set_optab_libfunc (unord_optab, TFmode, "_U_Qfunord");
-
- set_conv_libfunc (sext_optab, TFmode, SFmode, "_U_Qfcnvff_sgl_to_quad");
- set_conv_libfunc (sext_optab, TFmode, DFmode, "_U_Qfcnvff_dbl_to_quad");
- set_conv_libfunc (trunc_optab, SFmode, TFmode, "_U_Qfcnvff_quad_to_sgl");
- set_conv_libfunc (trunc_optab, DFmode, TFmode, "_U_Qfcnvff_quad_to_dbl");
-
- set_conv_libfunc (sfix_optab, SImode, TFmode, TARGET_64BIT
- ? "__U_Qfcnvfxt_quad_to_sgl"
- : "_U_Qfcnvfxt_quad_to_sgl");
- set_conv_libfunc (sfix_optab, DImode, TFmode, "_U_Qfcnvfxt_quad_to_dbl");
- set_conv_libfunc (ufix_optab, SImode, TFmode, "_U_Qfcnvfxt_quad_to_usgl");
- set_conv_libfunc (ufix_optab, DImode, TFmode, "_U_Qfcnvfxt_quad_to_udbl");
-
- set_conv_libfunc (sfloat_optab, TFmode, SImode, "_U_Qfcnvxf_sgl_to_quad");
- set_conv_libfunc (sfloat_optab, TFmode, DImode, "_U_Qfcnvxf_dbl_to_quad");
- set_conv_libfunc (ufloat_optab, TFmode, SImode, "_U_Qfcnvxf_usgl_to_quad");
- set_conv_libfunc (ufloat_optab, TFmode, DImode, "_U_Qfcnvxf_udbl_to_quad");
+pa_init_libfuncs (void)
+{
+ if (HPUX_LONG_DOUBLE_LIBRARY)
+ {
+ set_optab_libfunc (add_optab, TFmode, "_U_Qfadd");
+ set_optab_libfunc (sub_optab, TFmode, "_U_Qfsub");
+ set_optab_libfunc (smul_optab, TFmode, "_U_Qfmpy");
+ set_optab_libfunc (sdiv_optab, TFmode, "_U_Qfdiv");
+ set_optab_libfunc (smin_optab, TFmode, "_U_Qmin");
+ set_optab_libfunc (smax_optab, TFmode, "_U_Qfmax");
+ set_optab_libfunc (sqrt_optab, TFmode, "_U_Qfsqrt");
+ set_optab_libfunc (abs_optab, TFmode, "_U_Qfabs");
+ set_optab_libfunc (neg_optab, TFmode, "_U_Qfneg");
+
+ set_optab_libfunc (eq_optab, TFmode, "_U_Qfeq");
+ set_optab_libfunc (ne_optab, TFmode, "_U_Qfne");
+ set_optab_libfunc (gt_optab, TFmode, "_U_Qfgt");
+ set_optab_libfunc (ge_optab, TFmode, "_U_Qfge");
+ set_optab_libfunc (lt_optab, TFmode, "_U_Qflt");
+ set_optab_libfunc (le_optab, TFmode, "_U_Qfle");
+ set_optab_libfunc (unord_optab, TFmode, "_U_Qfunord");
+
+ set_conv_libfunc (sext_optab, TFmode, SFmode, "_U_Qfcnvff_sgl_to_quad");
+ set_conv_libfunc (sext_optab, TFmode, DFmode, "_U_Qfcnvff_dbl_to_quad");
+ set_conv_libfunc (trunc_optab, SFmode, TFmode, "_U_Qfcnvff_quad_to_sgl");
+ set_conv_libfunc (trunc_optab, DFmode, TFmode, "_U_Qfcnvff_quad_to_dbl");
+
+ set_conv_libfunc (sfix_optab, SImode, TFmode,
+ TARGET_64BIT ? "__U_Qfcnvfxt_quad_to_sgl"
+ : "_U_Qfcnvfxt_quad_to_sgl");
+ set_conv_libfunc (sfix_optab, DImode, TFmode,
+ "_U_Qfcnvfxt_quad_to_dbl");
+ set_conv_libfunc (ufix_optab, SImode, TFmode,
+ "_U_Qfcnvfxt_quad_to_usgl");
+ set_conv_libfunc (ufix_optab, DImode, TFmode,
+ "_U_Qfcnvfxt_quad_to_udbl");
+
+ set_conv_libfunc (sfloat_optab, TFmode, SImode,
+ "_U_Qfcnvxf_sgl_to_quad");
+ set_conv_libfunc (sfloat_optab, TFmode, DImode,
+ "_U_Qfcnvxf_dbl_to_quad");
+ set_conv_libfunc (ufloat_optab, TFmode, SImode,
+ "_U_Qfcnvxf_usgl_to_quad");
+ set_conv_libfunc (ufloat_optab, TFmode, DImode,
+ "_U_Qfcnvxf_udbl_to_quad");
+ }
+
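+ /* If the target uses library calls for the atomic operations,
+ register the out-of-line __sync_* routines up to the word size. */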
+ if (TARGET_SYNC_LIBCALL)
+ init_sync_libfuncs (UNITS_PER_WORD);
}
-#endif
/* HP's millicode routines mean something special to the assembler.
Keep track of which ones we have used. */
the proper registers. */
const char *
-output_mul_insn (int unsignedp ATTRIBUTE_UNUSED, rtx insn)
+pa_output_mul_insn (int unsignedp ATTRIBUTE_UNUSED, rtx insn)
{
import_milli (mulI);
- return output_millicode_call (insn, gen_rtx_SYMBOL_REF (Pmode, "$$mulI"));
+ return pa_output_millicode_call (insn, gen_rtx_SYMBOL_REF (Pmode, "$$mulI"));
}
/* Emit the rtl for doing a division by a constant. */
/* Do magic division millicodes exist for this value? */
-const int magic_milli[]= {0, 0, 0, 1, 0, 1, 1, 1, 0, 1, 1, 0, 1, 0, 1, 1};
+const int pa_magic_milli[] = {0, 0, 0, 1, 0, 1, 1, 1, 0, 1, 1, 0, 1, 0, 1, 1};
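+/* For example, entries 3 and 5 are nonzero, so divisions by 3 and 5
+ can call the $$divI_3/$$divU_3 and $$divI_5/$$divU_5 millicode
+ routines named via the sprintf calls below. */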
/* We'll use an array to keep track of the magic millicodes and
whether or not we've used them already. [n][0] is signed, [n][1] is
static int div_milli[16][2];
int
-emit_hpdiv_const (rtx *operands, int unsignedp)
+pa_emit_hpdiv_const (rtx *operands, int unsignedp)
{
if (GET_CODE (operands[2]) == CONST_INT
&& INTVAL (operands[2]) > 0
&& INTVAL (operands[2]) < 16
- && magic_milli[INTVAL (operands[2])])
+ && pa_magic_milli[INTVAL (operands[2])])
{
rtx ret = gen_rtx_REG (SImode, TARGET_64BIT ? 2 : 31);
}
const char *
-output_div_insn (rtx *operands, int unsignedp, rtx insn)
+pa_output_div_insn (rtx *operands, int unsignedp, rtx insn)
{
int divisor;
{
sprintf (buf, "$$divU_" HOST_WIDE_INT_PRINT_DEC,
INTVAL (operands[0]));
- return output_millicode_call (insn,
- gen_rtx_SYMBOL_REF (SImode, buf));
+ return pa_output_millicode_call (insn,
+ gen_rtx_SYMBOL_REF (SImode, buf));
}
else
{
sprintf (buf, "$$divI_" HOST_WIDE_INT_PRINT_DEC,
INTVAL (operands[0]));
- return output_millicode_call (insn,
- gen_rtx_SYMBOL_REF (SImode, buf));
+ return pa_output_millicode_call (insn,
+ gen_rtx_SYMBOL_REF (SImode, buf));
}
}
/* Divisor isn't a special constant. */
if (unsignedp)
{
import_milli (divU);
- return output_millicode_call (insn,
+ return pa_output_millicode_call (insn,
gen_rtx_SYMBOL_REF (SImode, "$$divU"));
}
else
{
import_milli (divI);
- return output_millicode_call (insn,
+ return pa_output_millicode_call (insn,
gen_rtx_SYMBOL_REF (SImode, "$$divI"));
}
}
/* Output a $$rem millicode to do mod. */
const char *
-output_mod_insn (int unsignedp, rtx insn)
+pa_output_mod_insn (int unsignedp, rtx insn)
{
if (unsignedp)
{
import_milli (remU);
- return output_millicode_call (insn,
- gen_rtx_SYMBOL_REF (SImode, "$$remU"));
+ return pa_output_millicode_call (insn,
+ gen_rtx_SYMBOL_REF (SImode, "$$remU"));
}
else
{
import_milli (remI);
- return output_millicode_call (insn,
- gen_rtx_SYMBOL_REF (SImode, "$$remI"));
+ return pa_output_millicode_call (insn,
+ gen_rtx_SYMBOL_REF (SImode, "$$remI"));
}
}
void
-output_arg_descriptor (rtx call_insn)
+pa_output_arg_descriptor (rtx call_insn)
{
const char *arg_regs[4];
enum machine_mode arg_mode;
/* Secondary reloads of symbolic operands require %r1 as a scratch
register when we're generating PIC code and when the operand isn't
readonly. */
- if (symbolic_expression_p (x))
+ if (pa_symbolic_expression_p (x))
{
if (GET_CODE (x) == HIGH)
x = XEXP (x, 0);
/* In order to allow 14-bit displacements in integer loads and stores,
we need to prevent reload from generating out of range integer mode
loads and stores to the floating point registers. Previously, we
- used to call for a secondary reload and have emit_move_sequence()
+ used to call for a secondary reload and have pa_emit_move_sequence()
fix the instruction sequence. However, reload occasionally wouldn't
generate the reload and we would end up with an invalid REG+D memory
address. So, now we use an intermediate general register for most
}
enum direction
-function_arg_padding (enum machine_mode mode, const_tree type)
+pa_function_arg_padding (enum machine_mode mode, const_tree type)
{
if (mode == BLKmode
|| (TARGET_64BIT
u = fold_convert (sizetype, size_in_bytes (type));
u = fold_build1 (NEGATE_EXPR, sizetype, u);
- t = build2 (POINTER_PLUS_EXPR, valist_type, valist, u);
+ t = fold_build_pointer_plus (valist, u);
/* Align to 4 or 8 byte boundary depending on argument size. */
ofs = (8 - size) % 4;
if (ofs != 0)
- {
- u = size_int (ofs);
- t = build2 (POINTER_PLUS_EXPR, valist_type, t, u);
- }
+ t = fold_build_pointer_plus_hwi (t, ofs);
t = fold_convert (ptr, t);
t = build_va_arg_indirect_ref (t);
parameters. */
const char *
-output_cbranch (rtx *operands, int negated, rtx insn)
+pa_output_cbranch (rtx *operands, int negated, rtx insn)
{
static char buf[100];
bool useskip;
default:
/* The reversed conditional branch must branch over one additional
instruction if the delay slot is filled and needs to be extracted
- by output_lbranch. If the delay slot is empty or this is a
+ by pa_output_lbranch. If the delay slot is empty or this is a
nullified forward branch, the instruction after the reversed
condition branch must be nullified. */
if (dbr_sequence_length () == 0
}
output_asm_insn (buf, operands);
- return output_lbranch (operands[0], insn, xdelay);
+ return pa_output_lbranch (operands[0], insn, xdelay);
}
return buf;
}
bytes for the portable runtime, non-PIC and PIC cases, respectively. */
const char *
-output_lbranch (rtx dest, rtx insn, int xdelay)
+pa_output_lbranch (rtx dest, rtx insn, int xdelay)
{
rtx xoperands[2];
above. It returns the appropriate output template to emit the branch. */
const char *
-output_bb (rtx *operands ATTRIBUTE_UNUSED, int negated, rtx insn, int which)
+pa_output_bb (rtx *operands ATTRIBUTE_UNUSED, int negated, rtx insn, int which)
{
static char buf[100];
bool useskip;
default:
/* The reversed conditional branch must branch over one additional
instruction if the delay slot is filled and needs to be extracted
- by output_lbranch. If the delay slot is empty or this is a
+ by pa_output_lbranch. If the delay slot is empty or this is a
nullified forward branch, the instruction after the reversed
condition branch must be nullified. */
if (dbr_sequence_length () == 0
else
strcat (buf, " %0,%1,.+%4");
output_asm_insn (buf, operands);
- return output_lbranch (negated ? operands[3] : operands[2],
- insn, xdelay);
+ return pa_output_lbranch (negated ? operands[3] : operands[2],
+ insn, xdelay);
}
return buf;
}
branch. */
const char *
-output_bvb (rtx *operands ATTRIBUTE_UNUSED, int negated, rtx insn, int which)
+pa_output_bvb (rtx *operands ATTRIBUTE_UNUSED, int negated, rtx insn,
+ int which)
{
static char buf[100];
bool useskip;
default:
/* The reversed conditional branch must branch over one additional
instruction if the delay slot is filled and needs to be extracted
- by output_lbranch. If the delay slot is empty or this is a
+ by pa_output_lbranch. If the delay slot is empty or this is a
nullified forward branch, the instruction after the reversed
condition branch must be nullified. */
if (dbr_sequence_length () == 0
else
strcat (buf, " {%0,.+%4|%0,%%sar,.+%4}");
output_asm_insn (buf, operands);
- return output_lbranch (negated ? operands[3] : operands[2],
- insn, xdelay);
+ return pa_output_lbranch (negated ? operands[3] : operands[2],
+ insn, xdelay);
}
return buf;
}
Note it may perform some output operations on its own before
returning the final output string. */
const char *
-output_dbra (rtx *operands, rtx insn, int which_alternative)
+pa_output_dbra (rtx *operands, rtx insn, int which_alternative)
{
int length = get_attr_length (insn);
default:
/* The reversed conditional branch must branch over one additional
instruction if the delay slot is filled and needs to be extracted
- by output_lbranch. If the delay slot is empty or this is a
+ by pa_output_lbranch. If the delay slot is empty or this is a
nullified forward branch, the instruction after the reversed
condition branch must be nullified. */
if (dbr_sequence_length () == 0
else
output_asm_insn ("addib,%N2 %1,%0,.+%4", operands);
- return output_lbranch (operands[3], insn, xdelay);
+ return pa_output_lbranch (operands[3], insn, xdelay);
}
}
operands[5] = GEN_INT (length - 16);
output_asm_insn ("{comb|cmpb},%B2 %%r0,%4,.+%5", operands);
output_asm_insn ("{fldws|fldw} -16(%%r30),%0", operands);
- return output_lbranch (operands[3], insn, 0);
+ return pa_output_lbranch (operands[3], insn, 0);
}
}
/* Deal with gross reload from memory case. */
{
operands[5] = GEN_INT (length - 4);
output_asm_insn ("addib,%N2 %1,%4,.+%5\n\tstw %4,%0", operands);
- return output_lbranch (operands[3], insn, 0);
+ return pa_output_lbranch (operands[3], insn, 0);
}
}
}
Note it may perform some output operations on its own before
returning the final output string. */
const char *
-output_movb (rtx *operands, rtx insn, int which_alternative,
+pa_output_movb (rtx *operands, rtx insn, int which_alternative,
int reverse_comparison)
{
int length = get_attr_length (insn);
default:
/* The reversed conditional branch must branch over one additional
instruction if the delay slot is filled and needs to be extracted
- by output_lbranch. If the delay slot is empty or this is a
+ by pa_output_lbranch. If the delay slot is empty or this is a
nullified forward branch, the instruction after the reversed
condition branch must be nullified. */
if (dbr_sequence_length () == 0
else
output_asm_insn ("movb,%N2 %1,%0,.+%4", operands);
- return output_lbranch (operands[3], insn, xdelay);
+ return pa_output_lbranch (operands[3], insn, xdelay);
}
}
/* Deal with gross reload for FP destination register case. */
operands[4] = GEN_INT (length - 4);
output_asm_insn ("{comb|cmpb},%B2 %%r0,%1,.+%4", operands);
output_asm_insn ("{fldws|fldw} -16(%%r30),%0", operands);
- return output_lbranch (operands[3], insn, 0);
+ return pa_output_lbranch (operands[3], insn, 0);
}
}
/* Deal with gross reload from memory case. */
operands[4] = GEN_INT (length);
output_asm_insn ("{comb|cmpb},%B2 %%r0,%1,.+%4\n\tstw %1,%0",
operands);
- return output_lbranch (operands[3], insn, 0);
+ return pa_output_lbranch (operands[3], insn, 0);
}
}
/* Handle SAR as a destination. */
operands[4] = GEN_INT (length);
output_asm_insn ("{comb|cmpb},%B2 %%r0,%1,.+%4\n\tmtsar %r1",
operands);
- return output_lbranch (operands[3], insn, 0);
+ return pa_output_lbranch (operands[3], insn, 0);
}
}
}
}
/* Return the attribute length for the millicode call instruction INSN.
- The length must match the code generated by output_millicode_call.
+ The length must match the code generated by pa_output_millicode_call.
We include the delay slot in the returned length as it is better to
overestimate the length than to underestimate it. */
int
-attr_length_millicode_call (rtx insn)
+pa_attr_length_millicode_call (rtx insn)
{
unsigned long distance = -1;
unsigned long total = IN_NAMED_SECTION_P (cfun->decl) ? 0 : total_code_bytes;
CALL_DEST is the routine we are calling. */
const char *
-output_millicode_call (rtx insn, rtx call_dest)
+pa_output_millicode_call (rtx insn, rtx call_dest)
{
int attr_length = get_attr_length (insn);
int seq_length = dbr_sequence_length ();
/* Return the attribute length of the call instruction INSN. The SIBCALL
flag indicates whether INSN is a regular call or a sibling call. The
length returned must be longer than the code actually generated by
- output_call. Since branch shortening is done before delay branch
+ pa_output_call. Since branch shortening is done before delay branch
sequencing, there is no way to determine whether or not the delay
slot will be filled during branch shortening. Even when the delay
slot is filled, we may have to add a nop if the delay slot contains
these sequences. */
int
-attr_length_call (rtx insn, int sibcall)
+pa_attr_length_call (rtx insn, int sibcall)
{
int local_call;
rtx call, call_dest;
CALL_DEST is the routine we are calling. */
const char *
-output_call (rtx insn, rtx call_dest, int sibcall)
+pa_output_call (rtx insn, rtx call_dest, int sibcall)
{
int delay_insn_deleted = 0;
int delay_slot_filled = 0;
/* Handle the common case where we're sure that the branch will reach
the beginning of the "$CODE$" subspace. This is the beginning of
the current function if we are in a named section. */
- if (!TARGET_LONG_CALLS && attr_length_call (insn, sibcall) == 8)
+ if (!TARGET_LONG_CALLS && pa_attr_length_call (insn, sibcall) == 8)
{
xoperands[1] = gen_rtx_REG (word_mode, sibcall ? 0 : 2);
output_asm_insn ("{bl|b,l} %0,%1", xoperands);
/* ??? As far as I can tell, the HP linker doesn't support the
long pc-relative sequence described in the 64-bit runtime
architecture. So, we use a slightly longer indirect call. */
- xoperands[0] = get_deferred_plabel (call_dest);
+ xoperands[0] = pa_get_deferred_plabel (call_dest);
xoperands[1] = gen_label_rtx ();
/* If this isn't a sibcall, we put the load of %r27 into the
essentially an inline implementation of $$dyncall.
We don't actually try to call $$dyncall as this is
as difficult as calling the function itself. */
- xoperands[0] = get_deferred_plabel (call_dest);
+ xoperands[0] = pa_get_deferred_plabel (call_dest);
xoperands[1] = gen_label_rtx ();
/* Since the call is indirect, FP arguments in registers
the sequence itself. */
int
-attr_length_indirect_call (rtx insn)
+pa_attr_length_indirect_call (rtx insn)
{
unsigned long distance = -1;
unsigned long total = IN_NAMED_SECTION_P (cfun->decl) ? 0 : total_code_bytes;
}
const char *
-output_indirect_call (rtx insn, rtx call_dest)
+pa_output_indirect_call (rtx insn, rtx call_dest)
{
rtx xoperands[1];
No need to check target flags as the length uniquely identifies
the remaining cases. */
- if (attr_length_indirect_call (insn) == 8)
+ if (pa_attr_length_indirect_call (insn) == 8)
{
/* The HP linker sometimes substitutes a BLE for BL/B,L calls to
$$dyncall. Since BLE uses %r31 as the link register, the 22-bit
/* Long millicode call, but we are not generating PIC or portable runtime
code. */
- if (attr_length_indirect_call (insn) == 12)
+ if (pa_attr_length_indirect_call (insn) == 12)
return ".CALL\tARGW0=GR\n\tldil L'$$dyncall,%%r2\n\tble R'$$dyncall(%%sr4,%%r2)\n\tcopy %%r31,%%r2";
/* Long millicode call for portable runtime. */
- if (attr_length_indirect_call (insn) == 20)
+ if (pa_attr_length_indirect_call (insn) == 20)
return "ldil L'$$dyncall,%%r31\n\tldo R'$$dyncall(%%r31),%%r31\n\tblr %%r0,%%r2\n\tbv,n %%r0(%%r31)\n\tnop";
/* We need a long PIC call to $$dyncall. */
return "";
}
-/* Return the total length of the save and restore instructions needed for
- the data linkage table pointer (i.e., the PIC register) across the call
- instruction INSN. No-return calls do not require a save and restore.
- In addition, we may be able to avoid the save and restore for calls
- within the same translation unit. */
-
-int
-attr_length_save_restore_dltp (rtx insn)
-{
- if (find_reg_note (insn, REG_NORETURN, NULL_RTX))
- return 0;
-
- return 8;
-}
-
/* In HPUX 8.0's shared library scheme, special relocations are needed
for function labels if they might be passed to a function
in a shared library (because shared libraries don't live in code
space), and special magic is needed to construct their address. */
void
-hppa_encode_label (rtx sym)
+pa_encode_label (rtx sym)
{
const char *str = XSTR (sym, 0);
int len = strlen (str) + 1;
{
SYMBOL_REF_FLAG (XEXP (rtl, 0)) = 1;
if (TREE_CODE (decl) == FUNCTION_DECL)
- hppa_encode_label (XEXP (rtl, 0));
+ pa_encode_label (XEXP (rtl, 0));
}
else if (old_referenced)
SYMBOL_REF_FLAGS (XEXP (rtl, 0)) |= old_referenced;
with a constant. Used to keep certain patterns from matching
during instruction combination. */
int
-is_function_label_plus_const (rtx op)
+pa_is_function_label_plus_const (rtx op)
{
/* Strip off any CONST. */
if (GET_CODE (op) == CONST)
single subspace mode and the call is not indirect. As far as I know,
there is no operating system support for the multiple subspace mode.
It might be possible to support indirect calls if we didn't use
- $$dyncall (see the indirect sequence generated in output_call). */
+ $$dyncall (see the indirect sequence generated in pa_output_call). */
if (TARGET_ELF32)
return (decl != NULL_TREE);
/* Returns 1 if the 6 operands specified in OPERANDS are suitable for
use in fmpyadd instructions. */
int
-fmpyaddoperands (rtx *operands)
+pa_fmpyaddoperands (rtx *operands)
{
enum machine_mode mode = GET_MODE (operands[0]);
pa_asm_out_constructor (rtx symbol, int priority)
{
if (!function_label_operand (symbol, VOIDmode))
- hppa_encode_label (symbol);
+ pa_encode_label (symbol);
#ifdef CTORS_SECTION_ASM_OP
default_ctor_section_asm_out_constructor (symbol, priority);
pa_asm_out_destructor (rtx symbol, int priority)
{
if (!function_label_operand (symbol, VOIDmode))
- hppa_encode_label (symbol);
+ pa_encode_label (symbol);
#ifdef DTORS_SECTION_ASM_OP
default_dtor_section_asm_out_destructor (symbol, priority);
/* Returns 1 if the 6 operands specified in OPERANDS are suitable for
use in fmpysub instructions. */
int
-fmpysuboperands (rtx *operands)
+pa_fmpysuboperands (rtx *operands)
{
enum machine_mode mode = GET_MODE (operands[0]);
/* Return 1 if the given constant is 2, 4, or 8. These are the valid
constants for shadd instructions. */
int
-shadd_constant_p (int val)
+pa_shadd_constant_p (int val)
{
if (val == 2 || val == 4 || val == 8)
return 1;
/* Return 1 if INSN is in the delay slot of a call instruction. */
int
-jump_in_call_delay (rtx insn)
+pa_jump_in_call_delay (rtx insn)
{
if (GET_CODE (insn) != JUMP_INSN)
/* Output an unconditional move and branch insn. */
const char *
-output_parallel_movb (rtx *operands, rtx insn)
+pa_output_parallel_movb (rtx *operands, rtx insn)
{
int length = get_attr_length (insn);
output_asm_insn ("ldi %1,%0", operands);
else
output_asm_insn ("copy %1,%0", operands);
- return output_lbranch (operands[2], insn, 1);
+ return pa_output_lbranch (operands[2], insn, 1);
}
/* Output an unconditional add and branch insn. */
const char *
-output_parallel_addb (rtx *operands, rtx insn)
+pa_output_parallel_addb (rtx *operands, rtx insn)
{
int length = get_attr_length (insn);
}
output_asm_insn ("add%I1 %1,%0,%0", operands);
- return output_lbranch (operands[3], insn, 1);
+ return pa_output_lbranch (operands[3], insn, 1);
}
/* Return nonzero if INSN (a jump insn) immediately follows a call
the delay slot of the call. */
int
-following_call (rtx insn)
+pa_following_call (rtx insn)
{
if (! TARGET_JUMP_IN_DELAY)
return 0;
filter out things it will not accept -- SEQUENCE, USE and CLOBBER insns
in particular. */
int
-insn_refs_are_delayed (rtx insn)
+pa_insn_refs_are_delayed (rtx insn)
{
return ((GET_CODE (insn) == INSN
&& GET_CODE (PATTERN (insn)) != SEQUENCE
if ((from == HARD_FRAME_POINTER_REGNUM || from == FRAME_POINTER_REGNUM)
&& to == STACK_POINTER_REGNUM)
- offset = -compute_frame_size (get_frame_size (), 0);
+ offset = -pa_compute_frame_size (get_frame_size (), 0);
else if (from == FRAME_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
offset = 0;
else
if (!NEW_HP_ASSEMBLER && !TARGET_GAS && GET_CODE (x) == LABEL_REF)
return false;
+ /* TLS_MODEL_GLOBAL_DYNAMIC and TLS_MODEL_LOCAL_DYNAMIC are not
+ legitimate constants. */
+ if (PA_SYMBOL_REF_TLS_P (x))
+ {
+ enum tls_model model = SYMBOL_REF_TLS_MODEL (x);
+
+ if (model == TLS_MODEL_GLOBAL_DYNAMIC || model == TLS_MODEL_LOCAL_DYNAMIC)
+ return false;
+ }
+
if (TARGET_64BIT && GET_CODE (x) == CONST_DOUBLE)
return false;
&& !reload_in_progress
&& !reload_completed
&& !LEGITIMATE_64BIT_CONST_INT_P (INTVAL (x))
- && !cint_ok_for_move (INTVAL (x)))
+ && !pa_cint_ok_for_move (INTVAL (x)))
return false;
if (function_label_operand (x, mode))