You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING. If not, write to
-the Free Software Foundation, 59 Temple Place - Suite 330,
-Boston, MA 02111-1307, USA. */
+the Free Software Foundation, 51 Franklin Street, Fifth Floor,
+Boston, MA 02110-1301, USA. */
#include "config.h"
#include "system.h"
#include "langhooks.h"
#include "cfglayout.h"
#include "tree-gimple.h"
+#include "intl.h"
/* This is used for communication between ASM_OUTPUT_LABEL and
ASM_OUTPUT_LABELREF. */
static bool ia64_scalar_mode_supported_p (enum machine_mode mode);
static bool ia64_vector_mode_supported_p (enum machine_mode mode);
static bool ia64_cannot_force_const_mem (rtx);
+static const char *ia64_mangle_fundamental_type (tree);
+static const char *ia64_invalid_conversion (tree, tree);
+static const char *ia64_invalid_unary_op (int, tree);
+static const char *ia64_invalid_binary_op (int, tree, tree);
\f
/* Table of valid machine attributes. */
static const struct attribute_spec ia64_attribute_table[] =
#undef TARGET_CANNOT_FORCE_CONST_MEM
#define TARGET_CANNOT_FORCE_CONST_MEM ia64_cannot_force_const_mem
+#undef TARGET_MANGLE_FUNDAMENTAL_TYPE
+#define TARGET_MANGLE_FUNDAMENTAL_TYPE ia64_mangle_fundamental_type
+
+#undef TARGET_INVALID_CONVERSION
+#define TARGET_INVALID_CONVERSION ia64_invalid_conversion
+#undef TARGET_INVALID_UNARY_OP
+#define TARGET_INVALID_UNARY_OP ia64_invalid_unary_op
+#undef TARGET_INVALID_BINARY_OP
+#define TARGET_INVALID_BINARY_OP ia64_invalid_binary_op
+
struct gcc_target targetm = TARGET_INITIALIZER;
\f
typedef enum
case SYMBOL_REF:
return tls_symbolic_operand_type (x) == 0;
+ case CONST_VECTOR:
+ {
+ enum machine_mode mode = GET_MODE (x);
+
+ if (mode == V2SFmode)
+ return ia64_extra_constraint (x, 'Y');
+
+ return (GET_MODE_CLASS (mode) == MODE_VECTOR_INT
+ && GET_MODE_SIZE (mode) <= 8);
+ }
+
default:
return false;
}
This solution attempts to prevent this situation from occurring. When
we see something like the above, we spill the inner register to memory. */
-rtx
-spill_xfmode_operand (rtx in, int force)
+static rtx
+spill_xfmode_rfmode_operand (rtx in, int force, enum machine_mode mode)
{
if (GET_CODE (in) == SUBREG
&& GET_MODE (SUBREG_REG (in)) == TImode
{
rtx memt = assign_stack_temp (TImode, 16, 0);
emit_move_insn (memt, SUBREG_REG (in));
- return adjust_address (memt, XFmode, 0);
+ return adjust_address (memt, mode, 0);
}
else if (force && GET_CODE (in) == REG)
{
- rtx memx = assign_stack_temp (XFmode, 16, 0);
+ rtx memx = assign_stack_temp (mode, 16, 0);
emit_move_insn (memx, in);
return memx;
}
return in;
}
+/* Expand the movxf or movrf pattern (MODE says which) with the given
+   OPERANDS, returning true if the pattern should then invoke
+   DONE.  */
+
+bool
+ia64_expand_movxf_movrf (enum machine_mode mode, rtx operands[])
+{
+  rtx op0 = operands[0];
+
+  /* Look through a SUBREG so the GR-register tests below see the
+     underlying register.  */
+  if (GET_CODE (op0) == SUBREG)
+    op0 = SUBREG_REG (op0);
+
+  /* We must support XFmode loads into general registers for stdarg/vararg,
+     unprototyped calls, and a rare case where a long double is passed as
+     an argument after a float HFA fills the FP registers.  We split them into
+     DImode loads for convenience.  We also need to support XFmode stores
+     for the last case.  This case does not happen for stdarg/vararg routines,
+     because we do a block store to memory of unnamed arguments.  */
+
+  if (GET_CODE (op0) == REG && GR_REGNO_P (REGNO (op0)))
+    {
+      rtx out[2];
+
+      /* We're hoping to transform everything that deals with XFmode
+         quantities and GR registers early in the compiler.  */
+      gcc_assert (!no_new_pseudos);
+
+      /* Struct to register can just use TImode instead.  */
+      if ((GET_CODE (operands[1]) == SUBREG
+           && GET_MODE (SUBREG_REG (operands[1])) == TImode)
+          || (GET_CODE (operands[1]) == REG
+              && GR_REGNO_P (REGNO (operands[1]))))
+        {
+          rtx op1 = operands[1];
+
+          if (GET_CODE (op1) == SUBREG)
+            op1 = SUBREG_REG (op1);
+          else
+            op1 = gen_rtx_REG (TImode, REGNO (op1));
+
+          emit_move_insn (gen_rtx_REG (TImode, REGNO (op0)), op1);
+          return true;
+        }
+
+      /* Constants are split into two word-sized pieces, one per GR.  */
+      if (GET_CODE (operands[1]) == CONST_DOUBLE)
+        {
+          emit_move_insn (gen_rtx_REG (DImode, REGNO (op0)),
+                          operand_subword (operands[1], 0, 0, mode));
+          emit_move_insn (gen_rtx_REG (DImode, REGNO (op0) + 1),
+                          operand_subword (operands[1], 1, 0, mode));
+          return true;
+        }
+
+      /* If the quantity is in a register not known to be GR, spill it.  */
+      if (register_operand (operands[1], mode))
+        operands[1] = spill_xfmode_rfmode_operand (operands[1], 1, mode);
+
+      gcc_assert (GET_CODE (operands[1]) == MEM);
+
+      /* Load the two memory words into the GR pair, honoring the
+         target word order.  */
+      out[WORDS_BIG_ENDIAN] = gen_rtx_REG (DImode, REGNO (op0));
+      out[!WORDS_BIG_ENDIAN] = gen_rtx_REG (DImode, REGNO (op0) + 1);
+
+      emit_move_insn (out[0], adjust_address (operands[1], DImode, 0));
+      emit_move_insn (out[1], adjust_address (operands[1], DImode, 8));
+      return true;
+    }
+
+  if (GET_CODE (operands[1]) == REG && GR_REGNO_P (REGNO (operands[1])))
+    {
+      /* We're hoping to transform everything that deals with XFmode
+         quantities and GR registers early in the compiler.  */
+      gcc_assert (!no_new_pseudos);
+
+      /* Op0 can't be a GR_REG here, as that case is handled above.
+         If op0 is a register, then we spill op1, so that we now have a
+         MEM operand.  This requires creating an XFmode subreg of a TImode reg
+         to force the spill.  */
+      if (register_operand (operands[0], mode))
+        {
+          rtx op1 = gen_rtx_REG (TImode, REGNO (operands[1]));
+          op1 = gen_rtx_SUBREG (mode, op1, 0);
+          operands[1] = spill_xfmode_rfmode_operand (op1, 0, mode);
+        }
+
+      else
+        {
+          rtx in[2];
+
+          gcc_assert (GET_CODE (operands[0]) == MEM);
+
+          /* Store the GR pair to memory, honoring the target word
+             order.  */
+          in[WORDS_BIG_ENDIAN] = gen_rtx_REG (DImode, REGNO (operands[1]));
+          in[!WORDS_BIG_ENDIAN] = gen_rtx_REG (DImode, REGNO (operands[1]) + 1);
+
+          emit_move_insn (adjust_address (operands[0], DImode, 0), in[0]);
+          emit_move_insn (adjust_address (operands[0], DImode, 8), in[1]);
+          return true;
+        }
+    }
+
+  if (!reload_in_progress && !reload_completed)
+    {
+      operands[1] = spill_xfmode_rfmode_operand (operands[1], 0, mode);
+
+      /* A TImode destination is reached via memory: either reuse the
+         source MEM directly or bounce the value through a stack
+         temporary viewed in both modes.  */
+      if (GET_MODE (op0) == TImode && GET_CODE (op0) == REG)
+        {
+          rtx memt, memx, in = operands[1];
+          if (CONSTANT_P (in))
+            in = validize_mem (force_const_mem (mode, in));
+          if (GET_CODE (in) == MEM)
+            memt = adjust_address (in, TImode, 0);
+          else
+            {
+              memt = assign_stack_temp (TImode, 16, 0);
+              memx = adjust_address (memt, mode, 0);
+              emit_move_insn (memx, in);
+            }
+          emit_move_insn (op0, memt);
+          return true;
+        }
+
+      if (!ia64_move_ok (operands[0], operands[1]))
+        operands[1] = force_reg (mode, operands[1]);
+    }
+
+  /* Let the movxf/movrf pattern emit the move itself.  */
+  return false;
+}
+
/* Emit comparison instruction if necessary, returning the expression
that holds the compare result in the proper mode. */
/* With the results of the comparisons, emit conditional moves. */
dl = gen_reg_rtx (SImode);
- x = gen_rtx_IF_THEN_ELSE (SImode, bl, op1l, op2l);
+ x = gen_rtx_NE (VOIDmode, bl, const0_rtx);
+ x = gen_rtx_IF_THEN_ELSE (SImode, x, op1l, op2l);
emit_insn (gen_rtx_SET (VOIDmode, dl, x));
dh = gen_reg_rtx (SImode);
- x = gen_rtx_IF_THEN_ELSE (SImode, bh, op1h, op2h);
+ x = gen_rtx_NE (VOIDmode, bh, const0_rtx);
+ x = gen_rtx_IF_THEN_ELSE (SImode, x, op1h, op2h);
emit_insn (gen_rtx_SET (VOIDmode, dh, x));
/* Merge the two partial results back into a vector. */
ia64_expand_vecint_minmax (enum rtx_code code, enum machine_mode mode,
rtx operands[])
{
- rtx xops[5];
+ rtx xops[6];
/* These four combinations are supported directly. */
if (mode == V8QImode && (code == UMIN || code == UMAX))
break;
i = regno - OUT_REG (0) + 1;
+#ifndef PROFILE_HOOK
/* When -p profiling, we need one output register for the mcount argument.
Likewise for -a profiling for the bb_init_func argument. For -ax
profiling, we need two output registers for the two bb_init_trace_func
arguments. */
if (current_function_profile)
i = MAX (i, 1);
+#endif
current_frame_info.n_output_regs = i;
/* ??? No rotating register support yet. */
gen_rtx_EXPR_LIST (VOIDmode,
gen_rtx_REG (DImode, basereg + cum->words + offset),
const0_rtx)));
+ /* Similarly, an anonymous XFmode or RFmode value must be split
+ into two registers and padded appropriately. */
+ else if (BYTES_BIG_ENDIAN && (mode == XFmode || mode == RFmode))
+ {
+ rtx loc[2];
+ loc[0] = gen_rtx_EXPR_LIST (VOIDmode,
+ gen_rtx_REG (DImode, basereg + cum->words + offset),
+ const0_rtx);
+ loc[1] = gen_rtx_EXPR_LIST (VOIDmode,
+ gen_rtx_REG (DImode, basereg + cum->words + offset + 1),
+ GEN_INT (UNITS_PER_WORD));
+ return gen_rtx_PARALLEL (mode, gen_rtvec_v (2, loc));
+ }
else
return gen_rtx_REG (mode, basereg + cum->words + offset);
}
the middle-end will give it XFmode anyway, and XFmode values
don't normally fit in integer registers. So we need to smuggle
the value inside a parallel. */
- else if (mode == XFmode || mode == XCmode)
+ else if (mode == XFmode || mode == XCmode || mode == RFmode)
need_parallel = true;
if (need_parallel)
so that we get secondary memory reloads. Between FR_REGS,
we have to make this at least as expensive as MEMORY_MOVE_COST
to avoid spectacularly poor register class preferencing. */
- if (mode == XFmode)
+ if (mode == XFmode || mode == RFmode)
{
if (to != GR_REGS || from != GR_REGS)
return MEMORY_MOVE_COST (mode, to, 0);
return;
case IF_THEN_ELSE:
- if (SET_DEST (x) == pc_rtx)
- /* X is a conditional branch. */
- return;
- else
- {
- /* X is a conditional move. */
- rtx cond = XEXP (src, 0);
- cond = XEXP (cond, 0);
-
- /* We always split conditional moves into COND_EXEC patterns, so the
- only pattern that can reach here is doloop_end_internal. We don't
- need to do anything special for this pattern. */
- gcc_assert (GET_CODE (cond) == REG && REGNO (cond) == AR_LC_REGNUM);
- return;
- }
+ /* There are three cases here:
+ (1) The destination is (pc), in which case this is a branch,
+ nothing here applies.
+ (2) The destination is ar.lc, in which case this is a
+ doloop_end_internal,
+ (3) The destination is an fp register, in which case this is
+ an fselect instruction.
+ In all cases, nothing we do in this function applies. */
+ return;
default:
if (COMPARISON_P (src)
- && GET_MODE_CLASS (GET_MODE (XEXP (src, 0))) == MODE_FLOAT)
+ && SCALAR_FLOAT_MODE_P (GET_MODE (XEXP (src, 0))))
/* Set pflags->is_fp to 1 so that we know we're dealing
with a floating point comparison when processing the
destination of the SET. */
/* Skip p0, which may be thought to be live due to (reg:DI p0)
grabbing the entire block of predicate registers. */
for (r = PR_REG (2); r < PR_REG (64); r += 2)
- if (REGNO_REG_SET_P (bb->global_live_at_start, r))
+ if (REGNO_REG_SET_P (bb->il.rtl->global_live_at_start, r))
{
rtx p = gen_rtx_REG (BImode, r);
rtx n = emit_insn_after (gen_pred_rel_mutex (p), head);
/* The __fpreg type. */
fpreg_type = make_node (REAL_TYPE);
- /* ??? The back end should know to load/save __fpreg variables using
- the ldf.fill and stf.spill instructions. */
- TYPE_PRECISION (fpreg_type) = 80;
+ TYPE_PRECISION (fpreg_type) = 82;
layout_type (fpreg_type);
(*lang_hooks.types.register_builtin_type) (fpreg_type, "__fpreg");
case SFmode:
case DFmode:
case XFmode:
+ case RFmode:
return true;
case TFmode:
fputs ("\tbr.call.sptk.many b0 = _mcount\n\t;;\n", file);
}
+/* Cached rtx for the "_mcount" profiling routine; GTY-marked so the
+   garbage collector keeps it alive across functions.  */
+static GTY(()) rtx mcount_func_rtx;
+
+/* Return the rtx for "_mcount", creating and caching it on first
+   use.  */
+static rtx
+gen_mcount_func_rtx (void)
+{
+  if (!mcount_func_rtx)
+    mcount_func_rtx = init_one_libfunc ("_mcount");
+  return mcount_func_rtx;
+}
+
+/* Emit the call to _mcount for -p profiling.  LABELNO identifies the
+   per-call-site counter label.  The three arguments passed are the
+   caller's return address (b0), the current instruction pointer, and
+   the counter label (const0_rtx when NO_PROFILE_COUNTERS).  */
+
+void
+ia64_profile_hook (int labelno)
+{
+  rtx label, ip;
+
+  if (NO_PROFILE_COUNTERS)
+    label = const0_rtx;
+  else
+    {
+      char buf[30];
+      const char *label_name;
+      /* Build the internal "LP<labelno>" label and wrap it in a
+         SYMBOL_REF marked SYMBOL_FLAG_LOCAL.  */
+      ASM_GENERATE_INTERNAL_LABEL (buf, "LP", labelno);
+      label_name = (*targetm.strip_name_encoding) (ggc_strdup (buf));
+      label = gen_rtx_SYMBOL_REF (Pmode, label_name);
+      SYMBOL_REF_FLAGS (label) = SYMBOL_FLAG_LOCAL;
+    }
+  /* Capture the current IP into a pseudo to pass to _mcount.  */
+  ip = gen_reg_rtx (Pmode);
+  emit_insn (gen_ip_value (ip));
+  emit_library_call (gen_mcount_func_rtx (), LCT_NORMAL,
+                     VOIDmode, 3,
+                     gen_rtx_REG (Pmode, BR_REG (0)), Pmode,
+                     ip, Pmode,
+                     label, Pmode);
+}
+
+/* Return the mangling of TYPE if it is an extended fundamental type,
+   NULL otherwise (letting the default mangling apply).  */
+
+static const char *
+ia64_mangle_fundamental_type (tree type)
+{
+  /* On HP-UX, "long double" is mangled as "e" so __float128 is
+     mangled as "g".  */
+  if (!TARGET_HPUX && TYPE_MODE (type) == TFmode)
+    return "g";
+  /* On HP-UX, "e" is not available as a mangling of __float80 so use
+     an extended mangling.  Elsewhere, "e" is available since long
+     double is 80 bits.  */
+  if (TYPE_MODE (type) == XFmode)
+    return TARGET_HPUX ? "u9__float80" : "e";
+  /* __fpreg always gets a vendor-extended mangling.  */
+  if (TYPE_MODE (type) == RFmode)
+    return "u7__fpreg";
+  return NULL;
+}
+
+/* Return the diagnostic message string if conversion from FROMTYPE to
+   TOTYPE is not allowed, NULL otherwise.  */
+
+static const char *
+ia64_invalid_conversion (tree fromtype, tree totype)
+{
+  /* Reject nontrivial conversion to or from __fpreg.  Conversion to
+     VOIDmode (e.g. discarding the value) is permitted.  */
+  if (TYPE_MODE (fromtype) == RFmode
+      && TYPE_MODE (totype) != RFmode
+      && TYPE_MODE (totype) != VOIDmode)
+    return N_("invalid conversion from %<__fpreg%>");
+  if (TYPE_MODE (totype) == RFmode
+      && TYPE_MODE (fromtype) != RFmode)
+    return N_("invalid conversion to %<__fpreg%>");
+  return NULL;
+}
+
+/* Return the diagnostic message string if the unary operation OP is
+   not permitted on TYPE, NULL otherwise.  */
+
+static const char *
+ia64_invalid_unary_op (int op, tree type)
+{
+  /* Reject operations on __fpreg other than unary + or &
+     (CONVERT_EXPR here corresponds to unary plus).  */
+  if (TYPE_MODE (type) == RFmode
+      && op != CONVERT_EXPR
+      && op != ADDR_EXPR)
+    return N_("invalid operation on %<__fpreg%>");
+  return NULL;
+}
+
+/* Return the diagnostic message string if the binary operation OP is
+   not permitted on TYPE1 and TYPE2, NULL otherwise.  */
+
+static const char *
+ia64_invalid_binary_op (int op ATTRIBUTE_UNUSED, tree type1, tree type2)
+{
+  /* Reject every binary operation when either operand is __fpreg.  */
+  if (TYPE_MODE (type1) == RFmode || TYPE_MODE (type2) == RFmode)
+    return N_("invalid operation on %<__fpreg%>");
+  return NULL;
+}
+
#include "gt-ia64.h"