static rtx gen_fr_spill_x (rtx, rtx, rtx);
static rtx gen_fr_restore_x (rtx, rtx, rtx);
-static enum machine_mode hfa_element_mode (tree, int);
+static enum machine_mode hfa_element_mode (tree, bool);
static void ia64_setup_incoming_varargs (CUMULATIVE_ARGS *, enum machine_mode,
tree, int *, int);
static bool ia64_pass_by_reference (CUMULATIVE_ARGS *, enum machine_mode,
tree, bool);
+static int ia64_arg_partial_bytes (CUMULATIVE_ARGS *, enum machine_mode,
+ tree, bool);
static bool ia64_function_ok_for_sibcall (tree, tree);
static bool ia64_return_in_memory (tree, tree);
static bool ia64_rtx_costs (rtx, int, int, int *);
static rtx ia64_struct_value_rtx (tree, int);
static tree ia64_gimplify_va_arg (tree, tree, tree *, tree *);
static bool ia64_scalar_mode_supported_p (enum machine_mode mode);
+static bool ia64_vector_mode_supported_p (enum machine_mode mode);
\f
/* Table of valid machine attributes. */
#define TARGET_FUNCTION_OK_FOR_SIBCALL ia64_function_ok_for_sibcall
#undef TARGET_PASS_BY_REFERENCE
#define TARGET_PASS_BY_REFERENCE ia64_pass_by_reference
+#undef TARGET_ARG_PARTIAL_BYTES
+#define TARGET_ARG_PARTIAL_BYTES ia64_arg_partial_bytes
#undef TARGET_ASM_OUTPUT_MI_THUNK
#define TARGET_ASM_OUTPUT_MI_THUNK ia64_output_mi_thunk
#undef TARGET_SCALAR_MODE_SUPPORTED_P
#define TARGET_SCALAR_MODE_SUPPORTED_P ia64_scalar_mode_supported_p
+#undef TARGET_VECTOR_MODE_SUPPORTED_P
+#define TARGET_VECTOR_MODE_SUPPORTED_P ia64_vector_mode_supported_p
+
+/* ia64 architecture manual 4.4.7: ... reads, writes, and flushes may occur
+ in an order different from the specified program order. */
+#undef TARGET_RELAXED_ORDERING
+#define TARGET_RELAXED_ORDERING true
struct gcc_target targetm = TARGET_INITIALIZER;
\f
}
static tree
-ia64_handle_model_attribute (tree *node, tree name, tree args, int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
+ia64_handle_model_attribute (tree *node, tree name, tree args,
+ int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
{
ia64_addr_area addr_area = ADDR_AREA_NORMAL;
ia64_addr_area area;
}
else
{
- warning ("invalid argument of `%s' attribute",
+ warning ("invalid argument of %qs attribute",
IDENTIFIER_POINTER (name));
*no_add_attrs = true;
}
break;
default:
- warning ("`%s' attribute ignored", IDENTIFIER_POINTER (name));
+ warning ("%qs attribute ignored", IDENTIFIER_POINTER (name));
*no_add_attrs = true;
break;
}
ia64_encode_addr_area (decl, XEXP (rtl, 0));
}
\f
+/* Implement CONST_OK_FOR_LETTER_P. */
+
+bool
+ia64_const_ok_for_letter_p (HOST_WIDE_INT value, char c)
+{
+ switch (c)
+ {
+ case 'I':
+ return CONST_OK_FOR_I (value);
+ case 'J':
+ return CONST_OK_FOR_J (value);
+ case 'K':
+ return CONST_OK_FOR_K (value);
+ case 'L':
+ return CONST_OK_FOR_L (value);
+ case 'M':
+ return CONST_OK_FOR_M (value);
+ case 'N':
+ return CONST_OK_FOR_N (value);
+ case 'O':
+ return CONST_OK_FOR_O (value);
+ case 'P':
+ return CONST_OK_FOR_P (value);
+ default:
+ return false;
+ }
+}
+
+/* Implement CONST_DOUBLE_OK_FOR_LETTER_P. */
+
+bool
+ia64_const_double_ok_for_letter_p (rtx value, char c)
+{
+ switch (c)
+ {
+ case 'G':
+ return CONST_DOUBLE_OK_FOR_G (value);
+ default:
+ return false;
+ }
+}
+
+/* Implement EXTRA_CONSTRAINT. */
+
+bool
+ia64_extra_constraint (rtx value, char c)
+{
+ switch (c)
+ {
+ case 'Q':
+ /* Non-volatile memory for FP_REG loads/stores. */
+ return memory_operand (value, VOIDmode) && !MEM_VOLATILE_P (value);
+
+ case 'R':
+ /* 1..4 for shladd arguments. */
+ return (GET_CODE (value) == CONST_INT
+ && INTVAL (value) >= 1 && INTVAL (value) <= 4);
+
+ case 'S':
+ /* Non-post-inc memory for asms and other unsavory creatures. */
+ return (GET_CODE (value) == MEM
+ && GET_RTX_CLASS (GET_CODE (XEXP (value, 0))) != RTX_AUTOINC
+ && (reload_in_progress || memory_operand (value, VOIDmode)));
+
+ case 'T':
+ /* Symbol ref to small-address-area. */
+ return (GET_CODE (value) == SYMBOL_REF
+ && SYMBOL_REF_SMALL_ADDR_P (value));
+
+ case 'U':
+ /* Vector zero. */
+ return value == CONST0_RTX (GET_MODE (value));
+
+ case 'W':
+ /* An integer vector, such that conversion to an integer yields a
+ value appropriate for an integer 'J' constraint. */
+ if (GET_CODE (value) == CONST_VECTOR
+ && GET_MODE_CLASS (GET_MODE (value)) == MODE_VECTOR_INT)
+ {
+ value = simplify_subreg (DImode, value, GET_MODE (value), 0);
+ return ia64_const_ok_for_letter_p (INTVAL (value), 'J');
+ }
+ return false;
+
+ case 'Y':
+ /* A V2SF vector containing elements that satisfy 'G'. */
+ return
+ (GET_CODE (value) == CONST_VECTOR
+ && GET_MODE (value) == V2SFmode
+ && ia64_const_double_ok_for_letter_p (XVECEXP (value, 0, 0), 'G')
+ && ia64_const_double_ok_for_letter_p (XVECEXP (value, 0, 1), 'G'));
+
+ default:
+ return false;
+ }
+}
+\f
/* Return 1 if the operands of a move are ok. */
int
return gen_rtx_fmt_ee (code, mode, cmp, const0_rtx);
}
+/* Generate an integral vector comparison.  Return true if the sense of
+ the comparison must be negated.  */
+
+static bool
+ia64_expand_vecint_compare (enum rtx_code code, enum machine_mode mode,
+ rtx dest, rtx op0, rtx op1)
+{
+ bool negate = false;
+ rtx x;
+
+ switch (code)
+ {
+ case EQ:
+ case GT:
+ break;
+
+ case NE:
+ code = EQ;
+ negate = true;
+ break;
+
+ case LE:
+ code = GT;
+ negate = true;
+ break;
+
+ case GE:
+ negate = true;
+ /* FALLTHRU */
+
+ case LT:
+ x = op0;
+ op0 = op1;
+ op1 = x;
+ code = GT;
+ break;
+
+ case GTU:
+ case GEU:
+ case LTU:
+ case LEU:
+ {
+ rtx w0h, w0l, w1h, w1l, ch, cl;
+ enum machine_mode wmode;
+ rtx (*unpack_l) (rtx, rtx, rtx);
+ rtx (*unpack_h) (rtx, rtx, rtx);
+ rtx (*pack) (rtx, rtx, rtx);
+
+ /* We don't have native unsigned comparisons, but we can generate
+ them better than generic code can. */
+
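+ /* Unsigned V2SImode comparisons are handled in
+ ia64_expand_vcondu_v2si and never reach this point.  */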
+ if (mode == V2SImode)
+ abort ();
+ else if (mode == V8QImode)
+ {
+ wmode = V4HImode;
+ pack = gen_pack2_sss;
+ unpack_l = gen_unpack1_l;
+ unpack_h = gen_unpack1_h;
+ }
+ else if (mode == V4HImode)
+ {
+ wmode = V2SImode;
+ pack = gen_pack4_sss;
+ unpack_l = gen_unpack2_l;
+ unpack_h = gen_unpack2_h;
+ }
+ else
+ abort ();
+
+ /* Unpack into wider vectors, zero extending the elements. */
+
+ w0l = gen_reg_rtx (wmode);
+ w0h = gen_reg_rtx (wmode);
+ w1l = gen_reg_rtx (wmode);
+ w1h = gen_reg_rtx (wmode);
+ emit_insn (unpack_l (gen_lowpart (mode, w0l), op0, CONST0_RTX (mode)));
+ emit_insn (unpack_h (gen_lowpart (mode, w0h), op0, CONST0_RTX (mode)));
+ emit_insn (unpack_l (gen_lowpart (mode, w1l), op1, CONST0_RTX (mode)));
+ emit_insn (unpack_h (gen_lowpart (mode, w1h), op1, CONST0_RTX (mode)));
+
+ /* Compare in the wider mode. */
+
+ cl = gen_reg_rtx (wmode);
+ ch = gen_reg_rtx (wmode);
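+ /* The elements were zero extended, so comparing them as signed in
+ the wider mode yields the unsigned result.  */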
+ code = signed_condition (code);
+ ia64_expand_vecint_compare (code, wmode, cl, w0l, w1l);
+ negate = ia64_expand_vecint_compare (code, wmode, ch, w0h, w1h);
+
+ /* Repack into a single narrower vector. */
+
+ emit_insn (pack (dest, cl, ch));
+ }
+ return negate;
+
+ default:
+ abort ();
+ }
+
+ x = gen_rtx_fmt_ee (code, mode, op0, op1);
+ emit_insn (gen_rtx_SET (VOIDmode, dest, x));
+
+ return negate;
+}
+
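+/* Expand an unsigned V2SImode vector conditional move using scalar
+ SImode comparisons and conditional moves on the two halves.  */
+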
+static void
+ia64_expand_vcondu_v2si (enum rtx_code code, rtx operands[])
+{
+ rtx dl, dh, bl, bh, op1l, op1h, op2l, op2h, op4l, op4h, op5l, op5h, x;
+
+ /* In this case, we extract the two SImode quantities and generate
+ normal comparisons for each of them. */
+
+ op1l = gen_lowpart (SImode, operands[1]);
+ op2l = gen_lowpart (SImode, operands[2]);
+ op4l = gen_lowpart (SImode, operands[4]);
+ op5l = gen_lowpart (SImode, operands[5]);
+
+ op1h = gen_reg_rtx (SImode);
+ op2h = gen_reg_rtx (SImode);
+ op4h = gen_reg_rtx (SImode);
+ op5h = gen_reg_rtx (SImode);
+
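+ /* Extract the high halves by shifting the DImode views of the
+ vector operands right by 32 bits.  */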
+ emit_insn (gen_lshrdi3 (gen_lowpart (DImode, op1h),
+ gen_lowpart (DImode, operands[1]), GEN_INT (32)));
+ emit_insn (gen_lshrdi3 (gen_lowpart (DImode, op2h),
+ gen_lowpart (DImode, operands[2]), GEN_INT (32)));
+ emit_insn (gen_lshrdi3 (gen_lowpart (DImode, op4h),
+ gen_lowpart (DImode, operands[4]), GEN_INT (32)));
+ emit_insn (gen_lshrdi3 (gen_lowpart (DImode, op5h),
+ gen_lowpart (DImode, operands[5]), GEN_INT (32)));
+
+ bl = gen_reg_rtx (BImode);
+ x = gen_rtx_fmt_ee (code, BImode, op4l, op5l);
+ emit_insn (gen_rtx_SET (VOIDmode, bl, x));
+
+ bh = gen_reg_rtx (BImode);
+ x = gen_rtx_fmt_ee (code, BImode, op4h, op5h);
+ emit_insn (gen_rtx_SET (VOIDmode, bh, x));
+
+ /* With the results of the comparisons, emit conditional moves. */
+
+ dl = gen_reg_rtx (SImode);
+ x = gen_rtx_IF_THEN_ELSE (SImode, bl, op1l, op2l);
+ emit_insn (gen_rtx_SET (VOIDmode, dl, x));
+
+ dh = gen_reg_rtx (SImode);
+ x = gen_rtx_IF_THEN_ELSE (SImode, bh, op1h, op2h);
+ emit_insn (gen_rtx_SET (VOIDmode, dh, x));
+
+ /* Merge the two partial results back into a vector. */
+
+ x = gen_rtx_VEC_CONCAT (V2SImode, dl, dh);
+ emit_insn (gen_rtx_SET (VOIDmode, operands[0], x));
+}
+
+/* Emit an integral vector conditional move. */
+
+void
+ia64_expand_vecint_cmov (rtx operands[])
+{
+ enum machine_mode mode = GET_MODE (operands[0]);
+ enum rtx_code code = GET_CODE (operands[3]);
+ bool negate;
+ rtx cmp, x, ot, of;
+
+ /* Since we don't have unsigned V2SImode comparisons, it's more efficient
+ to special-case them entirely. */
+ if (mode == V2SImode
+ && (code == GTU || code == GEU || code == LEU || code == LTU))
+ {
+ ia64_expand_vcondu_v2si (code, operands);
+ return;
+ }
+
+ cmp = gen_reg_rtx (mode);
+ negate = ia64_expand_vecint_compare (code, mode, cmp,
+ operands[4], operands[5]);
+
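+ /* If the comparison had to be negated, swap the true and false
+ result operands.  */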
+ ot = operands[1 + negate];
+ of = operands[2 - negate];
+
+ if (ot == CONST0_RTX (mode))
+ {
+ if (of == CONST0_RTX (mode))
+ {
+ emit_move_insn (operands[0], ot);
+ return;
+ }
+
+ x = gen_rtx_NOT (mode, cmp);
+ x = gen_rtx_AND (mode, x, of);
+ emit_insn (gen_rtx_SET (VOIDmode, operands[0], x));
+ }
+ else if (of == CONST0_RTX (mode))
+ {
+ x = gen_rtx_AND (mode, cmp, ot);
+ emit_insn (gen_rtx_SET (VOIDmode, operands[0], x));
+ }
+ else
+ {
+ rtx t, f;
+
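+ /* Compute (cmp & true) | (~cmp & false).  */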
+ t = gen_reg_rtx (mode);
+ x = gen_rtx_AND (mode, cmp, operands[1 + negate]);
+ emit_insn (gen_rtx_SET (VOIDmode, t, x));
+
+ f = gen_reg_rtx (mode);
+ x = gen_rtx_NOT (mode, cmp);
+ x = gen_rtx_AND (mode, x, operands[2 - negate]);
+ emit_insn (gen_rtx_SET (VOIDmode, f, x));
+
+ x = gen_rtx_IOR (mode, t, f);
+ emit_insn (gen_rtx_SET (VOIDmode, operands[0], x));
+ }
+}
+
+/* Emit an integral vector min or max operation. Return true if all done. */
+
+bool
+ia64_expand_vecint_minmax (enum rtx_code code, enum machine_mode mode,
+ rtx operands[])
+{
+ rtx xops[5];
+
+ /* These four combinations are supported directly. */
+ if (mode == V8QImode && (code == UMIN || code == UMAX))
+ return false;
+ if (mode == V4HImode && (code == SMIN || code == SMAX))
+ return false;
+
+ /* Everything else is implemented via vector comparisons.  */
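+ /* The inputs serve both as the cmov data operands (xops[1], xops[2])
+ and as the comparison operands (xops[4], xops[5]).  */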
+ xops[0] = operands[0];
+ xops[4] = xops[1] = operands[1];
+ xops[5] = xops[2] = operands[2];
+
+ switch (code)
+ {
+ case UMIN:
+ code = LTU;
+ break;
+ case UMAX:
+ code = GTU;
+ break;
+ case SMIN:
+ code = LT;
+ break;
+ case SMAX:
+ code = GT;
+ break;
+ default:
+ abort ();
+ }
+ xops[3] = gen_rtx_fmt_ee (code, VOIDmode, operands[1], operands[2]);
+
+ ia64_expand_vecint_cmov (xops);
+ return true;
+}
+
/* Emit the appropriate sequence for a call. */
void
ia64_assemble_integer (rtx x, unsigned int size, int aligned_p)
{
if (size == POINTER_SIZE / BITS_PER_UNIT
- && aligned_p
&& !(TARGET_NO_PIC || TARGET_AUTO_PIC)
&& GET_CODE (x) == SYMBOL_REF
&& SYMBOL_REF_FUNCTION_P (x))
{
- if (POINTER_SIZE == 32)
- fputs ("\tdata4\t@fptr(", asm_out_file);
- else
- fputs ("\tdata8\t@fptr(", asm_out_file);
+ static const char * const directive[2][2] = {
+ /* 64-bit pointer */ /* 32-bit pointer */
+ { "\tdata8.ua\t@fptr(", "\tdata4.ua\t@fptr("}, /* unaligned */
+ { "\tdata8\t@fptr(", "\tdata4\t@fptr("} /* aligned */
+ };
+ fputs (directive[(aligned_p != 0)][POINTER_SIZE == 32], asm_out_file);
output_addr_const (asm_out_file, x);
fputs (")\n", asm_out_file);
return true;
An aggregate is a homogeneous floating point aggregate if all
fields/elements in it have the same floating point type (e.g.,
- SFmode). 128-bit quad-precision floats are excluded. */
+ SFmode). 128-bit quad-precision floats are excluded.
+
+ Variable sized aggregates should never arrive here, since we should
+ have already decided to pass them by reference. Top-level zero-sized
+ aggregates are excluded because our parallels crash the middle-end. */
static enum machine_mode
-hfa_element_mode (tree type, int nested)
+hfa_element_mode (tree type, bool nested)
{
enum machine_mode element_mode = VOIDmode;
enum machine_mode mode;
int know_element_mode = 0;
tree t;
+ if (!nested && (!TYPE_SIZE (type) || integer_zerop (TYPE_SIZE (type))))
+ return VOIDmode;
+
switch (code)
{
case VOID_TYPE: case INTEGER_TYPE: case ENUMERAL_TYPE:
case BOOLEAN_TYPE: case CHAR_TYPE: case POINTER_TYPE:
case OFFSET_TYPE: case REFERENCE_TYPE: case METHOD_TYPE:
- case FILE_TYPE: case SET_TYPE: case LANG_TYPE:
- case FUNCTION_TYPE:
+ case FILE_TYPE: case LANG_TYPE: case FUNCTION_TYPE:
return VOIDmode;
/* Fortran complex types are supposed to be HFAs, so we need to handle
}
}
-/* Return number of words, at the beginning of the argument, that must be
+/* Return number of bytes, at the beginning of the argument, that must be
put in registers.  0 if the argument is entirely in registers or entirely
in memory. */
-int
-ia64_function_arg_partial_nregs (CUMULATIVE_ARGS *cum, enum machine_mode mode,
- tree type, int named ATTRIBUTE_UNUSED)
+static int
+ia64_arg_partial_bytes (CUMULATIVE_ARGS *cum, enum machine_mode mode,
+ tree type, bool named ATTRIBUTE_UNUSED)
{
int words = ia64_function_arg_words (type, mode);
int offset = ia64_function_arg_offset (cum, type, words);
if (words + cum->words + offset <= MAX_ARGUMENT_SLOTS)
return 0;
- return MAX_ARGUMENT_SLOTS - cum->words - offset;
+ return (MAX_ARGUMENT_SLOTS - cum->words - offset) * UNITS_PER_WORD;
}
/* Update CUM to point after this argument. This is patterned after
return gen_rtx_REG (mode, FR_ARG_FIRST);
else
{
+ bool need_parallel = false;
+
+ /* In big-endian mode, we need to manage the layout of aggregates
+ in the registers so that we get the bits properly aligned in
+ the highpart of the registers. */
if (BYTES_BIG_ENDIAN
&& (mode == BLKmode || (valtype && AGGREGATE_TYPE_P (valtype))))
+ need_parallel = true;
+
+ /* Something like struct S { long double x; char a[0]; } is not an
+ HFA structure, and therefore doesn't go in fp registers. But
+ the middle-end will give it XFmode anyway, and XFmode values
+ don't normally fit in integer registers. So we need to smuggle
+ the value inside a parallel. */
+ else if (mode == XFmode || mode == XCmode)
+ need_parallel = true;
+
+ if (need_parallel)
{
rtx loc[8];
int offset;
}
return gen_rtx_PARALLEL (mode, gen_rtvec_v (i, loc));
}
- else
- return gen_rtx_REG (mode, GR_RET_FIRST);
+
+ return gen_rtx_REG (mode, GR_RET_FIRST);
}
}
U Print an 8-bit sign extended number (K) as a 64-bit unsigned number
for Intel assembler.
r Print register name, or constant 0 as r0. HP compatibility for
- Linux kernel. */
+ Linux kernel.
+ v Print vector constant value as an 8-byte integer value. */
+
void
ia64_print_operand (FILE * file, rtx x, int code)
{
output_operand_lossage ("invalid %%r value");
return;
+ case 'v':
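+ /* Convert the vector constant into a DImode integer; it is printed
+ by the code following this switch.  */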
+ gcc_assert (GET_CODE (x) == CONST_VECTOR);
+ x = simplify_subreg (DImode, x, GET_MODE (x), 0);
+ break;
+
case '+':
{
const char *which;
return 2;
}
+/* Implement PREFERRED_RELOAD_CLASS. Place additional restrictions on CLASS
+ to use when copying X into that class. */
+
+enum reg_class
+ia64_preferred_reload_class (rtx x, enum reg_class class)
+{
+ switch (class)
+ {
+ case FR_REGS:
+ /* Don't allow volatile mem reloads into floating point registers.
+ This is defined to force reload to choose the r/m case instead
+ of the f/f case when reloading (set (reg fX) (mem/v)). */
+ if (MEM_P (x) && MEM_VOLATILE_P (x))
+ return NO_REGS;
+
+ /* Force all unrecognized constants into the constant pool. */
+ if (CONSTANT_P (x))
+ return NO_REGS;
+ break;
+
+ case AR_M_REGS:
+ case AR_I_REGS:
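+ /* Only objects (registers, memory references, and constants) can
+ be copied into the application registers.  */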
+ if (!OBJECT_P (x))
+ return NO_REGS;
+ break;
+
+ default:
+ break;
+ }
+
+ return class;
+}
+
/* This function returns the register class required for a secondary
register when copying between one of the registers in CLASS, and X,
using MODE. A return value of NO_REGS means that no secondary register
for (i = XVECLEN (x, 0) - 1; i >= 0; --i)
{
rtx pat = XVECEXP (x, 0, i);
- if (GET_CODE (pat) == SET)
+ switch (GET_CODE (pat))
{
+ case SET:
update_set_flags (pat, &new_flags, &pred, &cond);
- need_barrier |= set_src_needs_barrier (pat, new_flags, pred, cond);
+ need_barrier |= set_src_needs_barrier (pat, new_flags,
+ pred, cond);
+ break;
+
+ case USE:
+ case CALL:
+ case ASM_OPERANDS:
+ need_barrier |= rtx_needs_barrier (pat, flags, pred);
+ break;
+
+ case CLOBBER:
+ case RETURN:
+ break;
+
+ default:
+ gcc_unreachable ();
}
- else if (GET_CODE (pat) == USE
- || GET_CODE (pat) == CALL
- || GET_CODE (pat) == ASM_OPERANDS)
- need_barrier |= rtx_needs_barrier (pat, flags, pred);
- else if (GET_CODE (pat) != CLOBBER && GET_CODE (pat) != RETURN)
- abort ();
}
for (i = XVECLEN (x, 0) - 1; i >= 0; --i)
{
need_barrier = rtx_needs_barrier (XEXP (x, 0), new_flags, pred);
break;
- case CONST_INT: case CONST_DOUBLE:
+ case CONST_INT: case CONST_DOUBLE: case CONST_VECTOR:
case SYMBOL_REF: case LABEL_REF: case CONST:
break;
need_barrier = rtx_needs_barrier (XEXP (x, 0), flags, pred);
break;
+ case VEC_SELECT:
+ /* VEC_SELECT's second argument is a PARALLEL with integers that
+ describe the elements selected. On ia64, those integers are
+ always constants. Avoid walking the PARALLEL so that we don't
+ get confused with "normal" parallels and abort. */
+ need_barrier = rtx_needs_barrier (XEXP (x, 0), flags, pred);
+ break;
+
case UNSPEC:
switch (XINT (x, 1))
{
HOST_WIDE_INT bit = (offset >> 3) & 63;
need_barrier = rtx_needs_barrier (XVECEXP (x, 0, 0), flags, pred);
- new_flags.is_write = (XINT (x, 1) == 1);
+ new_flags.is_write = (XINT (x, 1) == UNSPEC_GR_SPILL);
need_barrier |= rws_access_regno (AR_UNAT_BIT_0 + bit,
new_flags, pred);
break;
break;
case UNSPEC_FR_RECIP_APPROX:
+ case UNSPEC_SHRP:
need_barrier = rtx_needs_barrier (XVECEXP (x, 0, 0), flags, pred);
need_barrier |= rtx_needs_barrier (XVECEXP (x, 0, 1), flags, pred);
break;
if (TREE_CODE (exp) == VAR_DECL && DECL_SECTION_NAME (exp))
{
const char *section = TREE_STRING_POINTER (DECL_SECTION_NAME (exp));
+
if (strcmp (section, ".sdata") == 0
- || strcmp (section, ".sbss") == 0)
+ || strncmp (section, ".sdata.", 7) == 0
+ || strncmp (section, ".gnu.linkonce.s.", 16) == 0
+ || strcmp (section, ".sbss") == 0
+ || strncmp (section, ".sbss.", 6) == 0
+ || strncmp (section, ".gnu.linkonce.sb.", 17) == 0)
return true;
}
else
followed by a new prologue. If the procedure doesn't
have a memory-stack frame, we'll issue a dummy ".restore
sp" now. */
- if (current_frame_info.total_size == 0)
+ if (current_frame_info.total_size == 0 && !frame_pointer_needed)
/* if haven't done process_epilogue() yet, do it now */
process_epilogue ();
fprintf (asm_out_file, "\t.prologue\n");
}
}
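+/* Implement TARGET_VECTOR_MODE_SUPPORTED_P.  */
+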
+static bool
+ia64_vector_mode_supported_p (enum machine_mode mode)
+{
+ switch (mode)
+ {
+ case V8QImode:
+ case V4HImode:
+ case V2SImode:
+ return true;
+
+ case V2SFmode:
+ return true;
+
+ default:
+ return false;
+ }
+}
+
#include "gt-ia64.h"