+/* Broad classification of register classes used by the secondary reload
+ support below: general purpose registers, registers that can hold
+ vector data (float/Altivec/VSX), and everything else. */
+enum reload_reg_type {
+ GPR_REGISTER_TYPE,
+ VECTOR_REGISTER_TYPE,
+ OTHER_REGISTER_TYPE
+};
+
+/* Classify register class RCLASS into one of the broad categories used by
+   the secondary reload support: general purpose, vector capable
+   (float/Altivec/VSX), or anything else.  */
+
+static enum reload_reg_type
+rs6000_reload_register_type (enum reg_class rclass)
+{
+  if (rclass == GENERAL_REGS || rclass == BASE_REGS)
+    return GPR_REGISTER_TYPE;
+
+  if (rclass == FLOAT_REGS || rclass == ALTIVEC_REGS || rclass == VSX_REGS)
+    return VECTOR_REGISTER_TYPE;
+
+  return OTHER_REGISTER_TYPE;
+}
+
+/* Inform reload about cases where moving X with a mode MODE to a register in
+ RCLASS requires an extra scratch or immediate register. Return the class
+ needed for the immediate register.
+
+ For VSX and Altivec, we may need a register to convert sp+offset into
+ reg+reg addressing. */
+
+static enum reg_class
+rs6000_secondary_reload (bool in_p,
+ rtx x,
+ enum reg_class rclass,
+ enum machine_mode mode,
+ secondary_reload_info *sri)
+{
+ /* ALL_REGS acts as a "not decided yet" sentinel; the gcc_assert near the
+ bottom verifies that every path has replaced it. */
+ enum reg_class ret = ALL_REGS;
+ enum insn_code icode;
+ bool default_p = false;
+
+ sri->icode = CODE_FOR_nothing;
+
+ /* Convert vector loads and stores into gprs to use an additional base
+ register. The second index selects between the two reload patterns
+ for this mode based on the reload direction. */
+ icode = rs6000_vector_reload[mode][in_p != false];
+ if (icode != CODE_FOR_nothing)
+ {
+ ret = NO_REGS;
+ sri->icode = CODE_FOR_nothing;
+ sri->extra_cost = 0;
+
+ if (GET_CODE (x) == MEM)
+ {
+ rtx addr = XEXP (x, 0);
+
+ /* Loads to and stores from gprs can do reg+offset, and wouldn't need
+ an extra register in that case, but it would need an extra
+ register if the addressing is reg+reg or (reg+reg)&(-16). */
+ if (rclass == GENERAL_REGS || rclass == BASE_REGS)
+ {
+ if (!legitimate_indirect_address_p (addr, false)
+ && !rs6000_legitimate_offset_address_p (TImode, addr, false))
+ {
+ sri->icode = icode;
+ /* account for splitting the loads, and converting the
+ address from reg+reg to reg. */
+ sri->extra_cost = (((TARGET_64BIT) ? 3 : 5)
+ + ((GET_CODE (addr) == AND) ? 1 : 0));
+ }
+ }
+ /* Loads to and stores from vector registers can only do reg+reg
+ addressing. Altivec registers can also do (reg+reg)&(-16). */
+ else if (rclass == VSX_REGS || rclass == ALTIVEC_REGS
+ || rclass == FLOAT_REGS || rclass == NO_REGS)
+ {
+ /* (reg+reg)&(-16) on a non-Altivec-memory mode needs the scratch
+ to materialize the masked address. */
+ if (!VECTOR_MEM_ALTIVEC_P (mode)
+ && GET_CODE (addr) == AND
+ && GET_CODE (XEXP (addr, 1)) == CONST_INT
+ && INTVAL (XEXP (addr, 1)) == -16
+ && (legitimate_indirect_address_p (XEXP (addr, 0), false)
+ || legitimate_indexed_address_p (XEXP (addr, 0), false)))
+ {
+ sri->icode = icode;
+ sri->extra_cost = ((GET_CODE (XEXP (addr, 0)) == PLUS)
+ ? 2 : 1);
+ }
+ else if (!legitimate_indirect_address_p (addr, false)
+ && (rclass == NO_REGS
+ || !legitimate_indexed_address_p (addr, false)))
+ {
+ sri->icode = icode;
+ sri->extra_cost = 1;
+ }
+ else
+ icode = CODE_FOR_nothing;
+ }
+ /* Any other loads, including to pseudo registers which haven't been
+ assigned to a register yet, default to require a scratch
+ register. */
+ else
+ {
+ sri->icode = icode;
+ sri->extra_cost = 2;
+ }
+ }
+ /* Direct register-to-register moves. */
+ else if (REG_P (x))
+ {
+ int regno = true_regnum (x);
+
+ icode = CODE_FOR_nothing;
+ if (regno < 0 || regno >= FIRST_PSEUDO_REGISTER)
+ default_p = true;
+ else
+ {
+ enum reg_class xclass = REGNO_REG_CLASS (regno);
+ enum reload_reg_type rtype1 = rs6000_reload_register_type (rclass);
+ enum reload_reg_type rtype2 = rs6000_reload_register_type (xclass);
+
+ /* If memory is needed, use default_secondary_reload to create the
+ stack slot. */
+ if (rtype1 != rtype2 || rtype1 == OTHER_REGISTER_TYPE)
+ default_p = true;
+ else
+ ret = NO_REGS;
+ }
+ }
+ else
+ default_p = true;
+ }
+ else
+ default_p = true;
+
+ if (default_p)
+ ret = default_secondary_reload (in_p, x, rclass, mode, sri);
+
+ /* Every path above must have either chosen a class or deferred to the
+ default handler. */
+ gcc_assert (ret != ALL_REGS);
+
+ if (TARGET_DEBUG_ADDR)
+ {
+ fprintf (stderr,
+ "\nrs6000_secondary_reload, return %s, in_p = %s, rclass = %s, "
+ "mode = %s",
+ reg_class_names[ret],
+ in_p ? "true" : "false",
+ reg_class_names[rclass],
+ GET_MODE_NAME (mode));
+
+ if (default_p)
+ fprintf (stderr, ", default secondary reload");
+
+ if (sri->icode != CODE_FOR_nothing)
+ fprintf (stderr, ", reload func = %s, extra cost = %d\n",
+ insn_data[sri->icode].name, sri->extra_cost);
+ else
+ fprintf (stderr, "\n");
+
+ debug_rtx (x);
+ }
+
+ return ret;
+}
+
+/* Fixup reload addresses for Altivec or VSX loads/stores to change SP+offset
+ to SP+reg addressing.
+
+ REG is the hard register being loaded or stored, MEM the memory operand,
+ SCRATCH a scratch register allocated by reload, and STORE_P says whether
+ this is a store (true) or a load (false). Emits the insns that rebuild
+ the address into SCRATCH (or a PRE_MODIFY base register) and then emits
+ the actual move. */
+
+void
+rs6000_secondary_reload_inner (rtx reg, rtx mem, rtx scratch, bool store_p)
+{
+ int regno = true_regnum (reg);
+ enum machine_mode mode = GET_MODE (reg);
+ enum reg_class rclass;
+ rtx addr;
+ rtx and_op2 = NULL_RTX;
+ rtx addr_op1;
+ rtx addr_op2;
+ /* Register that receives the rebuilt address: normally SCRATCH, but the
+ base register of a PRE_MODIFY when one must be preserved. */
+ rtx scratch_or_premodify = scratch;
+ rtx and_rtx;
+ rtx cc_clobber;
+
+ if (TARGET_DEBUG_ADDR)
+ {
+ fprintf (stderr, "\nrs6000_secondary_reload_inner, type = %s\n",
+ store_p ? "store" : "load");
+ fprintf (stderr, "reg:\n");
+ debug_rtx (reg);
+ fprintf (stderr, "mem:\n");
+ debug_rtx (mem);
+ fprintf (stderr, "scratch:\n");
+ debug_rtx (scratch);
+ }
+
+ /* Reload should only hand us hard registers and a real MEM. */
+ gcc_assert (regno >= 0 && regno < FIRST_PSEUDO_REGISTER);
+ gcc_assert (GET_CODE (mem) == MEM);
+ rclass = REGNO_REG_CLASS (regno);
+ addr = XEXP (mem, 0);
+
+ switch (rclass)
+ {
+ /* GPRs can handle reg + small constant, all other addresses need to use
+ the scratch register. */
+ case GENERAL_REGS:
+ case BASE_REGS:
+ /* Strip a wrapping AND (address masking); the mask is saved in
+ and_op2 and re-applied explicitly at the end of this function. */
+ if (GET_CODE (addr) == AND)
+ {
+ and_op2 = XEXP (addr, 1);
+ addr = XEXP (addr, 0);
+ }
+
+ /* For PRE_MODIFY, rebuild the address into its base register rather
+ than into SCRATCH. */
+ if (GET_CODE (addr) == PRE_MODIFY)
+ {
+ scratch_or_premodify = XEXP (addr, 0);
+ gcc_assert (REG_P (scratch_or_premodify));
+ gcc_assert (GET_CODE (XEXP (addr, 1)) == PLUS);
+ addr = XEXP (addr, 1);
+ }
+
+ if (GET_CODE (addr) == PLUS
+ && (!rs6000_legitimate_offset_address_p (TImode, addr, false)
+ || and_op2 != NULL_RTX))
+ {
+ addr_op1 = XEXP (addr, 0);
+ addr_op2 = XEXP (addr, 1);
+ gcc_assert (legitimate_indirect_address_p (addr_op1, false));
+
+ /* Force a second operand that is neither a register nor a
+ 16-bit signed immediate into the scratch register first. */
+ if (!REG_P (addr_op2)
+ && (GET_CODE (addr_op2) != CONST_INT
+ || !satisfies_constraint_I (addr_op2)))
+ {
+ if (TARGET_DEBUG_ADDR)
+ {
+ fprintf (stderr,
+ "\nMove plus addr to register %s, mode = %s: ",
+ rs6000_reg_names[REGNO (scratch)],
+ GET_MODE_NAME (mode));
+ debug_rtx (addr_op2);
+ }
+ rs6000_emit_move (scratch, addr_op2, Pmode);
+ addr_op2 = scratch;
+ }
+
+ emit_insn (gen_rtx_SET (VOIDmode,
+ scratch_or_premodify,
+ gen_rtx_PLUS (Pmode,
+ addr_op1,
+ addr_op2)));
+
+ addr = scratch_or_premodify;
+ scratch_or_premodify = scratch;
+ }
+ else if (!legitimate_indirect_address_p (addr, false)
+ && !rs6000_legitimate_offset_address_p (TImode, addr, false))
+ {
+ if (TARGET_DEBUG_ADDR)
+ {
+ fprintf (stderr, "\nMove addr to register %s, mode = %s: ",
+ rs6000_reg_names[REGNO (scratch_or_premodify)],
+ GET_MODE_NAME (mode));
+ debug_rtx (addr);
+ }
+ rs6000_emit_move (scratch_or_premodify, addr, Pmode);
+ addr = scratch_or_premodify;
+ scratch_or_premodify = scratch;
+ }
+ break;
+
+ /* Float/Altivec registers can only handle reg+reg addressing. Move
+ other addresses into a scratch register. */
+ case FLOAT_REGS:
+ case VSX_REGS:
+ case ALTIVEC_REGS:
+
+ /* With float regs, we need to handle the AND ourselves, since we can't
+ use the Altivec instruction with an implicit AND -16. Allow scalar
+ loads to float registers to use reg+offset even if VSX. */
+ if (GET_CODE (addr) == AND
+ && (rclass != ALTIVEC_REGS || GET_MODE_SIZE (mode) != 16
+ || GET_CODE (XEXP (addr, 1)) != CONST_INT
+ || INTVAL (XEXP (addr, 1)) != -16
+ || !VECTOR_MEM_ALTIVEC_P (mode)))
+ {
+ and_op2 = XEXP (addr, 1);
+ addr = XEXP (addr, 0);
+ }
+
+ /* If we aren't using a VSX load, save the PRE_MODIFY register and use it
+ as the address later. */
+ if (GET_CODE (addr) == PRE_MODIFY
+ && (!VECTOR_MEM_VSX_P (mode)
+ || and_op2 != NULL_RTX
+ || !legitimate_indexed_address_p (XEXP (addr, 1), false)))
+ {
+ scratch_or_premodify = XEXP (addr, 0);
+ gcc_assert (legitimate_indirect_address_p (scratch_or_premodify,
+ false));
+ gcc_assert (GET_CODE (XEXP (addr, 1)) == PLUS);
+ addr = XEXP (addr, 1);
+ }
+
+ /* Forms the hardware can already handle need no fixup. */
+ if (legitimate_indirect_address_p (addr, false) /* reg */
+ || legitimate_indexed_address_p (addr, false) /* reg+reg */
+ || GET_CODE (addr) == PRE_MODIFY /* VSX pre-modify */
+ || (GET_CODE (addr) == AND /* Altivec memory */
+ && GET_CODE (XEXP (addr, 1)) == CONST_INT
+ && INTVAL (XEXP (addr, 1)) == -16
+ && VECTOR_MEM_ALTIVEC_P (mode))
+ || (rclass == FLOAT_REGS /* legacy float mem */
+ && GET_MODE_SIZE (mode) == 8
+ && and_op2 == NULL_RTX
+ && scratch_or_premodify == scratch
+ && rs6000_legitimate_offset_address_p (mode, addr, false)))
+ ;
+
+ else if (GET_CODE (addr) == PLUS)
+ {
+ addr_op1 = XEXP (addr, 0);
+ addr_op2 = XEXP (addr, 1);
+ gcc_assert (REG_P (addr_op1));
+
+ if (TARGET_DEBUG_ADDR)
+ {
+ fprintf (stderr, "\nMove plus addr to register %s, mode = %s: ",
+ rs6000_reg_names[REGNO (scratch)], GET_MODE_NAME (mode));
+ debug_rtx (addr_op2);
+ }
+ rs6000_emit_move (scratch, addr_op2, Pmode);
+ emit_insn (gen_rtx_SET (VOIDmode,
+ scratch_or_premodify,
+ gen_rtx_PLUS (Pmode,
+ addr_op1,
+ scratch)));
+ addr = scratch_or_premodify;
+ scratch_or_premodify = scratch;
+ }
+
+ else if (GET_CODE (addr) == SYMBOL_REF || GET_CODE (addr) == CONST
+ || GET_CODE (addr) == CONST_INT || REG_P (addr))
+ {
+ if (TARGET_DEBUG_ADDR)
+ {
+ fprintf (stderr, "\nMove addr to register %s, mode = %s: ",
+ rs6000_reg_names[REGNO (scratch_or_premodify)],
+ GET_MODE_NAME (mode));
+ debug_rtx (addr);
+ }
+
+ rs6000_emit_move (scratch_or_premodify, addr, Pmode);
+ addr = scratch_or_premodify;
+ scratch_or_premodify = scratch;
+ }
+
+ else
+ gcc_unreachable ();
+
+ break;
+
+ default:
+ gcc_unreachable ();
+ }
+
+ /* If the original address involved a pre-modify that we couldn't use the VSX
+ memory instruction with update, and we haven't taken care of already,
+ store the address in the pre-modify register and use that as the
+ address. */
+ if (scratch_or_premodify != scratch && scratch_or_premodify != addr)
+ {
+ emit_insn (gen_rtx_SET (VOIDmode, scratch_or_premodify, addr));
+ addr = scratch_or_premodify;
+ }
+
+ /* If the original address involved an AND -16 and we couldn't use an ALTIVEC
+ memory instruction, recreate the AND now, including the clobber which is
+ generated by the general ANDSI3/ANDDI3 patterns for the
+ andi. instruction. */
+ if (and_op2 != NULL_RTX)
+ {
+ if (! legitimate_indirect_address_p (addr, false))
+ {
+ emit_insn (gen_rtx_SET (VOIDmode, scratch, addr));
+ addr = scratch;
+ }
+
+ if (TARGET_DEBUG_ADDR)
+ {
+ fprintf (stderr, "\nAnd addr to register %s, mode = %s: ",
+ rs6000_reg_names[REGNO (scratch)], GET_MODE_NAME (mode));
+ debug_rtx (and_op2);
+ }
+
+ and_rtx = gen_rtx_SET (VOIDmode,
+ scratch,
+ gen_rtx_AND (Pmode,
+ addr,
+ and_op2));
+
+ cc_clobber = gen_rtx_CLOBBER (CCmode, gen_rtx_SCRATCH (CCmode));
+ emit_insn (gen_rtx_PARALLEL (VOIDmode,
+ gen_rtvec (2, and_rtx, cc_clobber)));
+ addr = scratch;
+ }
+
+ /* Adjust the address if it changed. */
+ if (addr != XEXP (mem, 0))
+ {
+ mem = change_address (mem, mode, addr);
+ if (TARGET_DEBUG_ADDR)
+ fprintf (stderr, "\nrs6000_secondary_reload_inner, mem adjusted.\n");
+ }
+
+ /* Now create the move. */
+ if (store_p)
+ emit_insn (gen_rtx_SET (VOIDmode, mem, reg));
+ else
+ emit_insn (gen_rtx_SET (VOIDmode, reg, mem));
+
+ return;
+}
+
+/* Target hook to return the cover classes for the Integrated Register
+   Allocator.  Cover classes are a set of non-intersecting register classes
+   covering all hard registers used for register allocation purposes; the
+   returned array is terminated by LIM_REG_CLASSES.  Any move between two
+   registers of a cover class should be cheaper than a load or store of
+   those registers.
+
+   Two tables are needed because under VSX the Altivec and floating point
+   registers are subsets of the VSX register set, while on pre-VSX machines
+   they are distinct register sets.  */
+
+static const enum reg_class *
+rs6000_ira_cover_classes (void)
+{
+  static const enum reg_class cover_pre_vsx[] = IRA_COVER_CLASSES_PRE_VSX;
+  static const enum reg_class cover_vsx[] = IRA_COVER_CLASSES_VSX;
+
+  if (TARGET_VSX)
+    return cover_vsx;
+
+  return cover_pre_vsx;
+}