+\f
+
/* This structure describes the data used by the backend to guide scheduling.
   When the current scheduling point is switched, this data should be saved
   and restored later, if the scheduler returns to this point.  */
struct _ia64_sched_context
{
  /* DFA state snapshot; a buffer of dfa_state_size bytes allocated by
     ia64_init_sched_context.  */
  state_t prev_cycle_state;
  /* Saved value of the global last_scheduled_insn.  */
  rtx last_scheduled_insn;
  /* Saved copies of the global rws_sum / rws_insn register-write-state
     tables (one entry per register).  */
  struct reg_write_state rws_sum[NUM_REGS];
  struct reg_write_state rws_insn[NUM_REGS];
  /* Saved value of the global first_instruction flag.  */
  int first_instruction;
  /* Saved value of the global pending_data_specs counter.  */
  int pending_data_specs;
  /* Saved value of the global current_cycle.  */
  int current_cycle;
  /* Saved copy of the global mem_ops_in_group array — presumably memory-op
     counts for recent instruction groups; see the global's definition.  */
  char mem_ops_in_group[4];
};
typedef struct _ia64_sched_context *ia64_sched_context_t;
+
+/* Allocates a scheduling context. */
+static void *
+ia64_alloc_sched_context (void)
+{
+ return xmalloc (sizeof (struct _ia64_sched_context));
+}
+
+/* Initializes the _SC context with clean data, if CLEAN_P, and from
+ the global context otherwise. */
+static void
+ia64_init_sched_context (void *_sc, bool clean_p)
+{
+ ia64_sched_context_t sc = (ia64_sched_context_t) _sc;
+
+ sc->prev_cycle_state = xmalloc (dfa_state_size);
+ if (clean_p)
+ {
+ state_reset (sc->prev_cycle_state);
+ sc->last_scheduled_insn = NULL_RTX;
+ memset (sc->rws_sum, 0, sizeof (rws_sum));
+ memset (sc->rws_insn, 0, sizeof (rws_insn));
+ sc->first_instruction = 1;
+ sc->pending_data_specs = 0;
+ sc->current_cycle = 0;
+ memset (sc->mem_ops_in_group, 0, sizeof (mem_ops_in_group));
+ }
+ else
+ {
+ memcpy (sc->prev_cycle_state, prev_cycle_state, dfa_state_size);
+ sc->last_scheduled_insn = last_scheduled_insn;
+ memcpy (sc->rws_sum, rws_sum, sizeof (rws_sum));
+ memcpy (sc->rws_insn, rws_insn, sizeof (rws_insn));
+ sc->first_instruction = first_instruction;
+ sc->pending_data_specs = pending_data_specs;
+ sc->current_cycle = current_cycle;
+ memcpy (sc->mem_ops_in_group, mem_ops_in_group, sizeof (mem_ops_in_group));
+ }
+}
+
/* Sets the global scheduling context to the one pointed to by _SC.
   This is the inverse of the !CLEAN_P branch of ia64_init_sched_context:
   every saved field is copied back into the corresponding global.
   Note that the destinations here are the globals, so sizing the copies
   by the globals' sizeof is correct.  */
static void
ia64_set_sched_context (void *_sc)
{
  ia64_sched_context_t sc = (ia64_sched_context_t) _sc;

  gcc_assert (sc != NULL);

  /* prev_cycle_state (the global) must already point at a live
     dfa_state_size buffer — this copies into it, not over it.  */
  memcpy (prev_cycle_state, sc->prev_cycle_state, dfa_state_size);
  last_scheduled_insn = sc->last_scheduled_insn;
  memcpy (rws_sum, sc->rws_sum, sizeof (rws_sum));
  memcpy (rws_insn, sc->rws_insn, sizeof (rws_insn));
  first_instruction = sc->first_instruction;
  pending_data_specs = sc->pending_data_specs;
  current_cycle = sc->current_cycle;
  memcpy (mem_ops_in_group, sc->mem_ops_in_group, sizeof (mem_ops_in_group));
}
+
+/* Clears the data in the _SC scheduling context. */
+static void
+ia64_clear_sched_context (void *_sc)
+{
+ ia64_sched_context_t sc = (ia64_sched_context_t) _sc;
+
+ free (sc->prev_cycle_state);
+ sc->prev_cycle_state = NULL;
+}
+
/* Frees the _SC scheduling context.
   NOTE(review): this releases only the context structure itself, not the
   prev_cycle_state buffer inside it — callers are presumably expected to
   run ia64_clear_sched_context first; confirm against the scheduler's
   hook-invocation order.  */
static void
ia64_free_sched_context (void *_sc)
{
  gcc_assert (_sc != NULL);

  free (_sc);
}
+
/* Type of an instruction-pattern generator taking two rtx operands
   (destination, source) — the shape shared by the gen_mov* expanders
   used in the tables below.  */
typedef rtx (* gen_func_t) (rtx, rtx);
+
+/* Return a function that will generate a load of mode MODE_NO
+ with speculation types TS. */
+static gen_func_t
+get_spec_load_gen_function (ds_t ts, int mode_no)
+{
+ static gen_func_t gen_ld_[] = {
+ gen_movbi,
+ gen_movqi_internal,
+ gen_movhi_internal,
+ gen_movsi_internal,
+ gen_movdi_internal,
+ gen_movsf_internal,
+ gen_movdf_internal,
+ gen_movxf_internal,
+ gen_movti_internal,
+ gen_zero_extendqidi2,
+ gen_zero_extendhidi2,
+ gen_zero_extendsidi2,
+ };
+
+ static gen_func_t gen_ld_a[] = {
+ gen_movbi_advanced,
+ gen_movqi_advanced,
+ gen_movhi_advanced,
+ gen_movsi_advanced,
+ gen_movdi_advanced,
+ gen_movsf_advanced,
+ gen_movdf_advanced,
+ gen_movxf_advanced,
+ gen_movti_advanced,
+ gen_zero_extendqidi2_advanced,
+ gen_zero_extendhidi2_advanced,
+ gen_zero_extendsidi2_advanced,
+ };
+ static gen_func_t gen_ld_s[] = {
+ gen_movbi_speculative,
+ gen_movqi_speculative,
+ gen_movhi_speculative,
+ gen_movsi_speculative,
+ gen_movdi_speculative,
+ gen_movsf_speculative,
+ gen_movdf_speculative,
+ gen_movxf_speculative,
+ gen_movti_speculative,
+ gen_zero_extendqidi2_speculative,
+ gen_zero_extendhidi2_speculative,
+ gen_zero_extendsidi2_speculative,
+ };
+ static gen_func_t gen_ld_sa[] = {
+ gen_movbi_speculative_advanced,
+ gen_movqi_speculative_advanced,
+ gen_movhi_speculative_advanced,
+ gen_movsi_speculative_advanced,
+ gen_movdi_speculative_advanced,
+ gen_movsf_speculative_advanced,
+ gen_movdf_speculative_advanced,
+ gen_movxf_speculative_advanced,
+ gen_movti_speculative_advanced,
+ gen_zero_extendqidi2_speculative_advanced,
+ gen_zero_extendhidi2_speculative_advanced,
+ gen_zero_extendsidi2_speculative_advanced,
+ };
+ static gen_func_t gen_ld_s_a[] = {
+ gen_movbi_speculative_a,
+ gen_movqi_speculative_a,
+ gen_movhi_speculative_a,
+ gen_movsi_speculative_a,
+ gen_movdi_speculative_a,
+ gen_movsf_speculative_a,
+ gen_movdf_speculative_a,
+ gen_movxf_speculative_a,
+ gen_movti_speculative_a,
+ gen_zero_extendqidi2_speculative_a,
+ gen_zero_extendhidi2_speculative_a,
+ gen_zero_extendsidi2_speculative_a,
+ };
+
+ gen_func_t *gen_ld;
+
+ if (ts & BEGIN_DATA)
+ {
+ if (ts & BEGIN_CONTROL)
+ gen_ld = gen_ld_sa;
+ else
+ gen_ld = gen_ld_a;
+ }
+ else if (ts & BEGIN_CONTROL)
+ {
+ if ((spec_info->flags & SEL_SCHED_SPEC_DONT_CHECK_CONTROL)
+ || ia64_needs_block_p (ts))
+ gen_ld = gen_ld_s;
+ else
+ gen_ld = gen_ld_s_a;
+ }
+ else if (ts == 0)
+ gen_ld = gen_ld_;
+ else
+ gcc_unreachable ();
+
+ return gen_ld[mode_no];
+}